git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
crimson/osd: rework handling of MOSDAlive.
authorRadoslaw Zarzynski <rzarzyns@redhat.com>
Tue, 21 Apr 2020 18:38:31 +0000 (20:38 +0200)
committerRadoslaw Zarzynski <rzarzyns@redhat.com>
Mon, 13 Jul 2020 14:23:54 +0000 (16:23 +0200)
1. Migrate `MOSDAlive` crafting from `OSD` to `ShardServices`.
2. `send_alive()` takes the `want` epoch from callers
   instead of assuming it matches current OSDMap.
   Callers pass `pg_history_t::same_interval_since`.
   The passed epoch is handled similarly to `queue_want_up_thru()`
   in the classical OSD implementation.
   The message is sent at most once per wanted epoch, and only when the
   desired epoch is greater than the one returned by `OSDMap::get_up_thru()`.
3. Send the message also when completing the `PeeringEvent`.

Signed-off-by: Radoslaw Zarzynski <rzarzyns@redhat.com>
src/crimson/osd/osd.cc
src/crimson/osd/osd.h
src/crimson/osd/osd_operations/peering_event.cc
src/crimson/osd/osd_operations/pg_advance_map.cc
src/crimson/osd/pg.h
src/crimson/osd/shard_services.cc
src/crimson/osd/shard_services.h

index 3d60c2a8c61248f59fce23593d8ebf78144c28dd..5725ad7b46d9d3c58e142b75740a27e6b47fa5f3 100644 (file)
@@ -16,7 +16,6 @@
 #include "include/util.h"
 
 #include "messages/MCommand.h"
-#include "messages/MOSDAlive.h"
 #include "messages/MOSDBeacon.h"
 #include "messages/MOSDBoot.h"
 #include "messages/MOSDMap.h"
@@ -83,7 +82,7 @@ OSD::OSD(int id, uint32_t nonce,
       local_conf().get_val<std::string>("osd_objectstore"),
       local_conf().get_val<std::string>("osd_data"),
       local_conf().get_config_values())},
-    shard_services{*this, *cluster_msgr, *public_msgr, *monc, *mgrc, *store},
+    shard_services{*this, whoami, *cluster_msgr, *public_msgr, *monc, *mgrc, *store},
     heartbeat{new Heartbeat{whoami, shard_services, *monc, hb_front_msgr, hb_back_msgr}},
     // do this in background
     heartbeat_timer{[this] { update_heartbeat_peers(); }},
@@ -406,27 +405,6 @@ seastar::future<> OSD::_add_me_to_crush()
   });
 }
 
-seastar::future<> OSD::_send_alive()
-{
-  auto want = osdmap->get_epoch();
-  logger().info(
-    "{} want {} up_thru_wanted {}",
-    __func__,
-    want,
-    up_thru_wanted);
-  if (!osdmap->exists(whoami)) {
-    logger().warn("{} DNE", __func__);
-    return seastar::now();
-  } else if (want <= up_thru_wanted) {
-    logger().debug("{} {} <= {}", __func__, want, up_thru_wanted);
-    return seastar::now();
-  } else {
-    up_thru_wanted = want;
-    auto m = make_message<MOSDAlive>(osdmap->get_epoch(), want);
-    return monc->send_message(std::move(m));
-  }
-}
-
 seastar::future<> OSD::handle_command(crimson::net::Connection* conn,
                                      Ref<MCommand> m)
 {
index afee53d69dece4a5a6e5d8c3f6843ac25e6703b4..93f58eb65cb82d8ce70d1d1478ad823e2c3aa9cf 100644 (file)
@@ -155,9 +155,6 @@ private:
   seastar::future<Ref<PG>> load_pg(spg_t pgid);
   seastar::future<> load_pgs();
 
-  epoch_t up_thru_wanted = 0;
-  seastar::future<> _send_alive();
-
   // OSDMapService methods
   epoch_t get_up_epoch() const final {
     return up_epoch;
index 8c4cfbc89b6ad3a79da30352aa7938b6c5482250..b9eb6ec7a559a884e7c5f35d8b0781cf5e60092a 100644 (file)
@@ -80,6 +80,9 @@ seastar::future<> PeeringEvent::start()
        pg->do_peering_event(evt, ctx);
        handle.exit();
        return complete_rctx(pg);
+      }).then([this, pg] {
+       return pg->get_need_up_thru() ? shard_services.send_alive(pg->get_same_interval_since())
+                               : seastar::now();
       });
     }
   }).then([this, ref=std::move(ref)] {
index 64eacea6806408cc009aa019c7c9c2f153e4b9b6..a1092e9b375f22c4c8d6b19d446a85a099ff81a9 100644 (file)
@@ -80,7 +80,9 @@ seastar::future<> PGAdvanceMap::start()
            logger().info("PGAdvanceMap::start new pg {}", *pg);
          }
          return seastar::when_all_succeed(
-           pg->get_need_up_thru() ? osd._send_alive() : seastar::now(),
+           pg->get_need_up_thru() \
+              ? osd.shard_services.send_alive(pg->get_same_interval_since())
+              : seastar::now(),
            osd.shard_services.dispatch_context(
              pg->get_collection_ref(),
              std::move(rctx)));
index 9dc10cf3d6c40127eb52f86aadddf5cb69f04d87..8c94590ec1bacf97660d603f8a0481bb14e6b01b 100644 (file)
@@ -449,6 +449,9 @@ public:
   bool get_need_up_thru() const {
     return peering_state.get_need_up_thru();
   }
+  epoch_t get_same_interval_since() const {
+    return get_info().history.same_interval_since;
+  }
 
   const auto& get_pool() const {
     return peering_state.get_pool();
index 7aa8ede6beddfb038ef0fd83e7b46908132eadd2..b52cdfb7ecbbfffdb8b06cffd2206887c2f93b2d 100644 (file)
@@ -3,6 +3,8 @@
 
 #include "crimson/osd/shard_services.h"
 
+#include "messages/MOSDAlive.h"
+
 #include "osd/osd_perf_counters.h"
 #include "osd/PeeringState.h"
 #include "crimson/common/config_proxy.h"
@@ -28,12 +30,14 @@ namespace crimson::osd {
 
 ShardServices::ShardServices(
   OSDMapService &osdmap_service,
+  const int whoami,
   crimson::net::Messenger &cluster_msgr,
   crimson::net::Messenger &public_msgr,
   crimson::mon::Client &monc,
   crimson::mgr::Client &mgrc,
   crimson::os::FuturizedStore &store)
     : osdmap_service(osdmap_service),
+      whoami(whoami),
       cluster_msgr(cluster_msgr),
       public_msgr(public_msgr),
       monc(monc),
@@ -283,4 +287,33 @@ HeartbeatStampsRef ShardServices::get_hb_stamps(int peer)
   return stamps->second;
 }
 
+seastar::future<> ShardServices::send_alive(const epoch_t want)
+{
+  logger().info(
+    "{} want={} up_thru_wanted={}",
+    __func__,
+    want,
+    up_thru_wanted);
+
+  if (want > up_thru_wanted) {
+    up_thru_wanted = want;
+  } else {
+    logger().debug("{} want={} <= up_thru_wanted={}; skipping",
+                   __func__, want, up_thru_wanted);
+    return seastar::now();
+  }
+  if (!osdmap->exists(whoami)) {
+    logger().warn("{} DNE", __func__);
+    return seastar::now();
+  } if (const epoch_t up_thru = osdmap->get_up_thru(whoami);
+        up_thru_wanted > up_thru) {
+    logger().debug("{} up_thru_wanted={} up_thru={}", __func__, want, up_thru);
+    return monc.send_message(
+      make_message<MOSDAlive>(osdmap->get_epoch(), want));
+  } else {
+    logger().debug("{} {} <= {}", __func__, want, osdmap->get_up_thru(whoami));
+    return seastar::now();
+  }
+}
+
 };
index 38d5d192ebd1b865684ff7cce7731fc3bd2ed943..934e91437113aaae8158b5152f297ba38ee46cf9 100644 (file)
@@ -44,6 +44,7 @@ namespace crimson::osd {
 class ShardServices : public md_config_obs_t {
   using cached_map_t = boost::local_shared_ptr<const OSDMap>;
   OSDMapService &osdmap_service;
+  const int whoami;
   crimson::net::Messenger &cluster_msgr;
   crimson::net::Messenger &public_msgr;
   crimson::mon::Client &monc;
@@ -61,6 +62,7 @@ class ShardServices : public md_config_obs_t {
 public:
   ShardServices(
     OSDMapService &osdmap_service,
+    const int whoami,
     crimson::net::Messenger &cluster_msgr,
     crimson::net::Messenger &public_msgr,
     crimson::mon::Client &monc,
@@ -204,6 +206,11 @@ private:
 public:
   AsyncReserver<spg_t, DirectFinisher> local_reserver;
   AsyncReserver<spg_t, DirectFinisher> remote_reserver;
+
+private:
+  epoch_t up_thru_wanted = 0;
+public:
+  seastar::future<> send_alive(epoch_t want);
 };
 
 }