1. Migrate `MOSDAlive` crafting from `OSD` to `ShardServices`.
2. `send_alive()` takes the `want` epoch from callers
instead of assuming it matches the current OSDMap epoch.
Callers pass `pg_history_t::same_interval_since`.
The passed epoch is handled similarly to `queue_want_up_thru()`
in the classical OSD: the message is sent at most once per wanted
epoch, and only when that epoch is greater than the one returned
by `OSDMap::get_up_thru()` (see the sketch below).
3. Send the message also when completing the `PeeringEvent`.
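For reference, the caller-side contract boils down to the sketch
below (`maybe_send_alive` is a hypothetical helper used only for
illustration; the real call sites are in the hunks that follow, and
the types are the ones from the crimson tree):

    // Ask the monitors for a newer up_thru only when peering needs it.
    // The epoch handed over is pg_history_t::same_interval_since.
    seastar::future<> maybe_send_alive(Ref<PG> pg,
                                       ShardServices& shard_services)
    {
      if (!pg->get_need_up_thru()) {
        return seastar::now();
      }
      return shard_services.send_alive(pg->get_same_interval_since());
    }

ShardServices::send_alive() then deduplicates on up_thru_wanted and
talks to the monitors only while the wanted epoch is still greater
than OSDMap::get_up_thru(whoami).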
Signed-off-by: Radoslaw Zarzynski <rzarzyns@redhat.com>
#include "include/util.h"
#include "messages/MCommand.h"
-#include "messages/MOSDAlive.h"
#include "messages/MOSDBeacon.h"
#include "messages/MOSDBoot.h"
#include "messages/MOSDMap.h"
local_conf().get_val<std::string>("osd_objectstore"),
local_conf().get_val<std::string>("osd_data"),
local_conf().get_config_values())},
- shard_services{*this, *cluster_msgr, *public_msgr, *monc, *mgrc, *store},
+ shard_services{*this, whoami, *cluster_msgr, *public_msgr, *monc, *mgrc, *store},
heartbeat{new Heartbeat{whoami, shard_services, *monc, hb_front_msgr, hb_back_msgr}},
// do this in background
heartbeat_timer{[this] { update_heartbeat_peers(); }},
});
}
-seastar::future<> OSD::_send_alive()
-{
- auto want = osdmap->get_epoch();
- logger().info(
- "{} want {} up_thru_wanted {}",
- __func__,
- want,
- up_thru_wanted);
- if (!osdmap->exists(whoami)) {
- logger().warn("{} DNE", __func__);
- return seastar::now();
- } else if (want <= up_thru_wanted) {
- logger().debug("{} {} <= {}", __func__, want, up_thru_wanted);
- return seastar::now();
- } else {
- up_thru_wanted = want;
- auto m = make_message<MOSDAlive>(osdmap->get_epoch(), want);
- return monc->send_message(std::move(m));
- }
-}
-
seastar::future<> OSD::handle_command(crimson::net::Connection* conn,
Ref<MCommand> m)
{
seastar::future<Ref<PG>> load_pg(spg_t pgid);
seastar::future<> load_pgs();
- epoch_t up_thru_wanted = 0;
- seastar::future<> _send_alive();
-
// OSDMapService methods
epoch_t get_up_epoch() const final {
return up_epoch;
pg->do_peering_event(evt, ctx);
handle.exit();
return complete_rctx(pg);
+ }).then([this, pg] {
+    return pg->get_need_up_thru()
+      ? shard_services.send_alive(pg->get_same_interval_since())
+      : seastar::now();
});
}
}).then([this, ref=std::move(ref)] {
logger().info("PGAdvanceMap::start new pg {}", *pg);
}
return seastar::when_all_succeed(
- pg->get_need_up_thru() ? osd._send_alive() : seastar::now(),
+    pg->get_need_up_thru()
+      ? osd.shard_services.send_alive(pg->get_same_interval_since())
+      : seastar::now(),
osd.shard_services.dispatch_context(
pg->get_collection_ref(),
std::move(rctx)));
bool get_need_up_thru() const {
return peering_state.get_need_up_thru();
}
+ epoch_t get_same_interval_since() const {
+ return get_info().history.same_interval_since;
+ }
const auto& get_pool() const {
return peering_state.get_pool();
#include "crimson/osd/shard_services.h"
+#include "messages/MOSDAlive.h"
+
#include "osd/osd_perf_counters.h"
#include "osd/PeeringState.h"
#include "crimson/common/config_proxy.h"
ShardServices::ShardServices(
OSDMapService &osdmap_service,
+ const int whoami,
crimson::net::Messenger &cluster_msgr,
crimson::net::Messenger &public_msgr,
crimson::mon::Client &monc,
crimson::mgr::Client &mgrc,
crimson::os::FuturizedStore &store)
: osdmap_service(osdmap_service),
+ whoami(whoami),
cluster_msgr(cluster_msgr),
public_msgr(public_msgr),
monc(monc),
return stamps->second;
}
+seastar::future<> ShardServices::send_alive(const epoch_t want)
+{
+ logger().info(
+ "{} want={} up_thru_wanted={}",
+ __func__,
+ want,
+ up_thru_wanted);
+
+ if (want > up_thru_wanted) {
+ up_thru_wanted = want;
+ } else {
+ logger().debug("{} want={} <= up_thru_wanted={}; skipping",
+ __func__, want, up_thru_wanted);
+ return seastar::now();
+ }
+ if (!osdmap->exists(whoami)) {
+ logger().warn("{} DNE", __func__);
+ return seastar::now();
+  }
+  // Skip the message if the map already records an up_thru >= the wanted epoch.
+  if (const epoch_t up_thru = osdmap->get_up_thru(whoami);
+      up_thru_wanted > up_thru) {
+    logger().debug("{} up_thru_wanted={} up_thru={}", __func__, want, up_thru);
+    return monc.send_message(
+      make_message<MOSDAlive>(osdmap->get_epoch(), want));
+  } else {
+    logger().debug("{} {} <= {}", __func__, want, up_thru);
+ return seastar::now();
+ }
+}
+
};
class ShardServices : public md_config_obs_t {
using cached_map_t = boost::local_shared_ptr<const OSDMap>;
OSDMapService &osdmap_service;
+ const int whoami;
crimson::net::Messenger &cluster_msgr;
crimson::net::Messenger &public_msgr;
crimson::mon::Client &monc;
public:
ShardServices(
OSDMapService &osdmap_service,
+ const int whoami,
crimson::net::Messenger &cluster_msgr,
crimson::net::Messenger &public_msgr,
crimson::mon::Client &monc,
public:
AsyncReserver<spg_t, DirectFinisher> local_reserver;
AsyncReserver<spg_t, DirectFinisher> remote_reserver;
+
+private:
+ epoch_t up_thru_wanted = 0;
+public:
+ seastar::future<> send_alive(epoch_t want);
};
}