}
Heartbeat::Heartbeat(osd_id_t whoami,
- const crimson::osd::ShardServices& service,
+ crimson::osd::ShardServices& service,
crimson::mon::Client& monc,
crimson::net::Messenger &front_msgr,
crimson::net::Messenger &back_msgr)
service.get_mnow(),
service.get_up_epoch(),
min_message);
- return conn->send(std::move(reply));
+ return conn->send(std::move(reply)
+ ).then([this, m, conn] {
+ return maybe_share_osdmap(conn, m);
+ });
+}
+
+// Share our osdmap with a heartbeat peer when the epoch it reported in
+// its MOSDPing lags behind our current map; otherwise resolve at once.
+// @param conn  connection the reply went out on (kept alive by ref)
+// @param m     the peer's ping message, carrying its newest map_epoch
+seastar::future<> Heartbeat::maybe_share_osdmap(
+  crimson::net::ConnectionRef conn,
+  Ref<MOSDPing> m)
+{
+  const osd_id_t from = m->get_source().num();
+  const epoch_t osdmap_epoch = service.get_map()->get_epoch();
+  // debug, not info: this runs on every ping/reply and would spam the
+  // log at info level (matches the level of the message below).
+  logger().debug("{} peer id: {} epoch is {} while osdmap is {}",
+                 __func__, from, m->map_epoch, osdmap_epoch);
+  if (osdmap_epoch > m->map_epoch) {
+    logger().debug("{} sharing osdmap epoch of {} with peer id {}",
+                   __func__, osdmap_epoch, from);
+    // Peer's newest map is m->map_epoch. Therefore it misses
+    // the osdmaps in the range of `m->map_epoch` to `osdmap_epoch`.
+    return service.send_incremental_map_to_osd(from, m->map_epoch);
+  }
+  return seastar::now();
+}
seastar::future<> Heartbeat::handle_reply(crimson::net::ConnectionRef conn,
return seastar::now();
}
auto& peer = found->second;
- return peer.handle_reply(conn, m);
+ return peer.handle_reply(conn, m
+ ).then([this, conn, m] {
+ return maybe_share_osdmap(conn, m);
+ });
}
seastar::future<> Heartbeat::handle_you_died()
using osd_id_t = int;
Heartbeat(osd_id_t whoami,
- const crimson::osd::ShardServices& service,
+ crimson::osd::ShardServices& service,
crimson::mon::Client& monc,
crimson::net::Messenger &front_msgr,
crimson::net::Messenger &back_msgr);
seastar::future<> start_messenger(crimson::net::Messenger& msgr,
const entity_addrvec_t& addrs);
+ seastar::future<> maybe_share_osdmap(crimson::net::ConnectionRef,
+ Ref<MOSDPing> m);
private:
const osd_id_t whoami;
- const crimson::osd::ShardServices& service;
+ crimson::osd::ShardServices& service;
crimson::mon::Client& monc;
crimson::net::Messenger &front_msgr;
crimson::net::Messenger &back_msgr;
}
}
-
+// Push incremental osdmaps starting from epoch `first` to the given
+// osd over the cluster messenger; resolves immediately as a no-op if
+// our osdmap already marks that osd down.
+seastar::future<> OSDSingletonState::send_incremental_map_to_osd(
+  int osd,
+  epoch_t first)
+{
+  // A down peer's cluster address may be stale -- don't connect to it.
+  if (osdmap->is_down(osd)) {
+    logger().info("{}: osd.{} is_down", __func__, osd);
+    return seastar::now();
+  } else {
+    // NOTE(review): assumes an up osd has at least one cluster address;
+    // front() on an empty addrvec would be invalid -- confirm upstream.
+    auto conn = cluster_msgr.connect(
+      osdmap->get_cluster_addrs(osd).front(), CEPH_ENTITY_TYPE_OSD);
+    // Delegate the actual map transmission to the existing per-connection
+    // helper declared alongside this one.
+    return send_incremental_map(*conn, first);
+  }
+}
};
crimson::net::Connection &conn,
epoch_t first);
+ seastar::future<> send_incremental_map_to_osd(int osd, epoch_t first);
+
auto get_pool_info(int64_t poolid) {
return get_meta_coll().load_final_pool_info(poolid);
}
FORWARD(with_throttle_while, with_throttle_while, local_state.throttler)
FORWARD_TO_OSD_SINGLETON(send_incremental_map)
+ FORWARD_TO_OSD_SINGLETON(send_incremental_map_to_osd)
FORWARD_TO_OSD_SINGLETON(osdmap_subscribe)
FORWARD_TO_OSD_SINGLETON(queue_want_pg_temp)