PeeringCtx and BufferedRecoveryMessages need to know the cluster's required OSD release so they can decide what type of message(s) to queue.
Signed-off-by: Sage Weil <sage@redhat.com>
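
The stored require_osd_release lets the buffering code itself decide which kind of
message to queue for peers. Below is a minimal, self-contained sketch of that
pattern; the Release enum, the message classes, and queue_notify() are hypothetical
stand-ins for illustration, not the real Ceph types:

    // Hypothetical sketch: Release, LegacyNotifyMsg, NotifyMsg2 and
    // queue_notify() are illustrative stand-ins, not the actual Ceph classes.
    #include <map>
    #include <memory>
    #include <vector>

    enum class Release { unknown, nautilus, octopus };

    struct Message { virtual ~Message() = default; };
    struct LegacyNotifyMsg : Message {};  // older wire format
    struct NotifyMsg2 : Message {};       // newer wire format

    struct BufferedMessages {
      Release require_osd_release;        // decides which message type to build
      std::map<int, std::vector<std::unique_ptr<Message>>> message_map;

      explicit BufferedMessages(Release r) : require_osd_release(r) {}

      void queue_notify(int to) {
        // With the release stored up front, the buffer picks the right
        // encoding itself; callers no longer pass it down per call.
        if (require_osd_release >= Release::octopus) {
          message_map[to].push_back(std::make_unique<NotifyMsg2>());
        } else {
          message_map[to].push_back(std::make_unique<LegacyNotifyMsg>());
        }
      }
    };
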
PeeringCtx OSD::create_context()
{
- return PeeringCtx();
+ return PeeringCtx(get_osdmap()->require_osd_release);
}
void OSD::dispatch_context(PeeringCtx &ctx, PG *pg, OSDMapRef curmap,
// init pool options
store->set_collection_opts(ch, pool.info.opts);
- PeeringCtx rctx;
+ PeeringCtx rctx(ceph_release_t::unknown);
handle_initialize(rctx);
// note: we don't activate here because we know the OSD will advance maps
// during boot.
#define dout_context cct
#define dout_subsys ceph_subsys_osd
-BufferedRecoveryMessages::BufferedRecoveryMessages(PeeringCtx &ctx)
- : message_map(std::move(ctx.message_map))
-{
- ctx.message_map.clear();
+BufferedRecoveryMessages::BufferedRecoveryMessages(
+ ceph_release_t r,
+ PeeringCtx &ctx)
+ : require_osd_release(r) {
+ // steal messages from ctx
+ message_map.swap(ctx.message_map);
}
void BufferedRecoveryMessages::send_notify(int to, const pg_notify_t &n)
ceph_assert(!messages_pending_flush);
ceph_assert(orig_ctx);
ceph_assert(rctx);
- messages_pending_flush = BufferedRecoveryMessages();
+ messages_pending_flush = BufferedRecoveryMessages(
+ orig_ctx->require_osd_release);
rctx.emplace(*messages_pending_flush, *orig_ctx);
}
// [primary only] content recovery state
struct BufferedRecoveryMessages {
+ ceph_release_t require_osd_release;
map<int, vector<MessageRef>> message_map;
- BufferedRecoveryMessages() = default;
- BufferedRecoveryMessages(PeeringCtx &);
+ BufferedRecoveryMessages(ceph_release_t r)
+ : require_osd_release(r) {
+ }
+ BufferedRecoveryMessages(ceph_release_t r, PeeringCtx &ctx);
void accept_buffered_messages(BufferedRecoveryMessages &m) {
for (auto &[target, ls] : m.message_map) {
ObjectStore::Transaction transaction;
HBHandle* handle = nullptr;
- PeeringCtx() = default;
+ PeeringCtx(ceph_release_t r)
+ : BufferedRecoveryMessages(r) {}
PeeringCtx(const PeeringCtx &) = delete;
PeeringCtx &operator=(const PeeringCtx &) = delete;
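
As a usage note, here is a standalone sketch of how the two-argument constructor
drains a live context into a buffer while recording the release its messages were
queued for. Ctx and Buffered are hypothetical stand-ins, assuming the same
swap-based steal semantics shown in the diff above:

    #include <cassert>
    #include <map>
    #include <string>
    #include <vector>

    // Hypothetical stand-ins for PeeringCtx / BufferedRecoveryMessages.
    enum class Release { unknown, nautilus, octopus };

    struct Ctx {
      Release require_osd_release;
      std::map<int, std::vector<std::string>> message_map;
      explicit Ctx(Release r) : require_osd_release(r) {}
    };

    struct Buffered {
      Release require_osd_release;
      std::map<int, std::vector<std::string>> message_map;

      // Steal the pending messages from ctx, leaving it empty, and remember
      // the release the messages were queued for.
      Buffered(Release r, Ctx &ctx) : require_osd_release(r) {
        message_map.swap(ctx.message_map);
      }
    };

    int main() {
      Ctx ctx(Release::octopus);
      ctx.message_map[3].push_back("notify");

      Buffered pending(ctx.require_osd_release, ctx);  // buffer outgoing messages
      assert(ctx.message_map.empty());                 // ctx drained by the swap
      assert(pending.message_map.at(3).size() == 1);
      return 0;
    }
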