From 5e36acdb6f9ac3c6469e937f6b4f916bfd217ab7 Mon Sep 17 00:00:00 2001
From: Samuel Just
Date: Fri, 12 Apr 2019 11:08:54 -0700
Subject: [PATCH] osd/: mechanically rename RecoveryMachine/State/Ctx to Peering*

I'm going to extract this logic and reuse it in crimson. Recovery* has
always been a confusing name as it implements neither log-based recovery
nor backfill. Rather, it's mainly the business logic for agreeing on an
authoritative log and some ancillary things such as scrub/backfill
reservation.

$ for i in $(git grep -l 'RecoveryMachine'); do sed -i 's/RecoveryMachine/PeeringMachine/g' $i; done
$ for i in $(git grep -l 'RecoveryState'); do sed -i 's/RecoveryState/PeeringState/g' $i; done
$ for i in $(git grep -l 'RecoveryCtx'); do sed -i 's/RecoveryCtx/PeeringCtx/g' $i; done

Signed-off-by: Samuel Just
---
 .../osd_internals/map_message_handling.rst |   2 +-
 doc/dev/osd_internals/pg.rst               |  10 +-
 src/crimson/osd/recovery_state.h           |   2 +-
 src/osd/OSD.cc                             |  22 +-
 src/osd/OSD.h                              |  14 +-
 src/osd/PG.cc                              | 934 +++++++++---------
 src/osd/PG.h                               |  52 +-
 7 files changed, 518 insertions(+), 518 deletions(-)

diff --git a/doc/dev/osd_internals/map_message_handling.rst b/doc/dev/osd_internals/map_message_handling.rst
index a5013c22f44..f8104f3fd57 100644
--- a/doc/dev/osd_internals/map_message_handling.rst
+++ b/doc/dev/osd_internals/map_message_handling.rst
@@ -119,7 +119,7 @@ Peering messages are tagged with two epochs:
 These are the same in cases where there was no triggering message. We discard
 a peering message if the message's query_epoch if the PG in question has entered
 a new epoch (See PG::old_peering_evt, PG::queue_peering_event). Notifies,
-infos, notifies, and logs are all handled as PG::RecoveryMachine events and
+infos, notifies, and logs are all handled as PG::PeeringMachine events and
 are wrapped by PG::queue_* by PG::CephPeeringEvts, which include the created
 state machine event along with epoch_sent and query_epoch in order to
 generically check PG::old_peering_message upon insertion and removal from the
diff --git a/doc/dev/osd_internals/pg.rst b/doc/dev/osd_internals/pg.rst
index 405536396f1..397d4ab5d4f 100644
--- a/doc/dev/osd_internals/pg.rst
+++ b/doc/dev/osd_internals/pg.rst
@@ -8,12 +8,12 @@ Concepts
 *Peering Interval*
   See PG::start_peering_interval.
   See PG::acting_up_affected
-  See PG::RecoveryState::Reset
+  See PG::PeeringState::Reset

   A peering interval is a maximal set of contiguous map epochs in which the
-  up and acting sets did not change. PG::RecoveryMachine represents a
+  up and acting sets did not change. PG::PeeringMachine represents a
   transition from one interval to another as passing through
-  RecoveryState::Reset. On PG::RecoveryState::AdvMap PG::acting_up_affected can
+  PeeringState::Reset. On PG::PeeringState::AdvMap PG::acting_up_affected can
   cause the pg to transition to Reset.


@@ -22,10 +22,10 @@ Peering Details and Gotchas
 For an overview of peering, see `Peering <../../peering>`_.

 * PG::flushed defaults to false and is set to false in
-  PG::start_peering_interval. Upon transitioning to PG::RecoveryState::Started
+  PG::start_peering_interval. Upon transitioning to PG::PeeringState::Started
   we send a transaction through the pg op sequencer which, upon complete,
   sends a FlushedEvt which sets flushed to true. The primary cannot go
-  active until this happens (See PG::RecoveryState::WaitFlushedPeering).
+  active until this happens (See PG::PeeringState::WaitFlushedPeering).
   Replicas can go active but cannot serve ops (writes or reads).
This is necessary because we cannot read our ondisk state until unstable transactions from the previous interval have cleared. diff --git a/src/crimson/osd/recovery_state.h b/src/crimson/osd/recovery_state.h index 67382db829a..feb93356d61 100644 --- a/src/crimson/osd/recovery_state.h +++ b/src/crimson/osd/recovery_state.h @@ -12,7 +12,7 @@ class PG; namespace recovery { -// RecoveryMachine::handle_event() could send multiple notifications to a +// PeeringMachine::handle_event() could send multiple notifications to a // certain peer OSD before it reaches the last state. for better performance, // we send them in batch. the pending messages are collected in RecoveryCtx // before being dispatched upon returning of handle_event(). diff --git a/src/osd/OSD.cc b/src/osd/OSD.cc index 90c1dd5070e..0f2f9314d5f 100644 --- a/src/osd/OSD.cc +++ b/src/osd/OSD.cc @@ -4506,7 +4506,7 @@ PGRef OSD::handle_pg_create_info(const OSDMapRef& osdmap, return nullptr; } - PG::RecoveryCtx rctx = create_context(); + PG::PeeringCtx rctx = create_context(); OSDMapRef startmap = get_map(info->epoch); @@ -8462,7 +8462,7 @@ void OSD::_finish_splits(set& pgs) dout(10) << __func__ << " " << pgs << dendl; if (is_stopping()) return; - PG::RecoveryCtx rctx = create_context(); + PG::PeeringCtx rctx = create_context(); for (set::iterator i = pgs.begin(); i != pgs.end(); ++i) { @@ -8499,7 +8499,7 @@ bool OSD::advance_pg( epoch_t osd_epoch, PG *pg, ThreadPool::TPHandle &handle, - PG::RecoveryCtx *rctx) + PG::PeeringCtx *rctx) { if (osd_epoch <= pg->get_osdmap_epoch()) { return true; @@ -8935,7 +8935,7 @@ void OSD::split_pgs( const set &childpgids, set *out_pgs, OSDMapRef curmap, OSDMapRef nextmap, - PG::RecoveryCtx *rctx) + PG::PeeringCtx *rctx) { unsigned pg_num = nextmap->get_pg_num(parent->pg_id.pool()); parent->update_snap_mapper_bits(parent->get_pgid().get_split_bits(pg_num)); @@ -9083,7 +9083,7 @@ void OSD::handle_pg_create(OpRequestRef op) // ---------------------------------------- // peering and recovery -PG::RecoveryCtx OSD::create_context() +PG::PeeringCtx OSD::create_context() { ObjectStore::Transaction *t = new ObjectStore::Transaction; map > *query_map = @@ -9092,11 +9092,11 @@ PG::RecoveryCtx OSD::create_context() new map > >; map > > *info_map = new map > >; - PG::RecoveryCtx rctx(query_map, info_map, notify_list, t); + PG::PeeringCtx rctx(query_map, info_map, notify_list, t); return rctx; } -void OSD::dispatch_context_transaction(PG::RecoveryCtx &ctx, PG *pg, +void OSD::dispatch_context_transaction(PG::PeeringCtx &ctx, PG *pg, ThreadPool::TPHandle *handle) { if (!ctx.transaction->empty() || ctx.transaction->has_contexts()) { @@ -9109,7 +9109,7 @@ void OSD::dispatch_context_transaction(PG::RecoveryCtx &ctx, PG *pg, } } -void OSD::dispatch_context(PG::RecoveryCtx &ctx, PG *pg, OSDMapRef curmap, +void OSD::dispatch_context(PG::PeeringCtx &ctx, PG *pg, OSDMapRef curmap, ThreadPool::TPHandle *handle) { if (!service.get_osdmap()->is_up(whoami)) { @@ -9134,7 +9134,7 @@ void OSD::dispatch_context(PG::RecoveryCtx &ctx, PG *pg, OSDMapRef curmap, delete ctx.transaction; } -void OSD::discard_context(PG::RecoveryCtx& ctx) +void OSD::discard_context(PG::PeeringCtx& ctx) { delete ctx.notify_list; delete ctx.query_map; @@ -9608,7 +9608,7 @@ void OSD::do_recovery( << " on " << *pg << dendl; if (do_unfound) { - PG::RecoveryCtx rctx = create_context(); + PG::PeeringCtx rctx = create_context(); rctx.handle = &handle; pg->find_unfound(queued, &rctx); dispatch_context(rctx, pg, pg->get_osdmap()); @@ -9782,7 +9782,7 @@ void 
OSD::dequeue_peering_evt( PGPeeringEventRef evt, ThreadPool::TPHandle& handle) { - PG::RecoveryCtx rctx = create_context(); + PG::PeeringCtx rctx = create_context(); auto curmap = sdata->get_osdmap(); bool need_up_thru = false; epoch_t same_interval_since = 0; diff --git a/src/osd/OSD.h b/src/osd/OSD.h index b29c5862cd7..9d2a4b5bcb0 100644 --- a/src/osd/OSD.h +++ b/src/osd/OSD.h @@ -188,7 +188,7 @@ enum { l_osd_last, }; -// RecoveryState perf counters +// PeeringState perf counters enum { rs_first = 20000, rs_initial_latency, @@ -1862,7 +1862,7 @@ protected: epoch_t advance_to, PG *pg, ThreadPool::TPHandle &handle, - PG::RecoveryCtx *rctx); + PG::PeeringCtx *rctx); void consume_map(); void activate_map(); @@ -1955,7 +1955,7 @@ protected: const set &childpgids, set *out_pgs, OSDMapRef curmap, OSDMapRef nextmap, - PG::RecoveryCtx *rctx); + PG::PeeringCtx *rctx); void _finish_splits(set& pgs); // == monitor interaction == @@ -2017,12 +2017,12 @@ protected: } // -- generic pg peering -- - PG::RecoveryCtx create_context(); - void dispatch_context(PG::RecoveryCtx &ctx, PG *pg, OSDMapRef curmap, + PG::PeeringCtx create_context(); + void dispatch_context(PG::PeeringCtx &ctx, PG *pg, OSDMapRef curmap, ThreadPool::TPHandle *handle = NULL); - void dispatch_context_transaction(PG::RecoveryCtx &ctx, PG *pg, + void dispatch_context_transaction(PG::PeeringCtx &ctx, PG *pg, ThreadPool::TPHandle *handle = NULL); - void discard_context(PG::RecoveryCtx &ctx); + void discard_context(PG::PeeringCtx &ctx); void do_notifies(map > >& notify_list, diff --git a/src/osd/PG.cc b/src/osd/PG.cc index c00d76d1575..e641e68e699 100644 --- a/src/osd/PG.cc +++ b/src/osd/PG.cc @@ -579,7 +579,7 @@ void PG::rewind_divergent_log(ObjectStore::Transaction& t, eversion_t newhead) bool PG::search_for_missing( const pg_info_t &oinfo, const pg_missing_t &omissing, pg_shard_t from, - RecoveryCtx *ctx) + PeeringCtx *ctx) { uint64_t num_unfound_before = missing_loc.num_unfound(); bool found_missing = missing_loc.add_source_info( @@ -1700,7 +1700,7 @@ void PG::choose_async_recovery_replicated(const map &all_ * a new pg info (not just here, when recovery finishes) * 2) check whether anything in want_acting went down on each new map * (and, if so, calculate a new want_acting) - * 3) remove the assertion in PG::RecoveryState::Active::react(const AdvMap) + * 3) remove the assertion in PG::PeeringState::Active::react(const AdvMap) * TODO! */ bool PG::choose_acting(pg_shard_t &auth_log_shard_id, @@ -1864,7 +1864,7 @@ void PG::activate(ObjectStore::Transaction& t, vector< pair > > *activator_map, - RecoveryCtx *ctx) + PeeringCtx *ctx) { ceph_assert(!is_peered()); ceph_assert(scrubber.callbacks.empty()); @@ -2758,7 +2758,7 @@ void PG::finish_split_stats(const object_stat_sum_t& stats, ObjectStore::Transac write_if_dirty(*t); } -void PG::merge_from(map& sources, RecoveryCtx *rctx, +void PG::merge_from(map& sources, PeeringCtx *rctx, unsigned split_bits, const pg_merge_meta_t& last_pg_merge_meta) { @@ -4133,7 +4133,7 @@ void PG::read_state(ObjectStore *store) set_role(-1); } - PG::RecoveryCtx rctx(0, 0, 0, new ObjectStore::Transaction); + PG::PeeringCtx rctx(0, 0, 0, new ObjectStore::Transaction); handle_initialize(&rctx); // note: we don't activate here because we know the OSD will advance maps // during boot. 
@@ -6134,7 +6134,7 @@ void PG::fulfill_log( osd->send_message_osd_cluster(mlog, con.get()); } -void PG::fulfill_query(const MQuery& query, RecoveryCtx *rctx) +void PG::fulfill_query(const MQuery& query, PeeringCtx *rctx) { if (query.query.type == pg_query_t::INFO) { pair notify_info; @@ -6801,7 +6801,7 @@ void PG::take_waiters() requeue_map_waiters(); } -void PG::do_peering_event(PGPeeringEventRef evt, RecoveryCtx *rctx) +void PG::do_peering_event(PGPeeringEventRef evt, PeeringCtx *rctx) { dout(10) << __func__ << ": " << evt->get_desc() << dendl; ceph_assert(have_same_or_newer_map(evt->get_epoch_sent())); @@ -6831,7 +6831,7 @@ void PG::queue_null(epoch_t msg_epoch, NullEvt()))); } -void PG::find_unfound(epoch_t queued, RecoveryCtx *rctx) +void PG::find_unfound(epoch_t queued, PeeringCtx *rctx) { /* * if we couldn't start any recovery ops and things are still @@ -6872,7 +6872,7 @@ void PG::handle_advance_map( OSDMapRef osdmap, OSDMapRef lastmap, vector& newup, int up_primary, vector& newacting, int acting_primary, - RecoveryCtx *rctx) + PeeringCtx *rctx) { ceph_assert(lastmap->get_epoch() == osdmap_ref->get_epoch()); ceph_assert(lastmap == osdmap_ref); @@ -6896,7 +6896,7 @@ void PG::handle_advance_map( last_require_osd_release = osdmap->require_osd_release; } -void PG::handle_activate_map(RecoveryCtx *rctx) +void PG::handle_activate_map(PeeringCtx *rctx) { dout(10) << "handle_activate_map " << dendl; ActMap evt; @@ -6918,7 +6918,7 @@ void PG::handle_activate_map(RecoveryCtx *rctx) write_if_dirty(*rctx->transaction); } -void PG::handle_initialize(RecoveryCtx *rctx) +void PG::handle_initialize(PeeringCtx *rctx) { dout(10) << __func__ << dendl; Initialize evt; @@ -7100,80 +7100,80 @@ int PG::pg_stat_adjust(osd_stat_t *ns) /*------------ Recovery State Machine----------------*/ #undef dout_prefix -#define dout_prefix (context< RecoveryMachine >().pg->gen_prefix(*_dout) \ +#define dout_prefix (context< PeeringMachine >().pg->gen_prefix(*_dout) \ << "state<" << get_state_name() << ">: ") /*------Crashed-------*/ -PG::RecoveryState::Crashed::Crashed(my_context ctx) +PG::PeeringState::Crashed::Crashed(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Crashed") + NamedState(context< PeeringMachine >().pg, "Crashed") { - context< RecoveryMachine >().log_enter(state_name); + context< PeeringMachine >().log_enter(state_name); ceph_abort_msg("we got a bad state machine event"); } /*------Initial-------*/ -PG::RecoveryState::Initial::Initial(my_context ctx) +PG::PeeringState::Initial::Initial(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Initial") + NamedState(context< PeeringMachine >().pg, "Initial") { - context< RecoveryMachine >().log_enter(state_name); + context< PeeringMachine >().log_enter(state_name); } -boost::statechart::result PG::RecoveryState::Initial::react(const MNotifyRec& notify) +boost::statechart::result PG::PeeringState::Initial::react(const MNotifyRec& notify) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; pg->proc_replica_info( notify.from, notify.notify.info, notify.notify.epoch_sent); pg->set_last_peering_reset(); return transit< Primary >(); } -boost::statechart::result PG::RecoveryState::Initial::react(const MInfoRec& i) +boost::statechart::result PG::PeeringState::Initial::react(const MInfoRec& i) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ceph_assert(!pg->is_primary()); post_event(i); return transit< Stray >(); } 
-boost::statechart::result PG::RecoveryState::Initial::react(const MLogRec& i) +boost::statechart::result PG::PeeringState::Initial::react(const MLogRec& i) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ceph_assert(!pg->is_primary()); post_event(i); return transit< Stray >(); } -void PG::RecoveryState::Initial::exit() +void PG::PeeringState::Initial::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_initial_latency, dur); } /*------Started-------*/ -PG::RecoveryState::Started::Started(my_context ctx) +PG::PeeringState::Started::Started(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Started") + NamedState(context< PeeringMachine >().pg, "Started") { - context< RecoveryMachine >().log_enter(state_name); + context< PeeringMachine >().log_enter(state_name); } boost::statechart::result -PG::RecoveryState::Started::react(const IntervalFlush&) +PG::PeeringState::Started::react(const IntervalFlush&) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ldout(pg->cct, 10) << "Ending blocked outgoing recovery messages" << dendl; - context< RecoveryMachine >().pg->recovery_state.end_block_outgoing(); + context< PeeringMachine >().pg->recovery_state.end_block_outgoing(); return discard_event(); } -boost::statechart::result PG::RecoveryState::Started::react(const AdvMap& advmap) +boost::statechart::result PG::PeeringState::Started::react(const AdvMap& advmap) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ldout(pg->cct, 10) << "Started advmap" << dendl; pg->check_full_transition(advmap.lastmap, advmap.osdmap); if (pg->should_restart_peering( @@ -7192,7 +7192,7 @@ boost::statechart::result PG::RecoveryState::Started::react(const AdvMap& advmap return discard_event(); } -boost::statechart::result PG::RecoveryState::Started::react(const QueryState& q) +boost::statechart::result PG::PeeringState::Started::react(const QueryState& q) { q.f->open_object_section("state"); q.f->dump_string("name", state_name); @@ -7201,38 +7201,38 @@ boost::statechart::result PG::RecoveryState::Started::react(const QueryState& q) return discard_event(); } -void PG::RecoveryState::Started::exit() +void PG::PeeringState::Started::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_started_latency, dur); } /*--------Reset---------*/ -PG::RecoveryState::Reset::Reset(my_context ctx) +PG::PeeringState::Reset::Reset(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Reset") + NamedState(context< PeeringMachine >().pg, "Reset") { - context< RecoveryMachine >().log_enter(state_name); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_enter(state_name); + PG *pg = context< PeeringMachine >().pg; pg->flushes_in_progress = 0; pg->set_last_peering_reset(); } boost::statechart::result -PG::RecoveryState::Reset::react(const IntervalFlush&) +PG::PeeringState::Reset::react(const IntervalFlush&) { - PG *pg = context< RecoveryMachine >().pg; 
+ PG *pg = context< PeeringMachine >().pg; ldout(pg->cct, 10) << "Ending blocked outgoing recovery messages" << dendl; - context< RecoveryMachine >().pg->recovery_state.end_block_outgoing(); + context< PeeringMachine >().pg->recovery_state.end_block_outgoing(); return discard_event(); } -boost::statechart::result PG::RecoveryState::Reset::react(const AdvMap& advmap) +boost::statechart::result PG::PeeringState::Reset::react(const AdvMap& advmap) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ldout(pg->cct, 10) << "Reset advmap" << dendl; pg->check_full_transition(advmap.lastmap, advmap.osdmap); @@ -7250,18 +7250,18 @@ boost::statechart::result PG::RecoveryState::Reset::react(const AdvMap& advmap) advmap.lastmap, advmap.newup, advmap.up_primary, advmap.newacting, advmap.acting_primary, - context< RecoveryMachine >().get_cur_transaction()); + context< PeeringMachine >().get_cur_transaction()); } pg->remove_down_peer_info(advmap.osdmap); pg->check_past_interval_bounds(); return discard_event(); } -boost::statechart::result PG::RecoveryState::Reset::react(const ActMap&) +boost::statechart::result PG::PeeringState::Reset::react(const ActMap&) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; if (pg->should_send_notify() && pg->get_primary().osd >= 0) { - context< RecoveryMachine >().send_notify( + context< PeeringMachine >().send_notify( pg->get_primary(), pg_notify_t( pg->get_primary().shard, pg->pg_whoami.shard, @@ -7277,7 +7277,7 @@ boost::statechart::result PG::RecoveryState::Reset::react(const ActMap&) return transit< Started >(); } -boost::statechart::result PG::RecoveryState::Reset::react(const QueryState& q) +boost::statechart::result PG::PeeringState::Reset::react(const QueryState& q) { q.f->open_object_section("state"); q.f->dump_string("name", state_name); @@ -7286,22 +7286,22 @@ boost::statechart::result PG::RecoveryState::Reset::react(const QueryState& q) return discard_event(); } -void PG::RecoveryState::Reset::exit() +void PG::PeeringState::Reset::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_reset_latency, dur); } /*-------Start---------*/ -PG::RecoveryState::Start::Start(my_context ctx) +PG::PeeringState::Start::Start(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Start") + NamedState(context< PeeringMachine >().pg, "Start") { - context< RecoveryMachine >().log_enter(state_name); + context< PeeringMachine >().log_enter(state_name); - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; if (pg->is_primary()) { ldout(pg->cct, 1) << "transitioning to Primary" << dendl; post_event(MakePrimary()); @@ -7311,21 +7311,21 @@ PG::RecoveryState::Start::Start(my_context ctx) } } -void PG::RecoveryState::Start::exit() +void PG::PeeringState::Start::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_start_latency, dur); } /*---------Primary--------*/ -PG::RecoveryState::Primary::Primary(my_context ctx) 
+PG::PeeringState::Primary::Primary(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Started/Primary") + NamedState(context< PeeringMachine >().pg, "Started/Primary") { - context< RecoveryMachine >().log_enter(state_name); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_enter(state_name); + PG *pg = context< PeeringMachine >().pg; ceph_assert(pg->want_acting.empty()); // set CREATING bit until we have peered for the first time. @@ -7348,44 +7348,44 @@ PG::RecoveryState::Primary::Primary(my_context ctx) } } -boost::statechart::result PG::RecoveryState::Primary::react(const MNotifyRec& notevt) +boost::statechart::result PG::PeeringState::Primary::react(const MNotifyRec& notevt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ldout(pg->cct, 7) << "handle_pg_notify from osd." << notevt.from << dendl; pg->proc_replica_info( notevt.from, notevt.notify.info, notevt.notify.epoch_sent); return discard_event(); } -boost::statechart::result PG::RecoveryState::Primary::react(const ActMap&) +boost::statechart::result PG::PeeringState::Primary::react(const ActMap&) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ldout(pg->cct, 7) << "handle ActMap primary" << dendl; pg->publish_stats_to_osd(); pg->take_waiters(); return discard_event(); } -boost::statechart::result PG::RecoveryState::Primary::react( +boost::statechart::result PG::PeeringState::Primary::react( const SetForceRecovery&) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; pg->set_force_recovery(true); return discard_event(); } -boost::statechart::result PG::RecoveryState::Primary::react( +boost::statechart::result PG::PeeringState::Primary::react( const UnsetForceRecovery&) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; pg->set_force_recovery(false); return discard_event(); } -boost::statechart::result PG::RecoveryState::Primary::react( +boost::statechart::result PG::PeeringState::Primary::react( const RequestScrub& evt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; if (pg->is_primary()) { pg->unreg_next_scrub(); pg->scrubber.must_scrub = true; @@ -7397,26 +7397,26 @@ boost::statechart::result PG::RecoveryState::Primary::react( return discard_event(); } -boost::statechart::result PG::RecoveryState::Primary::react( +boost::statechart::result PG::PeeringState::Primary::react( const SetForceBackfill&) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; pg->set_force_backfill(true); return discard_event(); } -boost::statechart::result PG::RecoveryState::Primary::react( +boost::statechart::result PG::PeeringState::Primary::react( const UnsetForceBackfill&) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; pg->set_force_backfill(false); return discard_event(); } -void PG::RecoveryState::Primary::exit() +void PG::PeeringState::Primary::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; pg->want_acting.clear(); utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_primary_latency, dur); @@ -7425,23 +7425,23 @@ void PG::RecoveryState::Primary::exit() } /*---------Peering--------*/ 
-PG::RecoveryState::Peering::Peering(my_context ctx) +PG::PeeringState::Peering::Peering(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Started/Primary/Peering"), + NamedState(context< PeeringMachine >().pg, "Started/Primary/Peering"), history_les_bound(false) { - context< RecoveryMachine >().log_enter(state_name); + context< PeeringMachine >().log_enter(state_name); - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ceph_assert(!pg->is_peered()); ceph_assert(!pg->is_peering()); ceph_assert(pg->is_primary()); pg->state_set(PG_STATE_PEERING); } -boost::statechart::result PG::RecoveryState::Peering::react(const AdvMap& advmap) +boost::statechart::result PG::PeeringState::Peering::react(const AdvMap& advmap) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ldout(pg->cct, 10) << "Peering advmap" << dendl; if (prior_set.affected_by_map(*(advmap.osdmap), pg)) { ldout(pg->cct, 1) << "Peering, affected_by_map, going to Reset" << dendl; @@ -7454,9 +7454,9 @@ boost::statechart::result PG::RecoveryState::Peering::react(const AdvMap& advmap return forward_event(); } -boost::statechart::result PG::RecoveryState::Peering::react(const QueryState& q) +boost::statechart::result PG::PeeringState::Peering::react(const QueryState& q) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; q.f->open_object_section("state"); q.f->dump_string("name", state_name); @@ -7507,11 +7507,11 @@ boost::statechart::result PG::RecoveryState::Peering::react(const QueryState& q) return forward_event(); } -void PG::RecoveryState::Peering::exit() +void PG::PeeringState::Peering::exit() { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ldout(pg->cct, 10) << "Leaving Peering" << dendl; - context< RecoveryMachine >().log_exit(state_name, enter_time); + context< PeeringMachine >().log_exit(state_name, enter_time); pg->state_clear(PG_STATE_PEERING); pg->clear_probe_targets(); @@ -7521,12 +7521,12 @@ void PG::RecoveryState::Peering::exit() /*------Backfilling-------*/ -PG::RecoveryState::Backfilling::Backfilling(my_context ctx) +PG::PeeringState::Backfilling::Backfilling(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Started/Primary/Active/Backfilling") + NamedState(context< PeeringMachine >().pg, "Started/Primary/Active/Backfilling") { - context< RecoveryMachine >().log_enter(state_name); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_enter(state_name); + PG *pg = context< PeeringMachine >().pg; pg->backfill_reserved = true; pg->queue_recovery(); pg->state_clear(PG_STATE_BACKFILL_TOOFULL); @@ -7535,9 +7535,9 @@ PG::RecoveryState::Backfilling::Backfilling(my_context ctx) pg->publish_stats_to_osd(); } -void PG::RecoveryState::Backfilling::backfill_release_reservations() +void PG::PeeringState::Backfilling::backfill_release_reservations() { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; pg->osd->local_reserver.cancel_reservation(pg->info.pgid); for (set::iterator it = pg->backfill_targets.begin(); it != pg->backfill_targets.end(); @@ -7556,9 +7556,9 @@ void PG::RecoveryState::Backfilling::backfill_release_reservations() } } -void PG::RecoveryState::Backfilling::cancel_backfill() +void PG::PeeringState::Backfilling::cancel_backfill() { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; backfill_release_reservations(); if 
(!pg->waiting_on_backfill.empty()) { pg->waiting_on_backfill.clear(); @@ -7567,16 +7567,16 @@ void PG::RecoveryState::Backfilling::cancel_backfill() } boost::statechart::result -PG::RecoveryState::Backfilling::react(const Backfilled &c) +PG::PeeringState::Backfilling::react(const Backfilled &c) { backfill_release_reservations(); return transit(); } boost::statechart::result -PG::RecoveryState::Backfilling::react(const DeferBackfill &c) +PG::PeeringState::Backfilling::react(const DeferBackfill &c) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ldout(pg->cct, 10) << "defer backfill, retry delay " << c.delay << dendl; pg->state_set(PG_STATE_BACKFILL_WAIT); pg->state_clear(PG_STATE_BACKFILLING); @@ -7586,9 +7586,9 @@ PG::RecoveryState::Backfilling::react(const DeferBackfill &c) } boost::statechart::result -PG::RecoveryState::Backfilling::react(const UnfoundBackfill &c) +PG::PeeringState::Backfilling::react(const UnfoundBackfill &c) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ldout(pg->cct, 10) << "backfill has unfound, can't continue" << dendl; pg->state_set(PG_STATE_BACKFILL_UNFOUND); pg->state_clear(PG_STATE_BACKFILLING); @@ -7597,9 +7597,9 @@ PG::RecoveryState::Backfilling::react(const UnfoundBackfill &c) } boost::statechart::result -PG::RecoveryState::Backfilling::react(const RemoteReservationRevokedTooFull &) +PG::PeeringState::Backfilling::react(const RemoteReservationRevokedTooFull &) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; pg->state_set(PG_STATE_BACKFILL_TOOFULL); pg->state_clear(PG_STATE_BACKFILLING); cancel_backfill(); @@ -7608,9 +7608,9 @@ PG::RecoveryState::Backfilling::react(const RemoteReservationRevokedTooFull &) } boost::statechart::result -PG::RecoveryState::Backfilling::react(const RemoteReservationRevoked &) +PG::PeeringState::Backfilling::react(const RemoteReservationRevoked &) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; pg->state_set(PG_STATE_BACKFILL_WAIT); cancel_backfill(); if (pg->needs_backfill()) { @@ -7621,10 +7621,10 @@ PG::RecoveryState::Backfilling::react(const RemoteReservationRevoked &) } } -void PG::RecoveryState::Backfilling::exit() +void PG::PeeringState::Backfilling::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; pg->backfill_reserved = false; pg->backfill_reserving = false; pg->state_clear(PG_STATE_BACKFILLING); @@ -7635,22 +7635,22 @@ void PG::RecoveryState::Backfilling::exit() /*--WaitRemoteBackfillReserved--*/ -PG::RecoveryState::WaitRemoteBackfillReserved::WaitRemoteBackfillReserved(my_context ctx) +PG::PeeringState::WaitRemoteBackfillReserved::WaitRemoteBackfillReserved(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Started/Primary/Active/WaitRemoteBackfillReserved"), + NamedState(context< PeeringMachine >().pg, "Started/Primary/Active/WaitRemoteBackfillReserved"), backfill_osd_it(context< Active >().remote_shards_to_reserve_backfill.begin()) { - context< RecoveryMachine >().log_enter(state_name); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_enter(state_name); + PG *pg = context< PeeringMachine >().pg; pg->state_set(PG_STATE_BACKFILL_WAIT); pg->publish_stats_to_osd(); post_event(RemoteBackfillReserved()); } boost::statechart::result 
-PG::RecoveryState::WaitRemoteBackfillReserved::react(const RemoteBackfillReserved &evt) +PG::PeeringState::WaitRemoteBackfillReserved::react(const RemoteBackfillReserved &evt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; int64_t num_bytes = pg->info.stats.stats.sum.num_bytes; ldout(pg->cct, 10) << __func__ << " num_bytes " << num_bytes << dendl; @@ -7678,17 +7678,17 @@ PG::RecoveryState::WaitRemoteBackfillReserved::react(const RemoteBackfillReserve return discard_event(); } -void PG::RecoveryState::WaitRemoteBackfillReserved::exit() +void PG::PeeringState::WaitRemoteBackfillReserved::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_waitremotebackfillreserved_latency, dur); } -void PG::RecoveryState::WaitRemoteBackfillReserved::retry() +void PG::PeeringState::WaitRemoteBackfillReserved::retry() { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; pg->osd->local_reserver.cancel_reservation(pg->info.pgid); // Send CANCEL to all previously acquired reservations @@ -7719,26 +7719,26 @@ void PG::RecoveryState::WaitRemoteBackfillReserved::retry() } boost::statechart::result -PG::RecoveryState::WaitRemoteBackfillReserved::react(const RemoteReservationRejected &evt) +PG::PeeringState::WaitRemoteBackfillReserved::react(const RemoteReservationRejected &evt) { retry(); return transit(); } boost::statechart::result -PG::RecoveryState::WaitRemoteBackfillReserved::react(const RemoteReservationRevoked &evt) +PG::PeeringState::WaitRemoteBackfillReserved::react(const RemoteReservationRevoked &evt) { retry(); return transit(); } /*--WaitLocalBackfillReserved--*/ -PG::RecoveryState::WaitLocalBackfillReserved::WaitLocalBackfillReserved(my_context ctx) +PG::PeeringState::WaitLocalBackfillReserved::WaitLocalBackfillReserved(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Started/Primary/Active/WaitLocalBackfillReserved") + NamedState(context< PeeringMachine >().pg, "Started/Primary/Active/WaitLocalBackfillReserved") { - context< RecoveryMachine >().log_enter(state_name); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_enter(state_name); + PG *pg = context< PeeringMachine >().pg; pg->state_set(PG_STATE_BACKFILL_WAIT); pg->osd->local_reserver.request_reservation( pg->info.pgid, @@ -7752,102 +7752,102 @@ PG::RecoveryState::WaitLocalBackfillReserved::WaitLocalBackfillReserved(my_conte pg->publish_stats_to_osd(); } -void PG::RecoveryState::WaitLocalBackfillReserved::exit() +void PG::PeeringState::WaitLocalBackfillReserved::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_waitlocalbackfillreserved_latency, dur); } /*----NotBackfilling------*/ -PG::RecoveryState::NotBackfilling::NotBackfilling(my_context ctx) +PG::PeeringState::NotBackfilling::NotBackfilling(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Started/Primary/Active/NotBackfilling") + NamedState(context< PeeringMachine >().pg, "Started/Primary/Active/NotBackfilling") { - context< 
RecoveryMachine >().log_enter(state_name); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_enter(state_name); + PG *pg = context< PeeringMachine >().pg; pg->state_clear(PG_STATE_REPAIR); pg->publish_stats_to_osd(); } boost::statechart::result -PG::RecoveryState::NotBackfilling::react(const RemoteBackfillReserved &evt) +PG::PeeringState::NotBackfilling::react(const RemoteBackfillReserved &evt) { return discard_event(); } boost::statechart::result -PG::RecoveryState::NotBackfilling::react(const RemoteReservationRejected &evt) +PG::PeeringState::NotBackfilling::react(const RemoteReservationRejected &evt) { return discard_event(); } -void PG::RecoveryState::NotBackfilling::exit() +void PG::PeeringState::NotBackfilling::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; pg->state_clear(PG_STATE_BACKFILL_UNFOUND); utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_notbackfilling_latency, dur); } /*----NotRecovering------*/ -PG::RecoveryState::NotRecovering::NotRecovering(my_context ctx) +PG::PeeringState::NotRecovering::NotRecovering(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Started/Primary/Active/NotRecovering") + NamedState(context< PeeringMachine >().pg, "Started/Primary/Active/NotRecovering") { - context< RecoveryMachine >().log_enter(state_name); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_enter(state_name); + PG *pg = context< PeeringMachine >().pg; pg->publish_stats_to_osd(); } -void PG::RecoveryState::NotRecovering::exit() +void PG::PeeringState::NotRecovering::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; pg->state_clear(PG_STATE_RECOVERY_UNFOUND); utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_notrecovering_latency, dur); } /*---RepNotRecovering----*/ -PG::RecoveryState::RepNotRecovering::RepNotRecovering(my_context ctx) +PG::PeeringState::RepNotRecovering::RepNotRecovering(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Started/ReplicaActive/RepNotRecovering") + NamedState(context< PeeringMachine >().pg, "Started/ReplicaActive/RepNotRecovering") { - context< RecoveryMachine >().log_enter(state_name); + context< PeeringMachine >().log_enter(state_name); } boost::statechart::result -PG::RecoveryState::RepNotRecovering::react(const RejectRemoteReservation &evt) +PG::PeeringState::RepNotRecovering::react(const RejectRemoteReservation &evt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; pg->reject_reservation(); post_event(RemoteReservationRejected()); return discard_event(); } -void PG::RecoveryState::RepNotRecovering::exit() +void PG::PeeringState::RepNotRecovering::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_repnotrecovering_latency, dur); } /*---RepWaitRecoveryReserved--*/ 
-PG::RecoveryState::RepWaitRecoveryReserved::RepWaitRecoveryReserved(my_context ctx) +PG::PeeringState::RepWaitRecoveryReserved::RepWaitRecoveryReserved(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Started/ReplicaActive/RepWaitRecoveryReserved") + NamedState(context< PeeringMachine >().pg, "Started/ReplicaActive/RepWaitRecoveryReserved") { - context< RecoveryMachine >().log_enter(state_name); + context< PeeringMachine >().log_enter(state_name); } boost::statechart::result -PG::RecoveryState::RepWaitRecoveryReserved::react(const RemoteRecoveryReserved &evt) +PG::PeeringState::RepWaitRecoveryReserved::react(const RemoteRecoveryReserved &evt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; pg->osd->send_message_osd_cluster( pg->primary.osd, new MRecoveryReserve( @@ -7859,35 +7859,35 @@ PG::RecoveryState::RepWaitRecoveryReserved::react(const RemoteRecoveryReserved & } boost::statechart::result -PG::RecoveryState::RepWaitRecoveryReserved::react( +PG::PeeringState::RepWaitRecoveryReserved::react( const RemoteReservationCanceled &evt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; pg->clear_reserved_num_bytes(); pg->osd->remote_reserver.cancel_reservation(pg->info.pgid); return transit(); } -void PG::RecoveryState::RepWaitRecoveryReserved::exit() +void PG::PeeringState::RepWaitRecoveryReserved::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_repwaitrecoveryreserved_latency, dur); } /*-RepWaitBackfillReserved*/ -PG::RecoveryState::RepWaitBackfillReserved::RepWaitBackfillReserved(my_context ctx) +PG::PeeringState::RepWaitBackfillReserved::RepWaitBackfillReserved(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Started/ReplicaActive/RepWaitBackfillReserved") + NamedState(context< PeeringMachine >().pg, "Started/ReplicaActive/RepWaitBackfillReserved") { - context< RecoveryMachine >().log_enter(state_name); + context< PeeringMachine >().log_enter(state_name); } boost::statechart::result -PG::RecoveryState::RepNotRecovering::react(const RequestBackfillPrio &evt) +PG::PeeringState::RepNotRecovering::react(const RequestBackfillPrio &evt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; // Use tentative_bacfill_full() to make sure enough // space is available to handle target bytes from primary. 
@@ -7964,9 +7964,9 @@ PG::RecoveryState::RepNotRecovering::react(const RequestBackfillPrio &evt) } boost::statechart::result -PG::RecoveryState::RepNotRecovering::react(const RequestRecoveryPrio &evt) +PG::PeeringState::RepNotRecovering::react(const RequestRecoveryPrio &evt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; // fall back to a local reckoning of priority of primary doesn't pass one // (pre-mimic compat) @@ -7990,18 +7990,18 @@ PG::RecoveryState::RepNotRecovering::react(const RequestRecoveryPrio &evt) return transit(); } -void PG::RecoveryState::RepWaitBackfillReserved::exit() +void PG::PeeringState::RepWaitBackfillReserved::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_repwaitbackfillreserved_latency, dur); } boost::statechart::result -PG::RecoveryState::RepWaitBackfillReserved::react(const RemoteBackfillReserved &evt) +PG::PeeringState::RepWaitBackfillReserved::react(const RemoteBackfillReserved &evt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; pg->osd->send_message_osd_cluster( pg->primary.osd, @@ -8014,47 +8014,47 @@ PG::RecoveryState::RepWaitBackfillReserved::react(const RemoteBackfillReserved & } boost::statechart::result -PG::RecoveryState::RepWaitBackfillReserved::react( +PG::PeeringState::RepWaitBackfillReserved::react( const RejectRemoteReservation &evt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; pg->reject_reservation(); post_event(RemoteReservationRejected()); return discard_event(); } boost::statechart::result -PG::RecoveryState::RepWaitBackfillReserved::react( +PG::PeeringState::RepWaitBackfillReserved::react( const RemoteReservationRejected &evt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; pg->clear_reserved_num_bytes(); pg->osd->remote_reserver.cancel_reservation(pg->info.pgid); return transit(); } boost::statechart::result -PG::RecoveryState::RepWaitBackfillReserved::react( +PG::PeeringState::RepWaitBackfillReserved::react( const RemoteReservationCanceled &evt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; pg->clear_reserved_num_bytes(); pg->osd->remote_reserver.cancel_reservation(pg->info.pgid); return transit(); } /*---RepRecovering-------*/ -PG::RecoveryState::RepRecovering::RepRecovering(my_context ctx) +PG::PeeringState::RepRecovering::RepRecovering(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Started/ReplicaActive/RepRecovering") + NamedState(context< PeeringMachine >().pg, "Started/ReplicaActive/RepRecovering") { - context< RecoveryMachine >().log_enter(state_name); + context< PeeringMachine >().log_enter(state_name); } boost::statechart::result -PG::RecoveryState::RepRecovering::react(const RemoteRecoveryPreempted &) +PG::PeeringState::RepRecovering::react(const RemoteRecoveryPreempted &) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; pg->clear_reserved_num_bytes(); pg->osd->send_message_osd_cluster( pg->primary.osd, @@ -8067,9 +8067,9 @@ PG::RecoveryState::RepRecovering::react(const RemoteRecoveryPreempted &) } boost::statechart::result -PG::RecoveryState::RepRecovering::react(const BackfillTooFull &) 
+PG::PeeringState::RepRecovering::react(const BackfillTooFull &) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; pg->clear_reserved_num_bytes(); pg->osd->send_message_osd_cluster( pg->primary.osd, @@ -8082,9 +8082,9 @@ PG::RecoveryState::RepRecovering::react(const BackfillTooFull &) } boost::statechart::result -PG::RecoveryState::RepRecovering::react(const RemoteBackfillPreempted &) +PG::PeeringState::RepRecovering::react(const RemoteBackfillPreempted &) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; pg->clear_reserved_num_bytes(); pg->osd->send_message_osd_cluster( pg->primary.osd, @@ -8096,10 +8096,10 @@ PG::RecoveryState::RepRecovering::react(const RemoteBackfillPreempted &) return discard_event(); } -void PG::RecoveryState::RepRecovering::exit() +void PG::PeeringState::RepRecovering::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; pg->clear_reserved_num_bytes(); pg->osd->remote_reserver.cancel_reservation(pg->info.pgid); utime_t dur = ceph_clock_now() - enter_time; @@ -8107,27 +8107,27 @@ void PG::RecoveryState::RepRecovering::exit() } /*------Activating--------*/ -PG::RecoveryState::Activating::Activating(my_context ctx) +PG::PeeringState::Activating::Activating(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Started/Primary/Active/Activating") + NamedState(context< PeeringMachine >().pg, "Started/Primary/Active/Activating") { - context< RecoveryMachine >().log_enter(state_name); + context< PeeringMachine >().log_enter(state_name); } -void PG::RecoveryState::Activating::exit() +void PG::PeeringState::Activating::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_activating_latency, dur); } -PG::RecoveryState::WaitLocalRecoveryReserved::WaitLocalRecoveryReserved(my_context ctx) +PG::PeeringState::WaitLocalRecoveryReserved::WaitLocalRecoveryReserved(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Started/Primary/Active/WaitLocalRecoveryReserved") + NamedState(context< PeeringMachine >().pg, "Started/Primary/Active/WaitLocalRecoveryReserved") { - context< RecoveryMachine >().log_enter(state_name); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_enter(state_name); + PG *pg = context< PeeringMachine >().pg; // Make sure all nodes that part of the recovery aren't full if (!pg->cct->_conf->osd_debug_skip_full_check_in_recovery && @@ -8151,34 +8151,34 @@ PG::RecoveryState::WaitLocalRecoveryReserved::WaitLocalRecoveryReserved(my_conte } boost::statechart::result -PG::RecoveryState::WaitLocalRecoveryReserved::react(const RecoveryTooFull &evt) +PG::PeeringState::WaitLocalRecoveryReserved::react(const RecoveryTooFull &evt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; pg->state_set(PG_STATE_RECOVERY_TOOFULL); pg->schedule_recovery_retry(pg->cct->_conf->osd_recovery_retry_interval); return transit(); } -void PG::RecoveryState::WaitLocalRecoveryReserved::exit() +void PG::PeeringState::WaitLocalRecoveryReserved::exit() { - context< RecoveryMachine 
>().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_waitlocalrecoveryreserved_latency, dur); } -PG::RecoveryState::WaitRemoteRecoveryReserved::WaitRemoteRecoveryReserved(my_context ctx) +PG::PeeringState::WaitRemoteRecoveryReserved::WaitRemoteRecoveryReserved(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Started/Primary/Active/WaitRemoteRecoveryReserved"), + NamedState(context< PeeringMachine >().pg, "Started/Primary/Active/WaitRemoteRecoveryReserved"), remote_recovery_reservation_it(context< Active >().remote_shards_to_reserve_recovery.begin()) { - context< RecoveryMachine >().log_enter(state_name); + context< PeeringMachine >().log_enter(state_name); post_event(RemoteRecoveryReserved()); } boost::statechart::result -PG::RecoveryState::WaitRemoteRecoveryReserved::react(const RemoteRecoveryReserved &evt) { - PG *pg = context< RecoveryMachine >().pg; +PG::PeeringState::WaitRemoteRecoveryReserved::react(const RemoteRecoveryReserved &evt) { + PG *pg = context< PeeringMachine >().pg; if (remote_recovery_reservation_it != context< Active >().remote_shards_to_reserve_recovery.end()) { ceph_assert(*remote_recovery_reservation_it != pg->pg_whoami); @@ -8200,21 +8200,21 @@ PG::RecoveryState::WaitRemoteRecoveryReserved::react(const RemoteRecoveryReserve return discard_event(); } -void PG::RecoveryState::WaitRemoteRecoveryReserved::exit() +void PG::PeeringState::WaitRemoteRecoveryReserved::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_waitremoterecoveryreserved_latency, dur); } -PG::RecoveryState::Recovering::Recovering(my_context ctx) +PG::PeeringState::Recovering::Recovering(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Started/Primary/Active/Recovering") + NamedState(context< PeeringMachine >().pg, "Started/Primary/Active/Recovering") { - context< RecoveryMachine >().log_enter(state_name); + context< PeeringMachine >().log_enter(state_name); - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; pg->state_clear(PG_STATE_RECOVERY_WAIT); pg->state_clear(PG_STATE_RECOVERY_TOOFULL); pg->state_set(PG_STATE_RECOVERING); @@ -8223,9 +8223,9 @@ PG::RecoveryState::Recovering::Recovering(my_context ctx) pg->queue_recovery(); } -void PG::RecoveryState::Recovering::release_reservations(bool cancel) +void PG::PeeringState::Recovering::release_reservations(bool cancel) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ceph_assert(cancel || !pg->pg_log.get_missing().have_missing()); // release remote reservations @@ -8249,9 +8249,9 @@ void PG::RecoveryState::Recovering::release_reservations(bool cancel) } boost::statechart::result -PG::RecoveryState::Recovering::react(const AllReplicasRecovered &evt) +PG::PeeringState::Recovering::react(const AllReplicasRecovered &evt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; pg->state_clear(PG_STATE_FORCED_RECOVERY); release_reservations(); pg->osd->local_reserver.cancel_reservation(pg->info.pgid); @@ -8259,9 +8259,9 @@ 
PG::RecoveryState::Recovering::react(const AllReplicasRecovered &evt) } boost::statechart::result -PG::RecoveryState::Recovering::react(const RequestBackfill &evt) +PG::PeeringState::Recovering::react(const RequestBackfill &evt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; pg->state_clear(PG_STATE_FORCED_RECOVERY); release_reservations(); pg->osd->local_reserver.cancel_reservation(pg->info.pgid); @@ -8271,9 +8271,9 @@ PG::RecoveryState::Recovering::react(const RequestBackfill &evt) } boost::statechart::result -PG::RecoveryState::Recovering::react(const DeferRecovery &evt) +PG::PeeringState::Recovering::react(const DeferRecovery &evt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; if (!pg->state_test(PG_STATE_RECOVERING)) { // we may have finished recovery and have an AllReplicasRecovered // event queued to move us to the next state. @@ -8289,9 +8289,9 @@ PG::RecoveryState::Recovering::react(const DeferRecovery &evt) } boost::statechart::result -PG::RecoveryState::Recovering::react(const UnfoundRecovery &evt) +PG::PeeringState::Recovering::react(const UnfoundRecovery &evt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ldout(pg->cct, 10) << "recovery has unfound, can't continue" << dendl; pg->state_set(PG_STATE_RECOVERY_UNFOUND); pg->osd->local_reserver.cancel_reservation(pg->info.pgid); @@ -8299,24 +8299,24 @@ PG::RecoveryState::Recovering::react(const UnfoundRecovery &evt) return transit(); } -void PG::RecoveryState::Recovering::exit() +void PG::PeeringState::Recovering::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; utime_t dur = ceph_clock_now() - enter_time; pg->state_clear(PG_STATE_RECOVERING); pg->osd->recoverystate_perf->tinc(rs_recovering_latency, dur); } -PG::RecoveryState::Recovered::Recovered(my_context ctx) +PG::PeeringState::Recovered::Recovered(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Started/Primary/Active/Recovered") + NamedState(context< PeeringMachine >().pg, "Started/Primary/Active/Recovered") { pg_shard_t auth_log_shard; - context< RecoveryMachine >().log_enter(state_name); + context< PeeringMachine >().log_enter(state_name); - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ceph_assert(!pg->needs_recovery()); @@ -8343,35 +8343,35 @@ PG::RecoveryState::Recovered::Recovered(my_context ctx) post_event(GoClean()); } -void PG::RecoveryState::Recovered::exit() +void PG::PeeringState::Recovered::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_recovered_latency, dur); } -PG::RecoveryState::Clean::Clean(my_context ctx) +PG::PeeringState::Clean::Clean(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Started/Primary/Active/Clean") + NamedState(context< PeeringMachine >().pg, "Started/Primary/Active/Clean") { - context< RecoveryMachine >().log_enter(state_name); + context< PeeringMachine >().log_enter(state_name); - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; if 
(pg->info.last_complete != pg->info.last_update) { ceph_abort(); } Context *c = pg->finish_recovery(); - context< RecoveryMachine >().get_cur_transaction()->register_on_commit(c); + context< PeeringMachine >().get_cur_transaction()->register_on_commit(c); pg->try_mark_clean(); } -void PG::RecoveryState::Clean::exit() +void PG::PeeringState::Clean::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; pg->state_clear(PG_STATE_CLEAN); utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_clean_latency, dur); @@ -8394,33 +8394,33 @@ set unique_osd_shard_set(const pg_shard_t & skip, const T &in) } /*---------Active---------*/ -PG::RecoveryState::Active::Active(my_context ctx) +PG::PeeringState::Active::Active(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Started/Primary/Active"), + NamedState(context< PeeringMachine >().pg, "Started/Primary/Active"), remote_shards_to_reserve_recovery( unique_osd_shard_set( - context< RecoveryMachine >().pg->pg_whoami, - context< RecoveryMachine >().pg->acting_recovery_backfill)), + context< PeeringMachine >().pg->pg_whoami, + context< PeeringMachine >().pg->acting_recovery_backfill)), remote_shards_to_reserve_backfill( unique_osd_shard_set( - context< RecoveryMachine >().pg->pg_whoami, - context< RecoveryMachine >().pg->backfill_targets)), + context< PeeringMachine >().pg->pg_whoami, + context< PeeringMachine >().pg->backfill_targets)), all_replicas_activated(false) { - context< RecoveryMachine >().log_enter(state_name); + context< PeeringMachine >().log_enter(state_name); - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ceph_assert(!pg->backfill_reserving); ceph_assert(!pg->backfill_reserved); ceph_assert(pg->is_primary()); ldout(pg->cct, 10) << "In Active, about to call activate" << dendl; - pg->start_flush(context< RecoveryMachine >().get_cur_transaction()); - pg->activate(*context< RecoveryMachine >().get_cur_transaction(), + pg->start_flush(context< PeeringMachine >().get_cur_transaction()); + pg->activate(*context< PeeringMachine >().get_cur_transaction(), pg->get_osdmap_epoch(), - *context< RecoveryMachine >().get_query_map(), - context< RecoveryMachine >().get_info_map(), - context< RecoveryMachine >().get_recovery_ctx()); + *context< PeeringMachine >().get_query_map(), + context< PeeringMachine >().get_info_map(), + context< PeeringMachine >().get_recovery_ctx()); // everyone has to commit/ack before we are truly active pg->blocked_by.clear(); @@ -8435,9 +8435,9 @@ PG::RecoveryState::Active::Active(my_context ctx) ldout(pg->cct, 10) << "Activate Finished" << dendl; } -boost::statechart::result PG::RecoveryState::Active::react(const AdvMap& advmap) +boost::statechart::result PG::PeeringState::Active::react(const AdvMap& advmap) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; if (pg->should_restart_peering( advmap.up_primary, advmap.acting_primary, @@ -8572,15 +8572,15 @@ boost::statechart::result PG::RecoveryState::Active::react(const AdvMap& advmap) return forward_event(); } -boost::statechart::result PG::RecoveryState::Active::react(const ActMap&) +boost::statechart::result PG::PeeringState::Active::react(const ActMap&) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ldout(pg->cct, 10) << "Active: 
handling ActMap" << dendl; ceph_assert(pg->is_primary()); if (pg->have_unfound()) { // object may have become unfound - pg->discover_all_missing(*context< RecoveryMachine >().get_query_map()); + pg->discover_all_missing(*context< PeeringMachine >().get_query_map()); } if (pg->cct->_conf->osd_check_for_log_corruption) @@ -8613,9 +8613,9 @@ boost::statechart::result PG::RecoveryState::Active::react(const ActMap&) return forward_event(); } -boost::statechart::result PG::RecoveryState::Active::react(const MNotifyRec& notevt) +boost::statechart::result PG::PeeringState::Active::react(const MNotifyRec& notevt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ceph_assert(pg->is_primary()); if (pg->peer_info.count(notevt.from)) { ldout(pg->cct, 10) << "Active: got notify from " << notevt.from @@ -8632,15 +8632,15 @@ boost::statechart::result PG::RecoveryState::Active::react(const MNotifyRec& not pg->proc_replica_info( notevt.from, notevt.notify.info, notevt.notify.epoch_sent); if (pg->have_unfound() || (pg->is_degraded() && pg->might_have_unfound.count(notevt.from))) { - pg->discover_all_missing(*context< RecoveryMachine >().get_query_map()); + pg->discover_all_missing(*context< PeeringMachine >().get_query_map()); } } return discard_event(); } -boost::statechart::result PG::RecoveryState::Active::react(const MTrim& trim) +boost::statechart::result PG::PeeringState::Active::react(const MTrim& trim) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ceph_assert(pg->is_primary()); // peer is informing us of their last_complete_ondisk @@ -8652,9 +8652,9 @@ boost::statechart::result PG::RecoveryState::Active::react(const MTrim& trim) return discard_event(); } -boost::statechart::result PG::RecoveryState::Active::react(const MInfoRec& infoevt) +boost::statechart::result PG::PeeringState::Active::react(const MInfoRec& infoevt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ceph_assert(pg->is_primary()); ceph_assert(!pg->acting_recovery_backfill.empty()); @@ -8675,9 +8675,9 @@ boost::statechart::result PG::RecoveryState::Active::react(const MInfoRec& infoe return discard_event(); } -boost::statechart::result PG::RecoveryState::Active::react(const MLogRec& logevt) +boost::statechart::result PG::PeeringState::Active::react(const MLogRec& logevt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ldout(pg->cct, 10) << "searching osd." 
<< logevt.from << " log for unfound items" << dendl; pg->proc_replica_log( @@ -8686,7 +8686,7 @@ boost::statechart::result PG::RecoveryState::Active::react(const MLogRec& logevt pg->peer_info[logevt.from], pg->peer_missing[logevt.from], logevt.from, - context< RecoveryMachine >().get_recovery_ctx()); + context< PeeringMachine >().get_recovery_ctx()); // If there are missing AND we are "fully" active then start recovery now if (got_missing && pg->state_test(PG_STATE_ACTIVE)) { post_event(DoRecovery()); @@ -8694,9 +8694,9 @@ boost::statechart::result PG::RecoveryState::Active::react(const MLogRec& logevt return discard_event(); } -boost::statechart::result PG::RecoveryState::Active::react(const QueryState& q) +boost::statechart::result PG::PeeringState::Active::react(const QueryState& q) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; q.f->open_object_section("state"); q.f->dump_string("name", state_name); @@ -8754,9 +8754,9 @@ boost::statechart::result PG::RecoveryState::Active::react(const QueryState& q) return forward_event(); } -boost::statechart::result PG::RecoveryState::Active::react(const AllReplicasActivated &evt) +boost::statechart::result PG::PeeringState::Active::react(const AllReplicasActivated &evt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; pg_t pgid = pg->info.pgid.pgid; all_replicas_activated = true; @@ -8816,10 +8816,10 @@ boost::statechart::result PG::RecoveryState::Active::react(const AllReplicasActi return discard_event(); } -void PG::RecoveryState::Active::exit() +void PG::PeeringState::Active::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; pg->osd->local_reserver.cancel_reservation(pg->info.pgid); pg->blocked_by.clear(); @@ -8838,62 +8838,62 @@ void PG::RecoveryState::Active::exit() } /*------ReplicaActive-----*/ -PG::RecoveryState::ReplicaActive::ReplicaActive(my_context ctx) +PG::PeeringState::ReplicaActive::ReplicaActive(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Started/ReplicaActive") + NamedState(context< PeeringMachine >().pg, "Started/ReplicaActive") { - context< RecoveryMachine >().log_enter(state_name); + context< PeeringMachine >().log_enter(state_name); - PG *pg = context< RecoveryMachine >().pg; - pg->start_flush(context< RecoveryMachine >().get_cur_transaction()); + PG *pg = context< PeeringMachine >().pg; + pg->start_flush(context< PeeringMachine >().get_cur_transaction()); } -boost::statechart::result PG::RecoveryState::ReplicaActive::react( +boost::statechart::result PG::PeeringState::ReplicaActive::react( const Activate& actevt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ldout(pg->cct, 10) << "In ReplicaActive, about to call activate" << dendl; map > query_map; - pg->activate(*context< RecoveryMachine >().get_cur_transaction(), + pg->activate(*context< PeeringMachine >().get_cur_transaction(), actevt.activation_epoch, query_map, NULL, NULL); ldout(pg->cct, 10) << "Activate Finished" << dendl; return discard_event(); } -boost::statechart::result PG::RecoveryState::ReplicaActive::react(const MInfoRec& infoevt) +boost::statechart::result PG::PeeringState::ReplicaActive::react(const MInfoRec& infoevt) { - PG *pg = context< RecoveryMachine >().pg; - pg->proc_primary_info(*context().get_cur_transaction(), + PG *pg = 
context< PeeringMachine >().pg;
+  pg->proc_primary_info(*context<PeeringMachine>().get_cur_transaction(),
 			infoevt.info);
   return discard_event();
 }
 
-boost::statechart::result PG::RecoveryState::ReplicaActive::react(const MLogRec& logevt)
+boost::statechart::result PG::PeeringState::ReplicaActive::react(const MLogRec& logevt)
 {
-  PG *pg = context< RecoveryMachine >().pg;
+  PG *pg = context< PeeringMachine >().pg;
   ldout(pg->cct, 10) << "received log from " << logevt.from << dendl;
-  ObjectStore::Transaction* t = context<RecoveryMachine>().get_cur_transaction();
+  ObjectStore::Transaction* t = context<PeeringMachine>().get_cur_transaction();
   pg->merge_log(*t, logevt.msg->info, logevt.msg->log, logevt.from);
   ceph_assert(pg->pg_log.get_head() == pg->info.last_update);
   return discard_event();
 }
 
-boost::statechart::result PG::RecoveryState::ReplicaActive::react(const MTrim& trim)
+boost::statechart::result PG::PeeringState::ReplicaActive::react(const MTrim& trim)
 {
-  PG *pg = context< RecoveryMachine >().pg;
+  PG *pg = context< PeeringMachine >().pg;
   // primary is instructing us to trim
   pg->pg_log.trim(trim.trim_to, pg->info);
   pg->dirty_info = true;
   return discard_event();
 }
 
-boost::statechart::result PG::RecoveryState::ReplicaActive::react(const ActMap&)
+boost::statechart::result PG::PeeringState::ReplicaActive::react(const ActMap&)
 {
-  PG *pg = context< RecoveryMachine >().pg;
+  PG *pg = context< PeeringMachine >().pg;
   if (pg->should_send_notify() && pg->get_primary().osd >= 0) {
-    context< RecoveryMachine >().send_notify(
+    context< PeeringMachine >().send_notify(
       pg->get_primary(),
       pg_notify_t(
 	pg->get_primary().shard, pg->pg_whoami.shard,
@@ -8906,15 +8906,15 @@ boost::statechart::result PG::RecoveryState::ReplicaActive::react(const ActMap&)
   return discard_event();
 }
 
-boost::statechart::result PG::RecoveryState::ReplicaActive::react(
+boost::statechart::result PG::PeeringState::ReplicaActive::react(
   const MQuery& query)
 {
-  PG *pg = context< RecoveryMachine >().pg;
-  pg->fulfill_query(query, context<RecoveryMachine>().get_recovery_ctx());
+  PG *pg = context< PeeringMachine >().pg;
+  pg->fulfill_query(query, context<PeeringMachine>().get_recovery_ctx());
   return discard_event();
 }
 
-boost::statechart::result PG::RecoveryState::ReplicaActive::react(const QueryState& q)
+boost::statechart::result PG::PeeringState::ReplicaActive::react(const QueryState& q)
 {
   q.f->open_object_section("state");
   q.f->dump_string("name", state_name);
@@ -8923,10 +8923,10 @@ boost::statechart::result PG::RecoveryState::ReplicaActive::react(const QuerySta
   return forward_event();
 }
 
-void PG::RecoveryState::ReplicaActive::exit()
+void PG::PeeringState::ReplicaActive::exit()
 {
-  context< RecoveryMachine >().log_exit(state_name, enter_time);
-  PG *pg = context< RecoveryMachine >().pg;
+  context< PeeringMachine >().log_exit(state_name, enter_time);
+  PG *pg = context< PeeringMachine >().pg;
   pg->clear_reserved_num_bytes();
   pg->osd->remote_reserver.cancel_reservation(pg->info.pgid);
   utime_t dur = ceph_clock_now() - enter_time;
@@ -8934,13 +8934,13 @@ void PG::RecoveryState::ReplicaActive::exit()
 }
 
 /*-------Stray---*/
-PG::RecoveryState::Stray::Stray(my_context ctx)
+PG::PeeringState::Stray::Stray(my_context ctx)
   : my_base(ctx),
-    NamedState(context< RecoveryMachine >().pg, "Started/Stray")
+    NamedState(context< PeeringMachine >().pg, "Started/Stray")
 {
-  context< RecoveryMachine >().log_enter(state_name);
+  context< PeeringMachine >().log_enter(state_name);
 
-  PG *pg = context< RecoveryMachine >().pg;
+  PG *pg = context< PeeringMachine >().pg;
   ceph_assert(!pg->is_peered());
   ceph_assert(!pg->is_peering());
ceph_assert(!pg->is_primary()); @@ -8949,17 +8949,17 @@ PG::RecoveryState::Stray::Stray(my_context ctx) ldout(pg->cct,10) << __func__ << " pool is deleted" << dendl; post_event(DeleteStart()); } else { - pg->start_flush(context< RecoveryMachine >().get_cur_transaction()); + pg->start_flush(context< PeeringMachine >().get_cur_transaction()); } } -boost::statechart::result PG::RecoveryState::Stray::react(const MLogRec& logevt) +boost::statechart::result PG::PeeringState::Stray::react(const MLogRec& logevt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; MOSDPGLog *msg = logevt.msg.get(); ldout(pg->cct, 10) << "got info+log from osd." << logevt.from << " " << msg->info << " " << msg->log << dendl; - ObjectStore::Transaction* t = context().get_cur_transaction(); + ObjectStore::Transaction* t = context().get_cur_transaction(); if (msg->info.last_backfill == hobject_t()) { // restart backfill pg->unreg_next_scrub(); @@ -8982,14 +8982,14 @@ boost::statechart::result PG::RecoveryState::Stray::react(const MLogRec& logevt) return transit(); } -boost::statechart::result PG::RecoveryState::Stray::react(const MInfoRec& infoevt) +boost::statechart::result PG::PeeringState::Stray::react(const MInfoRec& infoevt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ldout(pg->cct, 10) << "got info from osd." << infoevt.from << " " << infoevt.info << dendl; if (pg->info.last_update > infoevt.info.last_update) { // rewind divergent log entries - ObjectStore::Transaction* t = context().get_cur_transaction(); + ObjectStore::Transaction* t = context().get_cur_transaction(); pg->rewind_divergent_log(*t, infoevt.info.last_update); pg->info.stats = infoevt.info.stats; pg->info.hit_set = infoevt.info.hit_set; @@ -9002,18 +9002,18 @@ boost::statechart::result PG::RecoveryState::Stray::react(const MInfoRec& infoev return transit(); } -boost::statechart::result PG::RecoveryState::Stray::react(const MQuery& query) +boost::statechart::result PG::PeeringState::Stray::react(const MQuery& query) { - PG *pg = context< RecoveryMachine >().pg; - pg->fulfill_query(query, context().get_recovery_ctx()); + PG *pg = context< PeeringMachine >().pg; + pg->fulfill_query(query, context().get_recovery_ctx()); return discard_event(); } -boost::statechart::result PG::RecoveryState::Stray::react(const ActMap&) +boost::statechart::result PG::PeeringState::Stray::react(const ActMap&) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; if (pg->should_send_notify() && pg->get_primary().osd >= 0) { - context< RecoveryMachine >().send_notify( + context< PeeringMachine >().send_notify( pg->get_primary(), pg_notify_t( pg->get_primary().shard, pg->pg_whoami.shard, @@ -9026,29 +9026,29 @@ boost::statechart::result PG::RecoveryState::Stray::react(const ActMap&) return discard_event(); } -void PG::RecoveryState::Stray::exit() +void PG::PeeringState::Stray::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_stray_latency, dur); } /*--------ToDelete----------*/ -PG::RecoveryState::ToDelete::ToDelete(my_context ctx) +PG::PeeringState::ToDelete::ToDelete(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Started/ToDelete") + NamedState(context< PeeringMachine 
>().pg, "Started/ToDelete") { - context< RecoveryMachine >().log_enter(state_name); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_enter(state_name); + PG *pg = context< PeeringMachine >().pg; pg->osd->logger->inc(l_osd_pg_removing); } -void PG::RecoveryState::ToDelete::exit() +void PG::PeeringState::ToDelete::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; // note: on a successful removal, this path doesn't execute. see // _delete_some(). pg->osd->logger->dec(l_osd_pg_removing); @@ -9056,13 +9056,13 @@ void PG::RecoveryState::ToDelete::exit() } /*----WaitDeleteReserved----*/ -PG::RecoveryState::WaitDeleteReserved::WaitDeleteReserved(my_context ctx) +PG::PeeringState::WaitDeleteReserved::WaitDeleteReserved(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, + NamedState(context< PeeringMachine >().pg, "Started/ToDelete/WaitDeleteReseved") { - context< RecoveryMachine >().log_enter(state_name); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_enter(state_name); + PG *pg = context< PeeringMachine >().pg; context().priority = pg->get_delete_priority(); pg->osd->local_reserver.cancel_reservation(pg->info.pgid); pg->osd->local_reserver.request_reservation( @@ -9076,10 +9076,10 @@ PG::RecoveryState::WaitDeleteReserved::WaitDeleteReserved(my_context ctx) DeleteInterrupted())); } -boost::statechart::result PG::RecoveryState::ToDelete::react( +boost::statechart::result PG::PeeringState::ToDelete::react( const ActMap& evt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; if (pg->get_delete_priority() != priority) { ldout(pg->cct,10) << __func__ << " delete priority changed, resetting" << dendl; @@ -9088,48 +9088,48 @@ boost::statechart::result PG::RecoveryState::ToDelete::react( return discard_event(); } -void PG::RecoveryState::WaitDeleteReserved::exit() +void PG::PeeringState::WaitDeleteReserved::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); + context< PeeringMachine >().log_exit(state_name, enter_time); } /*----Deleting-----*/ -PG::RecoveryState::Deleting::Deleting(my_context ctx) +PG::PeeringState::Deleting::Deleting(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Started/ToDelete/Deleting") + NamedState(context< PeeringMachine >().pg, "Started/ToDelete/Deleting") { - context< RecoveryMachine >().log_enter(state_name); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_enter(state_name); + PG *pg = context< PeeringMachine >().pg; pg->deleting = true; - ObjectStore::Transaction* t = context().get_cur_transaction(); + ObjectStore::Transaction* t = context().get_cur_transaction(); pg->on_removal(t); t->register_on_commit(new C_DeleteMore(pg, pg->get_osdmap_epoch())); } -boost::statechart::result PG::RecoveryState::Deleting::react( +boost::statechart::result PG::PeeringState::Deleting::react( const DeleteSome& evt) { - PG *pg = context< RecoveryMachine >().pg; - pg->_delete_some(context().get_cur_transaction()); + PG *pg = context< PeeringMachine >().pg; + pg->_delete_some(context().get_cur_transaction()); return discard_event(); } -void PG::RecoveryState::Deleting::exit() +void PG::PeeringState::Deleting::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine 
>().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; pg->deleting = false; pg->osd->local_reserver.cancel_reservation(pg->info.pgid); } /*--------GetInfo---------*/ -PG::RecoveryState::GetInfo::GetInfo(my_context ctx) +PG::PeeringState::GetInfo::GetInfo(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Started/Primary/Peering/GetInfo") + NamedState(context< PeeringMachine >().pg, "Started/Primary/Peering/GetInfo") { - context< RecoveryMachine >().log_enter(state_name); + context< PeeringMachine >().log_enter(state_name); - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; pg->check_past_interval_bounds(); PastIntervals::PriorSet &prior_set = context< Peering >().prior_set; @@ -9146,9 +9146,9 @@ PG::RecoveryState::GetInfo::GetInfo(my_context ctx) } } -void PG::RecoveryState::GetInfo::get_infos() +void PG::PeeringState::GetInfo::get_infos() { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; PastIntervals::PriorSet &prior_set = context< Peering >().prior_set; pg->blocked_by.clear(); @@ -9170,7 +9170,7 @@ void PG::RecoveryState::GetInfo::get_infos() ldout(pg->cct, 10) << " not querying info from down osd." << peer << dendl; } else { ldout(pg->cct, 10) << " querying info from osd." << peer << dendl; - context< RecoveryMachine >().send_query( + context< PeeringMachine >().send_query( peer, pg_query_t(pg_query_t::INFO, it->shard, pg->pg_whoami.shard, pg->info.history, @@ -9183,9 +9183,9 @@ void PG::RecoveryState::GetInfo::get_infos() pg->publish_stats_to_osd(); } -boost::statechart::result PG::RecoveryState::GetInfo::react(const MNotifyRec& infoevt) +boost::statechart::result PG::PeeringState::GetInfo::react(const MNotifyRec& infoevt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; set::iterator p = peer_info_requested.find(infoevt.from); if (p != peer_info_requested.end()) { @@ -9231,9 +9231,9 @@ boost::statechart::result PG::RecoveryState::GetInfo::react(const MNotifyRec& in return discard_event(); } -boost::statechart::result PG::RecoveryState::GetInfo::react(const QueryState& q) +boost::statechart::result PG::PeeringState::GetInfo::react(const QueryState& q) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; q.f->open_object_section("state"); q.f->dump_string("name", state_name); q.f->dump_stream("enter_time") << enter_time; @@ -9257,25 +9257,25 @@ boost::statechart::result PG::RecoveryState::GetInfo::react(const QueryState& q) return forward_event(); } -void PG::RecoveryState::GetInfo::exit() +void PG::PeeringState::GetInfo::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_getinfo_latency, dur); pg->blocked_by.clear(); } /*------GetLog------------*/ -PG::RecoveryState::GetLog::GetLog(my_context ctx) +PG::PeeringState::GetLog::GetLog(my_context ctx) : my_base(ctx), NamedState( - context< RecoveryMachine >().pg, "Started/Primary/Peering/GetLog"), + context< PeeringMachine >().pg, "Started/Primary/Peering/GetLog"), msg(0) { - context< RecoveryMachine >().log_enter(state_name); + context< PeeringMachine >().log_enter(state_name); - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< 
PeeringMachine >().pg; // adjust acting? if (!pg->choose_acting(auth_log_shard, false, @@ -9318,7 +9318,7 @@ PG::RecoveryState::GetLog::GetLog(my_context ctx) // how much? ldout(pg->cct, 10) << " requesting log from osd." << auth_log_shard << dendl; - context().send_query( + context().send_query( auth_log_shard, pg_query_t( pg_query_t::LOG, @@ -9331,9 +9331,9 @@ PG::RecoveryState::GetLog::GetLog(my_context ctx) pg->publish_stats_to_osd(); } -boost::statechart::result PG::RecoveryState::GetLog::react(const AdvMap& advmap) +boost::statechart::result PG::PeeringState::GetLog::react(const AdvMap& advmap) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; // make sure our log source didn't go down. we need to check // explicitly because it may not be part of the prior set, which // means the Peering state check won't catch it going down. @@ -9348,9 +9348,9 @@ boost::statechart::result PG::RecoveryState::GetLog::react(const AdvMap& advmap) return forward_event(); } -boost::statechart::result PG::RecoveryState::GetLog::react(const MLogRec& logevt) +boost::statechart::result PG::PeeringState::GetLog::react(const MLogRec& logevt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ceph_assert(!msg); if (logevt.from != auth_log_shard) { ldout(pg->cct, 10) << "GetLog: discarding log from " @@ -9364,21 +9364,21 @@ boost::statechart::result PG::RecoveryState::GetLog::react(const MLogRec& logevt return discard_event(); } -boost::statechart::result PG::RecoveryState::GetLog::react(const GotLog&) +boost::statechart::result PG::PeeringState::GetLog::react(const GotLog&) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ldout(pg->cct, 10) << "leaving GetLog" << dendl; if (msg) { ldout(pg->cct, 10) << "processing master log" << dendl; - pg->proc_master_log(*context().get_cur_transaction(), + pg->proc_master_log(*context().get_cur_transaction(), msg->info, msg->log, msg->missing, auth_log_shard); } - pg->start_flush(context< RecoveryMachine >().get_cur_transaction()); + pg->start_flush(context< PeeringMachine >().get_cur_transaction()); return transit< GetMissing >(); } -boost::statechart::result PG::RecoveryState::GetLog::react(const QueryState& q) +boost::statechart::result PG::PeeringState::GetLog::react(const QueryState& q) { q.f->open_object_section("state"); q.f->dump_string("name", state_name); @@ -9388,26 +9388,26 @@ boost::statechart::result PG::RecoveryState::GetLog::react(const QueryState& q) return forward_event(); } -void PG::RecoveryState::GetLog::exit() +void PG::PeeringState::GetLog::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_getlog_latency, dur); pg->blocked_by.clear(); } /*------WaitActingChange--------*/ -PG::RecoveryState::WaitActingChange::WaitActingChange(my_context ctx) +PG::PeeringState::WaitActingChange::WaitActingChange(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Started/Primary/WaitActingChange") + NamedState(context< PeeringMachine >().pg, "Started/Primary/WaitActingChange") { - context< RecoveryMachine >().log_enter(state_name); + context< PeeringMachine >().log_enter(state_name); } -boost::statechart::result PG::RecoveryState::WaitActingChange::react(const AdvMap& advmap) 
+boost::statechart::result PG::PeeringState::WaitActingChange::react(const AdvMap& advmap) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; OSDMapRef osdmap = advmap.osdmap; ldout(pg->cct, 10) << "verifying no want_acting " << pg->want_acting << " targets didn't go down" << dendl; @@ -9421,28 +9421,28 @@ boost::statechart::result PG::RecoveryState::WaitActingChange::react(const AdvMa return forward_event(); } -boost::statechart::result PG::RecoveryState::WaitActingChange::react(const MLogRec& logevt) +boost::statechart::result PG::PeeringState::WaitActingChange::react(const MLogRec& logevt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ldout(pg->cct, 10) << "In WaitActingChange, ignoring MLocRec" << dendl; return discard_event(); } -boost::statechart::result PG::RecoveryState::WaitActingChange::react(const MInfoRec& evt) +boost::statechart::result PG::PeeringState::WaitActingChange::react(const MInfoRec& evt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ldout(pg->cct, 10) << "In WaitActingChange, ignoring MInfoRec" << dendl; return discard_event(); } -boost::statechart::result PG::RecoveryState::WaitActingChange::react(const MNotifyRec& evt) +boost::statechart::result PG::PeeringState::WaitActingChange::react(const MNotifyRec& evt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ldout(pg->cct, 10) << "In WaitActingChange, ignoring MNotifyRec" << dendl; return discard_event(); } -boost::statechart::result PG::RecoveryState::WaitActingChange::react(const QueryState& q) +boost::statechart::result PG::PeeringState::WaitActingChange::react(const QueryState& q) { q.f->open_object_section("state"); q.f->dump_string("name", state_name); @@ -9452,21 +9452,21 @@ boost::statechart::result PG::RecoveryState::WaitActingChange::react(const Query return forward_event(); } -void PG::RecoveryState::WaitActingChange::exit() +void PG::PeeringState::WaitActingChange::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_waitactingchange_latency, dur); } /*------Down--------*/ -PG::RecoveryState::Down::Down(my_context ctx) +PG::PeeringState::Down::Down(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Started/Primary/Peering/Down") + NamedState(context< PeeringMachine >().pg, "Started/Primary/Peering/Down") { - context< RecoveryMachine >().log_enter(state_name); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_enter(state_name); + PG *pg = context< PeeringMachine >().pg; pg->state_clear(PG_STATE_PEERING); pg->state_set(PG_STATE_DOWN); @@ -9477,10 +9477,10 @@ PG::RecoveryState::Down::Down(my_context ctx) pg->publish_stats_to_osd(); } -void PG::RecoveryState::Down::exit() +void PG::PeeringState::Down::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; pg->state_clear(PG_STATE_DOWN); utime_t dur = ceph_clock_now() - enter_time; @@ -9489,7 +9489,7 @@ void PG::RecoveryState::Down::exit() pg->blocked_by.clear(); } -boost::statechart::result 
PG::RecoveryState::Down::react(const QueryState& q) +boost::statechart::result PG::PeeringState::Down::react(const QueryState& q) { q.f->open_object_section("state"); q.f->dump_string("name", state_name); @@ -9500,9 +9500,9 @@ boost::statechart::result PG::RecoveryState::Down::react(const QueryState& q) return forward_event(); } -boost::statechart::result PG::RecoveryState::Down::react(const MNotifyRec& infoevt) +boost::statechart::result PG::PeeringState::Down::react(const MNotifyRec& infoevt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ceph_assert(pg->is_primary()); epoch_t old_start = pg->info.history.last_epoch_started; @@ -9523,12 +9523,12 @@ boost::statechart::result PG::RecoveryState::Down::react(const MNotifyRec& infoe /*------Incomplete--------*/ -PG::RecoveryState::Incomplete::Incomplete(my_context ctx) +PG::PeeringState::Incomplete::Incomplete(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Started/Primary/Peering/Incomplete") + NamedState(context< PeeringMachine >().pg, "Started/Primary/Peering/Incomplete") { - context< RecoveryMachine >().log_enter(state_name); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_enter(state_name); + PG *pg = context< PeeringMachine >().pg; pg->state_clear(PG_STATE_PEERING); pg->state_set(PG_STATE_INCOMPLETE); @@ -9539,8 +9539,8 @@ PG::RecoveryState::Incomplete::Incomplete(my_context ctx) pg->publish_stats_to_osd(); } -boost::statechart::result PG::RecoveryState::Incomplete::react(const AdvMap &advmap) { - PG *pg = context< RecoveryMachine >().pg; +boost::statechart::result PG::PeeringState::Incomplete::react(const AdvMap &advmap) { + PG *pg = context< PeeringMachine >().pg; int64_t poolnum = pg->info.pgid.pool(); // Reset if min_size turn smaller than previous value, pg might now be able to go active @@ -9554,8 +9554,8 @@ boost::statechart::result PG::RecoveryState::Incomplete::react(const AdvMap &adv return forward_event(); } -boost::statechart::result PG::RecoveryState::Incomplete::react(const MNotifyRec& notevt) { - PG *pg = context< RecoveryMachine >().pg; +boost::statechart::result PG::PeeringState::Incomplete::react(const MNotifyRec& notevt) { + PG *pg = context< PeeringMachine >().pg; ldout(pg->cct, 7) << "handle_pg_notify from osd." 
<< notevt.from << dendl; if (pg->proc_replica_info( notevt.from, notevt.notify.info, notevt.notify.epoch_sent)) { @@ -9566,7 +9566,7 @@ boost::statechart::result PG::RecoveryState::Incomplete::react(const MNotifyRec& } } -boost::statechart::result PG::RecoveryState::Incomplete::react( +boost::statechart::result PG::PeeringState::Incomplete::react( const QueryState& q) { q.f->open_object_section("state"); @@ -9577,10 +9577,10 @@ boost::statechart::result PG::RecoveryState::Incomplete::react( return forward_event(); } -void PG::RecoveryState::Incomplete::exit() +void PG::PeeringState::Incomplete::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; pg->state_clear(PG_STATE_INCOMPLETE); utime_t dur = ceph_clock_now() - enter_time; @@ -9590,13 +9590,13 @@ void PG::RecoveryState::Incomplete::exit() } /*------GetMissing--------*/ -PG::RecoveryState::GetMissing::GetMissing(my_context ctx) +PG::PeeringState::GetMissing::GetMissing(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Started/Primary/Peering/GetMissing") + NamedState(context< PeeringMachine >().pg, "Started/Primary/Peering/GetMissing") { - context< RecoveryMachine >().log_enter(state_name); + context< PeeringMachine >().log_enter(state_name); - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ceph_assert(!pg->acting_recovery_backfill.empty()); eversion_t since; for (set::iterator i = pg->acting_recovery_backfill.begin(); @@ -9641,7 +9641,7 @@ PG::RecoveryState::GetMissing::GetMissing(my_context ctx) ceph_assert(pi.last_update >= pg->info.log_tail); // or else choose_acting() did a bad thing if (pi.log_tail <= since) { ldout(pg->cct, 10) << " requesting log+missing since " << since << " from osd." << *i << dendl; - context< RecoveryMachine >().send_query( + context< PeeringMachine >().send_query( *i, pg_query_t( pg_query_t::LOG, @@ -9652,7 +9652,7 @@ PG::RecoveryState::GetMissing::GetMissing(my_context ctx) ldout(pg->cct, 10) << " requesting fulllog+missing from osd." 
<< *i << " (want since " << since << " < log.tail " << pi.log_tail << ")" << dendl; - context< RecoveryMachine >().send_query( + context< PeeringMachine >().send_query( *i, pg_query_t( pg_query_t::FULLLOG, i->shard, pg->pg_whoami.shard, @@ -9677,9 +9677,9 @@ PG::RecoveryState::GetMissing::GetMissing(my_context ctx) } } -boost::statechart::result PG::RecoveryState::GetMissing::react(const MLogRec& logevt) +boost::statechart::result PG::PeeringState::GetMissing::react(const MLogRec& logevt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; peer_missing_requested.erase(logevt.from); pg->proc_replica_log(logevt.msg->info, logevt.msg->log, logevt.msg->missing, logevt.from); @@ -9698,9 +9698,9 @@ boost::statechart::result PG::RecoveryState::GetMissing::react(const MLogRec& lo return discard_event(); } -boost::statechart::result PG::RecoveryState::GetMissing::react(const QueryState& q) +boost::statechart::result PG::PeeringState::GetMissing::react(const QueryState& q) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; q.f->open_object_section("state"); q.f->dump_string("name", state_name); q.f->dump_stream("enter_time") << enter_time; @@ -9724,42 +9724,42 @@ boost::statechart::result PG::RecoveryState::GetMissing::react(const QueryState& return forward_event(); } -void PG::RecoveryState::GetMissing::exit() +void PG::PeeringState::GetMissing::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_getmissing_latency, dur); pg->blocked_by.clear(); } /*------WaitUpThru--------*/ -PG::RecoveryState::WaitUpThru::WaitUpThru(my_context ctx) +PG::PeeringState::WaitUpThru::WaitUpThru(my_context ctx) : my_base(ctx), - NamedState(context< RecoveryMachine >().pg, "Started/Primary/Peering/WaitUpThru") + NamedState(context< PeeringMachine >().pg, "Started/Primary/Peering/WaitUpThru") { - context< RecoveryMachine >().log_enter(state_name); + context< PeeringMachine >().log_enter(state_name); } -boost::statechart::result PG::RecoveryState::WaitUpThru::react(const ActMap& am) +boost::statechart::result PG::PeeringState::WaitUpThru::react(const ActMap& am) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; if (!pg->need_up_thru) { post_event(Activate(pg->get_osdmap_epoch())); } return forward_event(); } -boost::statechart::result PG::RecoveryState::WaitUpThru::react(const MLogRec& logevt) +boost::statechart::result PG::PeeringState::WaitUpThru::react(const MLogRec& logevt) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ldout(pg->cct, 10) << "Noting missing from osd." 
<< logevt.from << dendl; pg->peer_missing[logevt.from].claim(logevt.msg->missing); pg->peer_info[logevt.from] = logevt.msg->info; return discard_event(); } -boost::statechart::result PG::RecoveryState::WaitUpThru::react(const QueryState& q) +boost::statechart::result PG::PeeringState::WaitUpThru::react(const QueryState& q) { q.f->open_object_section("state"); q.f->dump_string("name", state_name); @@ -9769,29 +9769,29 @@ boost::statechart::result PG::RecoveryState::WaitUpThru::react(const QueryState& return forward_event(); } -void PG::RecoveryState::WaitUpThru::exit() +void PG::PeeringState::WaitUpThru::exit() { - context< RecoveryMachine >().log_exit(state_name, enter_time); - PG *pg = context< RecoveryMachine >().pg; + context< PeeringMachine >().log_exit(state_name, enter_time); + PG *pg = context< PeeringMachine >().pg; utime_t dur = ceph_clock_now() - enter_time; pg->osd->recoverystate_perf->tinc(rs_waitupthru_latency, dur); } -/*----RecoveryState::RecoveryMachine Methods-----*/ +/*----PeeringState::PeeringMachine Methods-----*/ #undef dout_prefix #define dout_prefix pg->gen_prefix(*_dout) -void PG::RecoveryState::RecoveryMachine::log_enter(const char *state_name) +void PG::PeeringState::PeeringMachine::log_enter(const char *state_name) { - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ldout(pg->cct, 5) << "enter " << state_name << dendl; pg->osd->pg_recovery_stats.log_enter(state_name); } -void PG::RecoveryState::RecoveryMachine::log_exit(const char *state_name, utime_t enter_time) +void PG::PeeringState::PeeringMachine::log_exit(const char *state_name, utime_t enter_time) { utime_t dur = ceph_clock_now() - enter_time; - PG *pg = context< RecoveryMachine >().pg; + PG *pg = context< PeeringMachine >().pg; ldout(pg->cct, 5) << "exit " << state_name << " " << dur << " " << event_count << " " << event_time << dendl; pg->osd->pg_recovery_stats.log_exit(state_name, ceph_clock_now() - enter_time, event_count, event_time); @@ -9804,13 +9804,13 @@ void PG::RecoveryState::RecoveryMachine::log_exit(const char *state_name, utime_ #undef dout_prefix #define dout_prefix ((debug_pg ? 
debug_pg->gen_prefix(*_dout) : *_dout) << " PriorSet: ") -void PG::RecoveryState::start_handle(RecoveryCtx *new_ctx) { +void PG::PeeringState::start_handle(PeeringCtx *new_ctx) { ceph_assert(!rctx); ceph_assert(!orig_ctx); orig_ctx = new_ctx; if (new_ctx) { if (messages_pending_flush) { - rctx = RecoveryCtx(*messages_pending_flush, *new_ctx); + rctx = PeeringCtx(*messages_pending_flush, *new_ctx); } else { rctx = *new_ctx; } @@ -9818,38 +9818,38 @@ void PG::RecoveryState::start_handle(RecoveryCtx *new_ctx) { } } -void PG::RecoveryState::begin_block_outgoing() { +void PG::PeeringState::begin_block_outgoing() { ceph_assert(!messages_pending_flush); ceph_assert(orig_ctx); ceph_assert(rctx); messages_pending_flush = BufferedRecoveryMessages(); - rctx = RecoveryCtx(*messages_pending_flush, *orig_ctx); + rctx = PeeringCtx(*messages_pending_flush, *orig_ctx); } -void PG::RecoveryState::clear_blocked_outgoing() { +void PG::PeeringState::clear_blocked_outgoing() { ceph_assert(orig_ctx); ceph_assert(rctx); messages_pending_flush = boost::optional(); } -void PG::RecoveryState::end_block_outgoing() { +void PG::PeeringState::end_block_outgoing() { ceph_assert(messages_pending_flush); ceph_assert(orig_ctx); ceph_assert(rctx); - rctx = RecoveryCtx(*orig_ctx); + rctx = PeeringCtx(*orig_ctx); rctx->accept_buffered_messages(*messages_pending_flush); messages_pending_flush = boost::optional(); } -void PG::RecoveryState::end_handle() { +void PG::PeeringState::end_handle() { if (rctx) { utime_t dur = ceph_clock_now() - rctx->start_time; machine.event_time += dur; } machine.event_count++; - rctx = boost::optional(); + rctx = boost::optional(); orig_ctx = NULL; } diff --git a/src/osd/PG.h b/src/osd/PG.h index 357848881fd..1dbe23899c3 100644 --- a/src/osd/PG.h +++ b/src/osd/PG.h @@ -263,7 +263,7 @@ public: ObjectStore::CollectionHandle ch; - struct RecoveryCtx; + struct PeeringCtx; // -- methods -- std::ostream& gen_prefix(std::ostream& out) const override; @@ -412,7 +412,7 @@ public: const pg_pool_t *pool, ObjectStore::Transaction *t) = 0; void split_into(pg_t child_pgid, PG *child, unsigned split_bits); - void merge_from(map& sources, RecoveryCtx *rctx, + void merge_from(map& sources, PeeringCtx *rctx, unsigned split_bits, const pg_merge_meta_t& last_pg_merge_meta); void finish_split_stats(const object_stat_sum_t& stats, ObjectStore::Transaction *t); @@ -428,16 +428,16 @@ public: bool set_force_backfill(bool b); void queue_peering_event(PGPeeringEventRef evt); - void do_peering_event(PGPeeringEventRef evt, RecoveryCtx *rcx); + void do_peering_event(PGPeeringEventRef evt, PeeringCtx *rcx); void queue_null(epoch_t msg_epoch, epoch_t query_epoch); void queue_flushed(epoch_t started_at); void handle_advance_map( OSDMapRef osdmap, OSDMapRef lastmap, vector& newup, int up_primary, vector& newacting, int acting_primary, - RecoveryCtx *rctx); - void handle_activate_map(RecoveryCtx *rctx); - void handle_initialize(RecoveryCtx *rctx); + PeeringCtx *rctx); + void handle_activate_map(PeeringCtx *rctx); + void handle_initialize(PeeringCtx *rctx); void handle_query_state(Formatter *f); /** @@ -449,8 +449,8 @@ public: ThreadPool::TPHandle &handle, uint64_t *ops_begun) = 0; - // more work after the above, but with a RecoveryCtx - void find_unfound(epoch_t queued, RecoveryCtx *rctx); + // more work after the above, but with a PeeringCtx + void find_unfound(epoch_t queued, PeeringCtx *rctx); virtual void get_watchers(std::list *ls) = 0; @@ -995,14 +995,14 @@ protected: public: bool dne() { return info.dne(); } - struct 
RecoveryCtx { + struct PeeringCtx { utime_t start_time; map > *query_map; map > > *info_map; map > > *notify_list; ObjectStore::Transaction *transaction; ThreadPool::TPHandle* handle; - RecoveryCtx(map > *query_map, + PeeringCtx(map > *query_map, map > > *info_map, map > &query_map); @@ -1591,7 +1591,7 @@ protected: map >& query_map, map > > *activator_map, - RecoveryCtx *ctx); + PeeringCtx *ctx); struct C_PG_ActivateCommitted : public Context { PGRef pg; @@ -2077,7 +2077,7 @@ protected: TrivialEvent(DeleteInterrupted) /* Encapsulates PG recovery process */ - class RecoveryState { + class PeeringState { void start_handle(RecoveryCtx *new_ctx); void end_handle(); public: @@ -2088,8 +2088,8 @@ protected: /* States */ struct Initial; - class RecoveryMachine : public boost::statechart::state_machine< RecoveryMachine, Initial > { - RecoveryState *state; + class PeeringMachine : public boost::statechart::state_machine< PeeringMachine, Initial > { + PeeringState *state; public: PG *pg; @@ -2104,7 +2104,7 @@ protected: void log_enter(const char *state_name); void log_exit(const char *state_name, utime_t duration); - RecoveryMachine(RecoveryState *state, PG *pg) : state(state), pg(pg), event_count(0) {} + PeeringMachine(PeeringState *state, PG *pg) : state(state), pg(pg), event_count(0) {} /* Accessor functions for state methods */ ObjectStore::Transaction* get_cur_transaction() { @@ -2140,7 +2140,7 @@ protected: state->rctx->send_notify(to, info, pi); } }; - friend class RecoveryMachine; + friend class PeeringMachine; /* States */ // Initial @@ -2178,13 +2178,13 @@ protected: // Deleting // Crashed - struct Crashed : boost::statechart::state< Crashed, RecoveryMachine >, NamedState { + struct Crashed : boost::statechart::state< Crashed, PeeringMachine >, NamedState { explicit Crashed(my_context ctx); }; struct Reset; - struct Initial : boost::statechart::state< Initial, RecoveryMachine >, NamedState { + struct Initial : boost::statechart::state< Initial, PeeringMachine >, NamedState { explicit Initial(my_context ctx); void exit(); @@ -2202,7 +2202,7 @@ protected: } }; - struct Reset : boost::statechart::state< Reset, RecoveryMachine >, NamedState { + struct Reset : boost::statechart::state< Reset, PeeringMachine >, NamedState { explicit Reset(my_context ctx); void exit(); @@ -2225,7 +2225,7 @@ protected: struct Start; - struct Started : boost::statechart::state< Started, RecoveryMachine, Start >, NamedState { + struct Started : boost::statechart::state< Started, PeeringMachine, Start >, NamedState { explicit Started(my_context ctx); void exit(); @@ -2830,7 +2830,7 @@ protected: void exit(); }; - RecoveryMachine machine; + PeeringMachine machine; PG *pg; /// context passed in by state machine caller @@ -2847,7 +2847,7 @@ protected: boost::optional rctx; public: - explicit RecoveryState(PG *pg) + explicit PeeringState(PG *pg) : machine(this, pg), pg(pg), orig_ctx(0) { machine.initiate(); } @@ -3002,7 +3002,7 @@ public: bool try_fast_info, PerfCounters *logger = nullptr); - void write_if_dirty(RecoveryCtx *rctx) { + void write_if_dirty(PeeringCtx *rctx) { write_if_dirty(*rctx->transaction); } protected: @@ -3087,7 +3087,7 @@ protected: void fulfill_info(pg_shard_t from, const pg_query_t &query, pair ¬ify_info); void fulfill_log(pg_shard_t from, const pg_query_t &query, epoch_t query_epoch); - void fulfill_query(const MQuery& q, RecoveryCtx *rctx); + void fulfill_query(const MQuery& q, PeeringCtx *rctx); void check_full_transition(OSDMapRef lastmap, OSDMapRef osdmap); bool should_restart_peering( -- 
2.39.5