// handle an event
peering_wq.queue(pg);
} else {
- assert(!pg->peering_queue.empty());
- PG::CephPeeringEvtRef evt = pg->peering_queue.front();
- pg->peering_queue.pop_front();
- pg->handle_peering_event(evt, &rctx);
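+ // the PG now pops the next queued event, handles it, and persists any
+ // dirty state itself (see PG::process_peering_event below)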
+ pg->process_peering_event(&rctx);
}
need_up_thru = pg->need_up_thru || need_up_thru;
same_interval_since = MAX(pg->info.history.same_interval_since,
same_interval_since);
- pg->write_if_dirty(*rctx.transaction);
if (!split_pgs.empty()) {
rctx.on_applied->add(new C_CompleteSplits(this, split_pgs));
split_pgs.clear();
peering_waiters.begin(), peering_waiters.end());
}
-void PG::handle_peering_event(CephPeeringEvtRef evt, RecoveryCtx *rctx)
+void PG::process_peering_event(RecoveryCtx *rctx)
{
- dout(10) << "handle_peering_event: " << evt->get_desc() << dendl;
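+ // pull the next pending event off this PG's queue; it must not be empty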
+ assert(!peering_queue.empty());
+ CephPeeringEvtRef evt = peering_queue.front();
+ peering_queue.pop_front();
+
+ dout(10) << __func__ << ": " << evt->get_desc() << dendl;
if (!have_same_or_newer_map(evt->get_epoch_sent())) {
dout(10) << "deferring event " << evt->get_desc() << dendl;
peering_waiters.push_back(evt);
return;
}
if (old_peering_evt(evt))
return;
recovery_state.handle_event(evt, rctx);
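+ // persist whatever the event handler dirtied (info/log) into this
+ // context's transaction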
+ write_if_dirty(*rctx->transaction);
}
void PG::queue_peering_event(CephPeeringEvtRef evt)
<< last_persisted_osdmap_ref->get_epoch()
<< " while current is " << osdmap_ref->get_epoch() << dendl;
}
- if (osdmap_ref->check_new_blacklist_entries()) check_blacklisted_watchers();
+ if (osdmap_ref->check_new_blacklist_entries()) {
+ check_blacklisted_watchers();
+ }
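+ // likewise persist anything dirtied while activating the new map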
+ write_if_dirty(*rctx->transaction);
}
void PG::handle_loaded(RecoveryCtx *rctx)
void set_force_backfill(bool b);
void queue_peering_event(CephPeeringEvtRef evt);
- void handle_peering_event(CephPeeringEvtRef evt, RecoveryCtx *rctx);
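+ // dequeue and handle the next queued peering event, persisting dirty state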
+ void process_peering_event(RecoveryCtx *rctx);
void queue_query(epoch_t msg_epoch, epoch_t query_epoch,
pg_shard_t from, const pg_query_t& q);
void queue_null(epoch_t msg_epoch, epoch_t query_epoch);