return peering_state.update_complete_backfill_object_stats(hoid, stats);
}
+ bool is_backfilling() const {
+ return peering_state.is_backfilling();
+ }
+
PeeringFacade(PeeringState& peering_state)
: peering_state(peering_state) {
}
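// A minimal, self-contained sketch (all names here are illustrative,
// not crimson's API) of what the facade above buys us: BackfillState
// sees only the narrow PeeringFacade surface, so a unit test can hand
// it a stand-in object instead of a fully constructed PeeringState.
#include <cassert>

struct PeeringStateLike {          // stand-in for PeeringState
  bool backfilling = false;
  bool is_backfilling() const { return backfilling; }
};

class PeeringFacadeSketch {
  PeeringStateLike& peering_state;
public:
  explicit PeeringFacadeSketch(PeeringStateLike& peering_state)
    : peering_state(peering_state) {}
  bool is_backfilling() const { return peering_state.is_backfilling(); }
};

int main() {
  PeeringStateLike fake;           // no real OSD machinery needed
  assert(!PeeringFacadeSketch{fake}.is_backfilling());
}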
logger().debug("{}: backfill triggered", __func__);
ceph_assert(backfill_state().last_backfill_started ==
            peering_state().earliest_backfill());
+ ceph_assert(peering_state().is_backfilling());
// initialize BackfillIntervals
for (const auto& bt : peering_state().get_backfill_targets()) {
backfill_state().peer_backfill_info[bt].reset(
  peering_state().get_peer_last_backfill(bt));
}
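// Hedged sketch of the precondition asserted above (plain ints stand
// in for hobject_t positions): earliest_backfill() is the minimum of
// the backfill targets' last_backfill markers, i.e. backfill resumes
// from the point the least advanced peer has reached.
#include <algorithm>
#include <cassert>
#include <vector>

static int earliest_backfill(const std::vector<int>& peer_last_backfill) {
  return *std::min_element(peer_last_backfill.begin(),
                           peer_last_backfill.end());
}

int main() {
  const std::vector<int> peer_last_backfill{7, 3, 9};
  const int last_backfill_started = 3;   // must match the earliest peer
  assert(last_backfill_started == earliest_backfill(peer_last_backfill));
}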
+BackfillRecovery::BackfillRecoveryPipeline &BackfillRecovery::bp(PG &pg)
+{
+ return pg.backfill_pipeline;
+}
+
seastar::future<bool> BackfillRecovery::do_recovery()
{
logger().debug("{}", __func__);
return with_blocking_future(
  // process_event() of our boost::statechart machine is non-reentrant.
  // with the backfill_pipeline we protect it from a second entry made
  // by the BackfillListener implementation.
- handle.enter(pg->backfill_pipeline.process)
+ // additionally, this stage serves to synchronize with PeeringEvent.
+ handle.enter(bp(*pg).process)
).then([this] {
pg->get_recovery_handler()->dispatch_backfill_event(std::move(evt));
return seastar::make_ready_future<bool>(false);
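// Sketch of the invariant the pipeline stage enforces (illustrative
// code, not boost::statechart): process_event() must never be entered
// while a previous dispatch is still on the stack, so any callback
// fired mid-dispatch may only queue follow-up work.
#include <cassert>

struct MachineSketch {
  bool in_dispatch = false;
  void process_event(/* an event */) {
    assert(!in_dispatch);          // would fire on a re-entrant call
    in_dispatch = true;
    // ... run transitions; a BackfillListener-style callback invoked
    // here must schedule another pipeline entry instead of calling
    // process_event() directly.
    in_dispatch = false;
  }
};

int main() {
  MachineSketch m;
  m.process_event();               // serialized entries are fine
  m.process_event();
}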
"BackfillRecovery::PGPipeline::process"
};
friend class BackfillRecovery;
+ friend class PeeringEvent;
};
template <class EventT>
ShardServices &ss,
epoch_t epoch_started,
const EventT& evt);
+
+ static BackfillRecoveryPipeline &bp(PG &pg);
};
template <class EventT>
pg->osdmap_gate.wait_for_map(evt.get_epoch_sent()));
}).then([this, pg](auto) {
return with_blocking_future(handle.enter(pp(*pg).process));
+ }).then([this, pg] {
+ // TODO: we should likely also synchronize with the pg log-based
+ // recovery.
+ return with_blocking_future(
+ handle.enter(BackfillRecovery::bp(*pg).process));
}).then([this, pg] {
pg->do_peering_event(evt, ctx);
handle.exit();
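// Conceptual sketch only (std::mutex stands in for the ordered
// pipeline stage; crimson is single-reactor and uses futures, not
// threads): both event types funnel through the same stage, so
// do_peering_event() and dispatch_backfill_event() never overlap.
#include <mutex>

std::mutex backfill_process_stage;   // stand-in for bp(pg).process

void run_peering_event() {
  std::scoped_lock l{backfill_process_stage};
  // pg->do_peering_event(evt, ctx);
}

void run_backfill_recovery() {
  std::scoped_lock l{backfill_process_stage};
  // pg->get_recovery_handler()->dispatch_backfill_event(std::move(evt));
}

int main() {
  run_peering_event();
  run_backfill_recovery();
}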
std::make_unique<BackfillState::PeeringFacade>(pg->get_peering_state()),
std::make_unique<BackfillState::PGFacade>(
*static_cast<crimson::osd::PG*>(pg)));
+ // yes, it's **not** backfilling yet. PG_STATE_BACKFILLING will be
+ // set only after on_backfill_reserved() returns. Backfill needs to
+ // take this into consideration when scheduling events -- they must
+ // be mutually exclusive with PeeringEvent instances; otherwise
+ // execution might begin before the state is updated.
+ ceph_assert(!pg->get_peering_state().is_backfilling());
start_backfill_recovery(BackfillState::Triggered{});
}
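// Sketch of the window described above (all names illustrative): the
// Triggered event is only *scheduled* here; thanks to the shared
// pipeline stage it cannot execute before the peering code that called
// on_backfill_reserved() has set the backfilling flag.
#include <cassert>

struct PgSketch {
  bool backfilling = false;
  bool triggered_scheduled = false;

  void on_backfill_reserved() {
    assert(!backfilling);            // the flag is not set yet...
    triggered_scheduled = true;      // ...so only schedule, never run
  }
  void finish_peering_event() {
    backfilling = true;              // set after on_backfill_reserved()
  }
  void run_scheduled_backfill() {
    assert(triggered_scheduled && backfilling);
  }
};

int main() {
  PgSketch pg;
  pg.on_backfill_reserved();
  pg.finish_peering_event();
  pg.run_scheduled_backfill();       // mutual exclusion guarantees order
}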