From 9ba4cd8bc39467fc9901ba31b33dd628f5330b07 Mon Sep 17 00:00:00 2001
From: Radoslaw Zarzynski
Date: Mon, 9 Jan 2023 15:02:36 +0000
Subject: [PATCH] crimson/osd: wire up backfill resumption

Signed-off-by: Radoslaw Zarzynski
---
 src/crimson/osd/pg.cc          |  1 +
 src/crimson/osd/pg_recovery.cc | 47 ++++++++++++++++++++--------------
 src/crimson/osd/pg_recovery.h  |  1 +
 3 files changed, 30 insertions(+), 19 deletions(-)

diff --git a/src/crimson/osd/pg.cc b/src/crimson/osd/pg.cc
index bb249b8e10e..f6206fd2a9a 100644
--- a/src/crimson/osd/pg.cc
+++ b/src/crimson/osd/pg.cc
@@ -421,6 +421,7 @@ void PG::on_activate_complete()
       PeeringState::AllReplicasRecovered{});
   }
   publish_stats_to_osd();
+  recovery_handler->on_activate_complete();
 }
 
 void PG::prepare_write(pg_info_t &info,
diff --git a/src/crimson/osd/pg_recovery.cc b/src/crimson/osd/pg_recovery.cc
index f4a7d8a63db..b583c87a3f6 100644
--- a/src/crimson/osd/pg_recovery.cc
+++ b/src/crimson/osd/pg_recovery.cc
@@ -623,36 +623,31 @@ void PGRecovery::backfill_cancelled()
   using BackfillState = crimson::osd::BackfillState;
   backfill_state->process_event(
     BackfillState::CancelBackfill{}.intrusive_from_this());
-  backfill_state.reset();
 }
 
 void PGRecovery::dispatch_backfill_event(
   boost::intrusive_ptr<const boost::statechart::event_base> evt)
 {
   logger().debug("{}", __func__);
-  if (backfill_state) {
-    backfill_state->process_event(evt);
-  } else {
-    // TODO: Do we need to worry about cases in which the pg has
-    // been through both backfill cancellations and backfill
-    // restarts between the sendings and replies of
-    // ReplicaScan/ObjectPush requests? Seems classic OSDs
-    // doesn't handle these cases.
-    logger().debug("{}, backfill cancelled, dropping evt");
-  }
+  assert(backfill_state);
+  backfill_state->process_event(evt);
+  // TODO: Do we need to worry about cases in which the pg has
+  // been through both backfill cancellations and backfill
+  // restarts between the sendings and replies of
+  // ReplicaScan/ObjectPush requests? Seems classic OSDs
+  // doesn't handle these cases.
+}
+
+void PGRecovery::on_activate_complete()
+{
+  logger().debug("{} backfill_state={}",
+                 __func__, fmt::ptr(backfill_state.get()));
+  backfill_state.reset();
 }
 
 void PGRecovery::on_backfill_reserved()
 {
   logger().debug("{}", __func__);
-  // PIMP and depedency injection for the sake unittestability.
-  // I'm not afraid about the performance here.
-  using BackfillState = crimson::osd::BackfillState;
-  backfill_state = std::make_unique<BackfillState>(
-    *this,
-    std::make_unique<crimson::osd::PeeringFacade>(pg->get_peering_state()),
-    std::make_unique<crimson::osd::PGFacade>(
-      *static_cast<crimson::osd::PG*>(pg)));
   // yes, it's **not** backfilling yet. The PG_STATE_BACKFILLING
   // will be set after on_backfill_reserved() returns.
   // Backfill needs to take this into consideration when scheduling
@@ -660,5 +655,19 @@ void PGRecovery::on_backfill_reserved()
   // instances. Otherwise the execution might begin without having
   // the state updated.
   ceph_assert(!pg->get_peering_state().is_backfilling());
+  // let's be lazy with creating the backfill stuff
+  using BackfillState = crimson::osd::BackfillState;
+  if (!backfill_state) {
+    // PIMP and depedency injection for the sake of unittestability.
+    // I'm not afraid about the performance here.
+    backfill_state = std::make_unique<BackfillState>(
+      *this,
+      std::make_unique<crimson::osd::PeeringFacade>(pg->get_peering_state()),
+      std::make_unique<crimson::osd::PGFacade>(
+        *static_cast<crimson::osd::PG*>(pg)));
+  }
+  // it may be we either start a completely new backfill (first
+  // event since last on_activate_complete()) or to resume already
+  // (but stopped one).
   start_backfill_recovery(BackfillState::Triggered{});
 }
diff --git a/src/crimson/osd/pg_recovery.h b/src/crimson/osd/pg_recovery.h
index f5b8632a382..c2f289e3cf6 100644
--- a/src/crimson/osd/pg_recovery.h
+++ b/src/crimson/osd/pg_recovery.h
@@ -33,6 +33,7 @@ public:
   interruptible_future<bool> start_recovery_ops(
     RecoveryBackend::RecoveryBlockingEvent::TriggerI&,
     size_t max_to_start);
+  void on_activate_complete();
   void on_backfill_reserved();
   void dispatch_backfill_event(
     boost::intrusive_ptr<const boost::statechart::event_base> evt);
-- 
2.39.5