]> git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
crimson/osd: wire up backfill resumption
authorRadoslaw Zarzynski <rzarzyns@redhat.com>
Mon, 9 Jan 2023 15:02:36 +0000 (15:02 +0000)
committerXuehan Xu <xuxuehan@qianxin.com>
Sat, 31 Aug 2024 13:24:12 +0000 (21:24 +0800)
Signed-off-by: Radoslaw Zarzynski <rzarzyns@redhat.com>
src/crimson/osd/pg.cc
src/crimson/osd/pg_recovery.cc
src/crimson/osd/pg_recovery.h

index bb249b8e10e830927e60c5e739f529cdeff22937..f6206fd2a9a51f765c821251a5c4181599914bec 100644 (file)
@@ -421,6 +421,7 @@ void PG::on_activate_complete()
       PeeringState::AllReplicasRecovered{});
   }
   publish_stats_to_osd();
+  recovery_handler->on_activate_complete();
 }
 
 void PG::prepare_write(pg_info_t &info,
index f4a7d8a63db9f4787578f07688085c7d1bb2b5b4..b583c87a3f6062eca40b47ba300cd62032608c77 100644 (file)
@@ -623,36 +623,31 @@ void PGRecovery::backfill_cancelled()
   using BackfillState = crimson::osd::BackfillState;
   backfill_state->process_event(
     BackfillState::CancelBackfill{}.intrusive_from_this());
-  backfill_state.reset();
 }
 
 void PGRecovery::dispatch_backfill_event(
   boost::intrusive_ptr<const boost::statechart::event_base> evt)
 {
   logger().debug("{}", __func__);
-  if (backfill_state) {
-    backfill_state->process_event(evt);
-  } else {
-    // TODO: Do we need to worry about cases in which the pg has
-    //              been through both backfill cancellations and backfill
-    //              restarts between the sendings and replies of
-    //              ReplicaScan/ObjectPush requests? Seems classic OSDs
-    //              doesn't handle these cases.
-    logger().debug("{}, backfill cancelled, dropping evt");
-  }
+  assert(backfill_state);
+  backfill_state->process_event(evt);
+  // TODO: Do we need to worry about cases in which the pg has
+  //       been through both backfill cancellations and backfill
+  //       restarts between the sending of ReplicaScan/ObjectPush
+  //       requests and the arrival of their replies? It seems
+  //       classic OSDs don't handle these cases.
+}
+
+void PGRecovery::on_activate_complete()
+{
+  logger().debug("{} backfill_state={}",
+                 __func__, fmt::ptr(backfill_state.get()));
+  backfill_state.reset();
 }
 
 void PGRecovery::on_backfill_reserved()
 {
   logger().debug("{}", __func__);
-  // PIMP and depedency injection for the sake unittestability.
-  // I'm not afraid about the performance here.
-  using BackfillState = crimson::osd::BackfillState;
-  backfill_state = std::make_unique<BackfillState>(
-    *this,
-    std::make_unique<crimson::osd::PeeringFacade>(pg->get_peering_state()),
-    std::make_unique<crimson::osd::PGFacade>(
-      *static_cast<crimson::osd::PG*>(pg)));
   // yes, it's **not** backfilling yet. The PG_STATE_BACKFILLING
   // will be set after on_backfill_reserved() returns.
   // Backfill needs to take this into consideration when scheduling
@@ -660,5 +655,19 @@ void PGRecovery::on_backfill_reserved()
   // instances. Otherwise the execution might begin without having
   // the state updated.
   ceph_assert(!pg->get_peering_state().is_backfilling());
+  // let's be lazy with creating the backfill stuff
+  using BackfillState = crimson::osd::BackfillState;
+  if (!backfill_state) {
+    // PImpl and dependency injection for the sake of unittestability.
+    // I'm not afraid about the performance here.
+    backfill_state = std::make_unique<BackfillState>(
+      *this,
+      std::make_unique<crimson::osd::PeeringFacade>(pg->get_peering_state()),
+      std::make_unique<crimson::osd::PGFacade>(
+        *static_cast<crimson::osd::PG*>(pg)));
+  }
+  // We may either be starting a completely new backfill (this is the
+  // first event since the last on_activate_complete()) or resuming an
+  // already started, but stopped, one.
   start_backfill_recovery(BackfillState::Triggered{});
 }
index f5b8632a3826372720f477a71394b9779b77e1e1..c2f289e3cf62b84bae24ab51219719049b5250a8 100644 (file)
@@ -33,6 +33,7 @@ public:
   interruptible_future<bool> start_recovery_ops(
     RecoveryBackend::RecoveryBlockingEvent::TriggerI&,
     size_t max_to_start);
+  void on_activate_complete();
   void on_backfill_reserved();
   void dispatch_backfill_event(
     boost::intrusive_ptr<const boost::statechart::event_base> evt);