osd/: remove PL::reschedule_scrub, notify scrubber on config/pool change directly
author    Samuel Just <sjust@redhat.com>
          Wed, 8 Mar 2023 01:21:24 +0000 (17:21 -0800)
committer Samuel Just <sjust@redhat.com>
          Wed, 12 Apr 2023 03:39:19 +0000 (20:39 -0700)
As with on_info_history_change(), we don't need to deal with scrub
scheduling during peering. Once we've gone active, the scrubber itself
would be the origin of any stat changes that could affect scrub
scheduling.  The other possible change vectors would be OSD config
changes or pool config changes.

PG::reschedule_scrub becomes PG::on_scrub_schedule_input_change, which
should be called whenever an input to scrub scheduling changes.

OSD::resched_all_scrubs() now calls PG::on_scrub_schedule_input_change
unconditionally to deal with changes to osd_scrub_(min|max)_interval.

PG::set_last_scrub_stamp and PG::set_last_deep_scrub_stamp now invoke
PG::on_scrub_schedule_input_change directly.

PG::handle_activate_map() now calls PG::on_scrub_schedule_input_change
directly to deal with changes to scrub-related pool options.

Signed-off-by: Samuel Just <sjust@redhat.com>
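
To make the resulting pattern concrete, the following is a minimal sketch:
every scheduling input change, whatever its origin, funnels into a single
notification that is a no-op unless the PG is an active primary. The types
and helpers below (Scrubber, on_osd_config_change, and so on) are
simplified stand-ins for illustration, not the real Ceph classes.

#include <iostream>

// Stand-in for PgScrubber: owns the scrub queue entry for this PG.
struct Scrubber {
  void update_scrub_job() { std::cout << "scrub job re-queued\n"; }
};

struct PG {
  bool active = false;
  bool primary = false;
  Scrubber scrubber;

  // Single entry point, called whenever any scheduling input changes.
  // Inactive or non-primary PGs have nothing to schedule, so this is
  // safely a no-op for them.
  void on_scrub_schedule_input_change() {
    if (active && primary) {
      scrubber.update_scrub_job();
    }
  }

  // Vector 1: OSD-wide config change, e.g. osd_scrub_(min|max)_interval.
  void on_osd_config_change() { on_scrub_schedule_input_change(); }

  // Vector 2: pool options observed when a new map is activated.
  void handle_activate_map() { on_scrub_schedule_input_change(); }

  // Vector 3: a scrub timestamp was updated in the PG stats.
  void set_last_scrub_stamp() { on_scrub_schedule_input_change(); }
};

int main() {
  PG pg;
  pg.on_osd_config_change();   // not active/primary yet: ignored
  pg.active = pg.primary = true;
  pg.handle_activate_map();    // prints: scrub job re-queued
  pg.set_last_scrub_stamp();   // prints: scrub job re-queued
}

In the patch itself, the active/primary gate additionally asserts that
m_scrubber exists before touching it, and the inactive case is logged at
debug level (dout(20)) rather than dropped silently.
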
src/crimson/osd/pg.h
src/osd/OSD.cc
src/osd/PG.cc
src/osd/PG.h
src/osd/PeeringState.cc
src/osd/PeeringState.h
src/osd/scrubber/pg_scrubber.cc

index d510a7145f9bab2969a652d25100ba321ec77605..997fb5711e764345fdf154d49b2bce1e1dd9380b 100644 (file)
@@ -159,10 +159,6 @@ public:
     bool need_write_epoch,
     ceph::os::Transaction &t) final;
 
-  /// Need to reschedule next scrub. Assuming no change in role
-  void reschedule_scrub() final {
-  }
-
   void scrub_requested(scrub_level_t scrub_level, scrub_type_t scrub_type) final;
 
   uint64_t get_snap_trimq_size() const final {
index eff053937a1ebde0095fd911967e613b892140ca..62e00a425b410a537ed458a51a576fad44ba8377 100644 (file)
@@ -7626,10 +7626,9 @@ void OSD::resched_all_scrubs()
     if (!pg)
       continue;
 
-    if (!pg->get_planned_scrub().must_scrub && !pg->get_planned_scrub().need_auto) {
-      dout(15) << __func__ << ": reschedule " << job.pgid << dendl;
-      pg->reschedule_scrub();
-    }
+    dout(15) << __func__ << ": updating scrub schedule on " << job.pgid << dendl;
+    pg->on_scrub_schedule_input_change();
+
     pg->unlock();
   }
   dout(10) << __func__ << ": done" << dendl;
index e7dafda809b6f01191482076ee7bec210e6c0b9a..b57cafd4475f78fdf0485ebf87499e9acac0ebb0 100644 (file)
@@ -1679,17 +1679,14 @@ std::optional<requested_scrub_t> PG::validate_scrub_mode() const
   return upd_flags;
 }
 
-void PG::reschedule_scrub()
+void PG::on_scrub_schedule_input_change()
 {
-  dout(20) << fmt::format(
-                 "{} for a {}", __func__,
-                 (is_primary() ? "Primary" : "non-primary"))
-          << dendl;
-
-  // we are assuming no change in primary status
-  if (is_primary()) {
+  if (is_active() && is_primary()) {
+    dout(20) << __func__ << ": active/primary" << dendl;
     ceph_assert(m_scrubber);
     m_scrubber->update_scrub_job(m_planned_scrub);
+  } else {
+    dout(20) << __func__ << ": inactive or non-primary" << dendl;
   }
 }
 
@@ -2562,6 +2559,9 @@ void PG::handle_activate_map(PeeringCtx &rctx)
   recovery_state.activate_map(rctx);
 
   requeue_map_waiters();
+
+  // pool options affecting scrub may have changed
+  on_scrub_schedule_input_change();
 }
 
 void PG::handle_initialize(PeeringCtx &rctx)
index ccec091e08c0df9d3fce41121285cf9509c5fcb4..dc346b8be027c81201144c1deabff82490a402e3 100644 (file)
@@ -269,6 +269,7 @@ public:
        set_last_scrub_stamp(t, history, stats);
        return true;
       });
+    on_scrub_schedule_input_change();
   }
 
   static void set_last_deep_scrub_stamp(
@@ -283,6 +284,7 @@ public:
        set_last_deep_scrub_stamp(t, history, stats);
        return true;
       });
+    on_scrub_schedule_input_change();
   }
 
   static void add_objects_scrubbed_count(
@@ -538,7 +540,18 @@ public:
   void on_pool_change() override;
   virtual void plpg_on_pool_change() = 0;
 
-  void reschedule_scrub() override;
+  /**
+   * on_scrub_schedule_input_change
+   *
+   * To be called when inputs to scrub scheduling may have changed.
+   * - OSD config params related to scrub such as osd_scrub_min_interval,
+   *   osd_scrub_max_interval
+   * - pool params related to scrub such as scrub_min_interval,
+   *   scrub_max_interval
+   * - pg stat scrub timestamps
+   * - etc
+   */
+  void on_scrub_schedule_input_change();
 
   void scrub_requested(scrub_level_t scrub_level, scrub_type_t scrub_type) override;
 
index 1ef3141ef47fb8ed8317a812b2cbf5758a3d3963..5f1a4d864839f4ac33aa192185e4f5ed3e8b16e6 100644 (file)
@@ -732,10 +732,6 @@ void PeeringState::start_peering_interval(
     }
   }
 
-  if (is_primary() && was_old_primary) {
-    pl->reschedule_scrub();
-  }
-
   if (acting.empty() && !up.empty() && up_primary == pg_whoami) {
     psdout(10) << " acting empty, but i am up[0], clearing pg_temp" << dendl;
     pl->queue_want_pg_temp(acting);
@@ -4026,7 +4022,6 @@ void PeeringState::update_stats(
   if (f(info.history, info.stats)) {
     pl->publish_stats_to_osd();
   }
-  pl->reschedule_scrub();
 
   if (t) {
     dirty_info = true;
index b1c8b0bb40f61e78851dee7255b5ee120754977f..a8cdf45fcceafef22d888c054f3be73ec2949405 100644 (file)
@@ -279,9 +279,6 @@ public:
       bool need_write_epoch,
       ObjectStore::Transaction &t) = 0;
 
-    /// Need to reschedule next scrub. Assuming no change in role
-    virtual void reschedule_scrub() = 0;
-
     /// Notify that a scrub has been requested
     virtual void scrub_requested(scrub_level_t scrub_level, scrub_type_t scrub_type) = 0;
 
index 93749e7c5a5b94b6690d9042cb92f8f8f3c2dd2b..56de19496ba03d886ee9265a897ad4aa6f82c9b2 100644 (file)
@@ -1935,6 +1935,7 @@ void PgScrubber::scrub_finish()
     int tr = m_osds->store->queue_transaction(m_pg->ch, std::move(t), nullptr);
     ceph_assert(tr == 0);
   }
+  update_scrub_job(m_planned_scrub);
 
   if (has_error) {
     m_pg->queue_peering_event(PGPeeringEventRef(