mon/MgrStatMonitor.cc: cleanup handle_conf_change (64389/head)
author    Shraddha Agrawal <shraddha.agrawal000@gmail.com>
          Thu, 26 Jun 2025 12:27:45 +0000 (17:57 +0530)
committer Shraddha Agrawal <shraddha.agrawal000@gmail.com>
          Tue, 8 Jul 2025 17:10:42 +0000 (22:40 +0530)
Prior to this change, we were using a flag value,
`reset_availability_last_uptime_downtime_val`, to record the
timestamp to which last_uptime and last_downtime should be
updated. This was originally done to avoid the values being
overwritten by a Paxos update.

Now, instead of using an intermediate value, we immediately
clear the last_uptime and last_downtime values in the
pending_pool_availability object. Since we are updating the
values in the pending object, we will not lose this information
due to an incoming Paxos update.
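
As an illustration of the pattern, here is a minimal standalone
sketch; the types below are simplified stand-ins, not the real
Ceph definitions. The point is that the reset is written straight
into the pending map from which the next proposal is encoded, so
nothing is left in a side flag that a refresh could bypass.

    #include <cstdint>
    #include <ctime>
    #include <iostream>
    #include <map>

    using utime_t = std::time_t;  // stand-in for Ceph's utime_t

    struct pool_availability_t {  // stand-in per-pool record
      utime_t last_uptime = 0;
      utime_t last_downtime = 0;
    };

    // Pending (uncommitted) per-pool state; the next proposal is
    // built from this map, so edits made here carry into the commit.
    std::map<int64_t, pool_availability_t> pending_pool_availability;

    // The approach this commit takes: when the feature is toggled
    // on, reset both timestamps directly in the pending object.
    void reset_pending_availability(utime_t now) {
      for (auto& [poolid, avail] : pending_pool_availability) {
        avail.last_uptime = now;
        avail.last_downtime = now;
      }
    }

    int main() {
      pending_pool_availability[1] = {};
      reset_pending_availability(std::time(nullptr));
      std::cout << pending_pool_availability[1].last_uptime << "\n";
    }

The actual patch below does the equivalent loop inline in
handle_conf_change(); the design point is that no reset state
lives outside the pending object between the config change and
the next commit.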

Fixes: https://tracker.ceph.com/issues/71857
Signed-off-by: Shraddha Agrawal <shraddhaag@ibm.com>
(cherry picked from commit 950d911b4b28ffaffc2d63cfc15d4edabce2b533)

src/mon/MgrStatMonitor.cc
src/mon/MgrStatMonitor.h

index 4d85a260a82619530aa6601be4ec46e23842456e..9bcee5b77d0ff2e44ca6bf9e9b004b070b86df6c 100644 (file)
@@ -80,12 +80,16 @@ void MgrStatMonitor::handle_conf_change(
              << dendl;
 
    // if feature is toggled from off to on,
-    // store the new value of last_uptime and last_downtime 
-    // (to be updated in calc_pool_availability) 
+    // reset last_uptime and last_downtime across all pools
     if (newval > oldval) {
-      reset_availability_last_uptime_downtime_val = ceph_clock_now();
-      dout(10) << __func__ << " reset_availability_last_uptime_downtime_val " 
-               <<  reset_availability_last_uptime_downtime_val << dendl; 
+      utime_t now = ceph_clock_now(); 
+      for (const auto& i : pending_pool_availability) {
+        const auto& poolid = i.first;
+        pending_pool_availability[poolid].last_downtime = now;
+        pending_pool_availability[poolid].last_uptime = now;
+      }
+      dout(20) << __func__ << " reset last_uptime and last_downtime to " 
+               << now << dendl;
     }
     enable_availability_tracking = newval;
   }
@@ -195,18 +199,8 @@ void MgrStatMonitor::calc_pool_availability()
       }
     }
 
-    // if reset_availability_last_uptime_downtime_val is not utime_t(1, 2), 
-    // update last_uptime and last_downtime for all pools to the 
-    // recorded values
-    if (reset_availability_last_uptime_downtime_val.has_value()) {
-      dout(20) << fmt::format("{}: Pool {} reset last_uptime and last_downtime to {}",
-                              __func__, poolid, reset_availability_last_uptime_downtime_val.value()) << dendl;
-      avail.last_downtime = reset_availability_last_uptime_downtime_val.value();
-      avail.last_uptime = reset_availability_last_uptime_downtime_val.value();
-    }
   }
   pending_pool_availability = pool_availability;
-  reset_availability_last_uptime_downtime_val.reset();
 }
 
 void MgrStatMonitor::update_from_paxos(bool *need_bootstrap)
index 66a184090e9d5b3f9be09a1dd108478ab39925ac..ef06087d24ca66230aa7563a4234469b5245b5f3 100644 (file)
@@ -56,7 +56,6 @@ public:
 
   void calc_pool_availability();
   bool enable_availability_tracking = g_conf().get_val<bool>("enable_availability_tracking"); ///< tracking availability score feature 
-  std::optional<utime_t> reset_availability_last_uptime_downtime_val;
   
   void clear_pool_availability(int64_t poolid);