return m_queue.get_blocked_pgs_count();
}
-bool OsdScrub::set_reserving_now()
+bool OsdScrub::set_reserving_now(spg_t reserving_id, utime_t now_is)
{
+ // forwarded to the scrub queue, which tracks the single 'reserving' PG
- return m_queue.set_reserving_now();
+ return m_queue.set_reserving_now(reserving_id, now_is);
}
-void OsdScrub::clear_reserving_now()
+void OsdScrub::clear_reserving_now(spg_t reserving_id)
{
+ // forwarded to the scrub queue; only clears the flag if it was set
+ // by 'reserving_id'
- m_queue.clear_reserving_now();
+ m_queue.clear_reserving_now(reserving_id);
}
* and that PG is trying to acquire replica resources.
* \retval false if the flag was already set (due to a race)
*/
- bool set_reserving_now();
+ bool set_reserving_now(spg_t reserving_id, utime_t now_is);
- void clear_reserving_now();
+
+ /// clears the 'reserving' flag, but only if it was set by 'reserving_id'
+ void clear_reserving_now(spg_t reserving_id);
/**
* \returns true if the current time is within the scrub time window
// ////////////////////////////////////////////////////////////////////////// //
// ScrubQueue - maintaining the 'some PG is reserving' flag
-bool ScrubQueue::set_reserving_now()
+bool ScrubQueue::set_reserving_now(spg_t reserving_id, utime_t now_is)
{
- auto was_set = a_pg_is_reserving.exchange(true);
- return !was_set;
+ std::unique_lock l{reserving_lock};
+
+ if (!reserving_pg.has_value()) {
+ // no PG holds the flag - grant it to the caller, and remember who/when
+ reserving_pg = reserving_id;
+ reserving_since = now_is;
+ return true;
+ }
+ // the flag is held by some other PG. A repeated request by the PG that
+ // already holds the flag would indicate a bookkeeping bug - hence the assert
+ ceph_assert(reserving_id != *reserving_pg);
+ return false;
}
-void ScrubQueue::clear_reserving_now()
+void ScrubQueue::clear_reserving_now(spg_t was_reserving_id)
{
- a_pg_is_reserving = false;
+ std::unique_lock l{reserving_lock};
+ if (reserving_pg && (*reserving_pg == was_reserving_id)) {
+ reserving_pg.reset();
+ }
+ // otherwise - ignore silently: the flag is either already clear, or
+ // is held by a different PG
}
bool ScrubQueue::is_reserving_now() const
{
- return a_pg_is_reserving;
+ // the lock is required here: 'reserving_pg' is a non-atomic
+ // std::optional, modified by set_reserving_now() / clear_reserving_now()
+ // under 'reserving_lock'. An unsynchronized read would be a formal
+ // data race.
+ std::unique_lock l{reserving_lock};
+ return reserving_pg.has_value();
}
* (which is a possible result of a race between the check in OsdScrub and
* the initiation of a scrub by some other PG)
*/
- bool set_reserving_now();
- void clear_reserving_now();
+ bool set_reserving_now(spg_t reserving_id, utime_t now_is);
+
+ /**
+ * silently ignore attempts to clear the flag if it was not set by
+ * the named pg.
+ */
+ void clear_reserving_now(spg_t reserving_id);
bool is_reserving_now() const;
/// counting the number of PGs stuck while scrubbing, waiting for objects
* trying to secure its replicas' resources. We will refrain from initiating
* any other scrub sessions until this one is done.
*
- * \todo keep the ID of the reserving PG; possibly also the time it started.
+ * \todo replace the local lock with regular osd-service locking
*/
- std::atomic_bool a_pg_is_reserving{false};
+ // 'mutable', so that the const accessor is_reserving_now() can take it
+ mutable ceph::mutex reserving_lock = ceph::make_mutex("ScrubQueue::reserving_lock");
+ std::optional<spg_t> reserving_pg;
+ utime_t reserving_since; ///< when the currently-reserving PG took the flag
/**
* If the scrub job was not explicitly requested, we postpone it by some
}
}
bool PgScrubber::set_reserving_now()
{
- return m_osds->get_scrub_services().set_reserving_now();
+ // register this PG as the one performing replica reservations,
+ // stamped with the current time
+ return m_osds->get_scrub_services().set_reserving_now(m_pg_id,
+ ceph_clock_now());
}
void PgScrubber::clear_reserving_now()
{
+ // a no-op on the queue side if some other PG is the registered reserver
- m_osds->get_scrub_services().clear_reserving_now();
+ m_osds->get_scrub_services().clear_reserving_now(m_pg_id);
}
void PgScrubber::set_queued_or_active()