cct->_conf->osd_min_recovery_priority),
snap_reserver(cct, &reserver_finisher,
cct->_conf->osd_max_trimming_pgs),
+ scrub_reserver(cct, &reserver_finisher,
+ cct->_conf->osd_max_scrubs),
recovery_ops_active(0),
recovery_ops_reserved(0),
recovery_paused(false),
if (changed.count("osd_max_trimming_pgs")) {
service.snap_reserver.set_max(cct->_conf->osd_max_trimming_pgs);
}
+ if (changed.count("osd_max_scrubs")) {
+ service.scrub_reserver.set_max(cct->_conf->osd_max_scrubs);
+ }
if (changed.count("osd_op_complaint_time") ||
changed.count("osd_op_log_threshold")) {
op_tracker.set_complaint_and_threshold(cct->_conf->osd_op_complaint_time,
*/
std::optional<PGLockWrapper> get_locked_pg(spg_t pgid) final;
+ /**
+ * Accessor for the reserver that counts the number of active replica
+ * scrub operations and grants scrub reservation requests asynchronously.
+ */
+ AsyncReserver<spg_t, Finisher>& get_scrub_reserver() {
+ return scrub_reserver;
+ }
+
private:
// -- agent shared state --
ceph::mutex agent_lock = ceph::make_mutex("OSDService::agent_lock");
void send_pg_created();
AsyncReserver<spg_t, Finisher> snap_reserver;
+ /// keeping track of replicas being reserved for scrubbing
+ AsyncReserver<spg_t, Finisher> scrub_reserver;
void queue_recovery_context(PG *pg,
GenContext<ThreadPool::TPHandle&> *c,
uint64_t cost,