p != pg_map.end();
p++) {
PG *pg = p->second;
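+       // take the PG lock before touching scrub/repair state; queue_scrub() asserts it is held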
+ pg->lock();
if (pg->is_primary()) {
if (m->repair)
pg->state_set(PG_STATE_REPAIR);
- if (!pg->is_scrubbing()) {
+ if (pg->queue_scrub()) {
dout(10) << "queueing " << *pg << " for scrub" << dendl;
- scrub_wq.queue(pg);
}
}
+ pg->unlock();
}
} else {
for (vector<pg_t>::iterator p = m->scrub_pgs.begin();
     p != m->scrub_pgs.end();
     p++)
if (pg_map.count(*p)) {
PG *pg = pg_map[*p];
+ pg->lock();
if (pg->is_primary()) {
if (m->repair)
pg->state_set(PG_STATE_REPAIR);
- if (!pg->is_scrubbing()) {
+ if (pg->queue_scrub()) {
dout(10) << "queueing " << *pg << " for scrub" << dendl;
- scrub_wq.queue(pg);
}
}
+ pg->unlock();
}
}
dout(10) << "queue_snap_trim -- already trimming" << dendl;
}
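+// Flag the PG for scrubbing and queue it on the OSD's scrub work queue.
+// Returns false if a scrub is already in progress; caller must hold the PG lock.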
+bool PG::queue_scrub()
+{
+ assert(_lock.is_locked());
+ if (is_scrubbing()) {
+ return false;
+ }
+ state_set(PG_STATE_SCRUBBING);
+ osd->scrub_wq.queue(this);
+ return true;
+}
struct C_PG_FinishRecovery : public Context {
PG *pg;
if (state_test(PG_STATE_INCONSISTENT)) {
dout(10) << "_finish_recovery requeueing for scrub" << dendl;
- osd->scrub_wq.queue(this);
+ queue_scrub();
} else if (log.backlog) {
ObjectStore::Transaction *t = new ObjectStore::Transaction;
drop_backlog();
ret = true;
} else if (scrub_reserved_peers.size() == acting.size()) {
dout(20) << "sched_scrub: success, reserved self and replicas" << dendl;
- osd->scrub_wq.queue(this);
+ queue_scrub();
ret = true;
} else {
// none declined, since scrub_reserved is set
void adjust_local_snaps(ObjectStore::Transaction &t, interval_set<snapid_t> &to_check);
void queue_snap_trim();
+ bool queue_scrub();
void share_pg_info();
void share_pg_log(const eversion_t &oldver);