}
--scrub_waiting_on;
+ scrub_waiting_on_whom.erase(from);
if (scrub_waiting_on == 0) {
if (finalizing_scrub) { // incremental lists received
osd->scrub_finalize_wq.queue(this);
} else { // initial lists received
finalizing_scrub = true;
scrub_gather_replica_maps();
++scrub_waiting_on;
+ scrub_waiting_on_whom.insert(osd->whoami);
osd->scrub_wq.queue(this);
}
}

* last_update_applied == info.last_update)
*/
scrub_waiting_on = acting.size();
+ scrub_waiting_on_whom.insert(acting.begin(), acting.end());
// request maps from replicas
for (unsigned i=1; i<acting.size(); i++) {
}
--scrub_waiting_on;
+ scrub_waiting_on_whom.erase(osd->whoami);
if (scrub_waiting_on == 0) {
// the replicas have completed their scrub map, so lock out writes
// request incrementals from replicas
scrub_gather_replica_maps();
++scrub_waiting_on;
+ scrub_waiting_on_whom.insert(osd->whoami);
}
dout(10) << "clean up scrub" << dendl;
}
--scrub_waiting_on;
+ scrub_waiting_on_whom.erase(osd->whoami);
if (scrub_waiting_on == 0) {
assert(last_update_applied == info.last_update);
osd->scrub_finalize_wq.queue(this);

finalizing_scrub = false;
scrub_block_writes = false;
scrub_active = false;
+ scrub_waiting_on = 0;
+ scrub_waiting_on_whom.clear();
if (active_rep_scrub) {
active_rep_scrub->put();
active_rep_scrub = NULL;

if (scrub_received_maps[p->first].valid_through != log.head) {
scrub_waiting_on++;
+ scrub_waiting_on_whom.insert(p->first);
// Need to request another incremental map
_request_scrub_map(p->first, p->second.valid_through);
}

bool scrub_active;
bool scrub_reserved, scrub_reserve_failed;
int scrub_waiting_on;
+ set<int> scrub_waiting_on_whom;
epoch_t scrub_epoch_start;
ScrubMap primary_scrubmap;
MOSDRepScrub *active_rep_scrub;
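
For reference, a minimal self-contained sketch (illustrative only; the ScrubWait struct and its members are hypothetical and not part of this patch or the Ceph tree) of the counter-plus-set bookkeeping the patch introduces: scrub_waiting_on counts outstanding scrub-map replies, and scrub_waiting_on_whom records which OSD ids those replies are expected from, so the two are always updated together.

#include <cassert>
#include <iostream>
#include <set>
#include <vector>

// Hypothetical stand-in for the PG scrub bookkeeping shown in the patch.
struct ScrubWait {
  int waiting_on = 0;             // mirrors scrub_waiting_on
  std::set<int> waiting_on_whom;  // mirrors scrub_waiting_on_whom

  // Expect one scrub map from every OSD in the acting set (primary included).
  void expect(const std::vector<int>& acting) {
    waiting_on = (int)acting.size();
    waiting_on_whom.insert(acting.begin(), acting.end());
  }

  // A map arrived from 'from': decrement the count and drop the id,
  // keeping the counter and the set in step.
  void got_map(int from) {
    --waiting_on;
    waiting_on_whom.erase(from);
    assert((size_t)waiting_on == waiting_on_whom.size());
  }

  bool done() const { return waiting_on == 0; }
};

int main() {
  ScrubWait w;
  w.expect({0, 1, 2});  // hypothetical acting set: osd.0 (primary), osd.1, osd.2
  w.got_map(1);
  w.got_map(2);
  w.got_map(0);
  std::cout << "done: " << w.done() << std::endl;  // prints "done: 1"
  return 0;
}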