}
scrubber.end = candidate_end;
}
- scrubber.block_writes = true;
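With the flag gone, publishing a non-empty [scrubber.start, scrubber.end)
range is itself what blocks writes to the chunk. A minimal sketch of that
invariant, assuming the Scrubber fields shown later in this patch
(chunk_blocks_writes is a hypothetical helper, not part of the change):

// empty range <=> nothing blocked; non-empty range <=> chunk writes blocked
bool chunk_blocks_writes(const Scrubber &s) {
  return s.start != s.end;
}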
// walk the log to find the latest update that affects our chunk
scrubber.subset_last_update = pg_log.get_tail();
assert(scrubber.waiting_on == 0);
scrub_compare_maps();
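The log walk referenced by the comment above is elided from this excerpt; a
plausible reconstruction, assuming pg_log exposes get_log() and the usual
pg_log_entry_t fields (soid, version):

// scan the PG log for the newest entry that touches an object in the chunk
for (list<pg_log_entry_t>::const_iterator p = pg_log.get_log().log.begin();
     p != pg_log.get_log().log.end();
     ++p) {
  if (p->soid >= scrubber.start && p->soid < scrubber.end)
    scrubber.subset_last_update = p->version;
}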
- scrubber.block_writes = false;
scrubber.run_callbacks();
// requeue the writes from the chunk that just finished
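Where the old code cleared block_writes before requeueing, the finish path
now has to restore the "not blocking" state in range terms. One plausible
way to satisfy the assert added later in this patch (start == end) before
waking waiters:

// collapse the chunk range so write_blocked_by_scrub() is false for every
// object, then wake and requeue whatever was parked on this chunk
scrubber.start = scrubber.end;
scrubber.run_callbacks();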
q.f->open_object_section("scrub");
q.f->dump_stream("scrubber.epoch_start") << pg->scrubber.epoch_start;
q.f->dump_int("scrubber.active", pg->scrubber.active);
- q.f->dump_int("scrubber.block_writes", pg->scrubber.block_writes);
q.f->dump_int("scrubber.waiting_on", pg->scrubber.waiting_on);
{
q.f->open_array_section("scrubber.waiting_on_whom");
Scrubber() :
reserved(false), reserve_failed(false),
epoch_start(0),
- block_writes(false), active(false), queue_snap_trim(false),
+ active(false), queue_snap_trim(false),
waiting_on(0), shallow_errors(0), deep_errors(0), fixed(0),
active_rep_scrub(0),
must_scrub(false), must_deep_scrub(false), must_repair(false),
epoch_t epoch_start;
// common to both scrubs
- bool block_writes;
bool active;
bool queue_snap_trim;
int waiting_on;
// classic (non-chunk) scrubs block all writes (their [start, end) range
// covers everything); chunky scrubs only block writes to the chunk's range
bool write_blocked_by_scrub(const hobject_t &soid) {
- if (!block_writes)
- return false;
-
if (soid >= start && soid < end)
return true;
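For context, a hypothetical caller on the write path (loosely modeled on the
op-handling code, not part of this patch): writes that land in the chunk are
parked and requeued when the chunk finishes.

// sketch: hold a write op while its object is inside the scrubbed chunk
if (op->may_write() && scrubber.write_blocked_by_scrub(head)) {
  waiting_for_active.push_back(op);  // requeued after run_callbacks()
  return;
}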
// clear all state
void reset() {
- block_writes = false;
active = false;
queue_snap_trim = false;
waiting_on = 0;
osd->scrub_wq.queue(this);
}
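With the flag gone, reset() restores the "not blocking" state simply by
leaving the chunk range empty. A sketch of what that amounts to, assuming
start and end are reset alongside the fields shown above:

// start == end (both default-constructed) => write_blocked_by_scrub()
// returns false for every object
start = hobject_t();
end = hobject_t();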
} else {
- assert(!scrubber.block_writes);
+ assert(scrubber.start == scrubber.end);
}
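The replacement assert expresses the old condition in range terms: an empty
[start, end) interval admits no object, so nothing can be write-blocked. A
sketch of the equivalence (any_soid stands for an arbitrary hobject_t):

// start == end makes soid >= start && soid < end unsatisfiable, so the new
// assert implies everything the old !block_writes assert guaranteed
assert(scrubber.start == scrubber.end);
assert(!scrubber.write_blocked_by_scrub(any_soid));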
} else {
if (scrubber.active_rep_scrub) {