* (4) Wait for writes to flush on the chunk
* (5) Wait for maps from replicas
* (6) Compare / repair all scrub maps
+ * (7) Wait for digest updates to apply
*
- * This logic is encoded in the very linear state machine:
+ * This logic is encoded in the mostly linear state machine:
*
* +------------------+
* _________v__________ |
* | | | |
* | COMPARE_MAPS | | |
* |____________________| | |
+ * | | |
+ * | | |
+ * _________v__________ | |
+ * | | | |
+ * |WAIT_DIGEST_UPDATES | | |
+ * |____________________| | |
* | | | |
* | +----------+ |
* _________v__________ |
// requeue the writes from the chunk that just finished
requeue_ops(waiting_for_active);
- if (scrubber.end < hobject_t::get_max()) {
- // schedule another leg of the scrub
+ scrubber.state = PG::Scrubber::WAIT_DIGEST_UPDATES;
+
+ // fall-thru
+
+ case PG::Scrubber::WAIT_DIGEST_UPDATES:
+ // hold here until every digest-update repop queued by _scrub has
+ // applied; _scrub_digest_updated() requeues us when the count hits 0
+ if (scrubber.num_digest_updates_pending) {
+ dout(10) << __func__ << " waiting on "
+ << scrubber.num_digest_updates_pending
+ << " digest updates" << dendl;
+ done = true;
+ break;
+ }
+
+ if (scrubber.end < hobject_t::get_max()) {
+ // schedule another leg of the scrub
scrubber.start = scrubber.end;
scrubber.state = PG::Scrubber::NEW_CHUNK;
} else {
scrubber.state = PG::Scrubber::FINISH;
}
- break;
+ break;
case PG::Scrubber::FINISH:
scrub_finish();
waiting_on(0), shallow_errors(0), deep_errors(0), fixed(0),
active_rep_scrub(0),
must_scrub(false), must_deep_scrub(false), must_repair(false),
+ num_digest_updates_pending(0),
state(INACTIVE),
deep(false),
seed(0)
// Objects who need digest updates
map<hobject_t, pair<uint32_t,uint32_t> > missing_digest;
+ // Number of in-flight repops still applying digest updates; the scrub
+ // state machine stays in WAIT_DIGEST_UPDATES until this drops to 0.
+ int num_digest_updates_pending;
// chunky scrub
hobject_t start, end;
BUILD_MAP,
WAIT_REPLICAS,
COMPARE_MAPS,
+ WAIT_DIGEST_UPDATES,
FINISH,
} state;
case BUILD_MAP: ret = "BUILD_MAP"; break;
case WAIT_REPLICAS: ret = "WAIT_REPLICAS"; break;
case COMPARE_MAPS: ret = "COMPARE_MAPS"; break;
+ case WAIT_DIGEST_UPDATES: ret = "WAIT_DIGEST_UPDATES"; break;
case FINISH: ret = "FINISH"; break;
}
return ret;
missing.clear();
authoritative.clear();
missing_digest.clear();
+ num_digest_updates_pending = 0;
}
} scrubber;
return true;
}
+// Completion callback attached to each digest-update repop issued during
+// scrub; on completion it notifies the PG so the scrub state machine can
+// resume once all pending updates have applied.
+struct C_ScrubDigestUpdated : public Context {
+  // NOTE(review): ReplicatedPGRef presumably holds a reference keeping the
+  // PG alive until this callback fires — confirm against the typedef.
+  ReplicatedPGRef pg;
+  C_ScrubDigestUpdated(ReplicatedPG *pg) : pg(pg) {}
+  void finish(int r) {
+    // result code is ignored; the pending-update count is decremented
+    // unconditionally.
+    pg->_scrub_digest_updated();
+  }
+};
+
+// Called by C_ScrubDigestUpdated as each digest-update repop completes.
+// When the last pending update finishes, requeue this PG on the scrub
+// work queue so the state machine can advance past WAIT_DIGEST_UPDATES.
+void ReplicatedPG::_scrub_digest_updated()
+{
+  dout(20) << __func__ << dendl;
+  // NOTE(review): assumes num_digest_updates_pending > 0 on entry; a stray
+  // or duplicate callback would underflow the counter — consider an assert.
+  if (--scrubber.num_digest_updates_pending == 0) {
+    osd->scrub_wq.queue(this);
+  }
+}
+
void ReplicatedPG::_scrub(ScrubMap& scrubmap)
{
dout(10) << "_scrub" << dendl;
ctx->new_obs.oi.set_data_digest(p->second.first);
ctx->new_obs.oi.set_omap_digest(p->second.second);
finish_ctx(ctx, pg_log_entry_t::MODIFY, true, true);
+ ctx->on_finish = new C_ScrubDigestUpdated(this);
simple_repop_submit(repop);
+ ++scrubber.num_digest_updates_pending;
}
}
virtual bool _range_available_for_scrub(
const hobject_t &begin, const hobject_t &end);
virtual void _scrub(ScrubMap& map);
+ void _scrub_digest_updated();
virtual void _scrub_clear_state();
virtual void _scrub_finish();
object_stat_collection_t scrub_cstat;
+ friend class C_ScrubDigestUpdated;
virtual void _split_into(pg_t child_pgid, PG *child, unsigned split_bits);
void apply_and_flush_repops(bool requeue);