scrubber.received_maps.clear();
{
+ hobject_t end;
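+ // candidate chunk end; only committed to scrubber.end once the range is known to be scrubbable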
// get the start and end of our scrub chunk
//
cct->_conf->osd_scrub_chunk_max,
0,
&objects,
- &scrubber.end);
+ &end);
assert(ret >= 0);
// in case we don't find a boundary: start again at the end
- start = scrubber.end;
+ start = end;
// special case: reached end of file store, implicitly a boundary
if (objects.empty()) {
break;
}
// search backward from the end looking for a boundary
- objects.push_back(scrubber.end);
+ objects.push_back(end);
while (!boundary_found && objects.size() > 1) {
- hobject_t end = objects.back().get_boundary();
+ hobject_t candidate_end = objects.back().get_boundary();
objects.pop_back();
- if (objects.back().get_filestore_key() != end.get_filestore_key()) {
+ if (objects.back().get_filestore_key() != candidate_end.get_filestore_key()) {
- scrubber.end = end;
+ end = candidate_end;
boundary_found = true;
}
}
}
- }
+ if (!_range_available_for_scrub(scrubber.start, end)) {
+ // we'll be requeued by whatever made us unavailable for scrub
+ done = true;
+ break;
+ }
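+ // the chunk is clear of blocked objects; publish the boundary before blocking writes to it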
+ scrubber.end = end;
+ }
scrubber.block_writes = true;
// walk the log to find the latest update that affects our chunk
void build_scrub_map(ScrubMap &map, ThreadPool::TPHandle &handle);
void build_inc_scrub_map(
ScrubMap &map, eversion_t v, ThreadPool::TPHandle &handle);
+ /**
+ * returns true if [begin, end) is good to scrub at this time
+ * a false return value obliges the implementer to requeue scrub when the
+ * condition preventing scrub clears
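+ * (e.g. ReplicatedPG returns false while an object in the chunk has writes blocked on another object's recovery)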
+ */
+ virtual bool _range_available_for_scrub(
+ const hobject_t &begin, const hobject_t &end) = 0;
virtual void _scrub(ScrubMap &map) { }
virtual void _scrub_clear_state() { }
virtual void _scrub_finish() { }
dout(10) << __func__ << " " << soid << " requeuing " << ls.size() << " requests" << dendl;
requeue_ops(ls);
waiting_for_blocked_object.erase(p);
+
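+ // a scrub that backed off on this object asked to be rescheduled once the block cleared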
+ if (obc->requeue_scrub_on_unblock)
+ osd->queue_for_scrub(this);
}
SnapSetContext *ReplicatedPG::create_snapset_context(const hobject_t& oid)
// SCRUB
+bool ReplicatedPG::_range_available_for_scrub(
+ const hobject_t &begin, const hobject_t &end)
+{
+ pair<hobject_t, ObjectContextRef> next;
+ next.second = object_contexts.lookup(begin);
+ next.first = begin;
+ bool more = true;
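+ // scan the in-memory object contexts in [begin, end); only an object with a live context can be blocked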
+ while (more && next.first < end) {
+ if (next.second && next.second->is_blocked()) {
+ next.second->requeue_scrub_on_unblock = true;
+ return false;
+ }
+ more = object_contexts.get_next(next.first, &next);
+ }
+ return true;
+}
+
void ReplicatedPG::_scrub(ScrubMap& scrubmap)
{
dout(10) << "_scrub" << dendl;
friend struct C_Flush;
// -- scrub --
+ virtual bool _range_available_for_scrub(
+ const hobject_t &begin, const hobject_t &end);
virtual void _scrub(ScrubMap& map);
virtual void _scrub_clear_state();
virtual void _scrub_finish();
// set if writes for this object are blocked on another objects recovery
ObjectContextRef blocked_by; // object blocking our writes
set<ObjectContextRef> blocking; // objects whose writes we block
+ bool requeue_scrub_on_unblock; // true if we need to requeue scrub on unblock
// any entity in obs.oi.watchers MUST be in either watchers or unconnected_watchers.
map<pair<uint64_t, entity_name_t>, WatchRef> watchers;
destructor_callback(0),
lock("ReplicatedPG::ObjectContext::lock"),
unstable_writes(0), readers(0), writers_waiting(0), readers_waiting(0),
- blocked(false) {}
+ blocked(false), requeue_scrub_on_unblock(false) {}
~ObjectContext() {
assert(rwstate.empty());