If we do not have a whole-object digest, record one after a deep scrub.
Note that we make no particular attempt to avoid this on frequently
changing objects where the digest will quickly be invalidated.
Signed-off-by: Sage Weil <sage@redhat.com>
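
For illustration, a minimal standalone sketch of the behaviour the patch adds
(ScrubObject, ObjectInfo, and the object name "foo" below are simplified
stand-ins for the real Ceph types, not the patch itself): during the scrub
comparison, an object whose authoritative replica has both digests computed
but whose object info lacks one of them is remembered, and the digests are
written back at the end of the chunk only if no shallow errors were found.

    // Sketch only: simplified model of the missing-digest bookkeeping.
    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <string>
    #include <utility>

    struct ScrubObject {             // stand-in for ScrubMap::object
      bool digest_present = false;
      bool omap_digest_present = false;
      uint32_t digest = 0;
      uint32_t omap_digest = 0;
    };

    struct ObjectInfo {              // stand-in for object_info_t
      bool has_data_digest = false;
      bool has_omap_digest = false;
    };

    int main() {
      std::map<std::string, std::pair<uint32_t, uint32_t> > missing_digest;
      int shallow_errors = 0;

      // Deep scrub computed both digests for the authoritative copy of "foo",
      // but the stored object info does not carry them yet.
      ScrubObject auth_object;
      auth_object.digest_present = true;
      auth_object.omap_digest_present = true;
      auth_object.digest = 0xdeadbeef;
      auth_object.omap_digest = 0xfeedface;
      ObjectInfo auth_oi;

      // Comparison phase: note the digests instead of writing immediately.
      if (auth_object.digest_present && auth_object.omap_digest_present &&
          (!auth_oi.has_data_digest || !auth_oi.has_omap_digest))
        missing_digest["foo"] = std::make_pair(auth_object.digest,
                                               auth_object.omap_digest);

      // Finish phase: persist only when the scrubbed chunk was otherwise clean.
      if (shallow_errors == 0) {
        for (std::map<std::string, std::pair<uint32_t, uint32_t> >::iterator p =
               missing_digest.begin(); p != missing_digest.end(); ++p)
          std::cout << "recording digests for " << p->first << std::endl;
      }
      return 0;
    }
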
scrubber.missing,
scrubber.inconsistent,
authoritative,
+ scrubber.missing_digest,
scrubber.shallow_errors,
scrubber.deep_errors,
info.pgid, acting,
// Map from object with errors to good peer
map<hobject_t, pair<ScrubMap::object, pg_shard_t> > authoritative;
+ // Objects that need digest updates
+ map<hobject_t, pair<uint32_t,uint32_t> > missing_digest;
+
// chunky scrub
hobject_t start, end;
eversion_t subset_last_update;
inconsistent.clear();
missing.clear();
authoritative.clear();
+ missing_digest.clear();
}
} scrubber;
map<hobject_t, set<pg_shard_t> > &missing,
map<hobject_t, set<pg_shard_t> > &inconsistent,
map<hobject_t, pg_shard_t> &authoritative,
+ map<hobject_t, pair<uint32_t,uint32_t> > &missing_digest,
int &shallow_errors, int &deep_errors,
const spg_t& pgid,
const vector<int> &acting,
}
assert(auth != maps.end());
+ ScrubMap::object& auth_object = auth->second->objects[*k];
set<pg_shard_t> cur_missing;
set<pg_shard_t> cur_inconsistent;
for (j = maps.begin(); j != maps.end(); ++j) {
stringstream ss;
enum scrub_error_type error =
be_compare_scrub_objects(auth->first,
- auth->second->objects[*k],
+ auth_object,
auth_oi,
okseed,
j->second->objects[*k],
if (!cur_inconsistent.empty() || !cur_missing.empty()) {
authoritative[*k] = auth->first;
}
+ if (okseed &&
+ auth_object.digest_present && auth_object.omap_digest_present &&
+ (!auth_oi.is_data_digest() || !auth_oi.is_omap_digest())) {
+ dout(20) << __func__ << " noting missing digest on " << *k << dendl;
+ missing_digest[*k] = make_pair(auth_object.digest,
+ auth_object.omap_digest);
+ }
+
}
}
map<hobject_t, set<pg_shard_t> > &missing,
map<hobject_t, set<pg_shard_t> > &inconsistent,
map<hobject_t, pg_shard_t> &authoritative,
+ map<hobject_t, pair<uint32_t,uint32_t> > &missing_digest,
int &shallow_errors, int &deep_errors,
const spg_t& pgid,
const vector<int> &acting,
return result;
}
-void ReplicatedPG::finish_ctx(OpContext *ctx, int log_op_type, bool maintain_ssc)
+void ReplicatedPG::finish_ctx(OpContext *ctx, int log_op_type, bool maintain_ssc,
+ bool scrub_ok)
{
const hobject_t& soid = ctx->obs->oi.soid;
dout(20) << __func__ << " " << soid << " " << ctx
pending_backfill_updates[soid].stats.add(ctx->delta_stats);
}
- if (scrubber.active) {
+ if (!scrub_ok && scrubber.active) {
assert(soid < scrubber.start || soid >= scrubber.end);
if (soid < scrubber.start)
scrub_cstat.add(ctx->delta_stats);
<< " expected clone " << next_clone;
++scrubber.shallow_errors;
}
+
+ if (scrubber.shallow_errors == 0) {
+ for (map<hobject_t,pair<uint32_t,uint32_t> >::iterator p =
+ scrubber.missing_digest.begin();
+ p != scrubber.missing_digest.end();
+ ++p) {
+ dout(10) << __func__ << " recording digests for " << p->first << dendl;
+ ObjectContextRef obc = get_object_context(p->first, false);
+ assert(obc);
+ RepGather *repop = simple_repop_create(obc);
+ OpContext *ctx = repop->ctx;
+ ctx->at_version = get_next_version();
+ ctx->new_obs.oi.set_data_digest(p->second.first);
+ ctx->new_obs.oi.set_omap_digest(p->second.second);
+ finish_ctx(ctx, pg_log_entry_t::MODIFY, true, true);
+ simple_repop_submit(repop);
+ }
+ }
dout(10) << "_scrub (" << mode << ") finish" << dendl;
}
const hobject_t& head, const hobject_t& coid,
object_info_t *poi);
void execute_ctx(OpContext *ctx);
- void finish_ctx(OpContext *ctx, int log_op_type, bool maintain_ssc=true);
+ void finish_ctx(OpContext *ctx, int log_op_type, bool maintain_ssc=true,
+ bool scrub_ok=false);
void reply_ctx(OpContext *ctx, int err);
void reply_ctx(OpContext *ctx, int err, eversion_t v, version_t uv);
void make_writeable(OpContext *ctx);