#include "ReplicatedPG.h"
#include "OSD.h"
#include "OpRequest.h"
+#include "ScrubStore.h"
#include "objclass/objclass.h"
#include "common/errno.h"
+#include "common/scrub_types.h"
#include "common/perf_counters.h"
#include "messages/MOSDOp.h"
const char *mode,
bool allow_incomplete_clones,
boost::optional<snapid_t> target,
- vector<snapid_t>::reverse_iterator *curclone)
+ vector<snapid_t>::reverse_iterator *curclone,
+ inconsistent_snapset_wrapper &e)
{
assert(head);
assert(snapset);
clog->error() << mode << " " << pgid << " " << head.get()
<< " expected clone " << next_clone;
++scrubber.shallow_errors;
+ e.set_clone_missing(next_clone.snap);
}
// Clones are descending
++(*curclone);
boost::optional<SnapSet> snapset; // If initialized, head (above) is too
vector<snapid_t>::reverse_iterator curclone; // Defined only if snapset is initialized
unsigned missing = 0;
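+ // per-object record of the snapset inconsistencies found so far; it is
+ // handed to process_clones_to() and flushed with add_snap_error() below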
+ inconsistent_snapset_wrapper snap_error;
bufferlist last_data;
for (map<hobject_t,ScrubMap::object, hobject_t::BitwiseComparator>::reverse_iterator
p = scrubmap.objects.rbegin(); p != scrubmap.objects.rend(); ++p) {
const hobject_t& soid = p->first;
+ snap_error = inconsistent_snapset_wrapper{soid};
object_stat_sum_t stat;
boost::optional<object_info_t> oi;
osd->clog->error() << mode << " " << info.pgid << " " << soid
<< " no '" << OI_ATTR << "' attr";
++scrubber.shallow_errors;
+ snap_error.set_ss_attr_missing();
} else {
bufferlist bv;
bv.push_back(p->second.attrs[OI_ATTR]);
osd->clog->error() << mode << " " << info.pgid << " " << soid
<< " can't decode '" << OI_ATTR << "' attr " << e.what();
++scrubber.shallow_errors;
+ snap_error.set_ss_attr_corrupted();
}
}
<< oi->size << ") adjusted for ondisk to ("
<< pgbackend->be_get_ondisk_size(oi->size)
<< ")";
+ snap_error.set_size_mismatch();
++scrubber.shallow_errors;
}
// Log any clones we were expecting to be there up to target
// This will set missing, but will be a no-op if soid.snap == *curclone.
missing += process_clones_to(head, snapset, osd->clog, info.pgid, mode,
- pool.info.allow_incomplete_clones(), target, &curclone);
+ pool.info.allow_incomplete_clones(), target, &curclone,
+ snap_error);
}
bool expected;
// Check doing_clones() again in case we ran process_clones_to()
if (head && !snapset) {
osd->clog->info() << mode << " " << info.pgid << " " << soid
<< " clone ignored due to missing snapset";
+ scrubber.store->add_snap_error(pool.id, snap_error);
continue;
}
osd->clog->error() << mode << " " << info.pgid << " " << soid
<< " is an unexpected clone";
++scrubber.shallow_errors;
+ snap_error.set_headless();
+ scrubber.store->add_snap_error(pool.id, snap_error);
continue;
}
if (missing) {
log_missing(missing, head, osd->clog, info.pgid, __func__, mode,
pool.info.allow_incomplete_clones());
+ scrubber.store->add_snap_error(pool.id, snap_error);
}
// Set this as a new head object
head = soid;
missing = 0;
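+ // start a fresh snapset-error record for the new head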
+ snap_error = inconsistent_snapset_wrapper{head.get()};
dout(20) << __func__ << " " << mode << " new head " << head << dendl;
<< " no '" << SS_ATTR << "' attr";
++scrubber.shallow_errors;
snapset = boost::none;
+ snap_error.set_ss_attr_missing();
} else {
bufferlist bl;
bl.push_back(p->second.attrs[SS_ATTR]);
osd->clog->error() << mode << " " << info.pgid << " " << soid
<< " can't decode '" << SS_ATTR << "' attr " << e.what();
++scrubber.shallow_errors;
+ snap_error.set_ss_attr_corrupted();
}
}
osd->clog->error() << mode << " " << info.pgid << " " << soid
<< " snaps.seq not set";
++scrubber.shallow_errors;
+ snap_error.set_snapset_mismatch();
}
}
osd->clog->error() << mode << " " << info.pgid << " " << soid
<< " snapset.head_exists=false, but head exists";
++scrubber.shallow_errors;
+ snap_error.set_head_mismatch();
}
if (soid.is_snapdir() && snapset->head_exists) {
osd->clog->error() << mode << " " << info.pgid << " " << soid
<< " snapset.head_exists=true, but snapdir exists";
++scrubber.shallow_errors;
+ snap_error.set_head_mismatch();
}
}
} else {
osd->clog->error() << mode << " " << info.pgid << " " << soid
<< " is missing in clone_size";
++scrubber.shallow_errors;
+ snap_error.set_size_mismatch();
} else {
if (oi && oi->size != snapset->clone_size[soid.snap]) {
osd->clog->error() << mode << " " << info.pgid << " " << soid
<< " size " << oi->size << " != clone_size "
<< snapset->clone_size[*curclone];
++scrubber.shallow_errors;
+ snap_error.set_size_mismatch();
}
if (snapset->clone_overlap.count(soid.snap) == 0) {
osd->clog->error() << mode << " " << info.pgid << " " << soid
<< " is missing in clone_overlap";
++scrubber.shallow_errors;
- } else {
+ snap_error.set_size_mismatch();
+ } else {
// This checking is based on get_clone_bytes(). The first 2 asserts
// can't happen because we know we have a clone_size and
// a clone_overlap. Now we check that the interval_set won't
osd->clog->error() << mode << " " << info.pgid << " " << soid
<< " bad interval_set in clone_overlap";
++scrubber.shallow_errors;
+ snap_error.set_size_mismatch();
} else {
stat.num_bytes += snapset->get_clone_bytes(soid.snap);
}
<< " No more objects while processing " << head.get() << dendl;
missing += process_clones_to(head, snapset, osd->clog, info.pgid, mode,
- pool.info.allow_incomplete_clones(), all_clones, &curclone);
-
+ pool.info.allow_incomplete_clones(), all_clones, &curclone,
+ snap_error);
}
// Missing clones could have been found by the test above, or even
// before we dropped out of the loop for the last head.
if (missing) {
log_missing(missing, head, osd->clog, info.pgid, __func__,
mode, pool.info.allow_incomplete_clones());
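+ // persist the snapset errors noted for the last head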
+ scrubber.store->add_snap_error(pool.id, snap_error);
}
for (map<hobject_t,pair<uint32_t,uint32_t>, hobject_t::BitwiseComparator>::const_iterator p =
hoid.build_hash_cache();
return "SCRUB_OBJ_" + hoid.to_str();
}
+
+string first_snap_key(int64_t pool)
+{
+ // the scrub store object is per-spg_t, so we can misuse the hash (pg.seed)
+ // to represent the minimal and maximum keys. this relies on how
+ // hobject_t::to_str() works: hex(pool).hex(revhash).
+ auto hoid = hobject_t(object_t(),
+ "",
+ 0,
+ 0x00000000,
+ pool,
+ "");
+ hoid.build_hash_cache();
+ return "SCRUB_SS_" + hoid.to_str();
+}
+
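+// the hash is an arbitrary fixed value (0x77777777): the same object always
+// maps to the same key, and it sorts between the all-zero and all-ones hashes
+// used by first_snap_key() and last_snap_key()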
+string to_snap_key(int64_t pool, const librados::object_id_t& oid)
+{
+ auto hoid = hobject_t(object_t(oid.name),
+ oid.locator, // key
+ oid.snap,
+ 0x77777777, // hash
+ pool,
+ oid.nspace);
+ hoid.build_hash_cache();
+ return "SCRUB_SS_" + hoid.to_str();
+}
+
+string last_snap_key(int64_t pool)
+{
+ auto hoid = hobject_t(object_t(),
+ "",
+ 0,
+ 0xffffffff,
+ pool,
+ "");
+ hoid.build_hash_cache();
+ return "SCRUB_SS_" + hoid.to_str();
+}
}
namespace Scrub {
results[to_object_key(pool, e.object)] = bl;
}
+void Store::add_snap_error(int64_t pool, const inconsistent_snapset_wrapper& e)
+{
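+ // keyed by pool and object, so all snapset errors of one pool can later be
+ // listed as a single key range by get_snap_errors()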
+ bufferlist bl;
+ e.encode(bl);
+ results[to_snap_key(pool, e.object)] = bl;
+}
+
bool Store::empty() const
{
return results.empty();
t->remove(coll, hoid);
}
+std::vector<bufferlist>
+Store::get_snap_errors(ObjectStore* store,
+ int64_t pool,
+ const librados::object_id_t& start,
+ uint64_t max_return)
+{
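+ // resume from 'start' if the caller supplied one, otherwise scan from the
+ // first snapset error recorded for this pool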
+ const string begin = (start.name.empty() ?
+ first_snap_key(pool) : to_snap_key(pool, start));
+ const string end = last_snap_key(pool);
+ return get_errors(store, begin, end, max_return);
+}
+
std::vector<bufferlist>
Store::get_object_errors(ObjectStore* store,
int64_t pool,
}
return errors;
}
} // namespace Scrub