return out << ")";
}
+// pool_fsck_stats_t
+
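+// Render the per-pool fsck counters as a single human-readable line.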
+std::ostream& operator<<(std::ostream& out, const BlueStore::pool_fsck_stats_t& s)
+{
+ out << "(" << s.num_objects << " objects, "
+ << s.shared_blobs << " shared blobs, "
+ << s.omaps << " omaps, "
+ << s.omap_key_size << " bytes in omap keys, "
+ << s.omap_val_size << " bytes in omap vals, "
+ << s.stored << " bytes stored, "
+ << s.allocated << " bytes allocated"
+ << ")";
+ return out;
+}
+
namespace {
/*
int _fsck_sum_extents(
const PExtentVector& extents,
bool compressed,
- store_statfs_t& expected_statfs)
+ store_statfs_t& expected_statfs,
+ BlueStore::pool_fsck_stats_t& pool_fsck_stat)
{
for (auto e : extents) {
if (!e.is_valid())
continue;
expected_statfs.allocated += e.length;
+ pool_fsck_stat.allocated += e.length;
if (compressed) {
expected_statfs.data_compressed_allocated += e.length;
}
uint64_t granularity,
BlueStoreRepairer* repairer,
store_statfs_t& expected_statfs,
+ BlueStore::pool_fsck_stats_t& pool_fsck_stat,
FSCKDepth depth)
{
dout(30) << __func__ << " " << ctx_descr << ", extents " << extents << dendl;
if (!e.is_valid())
continue;
expected_statfs.allocated += e.length;
+ pool_fsck_stat.allocated += e.length;
if (compressed) {
expected_statfs.data_compressed_allocated += e.length;
}
const bufferlist& value,
mempool::bluestore_fsck::list<string>* expecting_shards,
map<BlobRef, bluestore_blob_t::unused_t>* referenced,
- const BlueStore::FSCK_ObjectCtx& ctx)
+ BlueStore::FSCK_ObjectCtx& ctx)
{
auto& errors = ctx.errors;
auto& num_objects = ctx.num_objects;
auto& sb_info = ctx.sb_info;
auto& sb_ref_counts = ctx.sb_ref_counts;
auto repairer = ctx.repairer;
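+ // objects in PG collections are accounted to their pool; everything
+ // else goes to the virtual meta pool (META_POOL_ID)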
+ pool_fsck_stats_t* pool_fsck_stat =
+ &ctx.per_pool_fsck_stats[c->cid.is_pg() ? c->cid.pool() : META_POOL_ID];
store_statfs_t* res_statfs = (per_pool_stat_collection || repairer) ?
&ctx.expected_pool_statfs[pool_id] :
OnodeRef o;
o.reset(Onode::create_decode(c, oid, key, value));
++num_objects;
-
+ ++pool_fsck_stat->num_objects;
num_spanning_blobs += o->extent_map.spanning_blob_map.size();
o->extent_map.fault_range(db, 0, OBJECT_MAX_SIZE);
}
pos = l.logical_offset + l.length;
res_statfs->data_stored += l.length;
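+ // mirror logical bytes stored into the per-pool fsck stats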
+ pool_fsck_stat->stored += l.length;
ceph_assert(l.blob);
const bluestore_blob_t& blob = l.blob->get_blob();
blob.is_compressed(),
*used_blocks,
fm->get_alloc_size(),
- repairer,
+ repairer,
*res_statfs,
+ *pool_fsck_stat,
depth);
} else {
errors += _fsck_sum_extents(
blob.get_extents(),
blob.is_compressed(),
- *res_statfs);
+ *res_statfs,
+ *pool_fsck_stat);
}
} // for (auto& i : ref_map)
uint64_t num_spanning_blobs = 0;
store_statfs_t expected_store_statfs;
BlueStore::per_pool_statfs expected_pool_statfs;
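+ // per-pool fsck stats collected by this batch; merged into the
+ // shared ctx when the batch results are consumed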
+ BlueStore::per_pool_fsck_stats_t per_pool_fsck_stats;
};
size_t batchCount;
*sb_ref_counts,
batch->expected_store_statfs,
batch->expected_pool_statfs,
+ batch->per_pool_fsck_stats,
repairer);
for (size_t i = 0; i < batch->entry_count; i++) {
it++) {
ctx.expected_pool_statfs[it->first].add(it->second);
}
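+ // fold this batch's per-pool fsck stats into the shared context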
+ for (auto it = batch.per_pool_fsck_stats.begin();
+ it != batch.per_pool_fsck_stats.end();
+ it++) {
+ ctx.per_pool_fsck_stats[it->first].add(it->second);
+ }
}
}
};
KeyValueDB::Iterator it;
store_statfs_t expected_store_statfs;
per_pool_statfs expected_pool_statfs;
+ per_pool_fsck_stats_t per_pool_fsck_stats;
sb_info_space_efficient_map_t sb_info;
shared_blob_2hash_tracker_t sb_ref_counts(
sb_ref_counts,
expected_store_statfs,
expected_pool_statfs,
+ per_pool_fsck_stats,
repair ? &repairer : nullptr);
_fsck_check_objects(depth, ctx);
}
std::stringstream ss;
ss << "sbid 0x" << std::hex << sbid << std::dec;
+
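+ // count this shared blob against its owning pool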
+ pool_fsck_stats_t& ppfs = per_pool_fsck_stats[sbi.pool_id];
+ ppfs.shared_blobs++;
errors += _fsck_check_extents(ss.str(),
extents,
sbi.allocated_chunks < 0,
fm->get_alloc_size(),
repair ? &repairer : nullptr,
*expected_statfs,
+ ppfs,
depth);
}
}
it = db->get_iterator(PREFIX_PGMETA_OMAP, KeyValueDB::ITERATOR_NOCACHE);
if (it) {
uint64_t last_omap_head = 0;
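+ // PGMETA omaps always belong to the meta pool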
+ pool_fsck_stats_t& ppfs = per_pool_fsck_stats[META_POOL_ID];
for (it->lower_bound(string()); it->valid(); it->next()) {
uint64_t omap_head;
_key_decode_u64(it->key().c_str(), &omap_head);
+ ppfs.omaps++;
+ ppfs.omap_key_size += it->key().size();
+ ppfs.omap_val_size += it->value().length();
if (used_omap_head.count(omap_head) == 0 &&
omap_head != last_omap_head) {
pair<string,string> rk = it->raw_key();
const char *c = k.c_str();
c = _key_decode_u64(c, &pool);
c = _key_decode_u64(c, &omap_head);
+ auto p =
+ pool > 0 ? pool : META_POOL_ID; // pool==0 is erroneously used for
+ // meta (aka pool==-1) objects
+ // (see #64153), hence treat it as meta
+ pool_fsck_stats_t& ppfs = per_pool_fsck_stats[p];
+ ppfs.omaps++;
+ ppfs.omap_key_size += it->key().size();
+ ppfs.omap_val_size += it->value().length();
if (used_omap_head.count(omap_head) == 0 &&
omap_head != last_omap_head) {
pair<string,string> rk = it->raw_key();
c = _key_decode_u64(c, &pool);
c = _key_decode_u32(c, &hash);
c = _key_decode_u64(c, &omap_head);
+ auto p =
+ pool > 0 ? pool : META_POOL_ID; // pool==0 is erroneously used for
+ // meta (aka pool==-1) objects
+ // (see #64153), hence treat it as meta
+ pool_fsck_stats_t& ppfs = per_pool_fsck_stats[p];
+ ppfs.omaps++;
+ ppfs.omap_key_size += it->key().size();
+ ppfs.omap_val_size += it->value().length();
if (used_omap_head.count(omap_head) == 0 &&
omap_head != last_omap_head) {
fsck_derr(errors, MAX_FSCK_ERROR_LINES)
<< num_spanning_blobs << " spanning, "
<< num_shared_blobs << " shared."
<< dendl;
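+ // dump the per-pool stats accumulated during the scan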
+ dout(2) << __func__ << " Per-pool stats:" << dendl;
+ for (auto& p : per_pool_fsck_stats) {
+ dout(2) << __func__
+ << " pool "
+ << p.first << " -> " << p.second
+ << dendl;
+ }
utime_t duration = ceph_clock_now() - start;
dout(1) << __func__ << " <<<FINISH>>> with " << errors << " errors, "
using per_pool_statfs =
mempool::bluestore_fsck::map<uint64_t, store_statfs_t>;
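+ // Per-pool counters gathered during fsck; aggregated with add() and
+ // printed via operator<<.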
+ struct pool_fsck_stats_t {
+ uint64_t num_objects = 0;
+ uint64_t shared_blobs = 0;
+ uint64_t omaps = 0;
+ uint64_t omap_key_size = 0;
+ uint64_t omap_val_size = 0;
+ uint64_t stored = 0;
+ uint64_t allocated = 0;
+
+ void add(const pool_fsck_stats_t& other) {
+ num_objects += other.num_objects;
+ shared_blobs += other.shared_blobs;
+ omaps += other.omaps;
+ omap_key_size += other.omap_key_size;
+ omap_val_size += other.omap_val_size;
+ stored += other.stored;
+ allocated += other.allocated;
+ }
+ friend std::ostream& operator<<(std::ostream& out, const pool_fsck_stats_t& s);
+ };
+ using per_pool_fsck_stats_t =
+ mempool::bluestore_fsck::map<int64_t, pool_fsck_stats_t>; // pool_id -> stats
+
enum FSCKDepth {
FSCK_REGULAR,
FSCK_DEEP,
uint64_t granularity,
BlueStoreRepairer* repairer,
store_statfs_t& expected_statfs,
+ pool_fsck_stats_t& pool_fsck_stat,
FSCKDepth depth);
void _fsck_check_statfs(
store_statfs_t& expected_store_statfs;
per_pool_statfs& expected_pool_statfs;
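+ // per-pool fsck stats, keyed by pool id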
+ per_pool_fsck_stats_t& per_pool_fsck_stats;
BlueStoreRepairer* repairer;
FSCK_ObjectCtx(int64_t& e,
shared_blob_2hash_tracker_t& _sb_ref_counts,
store_statfs_t& _store_statfs,
per_pool_statfs& _pool_statfs,
+ per_pool_fsck_stats_t& _per_pool_fsck_stats,
BlueStoreRepairer* _repairer) :
errors(e),
warnings(w),
sb_ref_counts(_sb_ref_counts),
expected_store_statfs(_store_statfs),
expected_pool_statfs(_pool_statfs),
+ per_pool_fsck_stats(_per_pool_fsck_stats),
repairer(_repairer) {
}
};
const ceph::buffer::list& value,
mempool::bluestore_fsck::list<std::string>* expecting_shards,
std::map<BlobRef, bluestore_blob_t::unused_t>* referenced,
- const BlueStore::FSCK_ObjectCtx& ctx);
+ BlueStore::FSCK_ObjectCtx& ctx);
#ifdef CEPH_BLUESTORE_TOOL_RESTORE_ALLOCATION
int push_allocation_to_rocksdb();
int read_allocation_from_drive_for_bluestore_tool();