From: Sage Weil <sage@redhat.com>
Date: Tue, 23 Jul 2019 22:46:11 +0000 (-0500)
Subject: mon/PGMap: add in actual omap usage into per-pool stats
X-Git-Tag: v15.1.0~1915^2~13
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=b207973ae998e67c3f00a6cbd6dba92aa45dc7b2;p=ceph.git

mon/PGMap: add in actual omap usage into per-pool stats

This is a minimal change: we aren't separately reporting data vs omap
usage (like we do in 'osd df' output for individual osds).

Signed-off-by: Sage Weil <sage@redhat.com>
---

diff --git a/src/mon/PGMap.cc b/src/mon/PGMap.cc
index fdab111b344f..76404a965388 100644
--- a/src/mon/PGMap.cc
+++ b/src/mon/PGMap.cc
@@ -808,8 +808,9 @@ void PGMapDigest::dump_pool_stats_full(
   }
   float raw_used_rate = osd_map.pool_raw_used_rate(pool_id);
   bool per_pool = use_per_pool_stats();
+  bool per_pool_omap = use_per_pool_omap_stats();
   dump_object_stat_sum(tbl, f, stat, avail, raw_used_rate, verbose, per_pool,
-                       pool);
+                       per_pool_omap, pool);
   if (f) {
     f->close_section();  // stats
     f->close_section();  // pool
@@ -891,7 +892,7 @@ void PGMapDigest::dump_cluster_stats(stringstream *ss,
 void PGMapDigest::dump_object_stat_sum(
   TextTable &tbl, ceph::Formatter *f,
   const pool_stat_t &pool_stat, uint64_t avail,
-  float raw_used_rate, bool verbose, bool per_pool,
+  float raw_used_rate, bool verbose, bool per_pool, bool per_pool_omap,
   const pg_pool_t *pool)
 {
   const object_stat_sum_t &sum = pool_stat.stats.sum;
@@ -901,7 +902,7 @@ void PGMapDigest::dump_object_stat_sum(
     raw_used_rate *= (float)(sum.num_object_copies - sum.num_objects_degraded) / sum.num_object_copies;
   }
 
-  uint64_t used_bytes = pool_stat.get_allocated_bytes(per_pool);
+  uint64_t used_bytes = pool_stat.get_allocated_bytes(per_pool, per_pool_omap);
 
   float used = 0.0;
   // note avail passed in is raw_avail, calc raw_used here.
@@ -913,7 +914,8 @@
   }
   auto avail_res = raw_used_rate ? avail / raw_used_rate : 0;
   // an approximation for actually stored user data
-  auto stored_normalized = pool_stat.get_user_bytes(raw_used_rate, per_pool);
+  auto stored_normalized = pool_stat.get_user_bytes(raw_used_rate, per_pool,
+                                                    per_pool_omap);
   if (f) {
     f->dump_int("stored", stored_normalized);
     f->dump_int("objects", sum.num_objects);
@@ -932,7 +934,8 @@
       f->dump_int("compress_bytes_used", statfs.data_compressed_allocated);
       f->dump_int("compress_under_bytes", statfs.data_compressed_original);
       // Stored by user amplified by replication
-      f->dump_int("stored_raw", pool_stat.get_user_bytes(1.0, per_pool));
+      f->dump_int("stored_raw", pool_stat.get_user_bytes(1.0, per_pool,
+                                                         per_pool_omap));
     }
   } else {
     tbl << stringify(byte_u_t(stored_normalized));
diff --git a/src/mon/PGMap.h b/src/mon/PGMap.h
index 1fc92ebc7d16..20c1cac2777a 100644
--- a/src/mon/PGMap.h
+++ b/src/mon/PGMap.h
@@ -74,6 +74,9 @@ public:
   bool use_per_pool_stats() const {
     return osd_sum.num_osds == osd_sum.num_per_pool_osds;
   }
+  bool use_per_pool_omap_stats() const {
+    return osd_sum.num_osds == osd_sum.num_per_pool_omap_osds;
+  }
 
   // recent deltas, and summation
   /**
@@ -175,6 +178,7 @@ public:
                                    float raw_used_rate,
                                    bool verbose,
                                    bool per_pool,
+                                   bool per_pool_omap,
                                    const pg_pool_t *pool);
 
   size_t get_num_pg_by_osd(int osd) const {
diff --git a/src/osd/osd_types.h b/src/osd/osd_types.h
index 4d6242858dfe..e7c50752fb8d 100644
--- a/src/osd/osd_types.h
+++ b/src/osd/osd_types.h
@@ -2513,7 +2513,7 @@ struct pool_stat_t {
   // In legacy mode used and netto values are the same. But for new per-pool
   // collection 'used' provides amount of space ALLOCATED at all related OSDs
   // and 'netto' is amount of stored user data.
-  uint64_t get_allocated_bytes(bool per_pool) const {
+  uint64_t get_allocated_bytes(bool per_pool, bool per_pool_omap) const {
     uint64_t allocated_bytes;
     if (per_pool) {
       allocated_bytes = store_stats.allocated;
@@ -2522,11 +2522,16 @@ struct pool_stat_t {
       allocated_bytes = stats.sum.num_bytes +
         stats.sum.num_bytes_hit_set_archive;
     }
-    // omap is not broken out by pool by nautilus bluestore
-    allocated_bytes += stats.sum.num_omap_bytes;
+    if (per_pool_omap) {
+      allocated_bytes += store_stats.omap_allocated;
+    } else {
+      // omap is not broken out by pool by nautilus bluestore
+      allocated_bytes += stats.sum.num_omap_bytes;
+    }
     return allocated_bytes;
   }
-  uint64_t get_user_bytes(float raw_used_rate, bool per_pool) const {
+  uint64_t get_user_bytes(float raw_used_rate, bool per_pool,
+                          bool per_pool_omap) const {
     uint64_t user_bytes;
     if (per_pool) {
       user_bytes = raw_used_rate ? store_stats.data_stored / raw_used_rate : 0;
@@ -2535,8 +2540,12 @@ struct pool_stat_t {
       user_bytes = stats.sum.num_bytes +
        stats.sum.num_bytes_hit_set_archive;
     }
-    // omap is not broken out by pool by nautilus bluestore
-    user_bytes += stats.sum.num_omap_bytes;
+    if (per_pool_omap) {
+      user_bytes += store_stats.omap_allocated;
+    } else {
+      // omap is not broken out by pool by nautilus bluestore
+      user_bytes += stats.sum.num_omap_bytes;
+    }
     return user_bytes;
   }
 
diff --git a/src/test/mon/PGMap.cc b/src/test/mon/PGMap.cc
index 73007e0d5090..70535b404159 100644
--- a/src/test/mon/PGMap.cc
+++ b/src/test/mon/PGMap.cc
@@ -80,7 +80,7 @@ TEST(pgmap, dump_object_stat_sum_0)
   pool.size = 2;
   pool.type = pg_pool_t::TYPE_REPLICATED;
   PGMap::dump_object_stat_sum(tbl, nullptr, pool_stat, avail,
-                              pool.get_size(), verbose, true, &pool);
+                              pool.get_size(), verbose, true, true, &pool);
   float copies_rate =
     (static_cast<float>(sum.num_object_copies - sum.num_objects_degraded) /
      sum.num_object_copies) * pool.get_size();
@@ -117,7 +117,7 @@ TEST(pgmap, dump_object_stat_sum_1)
   pool.size = 2;
   pool.type = pg_pool_t::TYPE_REPLICATED;
   PGMap::dump_object_stat_sum(tbl, nullptr, pool_stat, avail,
-                              pool.get_size(), verbose, true, &pool);
+                              pool.get_size(), verbose, true, true, &pool);
   unsigned col = 0;
   ASSERT_EQ(stringify(byte_u_t(0)), tbl.get(0, col++));
   ASSERT_EQ(stringify(si_u_t(0)), tbl.get(0, col++));
@@ -148,7 +148,7 @@ TEST(pgmap, dump_object_stat_sum_2)
   pool.type = pg_pool_t::TYPE_REPLICATED;
 
   PGMap::dump_object_stat_sum(tbl, nullptr, pool_stat, avail,
-                              pool.get_size(), verbose, true, &pool);
+                              pool.get_size(), verbose, true, true, &pool);
   unsigned col = 0;
   ASSERT_EQ(stringify(byte_u_t(0)), tbl.get(0, col++));
   ASSERT_EQ(stringify(si_u_t(0)), tbl.get(0, col++));
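
Illustration (not part of the patch): the standalone sketch below mirrors the
new get_allocated_bytes() selection logic in isolation, showing how the
per-pool USED figure changes once per-pool omap stats are available. The
StoreStats/SumStats structs are simplified stand-ins for Ceph's
store_statfs_t and object_stat_sum_t, not the real types.

// Minimal standalone sketch of the omap accounting change above.
// Build with, e.g., g++ -std=c++11 sketch.cc
#include <cstdint>
#include <iostream>

struct StoreStats {        // stand-in for store_statfs_t (per-pool collection)
  uint64_t allocated;      // space allocated for this pool's data on all OSDs
  uint64_t omap_allocated; // this pool's actual omap usage (newly consumed)
};

struct SumStats {          // stand-in for object_stat_sum_t (PG-level stats)
  uint64_t num_bytes;
  uint64_t num_bytes_hit_set_archive;
  uint64_t num_omap_bytes; // omap figure not broken out by pool (legacy path)
};

// Mirrors pool_stat_t::get_allocated_bytes() after this patch: the omap
// contribution comes from per-pool store stats only when every OSD reports
// them (per_pool_omap); otherwise the legacy PG-level figure is added.
uint64_t get_allocated_bytes(const StoreStats &store, const SumStats &sum,
                             bool per_pool, bool per_pool_omap)
{
  uint64_t allocated = per_pool
    ? store.allocated
    : sum.num_bytes + sum.num_bytes_hit_set_archive;
  if (per_pool_omap) {
    allocated += store.omap_allocated;  // actual per-pool omap usage
  } else {
    allocated += sum.num_omap_bytes;    // coarse fallback, as before
  }
  return allocated;
}

int main()
{
  StoreStats store = {1048576, 4096};  // 1 MiB data, 4 KiB omap for this pool
  SumStats sum = {1048576, 0, 65536};  // PG stats carry a 64 KiB omap figure
  std::cout << "legacy omap accounting:   "
            << get_allocated_bytes(store, sum, true, false) << " bytes\n"
            << "per-pool omap accounting: "
            << get_allocated_bytes(store, sum, true, true) << " bytes\n";
  return 0;
}

Note that, per the PGMap.h hunk, use_per_pool_omap_stats() only returns true
once every OSD reports per-pool omap stats (num_osds ==
num_per_pool_omap_osds), so a cluster with any pre-upgrade OSDs keeps the
legacy fallback.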