From: Michael J. Kidd
Date: Fri, 19 Apr 2024 14:20:22 +0000 (-0700)
Subject: PGMap: remove pool max_avail scale factor
X-Git-Tag: v20.0.0~1292^2
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=4de57e904cffbf134b25b28f46caccebfc4456b1;p=ceph.git

PGMap: remove pool max_avail scale factor

Scaling max_avail by the ratio of non-degraded to total object copies
causes the reported max_avail to increase in proportion to the number of
OSDs that are marked `down` but not `out`. This is counterintuitive,
since OSDs going `down` should never result in more space being reported
as available. Removing the scale factor keeps max_avail unchanged until
the OSDs are marked `out`.

Signed-off-by: Michael J. Kidd
---

diff --git a/src/mon/PGMap.cc b/src/mon/PGMap.cc
index 16fa71c84c069..f57a50c6d22d3 100644
--- a/src/mon/PGMap.cc
+++ b/src/mon/PGMap.cc
@@ -882,10 +882,6 @@ void PGMapDigest::dump_object_stat_sum(
   const object_stat_sum_t &sum = pool_stat.stats.sum;
   const store_statfs_t statfs = pool_stat.store_stats;
 
-  if (sum.num_object_copies > 0) {
-    raw_used_rate *= (float)(sum.num_object_copies - sum.num_objects_degraded) / sum.num_object_copies;
-  }
-
   uint64_t used_data_bytes = pool_stat.get_allocated_data_bytes(per_pool);
   uint64_t used_omap_bytes = pool_stat.get_allocated_omap_bytes(per_pool_omap);
   uint64_t used_bytes = used_data_bytes + used_omap_bytes;
diff --git a/src/test/mon/PGMap.cc b/src/test/mon/PGMap.cc
index 6c052fe5f36eb..43d6de4c78341 100644
--- a/src/test/mon/PGMap.cc
+++ b/src/test/mon/PGMap.cc
@@ -83,23 +83,20 @@ TEST(pgmap, dump_object_stat_sum_0)
   pool.tier_of = 0;
   PGMap::dump_object_stat_sum(tbl, nullptr, pool_stat, avail,
                               pool.get_size(), verbose, true, true, &pool);
-  float copies_rate =
-    (static_cast<float>(sum.num_object_copies - sum.num_objects_degraded) /
-     sum.num_object_copies) * pool.get_size();
+  float used_percent = (float)statfs.allocated / (statfs.allocated + avail) * 100;
-  uint64_t stored = statfs.data_stored / copies_rate;
 
   unsigned col = 0;
-  ASSERT_EQ(stringify(byte_u_t(stored)), tbl.get(0, col++));
-  ASSERT_EQ(stringify(byte_u_t(stored)), tbl.get(0, col++));
+  ASSERT_EQ(stringify(byte_u_t(statfs.data_stored/pool.get_size())), tbl.get(0, col++));
+  ASSERT_EQ(stringify(byte_u_t(statfs.data_stored/pool.get_size())), tbl.get(0, col++));
   ASSERT_EQ(stringify(byte_u_t(0)), tbl.get(0, col++));
   ASSERT_EQ(stringify(si_u_t(sum.num_objects)), tbl.get(0, col++));
   ASSERT_EQ(stringify(byte_u_t(statfs.allocated)), tbl.get(0, col++));
   ASSERT_EQ(stringify(byte_u_t(statfs.allocated)), tbl.get(0, col++));
   ASSERT_EQ(stringify(byte_u_t(0)), tbl.get(0, col++));
   ASSERT_EQ(percentify(used_percent), tbl.get(0, col++));
-  ASSERT_EQ(stringify(byte_u_t(avail/copies_rate)), tbl.get(0, col++));
+  ASSERT_EQ(stringify(byte_u_t(avail/pool.get_size())), tbl.get(0, col++));
   ASSERT_EQ(stringify(si_u_t(pool.quota_max_objects)), tbl.get(0, col++));
   ASSERT_EQ(stringify(byte_u_t(pool.quota_max_bytes)), tbl.get(0, col++));
   ASSERT_EQ(stringify(si_u_t(sum.num_objects_dirty)), tbl.get(0, col++));
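
Editor's note: the following is a minimal standalone sketch (not Ceph code) of
the arithmetic this patch changes, mirroring the test's old expectation
(avail/copies_rate) versus the new one (avail/pool.get_size()). The numeric
values and the simplification MAX AVAIL = raw_avail / raw_used_rate are
illustrative assumptions only.

    // Sketch: how the removed scale factor inflates MAX AVAIL while OSDs are
    // down but not out. Values are hypothetical.
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t raw_avail = 300ULL << 30;     // 300 GiB of raw space free
      const double   pool_size = 3.0;              // 3x replicated pool
      const uint64_t num_object_copies = 3000;     // expected object copies
      const uint64_t num_objects_degraded = 1000;  // copies missing while OSDs are down

      // Old behaviour (removed by this patch): scale the raw used rate by the
      // fraction of copies that are not degraded.
      double raw_used_rate = pool_size;
      if (num_object_copies > 0) {
        raw_used_rate *= (double)(num_object_copies - num_objects_degraded) /
                         num_object_copies;        // 3 * (2000/3000) = 2
      }
      std::printf("MAX AVAIL with scale factor:    %llu GiB\n",
                  (unsigned long long)(raw_avail / raw_used_rate) >> 30);  // ~150 GiB

      // New behaviour: always divide by the full replication factor, so
      // down-but-not-out OSDs no longer inflate the reported MAX AVAIL.
      std::printf("MAX AVAIL without scale factor: %llu GiB\n",
                  (unsigned long long)(raw_avail / pool_size) >> 30);      // ~100 GiB
      return 0;
    }

With one third of the object copies degraded, the old formula reports roughly
50% more available space (150 GiB vs 100 GiB) even though no raw capacity has
been added, which is the counterintuitive behaviour the commit message describes.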