From: Sage Weil
Date: Wed, 24 Jul 2019 15:03:24 +0000 (-0500)
Subject: osd/osd_types: separate get_{user,allocated}_bytes() into data and omap variants
X-Git-Tag: v15.1.0~1915^2~11
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=ab2eb6b832e07af4cd22e4d26c817bae254d6844;p=ceph-ci.git

osd/osd_types: separate get_{user,allocated}_bytes() into data and omap variants

Signed-off-by: Sage Weil
---

diff --git a/src/librados/librados_c.cc b/src/librados/librados_c.cc
index ac913d1ebdf..091cb9cf73b 100644
--- a/src/librados/librados_c.cc
+++ b/src/librados/librados_c.cc
@@ -1018,11 +1018,13 @@ extern "C" int _rados_ioctx_pool_stat(rados_ioctx_t io,
   }
 
   ::pool_stat_t& r = rawresult[pool_name];
-  uint64_t allocated_bytes = r.get_allocated_bytes(per_pool);
+  uint64_t allocated_bytes = r.get_allocated_data_bytes(per_pool) +
+    r.get_allocated_omap_bytes(per_pool);
   // FIXME: raw_used_rate is unknown hence use 1.0 here
   // meaning we keep net amount aggregated over all replicas
   // Not a big deal so far since this field isn't exposed
-  uint64_t user_bytes = r.get_user_bytes(1.0, per_pool);
+  uint64_t user_bytes = r.get_user_data_bytes(1.0, per_pool) +
+    r.get_user_omap_bytes(1.0, per_pool);
 
   stats->num_kb = shift_round_up(allocated_bytes, 10);
   stats->num_bytes = allocated_bytes;
diff --git a/src/librados/librados_cxx.cc b/src/librados/librados_cxx.cc
index 53c24cf7f3c..e4f11fe54a4 100644
--- a/src/librados/librados_cxx.cc
+++ b/src/librados/librados_cxx.cc
@@ -2578,11 +2578,13 @@ int librados::Rados::get_pool_stats(std::list& v,
     pool_stat_t& pv = result[p->first];
     auto& pstat = p->second;
     store_statfs_t &statfs = pstat.store_stats;
-    uint64_t allocated_bytes = pstat.get_allocated_bytes(per_pool);
+    uint64_t allocated_bytes = pstat.get_allocated_data_bytes(per_pool) +
+      pstat.get_allocated_omap_bytes(per_pool);
     // FIXME: raw_used_rate is unknown hence use 1.0 here
     // meaning we keep net amount aggregated over all replicas
     // Not a big deal so far since this field isn't exposed
-    uint64_t user_bytes = pstat.get_user_bytes(1.0, per_pool);
+    uint64_t user_bytes = pstat.get_user_data_bytes(1.0, per_pool) +
+      pstat.get_user_omap_bytes(1.0, per_pool);
 
     object_stat_sum_t *sum = &p->second.stats.sum;
     pv.num_kb = shift_round_up(allocated_bytes, 10);
diff --git a/src/mon/PGMap.cc b/src/mon/PGMap.cc
index 1e580541bcc..3210b7ce503 100644
--- a/src/mon/PGMap.cc
+++ b/src/mon/PGMap.cc
@@ -902,7 +902,9 @@ void PGMapDigest::dump_object_stat_sum(
     raw_used_rate *= (float)(sum.num_object_copies - sum.num_objects_degraded) /
       sum.num_object_copies;
   }
-  uint64_t used_bytes = pool_stat.get_allocated_bytes(per_pool, per_pool_omap);
+  uint64_t used_data_bytes = pool_stat.get_allocated_data_bytes(per_pool);
+  uint64_t used_omap_bytes = pool_stat.get_allocated_omap_bytes(per_pool_omap);
+  uint64_t used_bytes = used_data_bytes + used_omap_bytes;
 
   float used = 0.0;
   // note avail passed in is raw_avail, calc raw_used here.
@@ -914,8 +916,11 @@
   }
   auto avail_res = raw_used_rate ? avail / raw_used_rate : 0;
   // an approximation for actually stored user data
-  auto stored_normalized = pool_stat.get_user_bytes(raw_used_rate, per_pool,
-                                                    per_pool_omap);
+  auto stored_data_normalized = pool_stat.get_user_data_bytes(
+    raw_used_rate, per_pool);
+  auto stored_omap_normalized = pool_stat.get_user_omap_bytes(
+    raw_used_rate, per_pool_omap);
+  auto stored_normalized = stored_data_normalized + stored_omap_normalized;
   // same, amplied by replication or EC
   auto stored_raw = stored_normalized * raw_used_rate;
   if (f) {
diff --git a/src/osd/osd_types.h b/src/osd/osd_types.h
index 598b1478e61..776085c8b74 100644
--- a/src/osd/osd_types.h
+++ b/src/osd/osd_types.h
@@ -2513,44 +2513,45 @@ struct pool_stat_t {
   // In legacy mode used and netto values are the same. But for new per-pool
   // collection 'used' provides amount of space ALLOCATED at all related OSDs
   // and 'netto' is amount of stored user data.
-  uint64_t get_allocated_bytes(bool per_pool, bool per_pool_omap) const {
-    uint64_t allocated_bytes;
+  uint64_t get_allocated_data_bytes(bool per_pool) const {
     if (per_pool) {
-      allocated_bytes = store_stats.allocated;
+      return store_stats.allocated;
     } else {
       // legacy mode, use numbers from 'stats'
-      allocated_bytes = stats.sum.num_bytes +
-        stats.sum.num_bytes_hit_set_archive;
+      return stats.sum.num_bytes + stats.sum.num_bytes_hit_set_archive;
     }
+  }
+  uint64_t get_allocated_omap_bytes(bool per_pool_omap) const {
     if (per_pool_omap) {
-      allocated_bytes += store_stats.omap_allocated;
+      return store_stats.omap_allocated;
     } else {
-      // omap is not broken out by pool by nautilus bluestore
-      allocated_bytes += stats.sum.num_omap_bytes;
+      // omap is not broken out by pool by nautilus bluestore; report the
+      // scrub value. this will be imprecise in that it won't account for
+      // any storage overhead/efficiency.
+      return stats.sum.num_omap_bytes;
     }
-    return allocated_bytes;
   }
-  uint64_t get_user_bytes(float raw_used_rate, ///< space amp factor
-                          bool per_pool,
-                          bool per_pool_omap) const {
+  uint64_t get_user_data_bytes(float raw_used_rate, ///< space amp factor
+                               bool per_pool) const {
     // NOTE: we need the space amp factor so that we can work backwards from
     // the raw utilization to the amount of data that the user actually stored.
-    uint64_t user_bytes;
     if (per_pool) {
-      user_bytes = raw_used_rate ? store_stats.data_stored / raw_used_rate : 0;
+      return raw_used_rate ? store_stats.data_stored / raw_used_rate : 0;
     } else {
       // legacy mode, use numbers from 'stats'. note that we do NOT use the
      // raw_used_rate factor here because we are working from the PG stats
       // directly.
-      user_bytes = stats.sum.num_bytes + stats.sum.num_bytes_hit_set_archive;
+      return stats.sum.num_bytes + stats.sum.num_bytes_hit_set_archive;
     }
+  }
+  uint64_t get_user_omap_bytes(float raw_used_rate, ///< space amp factor
+                               bool per_pool_omap) const {
     if (per_pool_omap) {
-      user_bytes += store_stats.omap_allocated;
+      return raw_used_rate ? store_stats.omap_allocated / raw_used_rate : 0;
     } else {
-      // omap is not broken out by pool by nautilus bluestore
-      user_bytes += stats.sum.num_omap_bytes;
+      // omap usage is lazily reported during scrub; this value may lag.
+      return stats.sum.num_omap_bytes;
     }
-    return user_bytes;
   }
 
   void dump(ceph::Formatter *f) const;
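
A minimal, self-contained sketch of the split-getter pattern this change introduces, for illustration only: sketch_pool_stat, its fields, and main() are simplified stand-ins (not the real pool_stat_t, store_stats, or stats.sum), kept just to show how callers such as _rados_ioctx_pool_stat() now sum the data and omap variants instead of calling a single get_allocated_bytes()/get_user_bytes().

// sketch_pool_stat is a simplified stand-in for pool_stat_t; the field names
// only mirror the members referenced in the hunks above.
#include <cstdint>
#include <iostream>

struct sketch_pool_stat {
  // new-style per-pool store stats
  uint64_t store_allocated = 0;       // cf. store_stats.allocated
  uint64_t store_omap_allocated = 0;  // cf. store_stats.omap_allocated
  uint64_t store_data_stored = 0;     // cf. store_stats.data_stored
  // legacy PG-stat sums
  uint64_t num_bytes = 0;             // cf. stats.sum.num_bytes
  uint64_t num_omap_bytes = 0;        // cf. stats.sum.num_omap_bytes

  // data and omap come from separate getters so callers can honor per_pool
  // and per_pool_omap independently
  uint64_t get_allocated_data_bytes(bool per_pool) const {
    return per_pool ? store_allocated : num_bytes;
  }
  uint64_t get_allocated_omap_bytes(bool per_pool_omap) const {
    return per_pool_omap ? store_omap_allocated : num_omap_bytes;
  }
  uint64_t get_user_data_bytes(float raw_used_rate, bool per_pool) const {
    if (per_pool) {
      // divide raw (replicated/EC) usage back down to user data
      return raw_used_rate ? store_data_stored / raw_used_rate : 0;
    }
    return num_bytes;  // legacy PG stats are already net of replication
  }
  uint64_t get_user_omap_bytes(float raw_used_rate, bool per_pool_omap) const {
    if (per_pool_omap) {
      return raw_used_rate ? store_omap_allocated / raw_used_rate : 0;
    }
    return num_omap_bytes;
  }
};

int main() {
  sketch_pool_stat p;
  p.store_allocated = 6 << 20;       // 6 MiB of raw data allocation
  p.store_omap_allocated = 3 << 20;  // 3 MiB of raw omap allocation
  p.store_data_stored = 6 << 20;
  const bool per_pool = true;
  const bool per_pool_omap = true;
  const float raw_used_rate = 3.0;   // e.g. 3x replication

  // callers now sum the two variants, as in the librados/PGMap hunks above
  uint64_t allocated_bytes = p.get_allocated_data_bytes(per_pool) +
    p.get_allocated_omap_bytes(per_pool_omap);
  uint64_t user_bytes = p.get_user_data_bytes(raw_used_rate, per_pool) +
    p.get_user_omap_bytes(raw_used_rate, per_pool_omap);
  std::cout << "allocated=" << allocated_bytes      // 9437184
            << " user=" << user_bytes << std::endl; // 3145728
  return 0;
}

Keeping the omap path in its own getter also lets a caller apply per_pool_omap independently of per_pool, which matters when omap is not broken out per pool (the nautilus bluestore case noted in the comments above).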