}
::pool_stat_t& r = rawresult[pool_name];
- uint64_t allocated_bytes = r.get_allocated_bytes(per_pool);
+ uint64_t allocated_bytes = r.get_allocated_data_bytes(per_pool) +
+ r.get_allocated_omap_bytes(per_pool);
// FIXME: raw_used_rate is unknown hence use 1.0 here
// meaning we keep net amount aggregated over all replicas
// Not a big deal so far since this field isn't exposed
- uint64_t user_bytes = r.get_user_bytes(1.0, per_pool);
+ uint64_t user_bytes = r.get_user_data_bytes(1.0, per_pool) +
+ r.get_user_omap_bytes(1.0, per_pool);
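+ // report KiB rounded up: shift_round_up(x, 10) is ceil(x / 1024)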
stats->num_kb = shift_round_up(allocated_bytes, 10);
stats->num_bytes = allocated_bytes;
pool_stat_t& pv = result[p->first];
auto& pstat = p->second;
store_statfs_t &statfs = pstat.store_stats;
- uint64_t allocated_bytes = pstat.get_allocated_bytes(per_pool);
+ uint64_t allocated_bytes = pstat.get_allocated_data_bytes(per_pool) +
+ pstat.get_allocated_omap_bytes(per_pool);
// FIXME: raw_used_rate is unknown hence use 1.0 here
// meaning we keep net amount aggregated over all replicas
// Not a big deal so far since this field isn't exposed
- uint64_t user_bytes = pstat.get_user_bytes(1.0, per_pool);
+ uint64_t user_bytes = pstat.get_user_data_bytes(1.0, per_pool) +
+ pstat.get_user_omap_bytes(1.0, per_pool);
object_stat_sum_t *sum = &p->second.stats.sum;
pv.num_kb = shift_round_up(allocated_bytes, 10);
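+ // scale the raw-used factor down by the fraction of object copies that
+ // actually exist, so degraded pools do not overstate raw usage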
raw_used_rate *= (float)(sum.num_object_copies - sum.num_objects_degraded) / sum.num_object_copies;
}
- uint64_t used_bytes = pool_stat.get_allocated_bytes(per_pool, per_pool_omap);
+ uint64_t used_data_bytes = pool_stat.get_allocated_data_bytes(per_pool);
+ uint64_t used_omap_bytes = pool_stat.get_allocated_omap_bytes(per_pool_omap);
+ uint64_t used_bytes = used_data_bytes + used_omap_bytes;
float used = 0.0;
// note avail passed in is raw_avail, calc raw_used here.
}
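+ // divide raw available space by the amp factor to estimate how much more
+ // user data could still be stored (guarding against a zero rate)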
auto avail_res = raw_used_rate ? avail / raw_used_rate : 0;
// an approximation for actually stored user data
- auto stored_normalized = pool_stat.get_user_bytes(raw_used_rate, per_pool,
- per_pool_omap);
+ auto stored_data_normalized = pool_stat.get_user_data_bytes(
+ raw_used_rate, per_pool);
+ auto stored_omap_normalized = pool_stat.get_user_omap_bytes(
+ raw_used_rate, per_pool_omap);
+ auto stored_normalized = stored_data_normalized + stored_omap_normalized;
// same, amplified by replication or EC
auto stored_raw = stored_normalized * raw_used_rate;
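+ // e.g. with 3x replication raw_used_rate is ~3.0, so ~300 GiB of raw
+ // usage reports as ~100 GiB stored_normalized and ~300 GiB stored_raw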
if (f) {
// In legacy mode used and netto values are the same. But for new per-pool
// collection 'used' provides amount of space ALLOCATED at all related OSDs
// and 'netto' is amount of stored user data.
- uint64_t get_allocated_bytes(bool per_pool, bool per_pool_omap) const {
- uint64_t allocated_bytes;
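+ // space allocated at the OSDs for object data in this pool; with
+ // per-pool stats this comes from the store statfs values, otherwise we
+ // fall back to the PG-level counters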
+ uint64_t get_allocated_data_bytes(bool per_pool) const {
if (per_pool) {
- allocated_bytes = store_stats.allocated;
+ return store_stats.allocated;
} else {
// legacy mode, use numbers from 'stats'
- allocated_bytes = stats.sum.num_bytes +
- stats.sum.num_bytes_hit_set_archive;
+ return stats.sum.num_bytes + stats.sum.num_bytes_hit_set_archive;
}
+ }
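+ // space allocated for omap data; exact only when the OSDs provide
+ // per-pool omap accounting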
+ uint64_t get_allocated_omap_bytes(bool per_pool_omap) const {
if (per_pool_omap) {
- allocated_bytes += store_stats.omap_allocated;
+ return store_stats.omap_allocated;
} else {
- // omap is not broken out by pool by nautilus bluestore
- allocated_bytes += stats.sum.num_omap_bytes;
+ // omap is not broken out by pool by nautilus bluestore; report the
+ // scrub value. this will be imprecise in that it won't account for
+ // any storage overhead/efficiency.
+ return stats.sum.num_omap_bytes;
}
- return allocated_bytes;
}
- uint64_t get_user_bytes(float raw_used_rate, ///< space amp factor
- bool per_pool,
- bool per_pool_omap) const {
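+ // object data as seen by the user, i.e. net of replication/EC overhead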
+ uint64_t get_user_data_bytes(float raw_used_rate, ///< space amp factor
+ bool per_pool) const {
// NOTE: we need the space amp factor so that we can work backwards from
// the raw utilization to the amount of data that the user actually stored.
- uint64_t user_bytes;
if (per_pool) {
- user_bytes = raw_used_rate ? store_stats.data_stored / raw_used_rate : 0;
+ return raw_used_rate ? store_stats.data_stored / raw_used_rate : 0;
} else {
// legacy mode, use numbers from 'stats'. note that we do NOT use the
// raw_used_rate factor here because we are working from the PG stats
// directly.
- user_bytes = stats.sum.num_bytes + stats.sum.num_bytes_hit_set_archive;
+ return stats.sum.num_bytes + stats.sum.num_bytes_hit_set_archive;
}
+ }
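+ // omap data as seen by the user; like the data variant, the per-pool
+ // figure is the raw omap allocation divided by the amp factor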
+ uint64_t get_user_omap_bytes(float raw_used_rate, ///< space amp factor
+ bool per_pool_omap) const {
if (per_pool_omap) {
- user_bytes += store_stats.omap_allocated;
+ return raw_used_rate ? store_stats.omap_allocated / raw_used_rate : 0;
} else {
- // omap is not broken out by pool by nautilus bluestore
- user_bytes += stats.sum.num_omap_bytes;
+ // omap usage is lazily reported during scrub; this value may lag.
+ return stats.sum.num_omap_bytes;
}
- return user_bytes;
}
void dump(ceph::Formatter *f) const;