}
int librados::RadosClient::get_pool_stats(std::list<string>& pools,
- map<string,::pool_stat_t>& result)
+ map<string,::pool_stat_t> *result,
+ bool *per_pool)
{
Mutex mylock("RadosClient::get_pool_stats::mylock");
Cond cond;
bool done;
int ret = 0;
- objecter->get_pool_stats(pools, &result, new C_SafeCond(&mylock, &cond, &done,
- &ret));
+ objecter->get_pool_stats(pools, result, per_pool,
+ new C_SafeCond(&mylock, &cond, &done,
+ &ret));
mylock.Lock();
while (!done)
bool wait_latest_map = false);
int pool_list(std::list<std::pair<int64_t, string> >& ls);
- int get_pool_stats(std::list<string>& ls, map<string,::pool_stat_t>& result);
+ int get_pool_stats(std::list<string>& ls, map<string,::pool_stat_t> *result,
+ bool *per_pool);
int get_fs_stats(ceph_statfs& result);
bool get_pool_is_selfmanaged_snaps_mode(const std::string& pool);
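
Note on the hunk above: RadosClient::get_pool_stats stays synchronous even though the Objecter is asynchronous. It registers a C_SafeCond completion and blocks on the condition until the reply has populated both out-parameters; returning the stats through a pointer rather than the old reference also keeps the two out-parameters, result and per_pool, stylistically consistent. A minimal sketch of the wait pattern C_SafeCond implements, using std-library stand-ins rather than Ceph's actual Mutex/Cond/C_SafeCond types:

  #include <condition_variable>
  #include <mutex>
  #include <thread>

  // Hypothetical stand-in for C_SafeCond: stores the result code,
  // flips `done`, and wakes the waiter under the lock.
  struct SafeCond {
    std::mutex m;
    std::condition_variable cv;
    bool done = false;
    int ret = 0;

    void finish(int r) {
      std::lock_guard<std::mutex> l(m);
      ret = r;
      done = true;
      cv.notify_all();
    }
    int wait() {
      std::unique_lock<std::mutex> l(m);
      cv.wait(l, [this] { return done; });
      return ret;
    }
  };

  int main() {
    SafeCond c;
    // Simulates the Objecter completing the pool-stats op on another thread.
    std::thread replier([&c] { c.finish(0); });
    int r = c.wait();  // the caller blocks here, as RadosClient does
    replier.join();
    return r;
  }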
ls.push_back(pool_name);
map<string, ::pool_stat_t> rawresult;
- err = io_ctx_impl->client->get_pool_stats(ls, rawresult);
+ bool per_pool = false;
+ err = io_ctx_impl->client->get_pool_stats(ls, &rawresult, &per_pool);
if (err) {
tracepoint(librados, rados_ioctx_pool_stat_exit, err, stats);
return err;
}
::pool_stat_t& r = rawresult[pool_name];
- uint64_t allocated_bytes = r.get_allocated_bytes();
+ uint64_t allocated_bytes = r.get_allocated_bytes(per_pool);
// FIXME: raw_used_rate is unknown hence use 1.0 here
// meaning we keep net amount aggregated over all replicas
// Not a big deal so far since this field isn't exposed
- uint64_t user_bytes = r.get_user_bytes(1.0);
+ uint64_t user_bytes = r.get_user_bytes(1.0, per_pool);
stats->num_kb = shift_round_up(allocated_bytes, 10);
stats->num_bytes = allocated_bytes;
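
As an aside, num_kb is derived with shift_round_up(allocated_bytes, 10), a divide-by-1024 that rounds any partial KiB up. A self-contained re-implementation for illustration (the real helper lives in Ceph's integer-arithmetic header):

  #include <cassert>
  #include <cstdint>

  // Round x up to the next multiple of 2^n, then shift: ceil(x / 2^n).
  static uint64_t shift_round_up(uint64_t x, unsigned n) {
    return (x + (uint64_t(1) << n) - 1) >> n;
  }

  int main() {
    assert(shift_round_up(0, 10) == 0);     // 0 B        -> 0 KiB
    assert(shift_round_up(1024, 10) == 1);  // 1 KiB      -> 1 KiB
    assert(shift_round_up(1025, 10) == 2);  // 1 KiB + 1B -> rounds up to 2 KiB
    return 0;
  }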
stats_map& result)
{
map<string,::pool_stat_t> rawresult;
- int r = client->get_pool_stats(v, rawresult);
+ bool per_pool = false;
+ int r = client->get_pool_stats(v, &rawresult, &per_pool);
for (map<string,::pool_stat_t>::iterator p = rawresult.begin();
p != rawresult.end();
++p) {
pool_stat_t& pv = result[p->first];
auto& pstat = p->second;
store_statfs_t &statfs = pstat.store_stats;
- uint64_t allocated_bytes = pstat.get_allocated_bytes();
+ uint64_t allocated_bytes = pstat.get_allocated_bytes(per_pool);
// FIXME: raw_used_rate is unknown hence use 1.0 here
// meaning we keep net amount aggregated over all replicas
// Not a big deal so far since this field isn't exposed
- uint64_t user_bytes = pstat.get_user_bytes(1.0);
+ uint64_t user_bytes = pstat.get_user_bytes(1.0, per_pool);
object_stat_sum_t *sum = &p->second.stats.sum;
pv.num_kb = shift_round_up(allocated_bytes, 10);
#define CEPH_MGETPOOLSTATSREPLY_H
class MGetPoolStatsReply : public PaxosServiceMessage {
+ static constexpr int HEAD_VERSION = 2;
+ static constexpr int COMPAT_VERSION = 1;
+
public:
uuid_d fsid;
std::map<std::string,pool_stat_t> pool_stats;
+ bool per_pool = false;
- MGetPoolStatsReply() : PaxosServiceMessage{MSG_GETPOOLSTATSREPLY, 0} {}
+ MGetPoolStatsReply() : PaxosServiceMessage{MSG_GETPOOLSTATSREPLY, 0,
+ HEAD_VERSION, COMPAT_VERSION} {}
MGetPoolStatsReply(uuid_d& f, ceph_tid_t t, version_t v) :
- PaxosServiceMessage{MSG_GETPOOLSTATSREPLY, v},
+ PaxosServiceMessage{MSG_GETPOOLSTATSREPLY, v,
+ HEAD_VERSION, COMPAT_VERSION},
fsid(f) {
set_tid(t);
}
public:
std::string_view get_type_name() const override { return "getpoolstats"; }
void print(std::ostream& out) const override {
- out << "getpoolstatsreply(" << get_tid() << " v" << version << ")";
+ out << "getpoolstatsreply(" << get_tid();
+ if (per_pool)
+ out << " per_pool";
+ out << " v" << version << ")";
}
void encode_payload(uint64_t features) override {
paxos_encode();
encode(fsid, payload);
encode(pool_stats, payload, features);
+ encode(per_pool, payload);
}
void decode_payload() override {
using ceph::decode;
paxos_decode(p);
decode(fsid, p);
decode(pool_stats, p);
+ if (header.version >= 2) {
+ decode(per_pool, p);
+ } else {
+ per_pool = false;
+ }
}
private:
template<class T, typename... Args>
}
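
The versioning above follows the usual Ceph messenger idiom for appending a field: HEAD_VERSION is bumped to 2 while COMPAT_VERSION stays 1, so an old peer can still decode the prefix it understands, and a new decoder reads the trailing per_pool bool only when header.version says the sender encoded it, defaulting to false otherwise. A toy model of that guard over a plain byte buffer (illustrative only, not Ceph's bufferlist/encode API, which is explicitly little-endian):

  #include <cassert>
  #include <cstdint>
  #include <cstring>
  #include <vector>

  // Toy wire format: a v1 decoder simply stops before the trailing
  // flag; a v2 decoder reads it only if the header says it is there.
  struct Reply {
    uint64_t pgmap_version = 0;
    bool per_pool = false;
  };

  static std::vector<uint8_t> encode_v2(const Reply& r) {
    std::vector<uint8_t> buf(sizeof r.pgmap_version + 1);
    std::memcpy(buf.data(), &r.pgmap_version, sizeof r.pgmap_version);
    buf[sizeof r.pgmap_version] = r.per_pool ? 1 : 0;  // v2 trailing field
    return buf;
  }

  static Reply decode(uint32_t hdr_version, const std::vector<uint8_t>& buf) {
    Reply r;
    std::memcpy(&r.pgmap_version, buf.data(), sizeof r.pgmap_version);
    if (hdr_version >= 2) {
      r.per_pool = buf[sizeof r.pgmap_version] != 0;
    } else {
      r.per_pool = false;  // safe default for replies from old monitors
    }
    return r;
  }

  int main() {
    Reply in;
    in.pgmap_version = 42;
    in.per_pool = true;
    auto buf = encode_v2(in);
    assert(decode(2, buf).per_pool);   // v2 peer sees the flag
    assert(!decode(1, buf).per_pool);  // v1 decoder ignores the tail
    return 0;
  }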
epoch_t ver = get_last_committed();
auto reply = new MGetPoolStatsReply(m->fsid, m->get_tid(), ver);
+ reply->per_pool = digest.use_per_pool_stats();
for (const auto& pool_name : m->pools) {
const auto pool_id = mon->osdmon()->osdmap.lookup_pg_pool_name(pool_name);
if (pool_id == -ENOENT)
<< pool_id;
}
float raw_used_rate = osd_map.pool_raw_used_rate(pool_id);
- dump_object_stat_sum(tbl, f, stat, avail, raw_used_rate, verbose, pool);
+ bool per_pool = use_per_pool_stats();
+ dump_object_stat_sum(tbl, f, stat, avail, raw_used_rate, verbose, per_pool,
+ pool);
if (f) {
f->close_section(); // stats
f->close_section(); // pool
f->dump_int("total_used_bytes", osd_sum.statfs.get_used());
f->dump_int("total_used_raw_bytes", osd_sum.statfs.get_used_raw());
f->dump_float("total_used_raw_ratio", osd_sum.statfs.get_used_raw_ratio());
+ f->dump_unsigned("num_osds", osd_sum.num_osds);
+ f->dump_unsigned("num_per_pool_osds", osd_sum.num_per_pool_osds);
f->close_section();
f->open_object_section("stats_by_class");
for (auto& i : osd_sum_by_class) {
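
Exporting num_osds and num_per_pool_osds side by side lets consumers of the formatted dump see how close the cluster is to the condition that use_per_pool_stats() (added below) evaluates: per-pool accounting is trusted only once the two counts match.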
void PGMapDigest::dump_object_stat_sum(
TextTable &tbl, ceph::Formatter *f,
const pool_stat_t &pool_stat, uint64_t avail,
- float raw_used_rate, bool verbose,
+ float raw_used_rate, bool verbose, bool per_pool,
const pg_pool_t *pool)
{
const object_stat_sum_t &sum = pool_stat.stats.sum;
if (sum.num_object_copies > 0) {
raw_used_rate *= (float)(sum.num_object_copies - sum.num_objects_degraded) / sum.num_object_copies;
}

- uint64_t used_bytes = pool_stat.get_allocated_bytes();
+ uint64_t used_bytes = pool_stat.get_allocated_bytes(per_pool);
float used = 0.0;
// note avail passed in is raw_avail, calc raw_used here.
}
auto avail_res = raw_used_rate ? avail / raw_used_rate : 0;
// an approximation for actually stored user data
- auto stored_normalized = pool_stat.get_user_bytes(raw_used_rate);
+ auto stored_normalized = pool_stat.get_user_bytes(raw_used_rate, per_pool);
if (f) {
f->dump_int("stored", stored_normalized);
f->dump_int("objects", sum.num_objects);
f->dump_int("compress_bytes_used", statfs.data_compressed_allocated);
f->dump_int("compress_under_bytes", statfs.data_compressed_original);
// Stored by user amplified by replication
- f->dump_int("stored_raw", pool_stat.get_user_bytes(1.0));
+ f->dump_int("stored_raw", pool_stat.get_user_bytes(1.0, per_pool));
}
} else {
tbl << stringify(byte_u_t(stored_normalized));
mempool::pgmap::map<int64_t,interval_set<snapid_t>> purged_snaps;
+ bool use_per_pool_stats() const {
+ return osd_sum.num_osds == osd_sum.num_per_pool_osds;
+ }
+
// recent deltas, and summation
/**
* keep track of last deltas for each pool, calculated using
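
The predicate above is deliberately all-or-nothing: per-pool figures are used only when every reporting OSD publishes per-pool stats (num_osds == num_per_pool_osds). In a partially upgraded cluster, mixing per-pool numbers from new OSDs with legacy PG-derived numbers from old ones would skew the totals, so the digest keeps the legacy accounting until the last non-per-pool OSD is gone.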
const pool_stat_t &pool_stat,
uint64_t avail,
float raw_used_rate,
- bool verbose, const pg_pool_t *pool);
+ bool verbose,
+ bool per_pool,
+ const pg_pool_t *pool);
size_t get_num_pg_by_osd(int osd) const {
auto p = num_pg_by_osd.find(osd);
// In legacy mode used and netto values are the same. But for new per-pool
// collection 'used' provides amount of space ALLOCATED at all related OSDs
// and 'netto' is amount of stored user data.
- uint64_t get_allocated_bytes() const {
+ uint64_t get_allocated_bytes(bool per_pool) const {
uint64_t allocated_bytes;
- if (num_store_stats) {
+ if (per_pool) {
allocated_bytes = store_stats.allocated;
} else {
// legacy mode, use numbers from 'stats'
allocated_bytes += stats.sum.num_omap_bytes;
return allocated_bytes;
}
- uint64_t get_user_bytes(float raw_used_rate) const {
+ uint64_t get_user_bytes(float raw_used_rate, bool per_pool) const {
uint64_t user_bytes;
- if (num_store_stats) {
+ if (per_pool) {
user_bytes = raw_used_rate ? store_stats.data_stored / raw_used_rate : 0;
} else {
// legacy mode, use numbers from 'stats'
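
To make the two branches concrete, here is a deliberately simplified model of the getters with illustrative numbers for a 3x replicated pool. Field names loosely mirror the real pool_stat_t; the real legacy branch also folds in hit-set-archive and omap bytes, omitted here:

  #include <cassert>
  #include <cstdint>

  struct ToyPoolStat {
    // per-pool (new) numbers, collected by the store per pool:
    // raw values aggregated across all 3 replicas
    uint64_t store_allocated = 3 * 1000;
    uint64_t store_data_stored = 3 * 1000;
    // legacy numbers from PG stats: net user data, no replication
    uint64_t num_bytes = 1000;

    uint64_t get_allocated_bytes(bool per_pool) const {
      return per_pool ? store_allocated
                      : num_bytes;  // legacy fallback
    }
    uint64_t get_user_bytes(float raw_used_rate, bool per_pool) const {
      if (per_pool)  // normalize raw stored bytes by the replication factor
        return raw_used_rate ? uint64_t(store_data_stored / raw_used_rate) : 0;
      return num_bytes;  // legacy: already net of replication
    }
  };

  int main() {
    ToyPoolStat s;
    // Per-pool mode: raw allocation, user bytes divided by
    // raw_used_rate (3.0 for a 3x replicated pool).
    assert(s.get_allocated_bytes(true) == 3000);
    assert(s.get_user_bytes(3.0, true) == 1000);
    // Legacy mode: both derive from the net PG-level byte counts.
    assert(s.get_allocated_bytes(false) == 1000);
    assert(s.get_user_bytes(1.0, false) == 1000);
    return 0;
  }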
void Objecter::get_pool_stats(list<string>& pools,
map<string,pool_stat_t> *result,
+ bool *per_pool,
Context *onfinish)
{
ldout(cct, 10) << "get_pool_stats " << pools << dendl;
op->tid = ++last_tid;
op->pools = pools;
op->pool_stats = result;
+ op->per_pool = per_pool;
op->onfinish = onfinish;
if (mon_timeout > timespan(0)) {
op->ontimeout = timer.add_event(mon_timeout,
PoolStatOp *op = poolstat_ops[tid];
ldout(cct, 10) << "have request " << tid << " at " << op << dendl;
*op->pool_stats = m->pool_stats;
+ *op->per_pool = m->per_pool;
if (m->version > last_seen_pgmap_version) {
last_seen_pgmap_version = m->version;
}
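
This is the Objecter's standard mon-op lifecycle: the op is parked in poolstat_ops keyed by tid, and handle_get_pool_stats_reply writes through the caller-owned out-pointers, now including per_pool, before firing onfinish. A stripped-down sketch of that tid-keyed completion table (a hypothetical miniature, not the real Objecter types):

  #include <cassert>
  #include <cstdint>
  #include <functional>
  #include <map>
  #include <string>

  // Out-pointers are owned by the caller and filled by the reply handler.
  struct MiniPoolStatOp {
    std::map<std::string, uint64_t>* pool_stats = nullptr;
    bool* per_pool = nullptr;
    std::function<void(int)> onfinish;
  };

  struct MiniObjecter {
    uint64_t last_tid = 0;
    std::map<uint64_t, MiniPoolStatOp> ops;

    uint64_t submit(MiniPoolStatOp op) {
      ops[++last_tid] = std::move(op);
      return last_tid;  // the tid travels with the request to the mon
    }
    void handle_reply(uint64_t tid,
                      const std::map<std::string, uint64_t>& stats,
                      bool per_pool) {
      auto it = ops.find(tid);
      if (it == ops.end())
        return;  // a racing cancel/timeout already reaped the op
      *it->second.pool_stats = stats;
      *it->second.per_pool = per_pool;
      it->second.onfinish(0);
      ops.erase(it);
    }
  };

  int main() {
    MiniObjecter o;
    std::map<std::string, uint64_t> result;
    bool per_pool = false;
    int rc = -1;
    uint64_t tid = o.submit({&result, &per_pool, [&rc](int r) { rc = r; }});
    o.handle_reply(tid, {{"rbd", 12345}}, true);
    assert(rc == 0 && per_pool && result.at("rbd") == 12345);
    return 0;
  }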
std::list<std::string> pools;
std::map<std::string,pool_stat_t> *pool_stats;
+ bool *per_pool;
Context *onfinish;
uint64_t ontimeout;
void _poolstat_submit(PoolStatOp *op);
public:
void handle_get_pool_stats_reply(MGetPoolStatsReply *m);
- void get_pool_stats(std::list<std::string>& pools, std::map<std::string,pool_stat_t> *result,
+ void get_pool_stats(std::list<std::string>& pools,
+ std::map<std::string,pool_stat_t> *result,
+ bool *per_pool,
Context *onfinish);
int pool_stat_op_cancel(ceph_tid_t tid, int r);
void _finish_pool_stat_op(PoolStatOp *op, int r);
pool.size = 2;
pool.type = pg_pool_t::TYPE_REPLICATED;
PGMap::dump_object_stat_sum(tbl, nullptr, pool_stat, avail,
- pool.get_size(), verbose, &pool);
+ pool.get_size(), verbose, true, &pool);
float copies_rate =
(static_cast<float>(sum.num_object_copies - sum.num_objects_degraded) /
sum.num_object_copies) * pool.get_size();
pool.size = 2;
pool.type = pg_pool_t::TYPE_REPLICATED;
PGMap::dump_object_stat_sum(tbl, nullptr, pool_stat, avail,
- pool.get_size(), verbose, &pool);
+ pool.get_size(), verbose, true, &pool);
unsigned col = 0;
ASSERT_EQ(stringify(byte_u_t(0)), tbl.get(0, col++));
ASSERT_EQ(stringify(si_u_t(0)), tbl.get(0, col++));
pool.type = pg_pool_t::TYPE_REPLICATED;
PGMap::dump_object_stat_sum(tbl, nullptr, pool_stat, avail,
- pool.get_size(), verbose, &pool);
+ pool.get_size(), verbose, true, &pool);
unsigned col = 0;
ASSERT_EQ(stringify(byte_u_t(0)), tbl.get(0, col++));
ASSERT_EQ(stringify(si_u_t(0)), tbl.get(0, col++));