f->dump_unsigned("num_pgs", num_pgs);
f->dump_unsigned("kb", kb);
f->dump_unsigned("kb_used", kb_used);
+ f->dump_unsigned("kb_used_data", kb_used_data);
+ f->dump_unsigned("kb_used_omap", kb_used_omap);
+ f->dump_unsigned("kb_used_meta", kb_used_meta);
f->dump_unsigned("kb_avail", kb_avail);
f->open_array_section("hb_peers");
for (auto p : hb_peers)
void osd_stat_t::encode(bufferlist &bl, uint64_t features) const
{
- ENCODE_START(7, 2, bl);
+ ENCODE_START(8, 2, bl);
encode(kb, bl);
encode(kb_used, bl);
encode(kb_avail, bl);
encode(up_from, bl);
encode(seq, bl);
encode(num_pgs, bl);
+ encode(kb_used_data, bl);
+ encode(kb_used_omap, bl);
+ encode(kb_used_meta, bl);
ENCODE_FINISH(bl);
}
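// Note: the new counters are appended at the tail and only the struct
// version is bumped (7 -> 8); the compat version stays at 2, so older
// decoders can still use the length written by ENCODE_START to skip
// trailing fields they do not understand.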
void osd_stat_t::decode(bufferlist::iterator &bl)
{
- DECODE_START_LEGACY_COMPAT_LEN(6, 2, 2, bl);
+ DECODE_START_LEGACY_COMPAT_LEN(8, 2, 2, bl);
decode(kb, bl);
decode(kb_used, bl);
decode(kb_avail, bl);
if (struct_v >= 7) {
decode(num_pgs, bl);
}
+ if (struct_v >= 8) {
+ decode(kb_used_data, bl);
+ decode(kb_used_omap, bl);
+ decode(kb_used_meta, bl);
+ } else {
+ kb_used_data = kb_used;
+ kb_used_omap = 0;
+ kb_used_meta = 0;
+ }
DECODE_FINISH(bl);
}
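// Hedged round-trip sketch (hypothetical helper, not part of this patch):
// encode a fully populated osd_stat_t and decode it back, assuming the
// surrounding Ceph headers (bufferlist, CEPH_FEATURES_ALL, assert) are in
// scope. Pre-v8 blobs instead take the else branch above, collapsing
// kb_used_data to kb_used and zeroing the omap/meta splits.
void osd_stat_roundtrip_sketch()
{
  osd_stat_t in, out;
  in.kb = 9;
  in.kb_used = 6;
  in.kb_used_data = 3;
  in.kb_used_omap = 2;
  in.kb_used_meta = 1;
  in.kb_avail = 3;
  bufferlist bl;
  in.encode(bl, CEPH_FEATURES_ALL);
  bufferlist::iterator p = bl.begin();
  out.decode(p);
  assert(in == out);  // operator== below compares the new fields too
}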
o.push_back(new osd_stat_t);
o.push_back(new osd_stat_t);
- o.back()->kb = 1;
- o.back()->kb_used = 2;
+ o.back()->kb = 9;
+ o.back()->kb_used = 6;
+ o.back()->kb_used_data = 3;
+ o.back()->kb_used_omap = 2;
+ o.back()->kb_used_meta = 1;
o.back()->kb_avail = 3;
o.back()->hb_peers.push_back(7);
o.back()->snap_trim_queue_len = 8;
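// The populated instance is arithmetically consistent:
//   kb_used_data + kb_used_omap + kb_used_meta = 3 + 2 + 1 = 6 = kb_used
//   kb_used + kb_avail = 6 + 3 = 9 = kb
// so the dencoder test fixtures model the same breakdown a real OSD reports.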
return total == other.total
&& available == other.available
&& allocated == other.allocated
- && stored == other.stored
- && compressed == other.compressed
- && compressed_allocated == other.compressed_allocated
- && compressed_original == other.compressed_original;
+ && data_stored == other.data_stored
+ && data_compressed == other.data_compressed
+ && data_compressed_allocated == other.data_compressed_allocated
+ && data_compressed_original == other.data_compressed_original
+ && omap_allocated == other.omap_allocated
+ && internal_metadata == other.internal_metadata;
}
void store_statfs_t::dump(Formatter *f) const
f->dump_int("total", total);
f->dump_int("available", available);
f->dump_int("allocated", allocated);
- f->dump_int("stored", stored);
- f->dump_int("compressed", compressed);
- f->dump_int("compressed_allocated", compressed_allocated);
- f->dump_int("compressed_original", compressed_original);
+ f->dump_int("data_stored", data_stored);
+ f->dump_int("data_compressed", data_compressed);
+ f->dump_int("data_compressed_allocated", data_compressed_allocated);
+ f->dump_int("data_compressed_original", data_compressed_original);
+ f->dump_int("omap_allocated", omap_allocated);
+ f->dump_int("internal_metadata", internal_metadata);
}
ostream& operator<<(ostream& out, const store_statfs_t &s)
out << std::hex
<< "store_statfs(0x" << s.available
<< "/0x" << s.total
- << ", stored 0x" << s.stored
+ << ", data 0x" << s.data_stored
<< "/0x" << s.allocated
- << ", compress 0x" << s.compressed
- << "/0x" << s.compressed_allocated
- << "/0x" << s.compressed_original
+ << ", compress 0x" << s.data_compressed
+ << "/0x" << s.data_compressed_allocated
+ << "/0x" << s.data_compressed_original
+ << ", omap 0x" << s.omap_allocated
+ << ", meta 0x" << s.internal_metadata
<< std::dec
<< ")";
return out;
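// With made-up values, the new form renders like this (one line in
// practice, wrapped here for readability):
//   store_statfs(0xfe000/0x100000, data 0x5/0x10000,
//     compress 0x0/0x0/0x0, omap 0x40, meta 0x80)
// i.e. available/total, data_stored/allocated, the three compression
// counters, then the new omap and internal-metadata figures, all in hex.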
* aggregate stats for an osd
*/
struct osd_stat_t {
- int64_t kb, kb_used, kb_avail;
+ int64_t kb = 0; ///< total device size
+ int64_t kb_used = 0; ///< total used
+ int64_t kb_used_data = 0; ///< total used by object data
+ int64_t kb_used_omap = 0; ///< total used by omap data
+ int64_t kb_used_meta = 0; ///< total used by internal metadata
+ int64_t kb_avail = 0; ///< total available/free
+
vector<int> hb_peers;
- int32_t snap_trim_queue_len, num_snap_trimming;
+ int32_t snap_trim_queue_len = 0, num_snap_trimming = 0;
pow2_hist_t op_queue_age_hist;
uint32_t num_pgs = 0;
- osd_stat_t() : kb(0), kb_used(0), kb_avail(0),
- snap_trim_queue_len(0), num_snap_trimming(0) {}
-
void add(const osd_stat_t& o) {
kb += o.kb;
kb_used += o.kb_used;
+ kb_used_data += o.kb_used_data;
+ kb_used_omap += o.kb_used_omap;
+ kb_used_meta += o.kb_used_meta;
kb_avail += o.kb_avail;
snap_trim_queue_len += o.snap_trim_queue_len;
num_snap_trimming += o.num_snap_trimming;
void sub(const osd_stat_t& o) {
kb -= o.kb;
kb_used -= o.kb_used;
+ kb_used_data -= o.kb_used_data;
+ kb_used_omap -= o.kb_used_omap;
+ kb_used_meta -= o.kb_used_meta;
kb_avail -= o.kb_avail;
snap_trim_queue_len -= o.snap_trim_queue_len;
num_snap_trimming -= o.num_snap_trimming;
inline bool operator==(const osd_stat_t& l, const osd_stat_t& r) {
return l.kb == r.kb &&
l.kb_used == r.kb_used &&
+ l.kb_used_data == r.kb_used_data &&
+ l.kb_used_omap == r.kb_used_omap &&
+ l.kb_used_meta == r.kb_used_meta &&
l.kb_avail == r.kb_avail &&
l.snap_trim_queue_len == r.snap_trim_queue_len &&
l.num_snap_trimming == r.num_snap_trimming &&
return !(l == r);
}
-
-
inline ostream& operator<<(ostream& out, const osd_stat_t& s) {
- return out << "osd_stat(" << byte_u_t(s.kb_used << 10) << " used, "
+ return out << "osd_stat("
+ << byte_u_t(s.kb_used << 10) << " used ("
+ << byte_u_t(s.kb_used_data << 10) << " data, "
+ << byte_u_t(s.kb_used_omap << 10) << " omap, "
+ << byte_u_t(s.kb_used_meta << 10) << " meta), "
<< byte_u_t(s.kb_avail << 10) << " avail, "
<< byte_u_t(s.kb << 10) << " total, "
<< "peers " << s.hb_peers
*/
struct store_statfs_t
{
- uint64_t total = 0; // Total bytes
- uint64_t available = 0; // Free bytes available
-
- int64_t allocated = 0; // Bytes allocated by the store
- int64_t stored = 0; // Bytes actually stored by the user
- int64_t compressed = 0; // Bytes stored after compression
- int64_t compressed_allocated = 0; // Bytes allocated for compressed data
- int64_t compressed_original = 0; // Bytes that were successfully compressed
+ uint64_t total = 0; ///< Total bytes
+ uint64_t available = 0; ///< Free bytes available
+
+ int64_t allocated = 0; ///< Bytes allocated by the store
+
+ int64_t data_stored = 0; ///< Bytes actually stored by the user
+ int64_t data_compressed = 0; ///< Bytes stored after compression
+ int64_t data_compressed_allocated = 0; ///< Bytes allocated for compressed data
+ int64_t data_compressed_original = 0; ///< Bytes that were compressed (original, pre-compression size)
+
+ int64_t omap_allocated = 0; ///< approximate usage of omap data
+ int64_t internal_metadata = 0; ///< approximate usage of internal metadata
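// A couple of invariants the new split makes explicit (relied on by the
// unit tests below, not enforced by the struct itself):
//   data_compressed <= data_compressed_original   -- compression output
//     never exceeds the input it replaced
//   data_compressed_allocated <= allocated        -- compressed blobs are
//     a subset of all allocations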
void reset() {
*this = store_statfs_t();
struct store_statfs_t statfs;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
- ASSERT_EQ(statfs.stored, (unsigned)data.size());
- ASSERT_LE(statfs.compressed, (unsigned)data.size());
- ASSERT_EQ(statfs.compressed_original, (unsigned)data.size());
- ASSERT_LE(statfs.compressed_allocated, (unsigned)data.size());
+ ASSERT_EQ(statfs.data_stored, (unsigned)data.size());
+ ASSERT_LE(statfs.data_compressed, (unsigned)data.size());
+ ASSERT_EQ(statfs.data_compressed_original, (unsigned)data.size());
+ ASSERT_LE(statfs.data_compressed_allocated, (unsigned)data.size());
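// data_compressed_original counts every byte fed to the compressor (here
// the whole write, hence the equality), while data_compressed is the
// post-compression size and data_compressed_allocated the space it
// occupies, hence the upper bounds.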
}
}
std::string data2;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ( 0u, statfs.allocated);
- ASSERT_EQ( 0u, statfs.stored);
+ ASSERT_EQ( 0u, statfs.data_stored);
ASSERT_EQ(g_conf->bluestore_block_size, statfs.total);
ASSERT_TRUE(statfs.available > 0u && statfs.available < g_conf->bluestore_block_size);
//force fsck
struct store_statfs_t statfs;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
- ASSERT_EQ(5, statfs.stored);
+ ASSERT_EQ(5, statfs.data_stored);
ASSERT_EQ(0x10000, statfs.allocated);
- ASSERT_EQ(0, statfs.compressed);
- ASSERT_EQ(0, statfs.compressed_original);
- ASSERT_EQ(0, statfs.compressed_allocated);
+ ASSERT_EQ(0, statfs.data_compressed);
+ ASSERT_EQ(0, statfs.data_compressed_original);
+ ASSERT_EQ(0, statfs.data_compressed_allocated);
//force fsck
ch.reset();
EXPECT_EQ(store->umount(), 0);
struct store_statfs_t statfs;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
- ASSERT_EQ(0x30005, statfs.stored);
+ ASSERT_EQ(0x30005, statfs.data_stored);
ASSERT_EQ(0x30000, statfs.allocated);
- ASSERT_LE(statfs.compressed, 0x10000);
- ASSERT_EQ(0x20000, statfs.compressed_original);
- ASSERT_EQ(statfs.compressed_allocated, 0x10000);
+ ASSERT_LE(statfs.data_compressed, 0x10000);
+ ASSERT_EQ(0x20000, statfs.data_compressed_original);
+ ASSERT_EQ(statfs.data_compressed_allocated, 0x10000);
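// Reading the accounting: of the 0x30005 logical bytes, 0x20000 went
// through the compressor (data_compressed_original) and fit into 0x10000
// of allocation (data_compressed_allocated); the remaining 0x20000 of the
// 0x30000 total allocation backs the uncompressed portion.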
//force fsck
ch.reset();
EXPECT_EQ(store->umount(), 0);
struct store_statfs_t statfs;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
- ASSERT_EQ(0x30005 - 3 - 9, statfs.stored);
+ ASSERT_EQ(0x30005 - 3 - 9, statfs.data_stored);
ASSERT_EQ(0x30000, statfs.allocated);
- ASSERT_LE(statfs.compressed, 0x10000);
- ASSERT_EQ(0x20000 - 9, statfs.compressed_original);
- ASSERT_EQ(statfs.compressed_allocated, 0x10000);
+ ASSERT_LE(statfs.data_compressed, 0x10000);
+ ASSERT_EQ(0x20000 - 9, statfs.data_compressed_original);
+ ASSERT_EQ(statfs.data_compressed_allocated, 0x10000);
//force fsck
ch.reset();
EXPECT_EQ(store->umount(), 0);
struct store_statfs_t statfs;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
- ASSERT_EQ(0x30001 - 9 + 0x1000, statfs.stored);
+ ASSERT_EQ(0x30001 - 9 + 0x1000, statfs.data_stored);
ASSERT_EQ(0x40000, statfs.allocated);
- ASSERT_LE(statfs.compressed, 0x10000);
- ASSERT_EQ(0x20000 - 9 - 0x1000, statfs.compressed_original);
- ASSERT_EQ(statfs.compressed_allocated, 0x10000);
+ ASSERT_LE(statfs.data_compressed, 0x10000);
+ ASSERT_EQ(0x20000 - 9 - 0x1000, statfs.data_compressed_original);
+ ASSERT_EQ(statfs.data_compressed_allocated, 0x10000);
//force fsck
ch.reset();
EXPECT_EQ(store->umount(), 0);
struct store_statfs_t statfs;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
- ASSERT_EQ(0x30000 + 0x1001, statfs.stored);
+ ASSERT_EQ(0x30000 + 0x1001, statfs.data_stored);
ASSERT_EQ(0x40000, statfs.allocated);
- ASSERT_LE(statfs.compressed, 0);
- ASSERT_EQ(0, statfs.compressed_original);
- ASSERT_EQ(0, statfs.compressed_allocated);
+ ASSERT_LE(statfs.data_compressed, 0);
+ ASSERT_EQ(0, statfs.data_compressed_original);
+ ASSERT_EQ(0, statfs.data_compressed_allocated);
//force fsck
ch.reset();
EXPECT_EQ(store->umount(), 0);
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(0u, statfs.allocated);
- ASSERT_EQ(0u, statfs.stored);
- ASSERT_EQ(0u, statfs.compressed_original);
- ASSERT_EQ(0u, statfs.compressed);
- ASSERT_EQ(0u, statfs.compressed_allocated);
+ ASSERT_EQ(0u, statfs.data_stored);
+ ASSERT_EQ(0u, statfs.data_compressed_original);
+ ASSERT_EQ(0u, statfs.data_compressed);
+ ASSERT_EQ(0u, statfs.data_compressed_allocated);
//force fsck
ch.reset();
EXPECT_EQ(store->umount(), 0);
struct store_statfs_t statfs;
r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
- ASSERT_EQ(0x40000 - 2, statfs.stored);
+ ASSERT_EQ(0x40000 - 2, statfs.data_stored);
ASSERT_EQ(0x30000, statfs.allocated);
- ASSERT_LE(statfs.compressed, 0x10000);
- ASSERT_EQ(0x20000, statfs.compressed_original);
- ASSERT_EQ(0x10000, statfs.compressed_allocated);
+ ASSERT_LE(statfs.data_compressed, 0x10000);
+ ASSERT_EQ(0x20000, statfs.data_compressed_original);
+ ASSERT_EQ(0x10000, statfs.data_compressed_allocated);
//force fsck
ch.reset();
EXPECT_EQ(store->umount(), 0);
struct store_statfs_t statfs2;
r = store->statfs(&statfs2);
ASSERT_EQ(r, 0);
- ASSERT_GT(statfs2.stored, statfs.stored);
+ ASSERT_GT(statfs2.data_stored, statfs.data_stored);
ASSERT_EQ(statfs2.allocated, statfs.allocated);
- ASSERT_GT(statfs2.compressed, statfs.compressed);
- ASSERT_GT(statfs2.compressed_original, statfs.compressed_original);
- ASSERT_EQ(statfs2.compressed_allocated, statfs.compressed_allocated);
+ ASSERT_GT(statfs2.data_compressed, statfs.data_compressed);
+ ASSERT_GT(statfs2.data_compressed_original, statfs.data_compressed_original);
+ ASSERT_EQ(statfs2.data_compressed_allocated, statfs.data_compressed_allocated);
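// The second sample grows only the logical counters (data_stored and the
// compression figures) while allocated and data_compressed_allocated are
// unchanged -- consistent with new references to already-allocated shared
// blobs rather than fresh writes.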
}
{
r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ( 0u, statfs.allocated);
- ASSERT_EQ( 0u, statfs.stored);
- ASSERT_EQ( 0u, statfs.compressed_original);
- ASSERT_EQ( 0u, statfs.compressed);
- ASSERT_EQ( 0u, statfs.compressed_allocated);
+ ASSERT_EQ( 0u, statfs.data_stored);
+ ASSERT_EQ( 0u, statfs.data_compressed_original);
+ ASSERT_EQ( 0u, statfs.data_compressed);
+ ASSERT_EQ( 0u, statfs.data_compressed_allocated);
}
}
ASSERT_EQ(r, 0);
ASSERT_EQ(g_conf->bluestore_block_size, statfs.total);
ASSERT_EQ(0u, statfs.allocated);
- ASSERT_EQ(0u, statfs.stored);
+ ASSERT_EQ(0u, statfs.data_stored);
ASSERT_TRUE(statfs.available > 0u && statfs.available < g_conf->bluestore_block_size);
}
std::string data;
struct store_statfs_t statfs;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
- ASSERT_EQ(0x20000, statfs.stored);
+ ASSERT_EQ(0x20000, statfs.data_stored);
ASSERT_EQ(0x20000, statfs.allocated);
r = store->read(ch, hoid, 0, data.size(), newdata);
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(0x20000, statfs.allocated);
- ASSERT_EQ(0x20000, statfs.stored);
+ ASSERT_EQ(0x20000, statfs.data_stored);
r = store->read(ch, hoid, 0x20000-1, 21, newdata);
ASSERT_EQ(r, (int)21);
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(0x30000, statfs.allocated);
- ASSERT_EQ(0x20003, statfs.stored);
+ ASSERT_EQ(0x20003, statfs.data_stored);
r = store->read(ch, hoid, 0x10000-1, 0x10000+22, newdata);
ASSERT_EQ(r, (int)0x10000+22);
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(0x10000, statfs.allocated);
- ASSERT_EQ(0x10000, statfs.stored);
+ ASSERT_EQ(0x10000, statfs.data_stored);
r = store->read(ch, hoid, 0, 0x30000, newdata);
ASSERT_EQ(r, (int)0x30000);
r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ( 0u, statfs.allocated);
- ASSERT_EQ( 0u, statfs.stored);
- ASSERT_EQ( 0u, statfs.compressed_original);
- ASSERT_EQ( 0u, statfs.compressed);
- ASSERT_EQ( 0u, statfs.compressed_allocated);
+ ASSERT_EQ( 0u, statfs.data_stored);
+ ASSERT_EQ( 0u, statfs.data_compressed_original);
+ ASSERT_EQ( 0u, statfs.data_compressed);
+ ASSERT_EQ( 0u, statfs.data_compressed_allocated);
}
}
#endif
}
test_obj.wait_for_done();
test_obj.statfs(res_stat);
- if (!(res_stat.stored <= max_object_size) ||
+ if (!(res_stat.data_stored <= max_object_size) ||
!(res_stat.allocated <= max_object_size)) {
// this will provide more insight into the mismatch and
// will help avoid any races during stats collection
test_obj.fsck(false);
// retrieve the stats once more and assert if they are still broken
test_obj.statfs(res_stat);
- ASSERT_LE(res_stat.stored, max_object_size);
+ ASSERT_LE(res_stat.data_stored, max_object_size);
ASSERT_LE(res_stat.allocated, max_object_size);
}
test_obj.shutdown();
WRITE_AT(0, buf_len);
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
- ASSERT_EQ(statfs.compressed_allocated, 0x10000);
+ ASSERT_EQ(statfs.data_compressed_allocated, 0x10000);
}
{
struct store_statfs_t statfs;
WRITE_AT(write_offset - 2 * overlap_offset, buf_len);
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
- ASSERT_EQ(statfs.compressed_allocated, 0x20000);
+ ASSERT_EQ(statfs.data_compressed_allocated, 0x20000);
const PerfCounters* counters = store->get_perf_counters();
ASSERT_EQ(counters->get(l_bluestore_gc_merged), 0u);
}
WRITE_AT(write_offset - overlap_offset, buf_len);
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
- ASSERT_EQ(statfs.compressed_allocated, 0x20000);
+ ASSERT_EQ(statfs.data_compressed_allocated, 0x20000);
const PerfCounters* counters = store->get_perf_counters();
ASSERT_EQ(counters->get(l_bluestore_gc_merged), 0x10000u);
}
WRITE_AT(write_offset - 3 * overlap_offset, buf_len);
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
- ASSERT_EQ(statfs.compressed_allocated, 0x20000);
+ ASSERT_EQ(statfs.data_compressed_allocated, 0x20000);
const PerfCounters* counters = store->get_perf_counters();
ASSERT_EQ(counters->get(l_bluestore_gc_merged), 0x20000u);
}
WRITE_AT(write_offset + 1, overlap_offset-1);
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
- ASSERT_EQ(statfs.compressed_allocated, 0x20000);
+ ASSERT_EQ(statfs.data_compressed_allocated, 0x20000);
const PerfCounters* counters = store->get_perf_counters();
ASSERT_EQ(counters->get(l_bluestore_gc_merged), 0x20000u);
}
WRITE_AT(write_offset + 1, overlap_offset);
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
- ASSERT_EQ(statfs.compressed_allocated, 0x10000);
+ ASSERT_EQ(statfs.data_compressed_allocated, 0x10000);
const PerfCounters* counters = store->get_perf_counters();
ASSERT_EQ(counters->get(l_bluestore_gc_merged), 0x3ffffu);
}
WRITE_AT(0, buf_len-1);
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
- ASSERT_EQ(statfs.compressed_allocated, 0x10000);
+ ASSERT_EQ(statfs.data_compressed_allocated, 0x10000);
const PerfCounters* counters = store->get_perf_counters();
ASSERT_EQ(counters->get(l_bluestore_gc_merged), 0x40001u);
}
WRITE_AT(overlap_offset * 2 + 1, overlap_offset-2);
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
- ASSERT_EQ(statfs.compressed_allocated, 0x10000);
+ ASSERT_EQ(statfs.data_compressed_allocated, 0x10000);
const PerfCounters* counters = store->get_perf_counters();
ASSERT_EQ(counters->get(l_bluestore_gc_merged), 0x40001u);
}
WRITE_AT(overlap_offset + 1, overlap_offset-2);
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
- ASSERT_EQ(statfs.compressed_allocated, 0x0);
+ ASSERT_EQ(statfs.data_compressed_allocated, 0x0);
const PerfCounters* counters = store->get_perf_counters();
ASSERT_EQ(counters->get(l_bluestore_gc_merged), 0x40007u);
}
ASSERT_EQ(bstore->statfs(&statfs0), 0);
statfs = statfs0;
statfs.allocated += 0x10000;
- statfs.stored += 0x10000;
+ statfs.data_stored += 0x10000;
ASSERT_FALSE(statfs0 == statfs);
bstore->inject_statfs(statfs);
bstore->umount();
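// The injected skew (allocated and data_stored each bumped by 0x10000)
// leaves the persisted statfs out of step with the actual on-disk usage,
// presumably so the repair path exercised after remount can detect and
// correct the mismatch.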