This allows viewing RocksDB sharding information via both the admin socket and osd metadata commands.
Fixes: https://tracker.ceph.com/issues/71609
Signed-off-by: Garry Drankovich <garry.drankovich@clyso.com>
this,
"print compression stats, per collection");
ceph_assert(r == 0);
+ r = admin_socket->register_command(
+ "bluestore show sharding ",
+ this,
+ "print RocksDB sharding");
+ ceph_assert(r == 0);
}
}
}
f->close_section();
return 0;
+ } else if (command == "bluestore show sharding") {
+ int r = 0;
+ std::string sharding;
+ if (store.get_db_sharding(sharding)) {
+ out.append(sharding + '\n');
+ } else {
+ r = -EFAULT;
+ ss << "Failed to get sharding" << std::endl;
+ }
+ return r;
} else {
ss << "Invalid command" << std::endl;
r = -ENOSYS;
return r;
}
+// Retrieve the RocksDB column-family sharding definition of the backing KV
+// store, if it is RocksDB-backed.
+// @param res_sharding [out] filled with the sharding description on success.
+// @return true if the sharding string was obtained, false otherwise
+//         (no db, db not RocksDB, or RocksDBStore reports no sharding).
+bool BlueStore::get_db_sharding(std::string& res_sharding)
+{
+  bool ret = false;
+  RocksDBStore* rdb = dynamic_cast<RocksDBStore*>(db);
+  // Must test the cast result, not db: dynamic_cast yields nullptr when db
+  // is not a RocksDBStore, and dereferencing rdb then would crash.
+  if (rdb) {
+    ret = rdb->get_sharding(res_sharding);
+  }
+  return ret;
+}
+
int BlueStore::dump_bluefs_sizes(ostream& out)
{
int r = _open_db_and_around(true);
(*pm)["bluestore_allocator"] = alloc ? alloc->get_type() : "null";
(*pm)["bluestore_write_mode"] = use_write_v2 ? "new" : "classic";
(*pm)["bluestore_onode_segmentation"] = segment_size == 0 ? "inactive" : "active";
+ std::string sharding;
+ if (get_db_sharding(sharding)) {
+ (*pm)["bluestore_db_sharding"] = sharding;
+ }
}
int BlueStore::get_numa_node(
int expand_devices(std::ostream& out);
std::string get_device_path(unsigned id);
+ bool get_db_sharding(std::string& res_sharding);
+
int dump_bluefs_sizes(std::ostream& out);
void trim_free_space(const std::string& type, std::ostream& outss);
static int zap_device(CephContext* cct, const std::string& dev);
+
public:
int statfs(struct store_statfs_t *buf,
osd_alert_list_t* alerts = nullptr) override;