]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
crimson/osd/osd_admin: add osd command to dump store
authorchunmei liu <chunmei.liu@ibm.com>
Thu, 19 Feb 2026 23:02:43 +0000 (15:02 -0800)
committerchunmei liu <chunmei.liu@ibm.com>
Thu, 19 Feb 2026 23:31:02 +0000 (15:31 -0800)
 shards info

Signed-off-by: chunmei liu <chunmei.liu@ibm.com>
src/crimson/admin/osd_admin.cc
src/crimson/admin/osd_admin.h
src/crimson/osd/osd.cc
src/crimson/osd/pg_map.cc
src/crimson/osd/pg_map.h
src/crimson/osd/shard_services.h

index b36cdd7b0a1273059d6dbe777b1b78ed837a58c3..3828c5251759739a5bb86f7d4483e4df41b4ff72 100644 (file)
@@ -618,4 +618,28 @@ private:
 template std::unique_ptr<AdminSocketHook>
 make_asok_hook<DumpRecoveryReservationsHook>(crimson::osd::ShardServices& shard_services);
 
+class StoreShardNumsHook : public AdminSocketHook {
+public:
+  explicit StoreShardNumsHook(crimson::osd::ShardServices& shard_services) :
+    AdminSocketHook{"dump_store_shards", "", "show store shards on each osd shard"},
+    shard_services(shard_services)
+  {}
+  seastar::future<tell_result_t> call(const cmdmap_t&,
+                                     std::string_view format,
+                                     ceph::bufferlist&& input) const final
+  {
+    LOG_PREFIX(AdminSocketHook::StoreShardNumsHook);
+    DEBUG("");
+    unique_ptr<Formatter> f{Formatter::create(format, "json-pretty", "json-pretty")};
+    f->open_object_section("Store shards");
+    co_await shard_services.dump_store_shards(f.get());
+    f->close_section();
+    co_return std::move(f);
+  }
+private:
+  crimson::osd::ShardServices& shard_services;
+};
+template std::unique_ptr<AdminSocketHook>
+make_asok_hook<StoreShardNumsHook>(crimson::osd::ShardServices& shard_services);
+
 } // namespace crimson::admin
index b33ea005fb8c0b64572bc634d09acf2410168d0f..bb877decf148dae02242f996d5a808fb7b4af366 100644 (file)
@@ -23,6 +23,7 @@ class DumpInFlightOpsHook;
 class DumpHistoricOpsHook;
 class DumpSlowestHistoricOpsHook;
 class DumpRecoveryReservationsHook;
+class StoreShardNumsHook;
 
 template<class Hook, class... Args>
 std::unique_ptr<AdminSocketHook> make_asok_hook(Args&&... args);
index e03d7c634da1b22a899f08ca1e9705bf9e0c7e41..0467afdae3d9df86f33b49ef2f11607d155b0e5e 100644 (file)
@@ -841,6 +841,8 @@ seastar::future<> OSD::start_asok_admin()
        std::as_const(get_shard_services().get_registry())));
     asok->register_command(
       make_asok_hook<DumpRecoveryReservationsHook>(get_shard_services()));
+    asok->register_command(
+      make_asok_hook<StoreShardNumsHook>(get_shard_services()));
   });
 }
 
index 7dd8d1cc10a26c4a9793c84eb25af0c636132217..21aa88751ac0f375789be068319f9c651c49daf6 100644 (file)
@@ -12,6 +12,51 @@ using std::make_pair;
 
 namespace crimson::osd {
 
+seastar::future<> PGShardMapping::dump_store_shards(Formatter *f) const {
+  f->dump_int("this shard id", seastar::this_shard_id());
+  f->dump_int("osd shard nums", seastar::smp::count);
+  f->dump_int("store_shard_nums", store_shard_nums);
+
+  for (const auto &i : core_to_num_pgs) {
+    f->open_object_section("core_pgs");
+    f->dump_int("core", i.first);
+    f->dump_int("num_pgs", i.second);
+    f->close_section();
+  }
+
+  if (seastar::smp::count < store_shard_nums) {
+    for (auto i = core_shard_to_num_pgs.begin();
+         i != core_shard_to_num_pgs.end(); ++i) {
+      f->open_object_section("core_store");
+      f->dump_int("core", i->first);
+      for (auto j = i->second.begin();
+           j != i->second.end(); ++j) {
+        f->open_object_section("store");
+        f->dump_int("store_index", j->first);
+        f->dump_int("num_pgs", j->second);
+        f->close_section();
+      }
+      f->close_section();
+    }
+  }
+  if(seastar::smp::count > store_shard_nums) {
+    for (auto i = core_alien_to_num_pgs.begin();
+         i != core_alien_to_num_pgs.end(); ++i) {
+      f->open_object_section("core_alien");
+      f->dump_int("core", i->first);
+      for (auto j = i->second.begin();
+           j != i->second.end(); ++j) {
+        f->open_object_section("alien_core");
+        f->dump_int("alien_core_id", j->first);
+        f->dump_int("num_pgs", j->second);
+        f->close_section();
+      }
+      f->close_section();
+    }
+  }
+  return seastar::now();
+}
+
 seastar::future<std::pair<core_id_t, unsigned int>> PGShardMapping::get_or_create_pg_mapping(
   spg_t pgid,
   core_id_t core_expected,
index cebdf86bee62f5081111556b47a14f5cc20d659b..a9e83a708947f00e2825daab0dd37c6a9b9a8856 100644 (file)
@@ -45,6 +45,8 @@ public:
 
   size_t get_num_pgs() const { return pg_to_core.size(); }
 
+  seastar::future<> dump_store_shards(Formatter *f) const;
+
   /// Map to cores in [min_core_mapping, core_mapping_limit)
   PGShardMapping(core_id_t min_core_mapping, core_id_t core_mapping_limit, uint32_t store_shard_nums)
     : store_shard_nums(store_shard_nums) {
index bb60fd97c9086c3afe01428e18c63c234f6b6621..1a38d10a85f7fcb69b351f092b0ad8d424c5fe11 100644 (file)
@@ -498,6 +498,10 @@ public:
     return {get_reactor_utilization()};
   }
 
+  // Forward the store-shard dump to the PG shard mapping, which owns the
+  // core/store bookkeeping (used by the "dump_store_shards" asok hook).
+  auto dump_store_shards(Formatter *f) const {
+    return pg_to_shard_mapping.dump_store_shards(f);
+  }
+
   auto create_split_pg_mapping(spg_t pgid, core_id_t core, uint32_t store_index) {
     return pg_to_shard_mapping.get_or_create_pg_mapping(pgid, core, store_index);
   }