git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
crimson/osd/osd_admin: add osd command to dump store shards info
author: chunmei liu <chunmei.liu@ibm.com>
Thu, 24 Jul 2025 13:33:06 +0000 (06:33 -0700)
committer: Chunmei Liu <chunmei.liu@ibm.com>
Wed, 28 Jan 2026 00:23:23 +0000 (00:23 +0000)
Signed-off-by: chunmei liu <chunmei.liu@ibm.com>
src/crimson/admin/osd_admin.cc
src/crimson/admin/osd_admin.h
src/crimson/osd/osd.cc
src/crimson/osd/pg_map.cc
src/crimson/osd/pg_map.h
src/crimson/osd/shard_services.h

index b14336051e21c053dc7826dd82c34f69d9fc8424..7dc03ed103cb2d6b8d7b8b90320866606329fb2f 100644 (file)
@@ -618,4 +618,28 @@ private:
 template std::unique_ptr<AdminSocketHook>
 make_asok_hook<DumpRecoveryReservationsHook>(crimson::osd::ShardServices& shard_services);
 
+// Admin-socket hook for the "dump_store_shards" tell command: dumps the
+// mapping between reactor (OSD) shards and objectstore shards via
+// ShardServices::dump_store_shards.
+class StoreShardNumsHook : public AdminSocketHook {
+public:
+  explicit StoreShardNumsHook(crimson::osd::ShardServices& shard_services) :
+    AdminSocketHook{"dump_store_shards", "", "show store shards on each osd shard"},
+    shard_services(shard_services)
+  {}
+  // Formats the store-shard layout into a Formatter chosen by `format`
+  // (falling back to json-pretty) and returns it as the tell result.
+  // The cmdmap and `input` payload are unused.
+  seastar::future<tell_result_t> call(const cmdmap_t&,
+                                     std::string_view format,
+                                     ceph::bufferlist&& input) const final
+  {
+    LOG_PREFIX(AdminSocketHook::StoreShardNumsHook);
+    DEBUG("");
+    // NOTE(review): unqualified `unique_ptr`/`Formatter` rely on
+    // using-declarations earlier in this file — confirm they are in scope.
+    unique_ptr<Formatter> f{Formatter::create(format, "json-pretty", "json-pretty")};
+    f->open_object_section("Store shards");
+    co_await shard_services.dump_store_shards(f.get());
+    f->close_section();
+    // NOTE(review): assumes tell_result_t is constructible from
+    // unique_ptr<Formatter> — confirm against the other hooks in this file.
+    co_return std::move(f);
+  }
+private:
+  // Borrowed reference; the owning OSD outlives registered asok hooks.
+  crimson::osd::ShardServices& shard_services;
+};
+// Explicit instantiation so make_asok_hook<StoreShardNumsHook> is emitted in
+// this translation unit, matching the pattern used by the hooks above.
+template std::unique_ptr<AdminSocketHook>
+make_asok_hook<StoreShardNumsHook>(crimson::osd::ShardServices& shard_services);
+
 } // namespace crimson::admin
index b33ea005fb8c0b64572bc634d09acf2410168d0f..bb877decf148dae02242f996d5a808fb7b4af366 100644 (file)
@@ -23,6 +23,7 @@ class DumpInFlightOpsHook;
 class DumpHistoricOpsHook;
 class DumpSlowestHistoricOpsHook;
 class DumpRecoveryReservationsHook;
+class StoreShardNumsHook;
 
 template<class Hook, class... Args>
 std::unique_ptr<AdminSocketHook> make_asok_hook(Args&&... args);
index f0d1f2962b4b1a55d3f2fc9052d2f1d75208191d..7a81159254b1a6f12885fbbdbf6da19dd35f0a8d 100644 (file)
@@ -833,6 +833,8 @@ seastar::future<> OSD::start_asok_admin()
        std::as_const(get_shard_services().get_registry())));
     asok->register_command(
       make_asok_hook<DumpRecoveryReservationsHook>(get_shard_services()));
+    asok->register_command(
+      make_asok_hook<StoreShardNumsHook>(get_shard_services()));
   });
 }
 
index 5444dc54dc2fa9f6eb1188f2195deefef01fe7c2..01504d2cd6512c64a8d62bfcc672930a14308770 100644 (file)
@@ -12,6 +12,51 @@ using std::make_pair;
 
 namespace crimson::osd {
 
+// Dump this reactor's view of the PG / store-shard layout into *f.
+// Emits, inside the section the caller has already opened:
+//   - this reactor's shard id, the reactor count, and store_shard_nums;
+//   - a "core_pgs" object per core with its PG count (core_to_num_pgs);
+//   - when store shards outnumber reactors, per-core PG counts per store
+//     index (core_shard_to_num_pgs);
+//   - when reactors outnumber store shards, per-core PG counts per alien
+//     core (core_alien_to_num_pgs).
+// NOTE(review): the smp::count == store_shard_nums case dumps neither detail
+// map — presumably the 1:1 mapping makes them redundant; confirm.
+// NOTE(review): key naming is inconsistent ("this shard id" / "osd shard
+// nums" with spaces vs "store_shard_nums" with underscores) — consider
+// unifying before the output format becomes an external interface.
+seastar::future<> PGShardMapping::dump_store_shards(Formatter *f) const {
+  f->dump_int("this shard id", seastar::this_shard_id());
+  f->dump_int("osd shard nums", seastar::smp::count);
+  f->dump_int("store_shard_nums", store_shard_nums);
+
+  // One object per reactor core holding the number of PGs mapped to it.
+  for (const auto &i : core_to_num_pgs) {
+    f->open_object_section("core_pgs");
+    f->dump_int("core", i.first);
+    f->dump_int("num_pgs", i.second);
+    f->close_section();
+  }
+
+  // More store shards than reactors: each core drives several store
+  // indices; show the PG count per (core, store_index) pair.
+  if (seastar::smp::count < store_shard_nums) {
+    for (auto i = core_shard_to_num_pgs.begin();
+         i != core_shard_to_num_pgs.end(); ++i) {
+      f->open_object_section("core_store");
+      f->dump_int("core", i->first);
+      for (auto j = i->second.begin();
+           j != i->second.end(); ++j) {
+        f->open_object_section("store");
+        f->dump_int("store_index", j->first);
+        f->dump_int("num_pgs", j->second);
+        f->close_section();
+      }
+      f->close_section();
+    }
+  }
+  // More reactors than store shards: several cores funnel into each store
+  // shard via alien cores; show the PG count per (core, alien_core) pair.
+  if(seastar::smp::count > store_shard_nums) {
+    for (auto i = core_alien_to_num_pgs.begin();
+         i != core_alien_to_num_pgs.end(); ++i) {
+      f->open_object_section("core_alien");
+      f->dump_int("core", i->first);
+      for (auto j = i->second.begin();
+           j != i->second.end(); ++j) {
+        f->open_object_section("alien_core");
+        f->dump_int("alien_core_id", j->first);
+        f->dump_int("num_pgs", j->second);
+        f->close_section();
+      }
+      f->close_section();
+    }
+  }
+  // Purely synchronous formatting; resolve immediately.
+  return seastar::now();
+}
+
 seastar::future<std::pair<core_id_t, unsigned int>> PGShardMapping::get_or_create_pg_mapping(
   spg_t pgid,
   core_id_t core_expected,
index 7006f19874bb5dc2611eec3e11679c1f77d64125..c112f8acde994fcc1aa105a7bbd20ba8da1554e1 100644 (file)
@@ -45,6 +45,8 @@ public:
 
   size_t get_num_pgs() const { return pg_to_core.size(); }
 
+  seastar::future<> dump_store_shards(Formatter *f) const;
+
   /// Map to cores in [min_core_mapping, core_mapping_limit)
   PGShardMapping(core_id_t min_core_mapping, core_id_t core_mapping_limit, unsigned int store_shard_nums)
     : store_shard_nums(store_shard_nums) {
index e3eaede3fae49f8da702dcfd96e0319294f1510e..ea93900e17fbed2aa3e919121c7558bb3a7bc96b 100644 (file)
@@ -517,6 +517,10 @@ public:
     return {get_reactor_utilization()};
   }
 
+  // Write the store-shard layout of this shard's PG mapping into *f;
+  // thin forwarder to PGShardMapping::dump_store_shards.
+  auto dump_store_shards(Formatter *f) const {
+    return pg_to_shard_mapping.dump_store_shards(f);
+  }
+
   auto create_split_pg_mapping(spg_t pgid, core_id_t core, unsigned int store_index) {
     return pg_to_shard_mapping.get_or_create_pg_mapping(pgid, core, store_index);
   }