git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
crimson/osd/osd_admin: add osd command to dump store shards info
author: chunmei liu <chunmei.liu@ibm.com>
Mon, 9 Mar 2026 22:51:46 +0000 (15:51 -0700)
committer: chunmei liu <chunmei.liu@ibm.com>
Thu, 12 Mar 2026 19:10:09 +0000 (12:10 -0700)
Signed-off-by: chunmei liu <chunmei.liu@ibm.com>
src/crimson/admin/osd_admin.cc
src/crimson/admin/osd_admin.h
src/crimson/osd/osd.cc
src/crimson/osd/pg_map.cc
src/crimson/osd/pg_map.h
src/crimson/osd/shard_services.h

index d9f8daa5b9c573be28d89d7b94c371b583684a87..e69edaae15083460aa415da3de7b65259a59fdda 100644 (file)
@@ -640,4 +640,28 @@ public:
 };
 template std::unique_ptr<AdminSocketHook> make_asok_hook<DumpReactorBackendHook>();
 
+// Admin-socket hook backing the "dump_store_shards" tell command: dumps how
+// PGs are distributed across reactor shards and store shards for this OSD
+// (delegates the actual dump to ShardServices::dump_store_shards).
+class StoreShardNumsHook : public AdminSocketHook {
+public:
+  explicit StoreShardNumsHook(crimson::osd::ShardServices& shard_services) :
+    AdminSocketHook{"dump_store_shards", "", "show store shards on each osd shard"},
+    shard_services(shard_services)
+  {}
+  // Coroutine: formats the shard mapping into a single "Store shards" object.
+  seastar::future<tell_result_t> call(const cmdmap_t&,
+                                     std::string_view format,
+                                     ceph::bufferlist&& input) const final
+  {
+    LOG_PREFIX(AdminSocketHook::StoreShardNumsHook);
+    DEBUG("");
+    // NOTE(review): unqualified unique_ptr — presumably a using-declaration
+    // earlier in this file; elsewhere (e.g. the instantiation below) the file
+    // spells std::unique_ptr. Confirm and make consistent.
+    unique_ptr<Formatter> f{Formatter::create(format, "json-pretty", "json-pretty")};
+    f->open_object_section("Store shards");
+    co_await shard_services.dump_store_shards(f.get());
+    f->close_section();
+    // NOTE(review): relies on tell_result_t being constructible from
+    // std::unique_ptr<Formatter> (other hooks typically build it from the raw
+    // Formatter*) — confirm against admin_socket.h.
+    co_return std::move(f);
+  }
+private:
+  crimson::osd::ShardServices& shard_services;  // borrowed; outlives the hook
+};
+template std::unique_ptr<AdminSocketHook>
+make_asok_hook<StoreShardNumsHook>(crimson::osd::ShardServices& shard_services);
+
 } // namespace crimson::admin
index 2a5d47c57f8baaec174ae12ad00590c5d114f0c4..334110e575ad16d830279f7c6a641b9751cc1981 100644 (file)
@@ -24,7 +24,7 @@ class DumpHistoricOpsHook;
 class DumpSlowestHistoricOpsHook;
 class DumpRecoveryReservationsHook;
 class DumpReactorBackendHook;
-
+class StoreShardNumsHook;
 
 template<class Hook, class... Args>
 std::unique_ptr<AdminSocketHook> make_asok_hook(Args&&... args);
index afda91077ea119163f76126ef34907ed01b613c0..6dae3ec9d954a4bcd03f3f906124761290ef694b 100644 (file)
@@ -843,6 +843,8 @@ seastar::future<> OSD::start_asok_admin()
       make_asok_hook<DumpRecoveryReservationsHook>(get_shard_services()));
     asok->register_command(
       make_asok_hook<DumpReactorBackendHook>());
+    asok->register_command(
+      make_asok_hook<StoreShardNumsHook>(get_shard_services()));
   });
 }
 
index 10b4a0dcea770d3be4088c655a4147b5316c637b..a663234670778e50d94511ad0ca1428194e2f71e 100644 (file)
@@ -12,6 +12,51 @@ using std::make_pair;
 
 namespace crimson::osd {
 
+seastar::future<> PGShardMapping::dump_store_shards(Formatter *f) const {
+  f->dump_int("this shard id", seastar::this_shard_id());
+  f->dump_int("osd shard nums", seastar::smp::count);
+  f->dump_int("store_shard_nums", store_shard_nums);
+
+  for (const auto &i : core_to_num_pgs) {
+    f->open_object_section("core_pgs");
+    f->dump_int("core", i.first);
+    f->dump_int("num_pgs", i.second);
+    f->close_section();
+  }
+
+  if (seastar::smp::count < store_shard_nums) {
+    for (auto i = core_shard_to_num_pgs.begin();
+         i != core_shard_to_num_pgs.end(); ++i) {
+      f->open_object_section("core_store");
+      f->dump_int("core", i->first);
+      for (auto j = i->second.begin();
+           j != i->second.end(); ++j) {
+        f->open_object_section("store");
+        f->dump_int("store_index", j->first);
+        f->dump_int("num_pgs", j->second);
+        f->close_section();
+      }
+      f->close_section();
+    }
+  }
+  if(seastar::smp::count > store_shard_nums) {
+    for (auto i = core_alien_to_num_pgs.begin();
+         i != core_alien_to_num_pgs.end(); ++i) {
+      f->open_object_section("core_alien");
+      f->dump_int("core", i->first);
+      for (auto j = i->second.begin();
+           j != i->second.end(); ++j) {
+        f->open_object_section("alien_core");
+        f->dump_int("alien_core_id", j->first);
+        f->dump_int("num_pgs", j->second);
+        f->close_section();
+      }
+      f->close_section();
+    }
+  }
+  return seastar::now();
+}
+
 seastar::future<std::pair<core_id_t, store_index_t>> PGShardMapping::get_or_create_pg_mapping(
   spg_t pgid,
   core_id_t core_expected,
index 8ab92fccdacbcd2bd7bb4c8094a42ab1aa643c7b..13d071e92828a4c9a9a76852092c9867ce7f8795 100644 (file)
@@ -45,6 +45,8 @@ public:
 
   size_t get_num_pgs() const { return pg_to_core.size(); }
 
+  // Dump this mapping's PG distribution (per core, and per store shard or
+  // alien core where applicable) onto f; defined in pg_map.cc.
+  seastar::future<> dump_store_shards(Formatter *f) const;
+
   /// Map to cores in [min_core_mapping, core_mapping_limit)
   PGShardMapping(core_id_t min_core_mapping, core_id_t core_mapping_limit, uint32_t store_shard_nums)
     : store_shard_nums(store_shard_nums) {
index 8a5c8e4532923bbe98e6b7c7ac4bd14b22676e3e..e8586a01dac0a17803f050fa63802d094a8069ab 100644 (file)
@@ -498,6 +498,10 @@ public:
     return {get_reactor_utilization()};
   }
 
+  // Forward to PGShardMapping::dump_store_shards: write the per-core /
+  // per-store-shard PG distribution onto f.
+  auto dump_store_shards(Formatter *f) const {
+    return pg_to_shard_mapping.dump_store_shards(f);
+  }
+
   auto create_split_pg_mapping(spg_t pgid, core_id_t core, store_index_t store_index) {
     return pg_to_shard_mapping.get_or_create_pg_mapping(pgid, core, store_index);
   }