git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
crimson/os/seastore: make register_metrics work for multiple store shards on one reactor
author Chunmei Liu <chunmei.liu@ibm.com>
Fri, 17 Oct 2025 23:35:54 +0000 (23:35 +0000)
committer Chunmei Liu <chunmei.liu@ibm.com>
Sat, 18 Oct 2025 04:06:35 +0000 (04:06 +0000)

Signed-off-by: Chunmei Liu <chunmei.liu@ibm.com>
22 files changed:
src/crimson/os/seastore/async_cleaner.cc
src/crimson/os/seastore/async_cleaner.h
src/crimson/os/seastore/cache.cc
src/crimson/os/seastore/cache.h
src/crimson/os/seastore/extent_pinboard.cc
src/crimson/os/seastore/extent_pinboard.h
src/crimson/os/seastore/extent_placement_manager.cc
src/crimson/os/seastore/extent_placement_manager.h
src/crimson/os/seastore/journal.cc
src/crimson/os/seastore/journal.h
src/crimson/os/seastore/journal/circular_bounded_journal.cc
src/crimson/os/seastore/journal/circular_bounded_journal.h
src/crimson/os/seastore/journal/record_submitter.cc
src/crimson/os/seastore/journal/record_submitter.h
src/crimson/os/seastore/journal/segmented_journal.cc
src/crimson/os/seastore/journal/segmented_journal.h
src/crimson/os/seastore/lba/btree_lba_manager.cc
src/crimson/os/seastore/lba/btree_lba_manager.h
src/crimson/os/seastore/lba_manager.cc
src/crimson/os/seastore/lba_manager.h
src/crimson/os/seastore/transaction_manager.cc
src/crimson/os/seastore/transaction_manager.h

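Background (editor's note, not part of the commit): with multiple SeaStore instances sharing one Seastar reactor, each instance's register_metrics() would otherwise register the same metric names on the same shard, so the series either collide or become indistinguishable. The patch threads a store_index through the constructors and attaches it as a shard_store_index label. Below is a minimal, self-contained sketch of that pattern; the MultiStoreMetrics class and the example_store group name are illustrative only and do not appear in this commit.

#include <seastar/core/metrics.hh>
#include <cstdint>
#include <string>

namespace sm = seastar::metrics;

// Hypothetical component owned once per store instance on a reactor.
class MultiStoreMetrics {
public:
  explicit MultiStoreMetrics(unsigned store_index) {
    metrics.add_group("example_store", {
      sm::make_counter(
        "io_count",
        [this] { return io_count; },
        sm::description("IOs issued by this store instance"),
        // The label is what keeps two instances on the same reactor distinct.
        {sm::label_instance("shard_store_index", std::to_string(store_index))}),
    });
  }
  void note_io() { ++io_count; }

private:
  uint64_t io_count = 0;
  sm::metric_groups metrics;  // unregisters the group on destruction
};

Constructing two such objects with store_index 0 and 1 on the same reactor yields two io_count series that differ only in the shard_store_index label, which is the effect each register_metrics(store_index) change below aims for.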
diff --git a/src/crimson/os/seastore/async_cleaner.cc b/src/crimson/os/seastore/async_cleaner.cc
index f7592810a4012107425bfd5c2ad5d5de62c82083..74d37221ac8a49a57d6918b917d8f6cf3bf5d71c 100644 (file)
@@ -409,6 +409,7 @@ JournalTrimmerImpl::config_t::get_test(
 }
 
 JournalTrimmerImpl::JournalTrimmerImpl(
+  unsigned int store_index,
   BackrefManager &backref_manager,
   config_t config,
   backend_type_t type,
@@ -424,7 +425,7 @@ JournalTrimmerImpl::JournalTrimmerImpl(
   config.validate();
   ceph_assert(roll_start >= 0);
   ceph_assert(roll_size > 0);
-  register_metrics();
+  register_metrics(store_index);
 }
 
 void JournalTrimmerImpl::set_journal_head(journal_seq_t head)
@@ -730,16 +731,20 @@ JournalTrimmerImpl::trim_dirty()
   });
 }
 
-void JournalTrimmerImpl::register_metrics()
+void JournalTrimmerImpl::register_metrics(unsigned int store_index)
 {
   namespace sm = seastar::metrics;
   metrics.add_group("journal_trimmer", {
     sm::make_counter("dirty_journal_bytes",
                      [this] { return get_dirty_journal_size(); },
-                     sm::description("the size of the journal for dirty extents")),
+                     sm::description("the size of the journal for dirty extents"),
+                     {sm::label_instance("shard_store_index",
+                                         std::to_string(store_index))}),
     sm::make_counter("alloc_journal_bytes",
                      [this] { return get_alloc_journal_size(); },
-                     sm::description("the size of the journal for alloc info"))
+                     sm::description("the size of the journal for alloc info"),
+                     {sm::label_instance("shard_store_index",
+                                         std::to_string(store_index))}),
   });
 }
 
@@ -903,6 +908,7 @@ std::ostream &operator<<(
 }
 
 SegmentCleaner::SegmentCleaner(
+  unsigned int store_index,
   config_t config,
   SegmentManagerGroupRef&& sm_group,
   BackrefManager &backref_manager,
@@ -910,7 +916,8 @@ SegmentCleaner::SegmentCleaner(
   rewrite_gen_t max_rewrite_generation,
   bool detailed,
   bool is_cold)
-  : detailed(detailed),
+  : store_index(store_index),
+    detailed(detailed),
     is_cold(is_cold),
     config(config),
     sm_group(std::move(sm_group)),
@@ -956,96 +963,127 @@ void SegmentCleaner::register_metrics()
   metrics.add_group(prefix, {
     sm::make_counter("segments_number",
                     [this] { return segments.get_num_segments(); },
-                    sm::description("the number of segments")),
+                    sm::description("the number of segments"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("segment_size",
                     [this] { return segments.get_segment_size(); },
-                    sm::description("the bytes of a segment")),
+                    sm::description("the bytes of a segment"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("segments_in_journal",
                     [this] { return get_segments_in_journal(); },
-                    sm::description("the number of segments in journal")),
+                    sm::description("the number of segments in journal"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("segments_type_journal",
                     [this] { return segments.get_num_type_journal(); },
-                    sm::description("the number of segments typed journal")),
+                    sm::description("the number of segments typed journal"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("segments_type_ool",
                     [this] { return segments.get_num_type_ool(); },
-                    sm::description("the number of segments typed out-of-line")),
+                    sm::description("the number of segments typed out-of-line"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("segments_open",
                     [this] { return segments.get_num_open(); },
-                    sm::description("the number of open segments")),
+                    sm::description("the number of open segments"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("segments_empty",
                     [this] { return segments.get_num_empty(); },
-                    sm::description("the number of empty segments")),
+                    sm::description("the number of empty segments"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("segments_closed",
                     [this] { return segments.get_num_closed(); },
-                    sm::description("the number of closed segments")),
+                    sm::description("the number of closed segments"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))}),
 
     sm::make_counter("segments_count_open_journal",
                     [this] { return segments.get_count_open_journal(); },
-                    sm::description("the count of open journal segment operations")),
+                    sm::description("the count of open journal segment operations"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("segments_count_open_ool",
                     [this] { return segments.get_count_open_ool(); },
-                    sm::description("the count of open ool segment operations")),
+                    sm::description("the count of open ool segment operations"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("segments_count_release_journal",
                     [this] { return segments.get_count_release_journal(); },
-                    sm::description("the count of release journal segment operations")),
+                    sm::description("the count of release journal segment operations"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("segments_count_release_ool",
                     [this] { return segments.get_count_release_ool(); },
-                    sm::description("the count of release ool segment operations")),
+                    sm::description("the count of release ool segment operations"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("segments_count_close_journal",
                     [this] { return segments.get_count_close_journal(); },
-                    sm::description("the count of close journal segment operations")),
+                    sm::description("the count of close journal segment operations"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("segments_count_close_ool",
                     [this] { return segments.get_count_close_ool(); },
-                    sm::description("the count of close ool segment operations")),
+                    sm::description("the count of close ool segment operations"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))}),
 
     sm::make_counter("total_bytes",
                     [this] { return segments.get_total_bytes(); },
-                    sm::description("the size of the space")),
+                    sm::description("the size of the space"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("available_bytes",
                     [this] { return segments.get_available_bytes(); },
-                    sm::description("the size of the space is available")),
+                    sm::description("the size of the space is available"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("unavailable_unreclaimable_bytes",
                     [this] { return get_unavailable_unreclaimable_bytes(); },
-                    sm::description("the size of the space is unavailable and unreclaimable")),
+                    sm::description("the size of the space is unavailable and unreclaimable"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("unavailable_reclaimable_bytes",
                     [this] { return get_unavailable_reclaimable_bytes(); },
-                    sm::description("the size of the space is unavailable and reclaimable")),
+                    sm::description("the size of the space is unavailable and reclaimable"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("used_bytes", stats.used_bytes,
-                    sm::description("the size of the space occupied by live extents")),
+                    sm::description("the size of the space occupied by live extents"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("unavailable_unused_bytes",
                     [this] { return get_unavailable_unused_bytes(); },
-                    sm::description("the size of the space is unavailable and not alive")),
+                    sm::description("the size of the space is unavailable and not alive"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))}),
 
     sm::make_counter("projected_count", stats.projected_count,
-                   sm::description("the number of projected usage reservations")),
+                   sm::description("the number of projected usage reservations"),
+        {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("projected_used_bytes_sum", stats.projected_used_bytes_sum,
-                   sm::description("the sum of the projected usage in bytes")),
+                   sm::description("the sum of the projected usage in bytes"),
+        {sm::label_instance("shard_store_index", std::to_string(store_index))}),
 
     sm::make_counter("reclaimed_bytes", stats.reclaimed_bytes,
-                    sm::description("rewritten bytes due to reclaim")),
+                    sm::description("rewritten bytes due to reclaim"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("reclaimed_segment_bytes", stats.reclaimed_segment_bytes,
-                    sm::description("rewritten bytes due to reclaim")),
+                    sm::description("rewritten bytes due to reclaim"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("closed_journal_used_bytes", stats.closed_journal_used_bytes,
-                    sm::description("used bytes when close a journal segment")),
+                    sm::description("used bytes when close a journal segment"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("closed_journal_total_bytes", stats.closed_journal_total_bytes,
-                    sm::description("total bytes of closed journal segments")),
+                    sm::description("total bytes of closed journal segments"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("closed_ool_used_bytes", stats.closed_ool_used_bytes,
-                    sm::description("used bytes when close a ool segment")),
+                    sm::description("used bytes when close a ool segment"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("closed_ool_total_bytes", stats.closed_ool_total_bytes,
-                    sm::description("total bytes of closed ool segments")),
+                    sm::description("total bytes of closed ool segments"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))}),
 
     sm::make_gauge("available_ratio",
                    [this] { return segments.get_available_ratio(); },
-                   sm::description("ratio of available space to total space")),
+                   sm::description("ratio of available space to total space"),
+                   {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_gauge("reclaim_ratio",
                    [this] { return get_reclaim_ratio(); },
-                   sm::description("ratio of reclaimable space to unavailable space")),
+                   sm::description("ratio of reclaimable space to unavailable space"),
+                   {sm::label_instance("shard_store_index", std::to_string(store_index))}),
 
     sm::make_histogram("segment_utilization_distribution",
                       [this]() -> seastar::metrics::histogram& {
                         return stats.segment_util;
                       },
-                      sm::description("utilization distribution of all segments"))
+                      sm::description("utilization distribution of all segments"),
+           {sm::label_instance("shard_store_index", std::to_string(store_index))})
   });
 }
 
@@ -1765,10 +1803,12 @@ void SegmentCleaner::print(std::ostream &os, bool is_detailed) const
 }
 
 RBMCleaner::RBMCleaner(
+  unsigned int store_index,
   RBMDeviceGroupRef&& rb_group,
   BackrefManager &backref_manager,
   bool detailed)
-  : detailed(detailed),
+  : store_index(store_index),
+    detailed(detailed),
     rb_group(std::move(rb_group)),
     backref_manager(backref_manager)
 {}
@@ -1962,12 +2002,15 @@ void RBMCleaner::register_metrics()
   metrics.add_group("rbm_cleaner", {
     sm::make_counter("total_bytes",
                     [this] { return get_total_bytes(); },
-                    sm::description("the size of the space")),
+                    sm::description("the size of the space"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("available_bytes",
                     [this] { return get_total_bytes() - get_journal_bytes() - stats.used_bytes; },
-                    sm::description("the size of the space is available")),
+                    sm::description("the size of the space is available"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("used_bytes", stats.used_bytes,
-                    sm::description("the size of the space occupied by live extents")),
+                    sm::description("the size of the space occupied by live extents"),
+         {sm::label_instance("shard_store_index", std::to_string(store_index))})
   });
 }
 
diff --git a/src/crimson/os/seastore/async_cleaner.h b/src/crimson/os/seastore/async_cleaner.h
index 07fc8a81bf73343ddbba0b3015e27bcab4088484..5bd4ba86ba9000b9394d21de78d96e13ec6aa39e 100644 (file)
@@ -527,6 +527,7 @@ public:
   };
 
   JournalTrimmerImpl(
+    unsigned int store_index,
     BackrefManager &backref_manager,
     config_t config,
     backend_type_t type,
@@ -614,12 +615,13 @@ public:
   seastar::future<> trim();
 
   static JournalTrimmerImplRef create(
+      unsigned int store_index,
       BackrefManager &backref_manager,
       config_t config,
       backend_type_t type,
       device_off_t roll_start,
       device_off_t roll_size) {
-    return std::make_unique<JournalTrimmerImpl>(
+    return std::make_unique<JournalTrimmerImpl>(store_index,
         backref_manager, config, type, roll_start, roll_size);
   }
 
@@ -673,7 +675,7 @@ private:
     return std::min(get_max_dirty_bytes_to_trim(),
                    config.rewrite_dirty_bytes_per_cycle);
   }
-  void register_metrics();
+  void register_metrics(unsigned int store_index);
 
   ExtentCallbackInterface *extent_callback = nullptr;
   BackgroundListener *background_callback = nullptr;
@@ -1284,6 +1286,7 @@ public:
   };
 
   SegmentCleaner(
+    unsigned int store_index,
     config_t config,
     SegmentManagerGroupRef&& sm_group,
     BackrefManager &backref_manager,
@@ -1297,6 +1300,7 @@ public:
   }
 
   static SegmentCleanerRef create(
+      unsigned int store_index,
       config_t config,
       SegmentManagerGroupRef&& sm_group,
       BackrefManager &backref_manager,
@@ -1304,7 +1308,7 @@ public:
       rewrite_gen_t max_rewrite_generation,
       bool detailed,
       bool is_cold = false) {
-    return std::make_unique<SegmentCleaner>(
+    return std::make_unique<SegmentCleaner>(store_index,
         config, std::move(sm_group), backref_manager,
         ool_seq_allocator, max_rewrite_generation,
        detailed, is_cold);
@@ -1623,6 +1627,7 @@ private:
     }
   }
 
+  unsigned int store_index;
   const bool detailed;
   const bool is_cold;
   const config_t config;
@@ -1690,15 +1695,17 @@ using RBMCleanerRef = std::unique_ptr<RBMCleaner>;
 class RBMCleaner : public AsyncCleaner {
 public:
   RBMCleaner(
+    unsigned int store_index,
     RBMDeviceGroupRef&& rb_group,
     BackrefManager &backref_manager,
     bool detailed);
 
   static RBMCleanerRef create(
+      unsigned int store_index,
       RBMDeviceGroupRef&& rb_group,
       BackrefManager &backref_manager,
       bool detailed) {
-    return std::make_unique<RBMCleaner>(
+    return std::make_unique<RBMCleaner>(store_index,
       std::move(rb_group), backref_manager, detailed);
   }
 
@@ -1839,6 +1846,7 @@ public:
 private:
   bool equals(const RBMSpaceTracker &other) const;
 
+  unsigned int store_index;
   const bool detailed;
   RBMDeviceGroupRef rb_group;
   BackrefManager &backref_manager;
diff --git a/src/crimson/os/seastore/cache.cc b/src/crimson/os/seastore/cache.cc
index 051a3762b90a9583e90f3f6a1a6d861511838450..b55d13294b583a4cfeafc0a0d8cbb600c262680d 100644 (file)
@@ -29,13 +29,14 @@ SET_SUBSYS(seastore_cache);
 namespace crimson::os::seastore {
 
 Cache::Cache(
-  ExtentPlacementManager &epm)
+  ExtentPlacementManager &epm,
+  unsigned int store_index)
   : epm(epm),
     pinboard(create_extent_pinboard(
       crimson::common::get_conf<Option::size_t>(
        "seastore_cachepin_size_pershard")))
 {
-  register_metrics();
+  register_metrics(store_index);
   segment_providers_by_device_id.resize(DEVICE_ID_MAX, nullptr);
 }
 
@@ -130,7 +131,7 @@ void Cache::dump_contents()
   DEBUG("exit");
 }
 
-void Cache::register_metrics()
+void Cache::register_metrics(unsigned int store_index)
 {
   LOG_PREFIX(Cache::register_metrics);
   DEBUG("");
@@ -147,37 +148,42 @@ void Cache::register_metrics()
   namespace sm = seastar::metrics;
   using src_t = Transaction::src_t;
 
-  std::map<src_t, sm::label_instance> labels_by_src {
-    {src_t::MUTATE, sm::label_instance("src", "MUTATE")},
-    {src_t::READ, sm::label_instance("src", "READ")},
-    {src_t::TRIM_DIRTY, sm::label_instance("src", "TRIM_DIRTY")},
-    {src_t::TRIM_ALLOC, sm::label_instance("src", "TRIM_ALLOC")},
-    {src_t::CLEANER_MAIN, sm::label_instance("src", "CLEANER_MAIN")},
-    {src_t::CLEANER_COLD, sm::label_instance("src", "CLEANER_COLD")},
+  std::map<src_t, std::vector<sm::label_instance>> labels_by_src {
+    {src_t::MUTATE, {sm::label_instance("src", "MUTATE")}},
+    {src_t::READ, {sm::label_instance("src", "READ")}},
+    {src_t::TRIM_DIRTY, {sm::label_instance("src", "TRIM_DIRTY")}},
+    {src_t::TRIM_ALLOC, {sm::label_instance("src", "TRIM_ALLOC")}},
+    {src_t::CLEANER_MAIN, {sm::label_instance("src", "CLEANER_MAIN")}},
+    {src_t::CLEANER_COLD, {sm::label_instance("src", "CLEANER_COLD")}},
   };
   assert(labels_by_src.size() == (std::size_t)src_t::MAX);
 
-  std::map<extent_types_t, sm::label_instance> labels_by_ext {
-    {extent_types_t::ROOT,                sm::label_instance("ext", "ROOT")},
-    {extent_types_t::LADDR_INTERNAL,      sm::label_instance("ext", "LADDR_INTERNAL")},
-    {extent_types_t::LADDR_LEAF,          sm::label_instance("ext", "LADDR_LEAF")},
-    {extent_types_t::DINK_LADDR_LEAF,     sm::label_instance("ext", "DINK_LADDR_LEAF")},
-    {extent_types_t::ROOT_META,           sm::label_instance("ext", "ROOT_META")},
-    {extent_types_t::OMAP_INNER,          sm::label_instance("ext", "OMAP_INNER")},
-    {extent_types_t::OMAP_LEAF,           sm::label_instance("ext", "OMAP_LEAF")},
-    {extent_types_t::ONODE_BLOCK_STAGED,  sm::label_instance("ext", "ONODE_BLOCK_STAGED")},
-    {extent_types_t::COLL_BLOCK,          sm::label_instance("ext", "COLL_BLOCK")},
-    {extent_types_t::OBJECT_DATA_BLOCK,   sm::label_instance("ext", "OBJECT_DATA_BLOCK")},
-    {extent_types_t::RETIRED_PLACEHOLDER, sm::label_instance("ext", "RETIRED_PLACEHOLDER")},
-    {extent_types_t::ALLOC_INFO,         sm::label_instance("ext", "ALLOC_INFO")},
-    {extent_types_t::JOURNAL_TAIL,        sm::label_instance("ext", "JOURNAL_TAIL")},
-    {extent_types_t::TEST_BLOCK,          sm::label_instance("ext", "TEST_BLOCK")},
-    {extent_types_t::TEST_BLOCK_PHYSICAL, sm::label_instance("ext", "TEST_BLOCK_PHYSICAL")},
-    {extent_types_t::BACKREF_INTERNAL,    sm::label_instance("ext", "BACKREF_INTERNAL")},
-    {extent_types_t::BACKREF_LEAF,        sm::label_instance("ext", "BACKREF_LEAF")}
+  std::map<extent_types_t, std::vector<sm::label_instance>> labels_by_ext {
+    {extent_types_t::ROOT,                {sm::label_instance("ext", "ROOT")}},
+    {extent_types_t::LADDR_INTERNAL,      {sm::label_instance("ext", "LADDR_INTERNAL")}},
+    {extent_types_t::LADDR_LEAF,          {sm::label_instance("ext", "LADDR_LEAF")}},
+    {extent_types_t::DINK_LADDR_LEAF,     {sm::label_instance("ext", "DINK_LADDR_LEAF")}},
+    {extent_types_t::ROOT_META,           {sm::label_instance("ext", "ROOT_META")}},
+    {extent_types_t::OMAP_INNER,          {sm::label_instance("ext", "OMAP_INNER")}},
+    {extent_types_t::OMAP_LEAF,           {sm::label_instance("ext", "OMAP_LEAF")}},
+    {extent_types_t::ONODE_BLOCK_STAGED,  {sm::label_instance("ext", "ONODE_BLOCK_STAGED")}},
+    {extent_types_t::COLL_BLOCK,          {sm::label_instance("ext", "COLL_BLOCK")}},
+    {extent_types_t::OBJECT_DATA_BLOCK,   {sm::label_instance("ext", "OBJECT_DATA_BLOCK")}},
+    {extent_types_t::RETIRED_PLACEHOLDER, {sm::label_instance("ext", "RETIRED_PLACEHOLDER")}},
+    {extent_types_t::ALLOC_INFO,         {sm::label_instance("ext", "ALLOC_INFO")}},
+    {extent_types_t::JOURNAL_TAIL,        {sm::label_instance("ext", "JOURNAL_TAIL")}},
+    {extent_types_t::TEST_BLOCK,          {sm::label_instance("ext", "TEST_BLOCK")}},
+    {extent_types_t::TEST_BLOCK_PHYSICAL, {sm::label_instance("ext", "TEST_BLOCK_PHYSICAL")}},
+    {extent_types_t::BACKREF_INTERNAL,    {sm::label_instance("ext", "BACKREF_INTERNAL")}},
+    {extent_types_t::BACKREF_LEAF,        {sm::label_instance("ext", "BACKREF_LEAF")}}
   };
   assert(labels_by_ext.size() == (std::size_t)extent_types_t::NONE);
-
+  for (auto& [src, src_label] : labels_by_src) {
+    src_label.push_back(sm::label_instance("shard_store_index", std::to_string(store_index)));
+  }
+  for (auto& [ext, ext_label] : labels_by_ext) {
+    ext_label.push_back(sm::label_instance("shard_store_index", std::to_string(store_index)));
+  }
   /*
    * trans_created
    */
@@ -206,34 +212,40 @@ void Cache::register_metrics()
         [this] {
           return stats.access.get_cache_access();
         },
-        sm::description("total number of cache accesses")
+        sm::description("total number of cache accesses"),
+        {sm::label_instance("shard_store_index", std::to_string(store_index))}
       ),
       sm::make_counter(
         "cache_hit",
         [this] {
           return stats.access.get_cache_hit();
         },
-        sm::description("total number of cache hits")
+        sm::description("total number of cache hits"),
+        {sm::label_instance("shard_store_index", std::to_string(store_index))}
       ),
       sm::make_counter(
         "refresh_parent_total",
         cursor_stats.num_refresh_parent_total,
-        sm::description("total number of refreshed cursors")
+        sm::description("total number of refreshed cursors"),
+        {sm::label_instance("shard_store_index", std::to_string(store_index))}
       ),
       sm::make_counter(
         "refresh_invalid_parent",
         cursor_stats.num_refresh_invalid_parent,
-        sm::description("total number of refreshed cursors with invalid parents")
+        sm::description("total number of refreshed cursors with invalid parents"),
+        {sm::label_instance("shard_store_index", std::to_string(store_index))}
       ),
       sm::make_counter(
         "refresh_unviewable_parent",
         cursor_stats.num_refresh_unviewable_parent,
-        sm::description("total number of refreshed cursors with unviewable parents")
+        sm::description("total number of refreshed cursors with unviewable parents"),
+        {sm::label_instance("shard_store_index", std::to_string(store_index))}
       ),
       sm::make_counter(
         "refresh_modified_viewable_parent",
         cursor_stats.num_refresh_modified_viewable_parent,
-        sm::description("total number of refreshed cursors with viewable but modified parents")
+        sm::description("total number of refreshed cursors with viewable but modified parents"),
+        {sm::label_instance("shard_store_index", std::to_string(store_index))}
       ),
     }
   );
@@ -257,6 +269,8 @@ void Cache::register_metrics()
       auto& efforts = get_by_src(stats.invalidated_efforts_by_src, src);
       for (auto& [ext, ext_label] : labels_by_ext) {
         auto& counter = get_by_ext(efforts.num_trans_invalidated, ext);
+        std::vector<sm::label_instance> merged_labels = src_label;
+        merged_labels.insert(merged_labels.end(), ext_label.begin(), ext_label.end());
         metrics.add_group(
           "cache",
           {
@@ -264,7 +278,7 @@ void Cache::register_metrics()
               "trans_invalidated_by_extent",
               counter,
               sm::description("total number of transactions invalidated by extents"),
-              {src_label, ext_label}
+              merged_labels
             ),
           }
         );
@@ -273,6 +287,8 @@ void Cache::register_metrics()
       if (src == src_t::READ) {
         // read transaction won't have non-read efforts
         auto read_effort_label = effort_label("READ");
+        std::vector<sm::label_instance> merged_labels = src_label;
+        merged_labels.push_back(read_effort_label);
         metrics.add_group(
           "cache",
           {
@@ -280,13 +296,13 @@ void Cache::register_metrics()
               "invalidated_extents",
               efforts.read.num,
               sm::description("extents of invalidated transactions"),
-              {src_label, read_effort_label}
+              merged_labels
             ),
             sm::make_counter(
               "invalidated_extent_bytes",
               efforts.read.bytes,
               sm::description("extent bytes of invalidated transactions"),
-              {src_label, read_effort_label}
+              merged_labels
             ),
           }
         );
@@ -309,6 +325,8 @@ void Cache::register_metrics()
             return efforts.fresh_ool_written;
           }
         }();
+        std::vector<sm::label_instance> merged_labels = src_label;
+        merged_labels.push_back(effort_label(effort_name));
         metrics.add_group(
           "cache",
           {
@@ -316,13 +334,13 @@ void Cache::register_metrics()
               "invalidated_extents",
               effort.num,
               sm::description("extents of invalidated transactions"),
-              {src_label, effort_label(effort_name)}
+              merged_labels
             ),
             sm::make_counter(
               "invalidated_extent_bytes",
               effort.bytes,
               sm::description("extent bytes of invalidated transactions"),
-              {src_label, effort_label(effort_name)}
+              merged_labels
             ),
           }
         );
@@ -428,7 +446,10 @@ void Cache::register_metrics()
             return efforts.fresh_ool_by_ext;
           }
         }();
         for (auto& [ext, ext_label] : labels_by_ext) {
+          std::vector<sm::label_instance> merged_labels = src_label;
+          merged_labels.push_back(effort_label(effort_name));
+          merged_labels.insert(merged_labels.end(), ext_label.begin(), ext_label.end());
           auto& effort = get_by_ext(effort_by_ext, ext);
           metrics.add_group(
             "cache",
@@ -437,13 +458,13 @@ void Cache::register_metrics()
                 "committed_extents",
                 effort.num,
                 sm::description("extents of committed transactions"),
-                {src_label, effort_label(effort_name), ext_label}
+                merged_labels
               ),
               sm::make_counter(
                 "committed_extent_bytes",
                 effort.bytes,
                 sm::description("extent bytes of committed transactions"),
-                {src_label, effort_label(effort_name), ext_label}
+                merged_labels
               ),
             }
           );
@@ -452,6 +473,8 @@ void Cache::register_metrics()
 
       auto& delta_by_ext = efforts.delta_bytes_by_ext;
       for (auto& [ext, ext_label] : labels_by_ext) {
+        std::vector<sm::label_instance> merged_labels = src_label;
+        merged_labels.insert(merged_labels.end(), ext_label.begin(), ext_label.end());
         auto& value = get_by_ext(delta_by_ext, ext);
         metrics.add_group(
           "cache",
@@ -460,7 +483,7 @@ void Cache::register_metrics()
               "committed_delta_bytes",
               value,
               sm::description("delta bytes of committed transactions"),
-              {src_label, ext_label}
+              merged_labels
             ),
           }
         );
@@ -474,17 +497,20 @@ void Cache::register_metrics()
         sm::make_counter(
           "trans_read_successful",
           stats.success_read_efforts.num_trans,
-          sm::description("total number of successful read transactions")
+          sm::description("total number of successful read transactions"),
+          {sm::label_instance("shard_store_index", std::to_string(store_index))}
         ),
         sm::make_counter(
           "successful_read_extents",
           stats.success_read_efforts.read.num,
-          sm::description("extents of successful read transactions")
+          sm::description("extents of successful read transactions"),
+          {sm::label_instance("shard_store_index", std::to_string(store_index))}
         ),
         sm::make_counter(
           "successful_read_extent_bytes",
           stats.success_read_efforts.read.bytes,
-          sm::description("extent bytes of successful read transactions")
+          sm::description("extent bytes of successful read transactions"),
+          {sm::label_instance("shard_store_index", std::to_string(store_index))}
         ),
       }
     );
@@ -503,31 +529,35 @@ void Cache::register_metrics()
         [this] {
           return extents_index.size();
         },
-        sm::description("total number of cached extents")
+        sm::description("total number of cached extents"),
+        {sm::label_instance("shard_store_index", std::to_string(store_index))}
       ),
       sm::make_counter(
         "cached_extent_bytes",
         [this] {
           return extents_index.get_bytes();
         },
-        sm::description("total bytes of cached extents")
+        sm::description("total bytes of cached extents"),
+        {sm::label_instance("shard_store_index", std::to_string(store_index))}
       ),
       sm::make_counter(
         "dirty_extents",
         [this] {
           return dirty.size();
         },
-        sm::description("total number of dirty extents")
+        sm::description("total number of dirty extents"),
+        {sm::label_instance("shard_store_index", std::to_string(store_index))}
       ),
       sm::make_counter(
         "dirty_extent_bytes",
         stats.dirty_bytes,
-        sm::description("total bytes of dirty extents")
+        sm::description("total bytes of dirty extents"),
+        {sm::label_instance("shard_store_index", std::to_string(store_index))}
       ),
     }
   );
 
-  pinboard->register_metrics();
+  pinboard->register_metrics(store_index);
 
   /**
    * tree stats
@@ -537,7 +567,7 @@ void Cache::register_metrics()
   auto omap_label = tree_label("OMAP");
   auto lba_label = tree_label("LBA");
   auto backref_label = tree_label("BACKREF");
-  auto register_tree_metrics = [&labels_by_src, &onode_label, &omap_label, this](
+  auto register_tree_metrics = [&labels_by_src, &onode_label, &omap_label, store_index, this](
       const sm::label_instance& tree_label,
       uint64_t& tree_depth,
       int64_t& tree_extents_num,
@@ -550,13 +580,13 @@ void Cache::register_metrics()
           "tree_depth",
           tree_depth,
           sm::description("the depth of tree"),
-          {tree_label}
+          {tree_label, sm::label_instance("shard_store_index", std::to_string(store_index))}
         ),
        sm::make_counter(
          "tree_extents_num",
          tree_extents_num,
          sm::description("num of extents of the tree"),
-         {tree_label}
+         {tree_label, sm::label_instance("shard_store_index", std::to_string(store_index))}
        )
       }
     );
@@ -573,6 +603,8 @@ void Cache::register_metrics()
       }
       auto& committed_efforts = get_by_src(committed_tree_efforts, src);
       auto& invalidated_efforts = get_by_src(invalidated_tree_efforts, src);
+      std::vector<sm::label_instance> merged_labels = {tree_label}; // start from tree_label, then append per-src labels
+      merged_labels.insert(merged_labels.end(), src_label.begin(), src_label.end());
       metrics.add_group(
         "cache",
         {
@@ -580,37 +612,37 @@ void Cache::register_metrics()
             "tree_inserts_committed",
             committed_efforts.num_inserts,
             sm::description("total number of committed insert operations"),
-            {tree_label, src_label}
+            merged_labels
           ),
           sm::make_counter(
             "tree_erases_committed",
             committed_efforts.num_erases,
             sm::description("total number of committed erase operations"),
-            {tree_label, src_label}
+            merged_labels
           ),
           sm::make_counter(
             "tree_updates_committed",
             committed_efforts.num_updates,
             sm::description("total number of committed update operations"),
-            {tree_label, src_label}
+            merged_labels
           ),
           sm::make_counter(
             "tree_inserts_invalidated",
             invalidated_efforts.num_inserts,
             sm::description("total number of invalidated insert operations"),
-            {tree_label, src_label}
+            merged_labels
           ),
           sm::make_counter(
             "tree_erases_invalidated",
             invalidated_efforts.num_erases,
             sm::description("total number of invalidated erase operations"),
-            {tree_label, src_label}
+            merged_labels
           ),
           sm::make_counter(
             "tree_updates_invalidated",
             invalidated_efforts.num_updates,
             sm::description("total number of invalidated update operations"),
-            {tree_label, src_label}
+            merged_labels
           ),
         }
       );
@@ -675,7 +707,8 @@ void Cache::register_metrics()
             "trans_srcs_invalidated",
             stats.trans_conflicts_by_srcs[srcs_index - 1],
             sm::description("total number conflicted transactions by src pair"),
-            {srcs_label(oss.str())}
+            {srcs_label(oss.str()),
+             sm::label_instance("shard_store_index", std::to_string(store_index))}
           ),
         }
       );
@@ -695,7 +728,8 @@ void Cache::register_metrics()
           "trans_srcs_invalidated",
           stats.trans_conflicts_by_unknown[srcs_index - 1],
           sm::description("total number conflicted transactions by src pair"),
-          {srcs_label(oss.str())}
+          {srcs_label(oss.str()),
+           sm::label_instance("shard_store_index", std::to_string(store_index))}
         ),
       }
     );
@@ -712,24 +746,28 @@ void Cache::register_metrics()
         [this] {
           return stats.trim_rewrites.get_num_rewrites();
         },
-        sm::description("total number of rewrite-dirty extents")
+        sm::description("total number of rewrite-dirty extents"),
+        {sm::label_instance("shard_store_index", std::to_string(store_index))}
       ),
       sm::make_counter(
         "version_sum_dirty",
         stats.trim_rewrites.dirty_version,
-        sm::description("sum of the version from rewrite-dirty extents")
+        sm::description("sum of the version from rewrite-dirty extents"),
+        {sm::label_instance("shard_store_index", std::to_string(store_index))}
       ),
       sm::make_counter(
         "version_count_reclaim",
         [this] {
           return stats.reclaim_rewrites.get_num_rewrites();
         },
-        sm::description("total number of rewrite-reclaim extents")
+        sm::description("total number of rewrite-reclaim extents"),
+        {sm::label_instance("shard_store_index", std::to_string(store_index))}
       ),
       sm::make_counter(
         "version_sum_reclaim",
         stats.reclaim_rewrites.dirty_version,
-        sm::description("sum of the version from rewrite-reclaim extents")
+        sm::description("sum of the version from rewrite-reclaim extents"),
+        {sm::label_instance("shard_store_index", std::to_string(store_index))}
       ),
     }
   );
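Editor's aside (not part of the patch): Cache::register_metrics combines several label dimensions per metric, which is why the maps above switch from a single sm::label_instance to std::vector<sm::label_instance> and why each call site builds a merged_labels vector. A minimal sketch of that merge follows, assuming the seastar::metrics types used above; the helper name merge_labels is hypothetical.

#include <seastar/core/metrics.hh>
#include <vector>

namespace sm = seastar::metrics;

// Build the label set for one (src, ext) metric: the per-src labels (which
// already carry shard_store_index after the loops above) plus the per-ext labels.
inline std::vector<sm::label_instance> merge_labels(
    const std::vector<sm::label_instance>& src_labels,
    const std::vector<sm::label_instance>& ext_labels) {
  std::vector<sm::label_instance> merged = src_labels;  // fresh copy per metric
  merged.insert(merged.end(), ext_labels.begin(), ext_labels.end());
  return merged;
}

Building the vector fresh for every metric matters: reusing one vector across loop iterations would accumulate labels from earlier extent types.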
diff --git a/src/crimson/os/seastore/cache.h b/src/crimson/os/seastore/cache.h
index 0a0c65819f48bc6d4266d0e5e0f05348d6145ac7..91818345edbbda26d65bf5b9bacf98e64c4da4f6 100644 (file)
@@ -106,7 +106,7 @@ class SegmentProvider;
 class Cache : public ExtentTransViewRetriever,
              public RetiredExtentPlaceholderInvalidater {
 public:
-  Cache(ExtentPlacementManager &epm);
+  Cache(ExtentPlacementManager &epm, unsigned int store_index);
   ~Cache();
 
   cache_stats_t get_stats(bool report_detail, double seconds) const;
@@ -1818,7 +1818,7 @@ private:
   }
 
   seastar::metrics::metric_group metrics;
-  void register_metrics();
+  void register_metrics(unsigned int store_index);
 
   void apply_backref_mset(
       backref_entry_refs_t& backref_entries) {
diff --git a/src/crimson/os/seastore/extent_pinboard.cc b/src/crimson/os/seastore/extent_pinboard.cc
index 1d4cc84c8009f1da12c8a47970a2de0a6abad6d8..483707eba9d70de2cd577f576b036677ae14e789 100644 (file)
@@ -296,7 +296,7 @@ public:
     return lru.get_current_num_extents();
   }
 
-  void register_metrics() final {
+  void register_metrics(unsigned int store_index) final {
     namespace sm = seastar::metrics;
     metrics.add_group(
       "cache",
@@ -306,22 +306,26 @@ public:
           [this] {
             return get_current_size_bytes();
           },
-          sm::description("total bytes pinned by the lru")
+          sm::description("total bytes pinned by the lru"),
+          {sm::label_instance("shard_store_index", std::to_string(store_index))}
         ),
         sm::make_counter(
           "lru_num_extents",
           [this] {
             return get_current_num_extents();
           },
-          sm::description("total extents pinned by the lru")
+          sm::description("total extents pinned by the lru"),
+          {sm::label_instance("shard_store_index", std::to_string(store_index))}
         ),
         sm::make_counter(
           "lru_hit", hit,
-          sm::description("total count of the extents that are linked to lru when touching them")
+          sm::description("total count of the extents that are linked to lru when touching them"),
+          {sm::label_instance("shard_store_index", std::to_string(store_index))}
         ),
         sm::make_counter(
           "lru_miss", miss,
-          sm::description("total count of the extents that are not linked to lru when touching them")
+          sm::description("total count of the extents that are not linked to lru when touching them"),
+          {sm::label_instance("shard_store_index", std::to_string(store_index))}
         ),
       }
     );
@@ -505,7 +509,7 @@ public:
     return warm_in.get_current_num_extents() + hot.get_current_num_extents();
   }
 
-  void register_metrics() final;
+  void register_metrics(unsigned int store_index) final;
 
   void get_stats(
     cache_stats_t &stats,
@@ -802,7 +806,7 @@ void ExtentPinboardTwoQ::get_stats(
   last_hits = overall_hits;
 }
 
-void ExtentPinboardTwoQ::register_metrics() {
+void ExtentPinboardTwoQ::register_metrics(unsigned int store_index) {
   namespace sm = seastar::metrics;
   metrics.add_group(
     "cache",
@@ -812,36 +816,42 @@ void ExtentPinboardTwoQ::register_metrics() {
         [this] {
           return warm_in.get_current_size_bytes();
         },
-        sm::description("total bytes pinned by the 2q warm_in queue")
+        sm::description("total bytes pinned by the 2q warm_in queue"),
+        {sm::label_instance("shard_store_index", std::to_string(store_index))}
       ),
       sm::make_counter(
         "2q_warm_in_num_extents",
         [this] {
           return warm_in.get_current_num_extents();
         },
-        sm::description("total extents pinned by the 2q warm_in queue")
+        sm::description("total extents pinned by the 2q warm_in queue"),
+        {sm::label_instance("shard_store_index", std::to_string(store_index))}
       ),
       sm::make_counter(
         "2q_hot_size_bytes",
         [this] {
           return hot.get_current_size_bytes();
         },
-        sm::description("total bytes pinned by the 2q hot queue")
+        sm::description("total bytes pinned by the 2q hot queue"),
+        {sm::label_instance("shard_store_index", std::to_string(store_index))}
       ),
       sm::make_counter(
         "2q_hot_num_extents",
         [this] {
           return hot.get_current_num_extents();
         },
-        sm::description("total extents pinned by the 2q hot queue")
+        sm::description("total extents pinned by the 2q hot queue"),
+        {sm::label_instance("shard_store_index", std::to_string(store_index))}
       ),
       sm::make_counter(
         "2q_hit", hit,
-        sm::description("total count of the extents that are linked to 2Q when touching them")
+        sm::description("total count of the extents that are linked to 2Q when touching them"),
+        {sm::label_instance("shard_store_index", std::to_string(store_index))}
       ),
       sm::make_counter(
         "2q_miss", miss,
-        sm::description("total count of the extents that are not linked to 2Q when touching them")
+        sm::description("total count of the extents that are not linked to 2Q when touching them"),
+        {sm::label_instance("shard_store_index", std::to_string(store_index))}
       ),
     }
   );
diff --git a/src/crimson/os/seastore/extent_pinboard.h b/src/crimson/os/seastore/extent_pinboard.h
index 9f6935d6cefb01ad59bd548146afcdfc8107ebe6..2a891d5c3b5e517048618bf000674b27e7df442a 100644 (file)
@@ -10,7 +10,7 @@ namespace crimson::os::seastore {
 
 struct ExtentPinboard {
   virtual ~ExtentPinboard() = default;
-  virtual void register_metrics() = 0;
+  virtual void register_metrics(unsigned int store_index) = 0;
   virtual void move_to_top(
     CachedExtent &extent,
     const Transaction::src_t *p_src,
diff --git a/src/crimson/os/seastore/extent_placement_manager.cc b/src/crimson/os/seastore/extent_placement_manager.cc
index 4c9c9cb91c6163b15c1f32ec95d8428467993d59..4683b96e38c3d7d7a32f4fd54f0cfae971489fe3 100644 (file)
@@ -12,11 +12,13 @@ SET_SUBSYS(seastore_epm);
 namespace crimson::os::seastore {
 
 SegmentedOolWriter::SegmentedOolWriter(
+  unsigned int store_index,
   data_category_t category,
   rewrite_gen_t gen,
   SegmentProvider& sp,
   SegmentSeqAllocator &ssa)
-  : segment_allocator(nullptr, category, gen, sp, ssa),
+  : store_index(store_index),
+    segment_allocator(nullptr, category, gen, sp, ssa),
     record_submitter(crimson::common::get_conf<uint64_t>(
                        "seastore_journal_iodepth_limit"),
                      crimson::common::get_conf<uint64_t>(
@@ -215,7 +217,7 @@ void ExtentPlacementManager::init(
     // DATA
     data_writers_by_gen.resize(num_writers, nullptr);
     for (rewrite_gen_t gen = OOL_GENERATION; gen < hot_tier_generations; ++gen) {
-      writer_refs.emplace_back(std::make_unique<SegmentedOolWriter>(
+      writer_refs.emplace_back(std::make_unique<SegmentedOolWriter>(store_index,
            data_category_t::DATA, gen, *segment_cleaner,
             *ool_segment_seq_allocator));
       data_writers_by_gen[generation_to_writer(gen)] = writer_refs.back().get();
@@ -224,7 +226,7 @@ void ExtentPlacementManager::init(
     // METADATA
     md_writers_by_gen.resize(num_writers, {});
     for (rewrite_gen_t gen = OOL_GENERATION; gen < hot_tier_generations; ++gen) {
-      writer_refs.emplace_back(std::make_unique<SegmentedOolWriter>(
+      writer_refs.emplace_back(std::make_unique<SegmentedOolWriter>(store_index,
            data_category_t::METADATA, gen, *segment_cleaner,
             *ool_segment_seq_allocator));
       md_writers_by_gen[generation_to_writer(gen)] = writer_refs.back().get();
@@ -256,14 +258,14 @@ void ExtentPlacementManager::init(
   if (cold_segment_cleaner) {
     // Cold DATA Segments
     for (rewrite_gen_t gen = hot_tier_generations; gen <= dynamic_max_rewrite_generation; ++gen) {
-      writer_refs.emplace_back(std::make_unique<SegmentedOolWriter>(
+      writer_refs.emplace_back(std::make_unique<SegmentedOolWriter>(store_index,
             data_category_t::DATA, gen, *cold_segment_cleaner,
             *ool_segment_seq_allocator));
       data_writers_by_gen[generation_to_writer(gen)] = writer_refs.back().get();
     }
     for (rewrite_gen_t gen = hot_tier_generations; gen <= dynamic_max_rewrite_generation; ++gen) {
       // Cold METADATA Segments
-      writer_refs.emplace_back(std::make_unique<SegmentedOolWriter>(
+      writer_refs.emplace_back(std::make_unique<SegmentedOolWriter>(store_index,
             data_category_t::METADATA, gen, *cold_segment_cleaner,
             *ool_segment_seq_allocator));
       md_writers_by_gen[generation_to_writer(gen)] = writer_refs.back().get();
@@ -597,14 +599,14 @@ void ExtentPlacementManager::BackgroundProcess::log_state(const char *caller) co
   }
 }
 
-ExtentPlacementManager::mount_ret ExtentPlacementManager::BackgroundProcess::mount() {
+ExtentPlacementManager::mount_ret ExtentPlacementManager::BackgroundProcess::mount(unsigned int store_index) {
   LOG_PREFIX(BackgroundProcess::mount);
   DEBUG("start");
   ceph_assert(state == state_t::STOP);
   state = state_t::MOUNT;
   trimmer->reset();
   stats = {};
-  register_metrics();
+  register_metrics(store_index);
   DEBUG("mounting main cleaner");
   co_await main_cleaner->mount();
   if (has_cold_tier()) {
@@ -1006,26 +1008,34 @@ ExtentPlacementManager::BackgroundProcess::do_background_cycle()
   }
 }
 
-void ExtentPlacementManager::BackgroundProcess::register_metrics()
+void ExtentPlacementManager::BackgroundProcess::register_metrics(unsigned int store_index)
 {
   namespace sm = seastar::metrics;
   metrics.add_group("background_process", {
     sm::make_counter("io_count", stats.io_count,
-                     sm::description("the sum of IOs")),
+                     sm::description("the sum of IOs"),
+                     {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("io_blocked_count", stats.io_blocked_count,
-                     sm::description("IOs that are blocked by gc")),
+                     sm::description("IOs that are blocked by gc"),
+                     {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("io_blocked_count_trim", stats.io_blocked_count_trim,
-                     sm::description("IOs that are blocked by trimming")),
+                     sm::description("IOs that are blocked by trimming"),
+                     {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("io_retried_blocked_count_clean", stats.io_blocked_count_clean,
-                     sm::description("Retried IOs that are blocked by cleaning")),
+                     sm::description("Retried IOs that are blocked by cleaning"),
+                     {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("io_retried_blocked_count_trim", stats.io_blocked_count_trim,
-                     sm::description("Retried IOs that are blocked by trimming")),
+                     sm::description("Retried IOs that are blocked by trimming"),
+                     {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("io_blocked_count_clean", stats.io_blocked_count_clean,
-                     sm::description("IOs that are blocked by cleaning")),
+                     sm::description("IOs that are blocked by cleaning"),
+                     {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("io_blocked_sum", stats.io_blocked_sum,
-                     sm::description("the sum of blocking IOs")),
+                     sm::description("the sum of blocking IOs"),
+                     {sm::label_instance("shard_store_index", std::to_string(store_index))}),
     sm::make_counter("io_blocked_time", stats.io_blocked_time,
-                     sm::description("the sum of the time(ms) in which IOs are blocked"))
+                     sm::description("the sum of the time(ms) in which IOs are blocked"),
+                     {sm::label_instance("shard_store_index", std::to_string(store_index))})
   });
 }
 
diff --git a/src/crimson/os/seastore/extent_placement_manager.h b/src/crimson/os/seastore/extent_placement_manager.h
index 373b1996325c571691a821f750fe92b2e7b99743..ea02054df4f9f7e14fda99e6e6c968d3a32d4509 100644 (file)
@@ -69,7 +69,8 @@ using ExtentOolWriterRef = std::unique_ptr<ExtentOolWriter>;
  */
 class SegmentedOolWriter : public ExtentOolWriter {
 public:
-  SegmentedOolWriter(data_category_t category,
+  SegmentedOolWriter(unsigned int store_index,
+                     data_category_t category,
                      rewrite_gen_t gen,
                      SegmentProvider &sp,
                      SegmentSeqAllocator &ssa);
@@ -83,7 +84,7 @@ public:
   }
 
   open_ertr::future<> open() final {
-    return record_submitter.open(false).discard_result();
+    return record_submitter.open(store_index, false).discard_result();
   }
 
   alloc_write_iertr::future<> alloc_write_ool_extents(
@@ -122,6 +123,7 @@ private:
     std::list<LogicalCachedExtentRef> &&extents,
     bool with_atomic_roll_segment=false);
 
+  unsigned int store_index;
   journal::SegmentAllocator segment_allocator;
   journal::RecordSubmitter record_submitter;
   seastar::gate write_guard;
@@ -269,10 +271,12 @@ class ExtentPlacementManager {
 public:
   ExtentPlacementManager(
     rewrite_gen_t hot_tier_generations,
-    rewrite_gen_t cold_tier_generations)
+    rewrite_gen_t cold_tier_generations,
+    unsigned int store_index)
     : hot_tier_generations(hot_tier_generations),
       cold_tier_generations(cold_tier_generations),
       dynamic_max_rewrite_generation(cold_tier_generations),
+      store_index(store_index),
       ool_segment_seq_allocator(
           std::make_unique<SegmentSeqAllocator>(segment_type_t::OOL)),
       max_data_allocation_size(crimson::common::get_conf<Option::size_t>(
@@ -331,7 +335,7 @@ public:
       crimson::ct_error::input_output_error>;
   using mount_ret = mount_ertr::future<>;
   mount_ret mount() {
-    return background_process.mount();
+    return background_process.mount(store_index);
   }
 
   using open_ertr = ExtentOolWriter::open_ertr;
@@ -764,7 +768,7 @@ private:
       return stat;
     }
 
-    ExtentPlacementManager::mount_ret mount();
+    ExtentPlacementManager::mount_ret mount(unsigned int store_index);
 
     void start_scan_space() {
       ceph_assert(state == state_t::MOUNT);
@@ -1073,7 +1077,7 @@ private:
 
     seastar::future<> do_background_cycle();
 
-    void register_metrics();
+    void register_metrics(unsigned int store_index);
 
     struct {
       uint64_t io_blocking_num = 0;
@@ -1121,6 +1125,7 @@ private:
   const rewrite_gen_t cold_tier_generations = NULL_GENERATION;
   rewrite_gen_t dynamic_max_rewrite_generation = NULL_GENERATION;
   BackgroundProcess background_process;
+  unsigned int store_index = 0;
   // TODO: drop once paddr->journal_seq_t is introduced
   SegmentSeqAllocatorRef ool_segment_seq_allocator;
   extent_len_t max_data_allocation_size = 0;
diff --git a/src/crimson/os/seastore/journal.cc b/src/crimson/os/seastore/journal.cc
index 91323de333ba5787227c979992760f65a48511a6..dc098f58bbd1afe90884f18aded2cecb028488f4 100644 (file)
@@ -8,18 +8,20 @@
 namespace crimson::os::seastore::journal {
 
 JournalRef make_segmented(
+  unsigned int store_index,
   SegmentProvider &provider,
   JournalTrimmer &trimmer)
 {
-  return std::make_unique<SegmentedJournal>(provider, trimmer);
+  return std::make_unique<SegmentedJournal>(store_index, provider, trimmer);
 }
 
 JournalRef make_circularbounded(
+  unsigned int store_index,
   JournalTrimmer &trimmer,
   crimson::os::seastore::random_block_device::RBMDevice* device,
   std::string path)
 {
-  return std::make_unique<CircularBoundedJournal>(trimmer, device, path);
+  return std::make_unique<CircularBoundedJournal>(store_index, trimmer, device, path);
 }
 
 }
diff --git a/src/crimson/os/seastore/journal.h b/src/crimson/os/seastore/journal.h
index 3e9beaa248d03b7e747908bee1b6f5e056b4b9e0..ef28436c5e3dd9c6da732a736707f8d561bad2c6 100644 (file)
@@ -112,10 +112,12 @@ using JournalRef = std::unique_ptr<Journal>;
 namespace journal {
 
 JournalRef make_segmented(
+  unsigned int store_index,
   SegmentProvider &provider,
   JournalTrimmer &trimmer);
 
 JournalRef make_circularbounded(
+  unsigned int store_index,
   JournalTrimmer &trimmer,
   crimson::os::seastore::random_block_device::RBMDevice* device,
   std::string path);
diff --git a/src/crimson/os/seastore/journal/circular_bounded_journal.cc b/src/crimson/os/seastore/journal/circular_bounded_journal.cc
index a13be09148e16255e9f3d97b93f7b362ae447044..115db64e3648dffcbe5056332dcf4b8797f82c44 100644 (file)
@@ -15,10 +15,12 @@ SET_SUBSYS(seastore_journal);
 namespace crimson::os::seastore::journal {
 
 CircularBoundedJournal::CircularBoundedJournal(
+    unsigned int store_index,
     JournalTrimmer &trimmer,
     RBMDevice* device,
     const std::string &path)
-  : trimmer(trimmer), path(path),
+  : store_index(store_index),
+    trimmer(trimmer), path(path),
   cjs(device),
   record_submitter(crimson::common::get_conf<uint64_t>(
       "seastore_journal_iodepth_limit"),
@@ -36,7 +38,7 @@ CircularBoundedJournal::CircularBoundedJournal(
 CircularBoundedJournal::open_for_mkfs_ret
 CircularBoundedJournal::open_for_mkfs()
 {
-  return record_submitter.open(true
+  return record_submitter.open(store_index, true
   ).safe_then([this](auto ret) {
     return open_for_mkfs_ret(
       open_for_mkfs_ertr::ready_future_marker{},
@@ -47,7 +49,7 @@ CircularBoundedJournal::open_for_mkfs()
 CircularBoundedJournal::open_for_mount_ret
 CircularBoundedJournal::open_for_mount()
 {
-  return record_submitter.open(false
+  return record_submitter.open(store_index, false
   ).safe_then([this](auto ret) {
     return open_for_mount_ret(
       open_for_mount_ertr::ready_future_marker{},
diff --git a/src/crimson/os/seastore/journal/circular_bounded_journal.h b/src/crimson/os/seastore/journal/circular_bounded_journal.h
index cd8c7a1a84cfb9ceab44b7be05742c460193a0f3..925fe171d9b2baf28c9454c851bf7d8d8895ff61 100644 (file)
@@ -55,6 +55,7 @@ constexpr uint64_t DEFAULT_BLOCK_SIZE = 4096;
 class CircularBoundedJournal : public Journal, RecordScanner {
 public:
   CircularBoundedJournal(
+      unsigned int store_index,
       JournalTrimmer &trimmer, RBMDevice* device, const std::string &path);
   ~CircularBoundedJournal() {}
 
@@ -203,6 +204,7 @@ public:
   }
 
 private:
+  unsigned int store_index;
   JournalTrimmer &trimmer;
   std::string path;
   WritePipeline *write_pipeline = nullptr;
diff --git a/src/crimson/os/seastore/journal/record_submitter.cc b/src/crimson/os/seastore/journal/record_submitter.cc
index acff722f07caae71efadcd4dcd075a4e5b26f6bb..3b0f91c5195c5cabde5dfad4e669d5a71053b738 100644 (file)
@@ -389,10 +389,10 @@ RecordSubmitter::submit(
 }
 
 RecordSubmitter::open_ret
-RecordSubmitter::open(bool is_mkfs)
+RecordSubmitter::open(unsigned int store_index, bool is_mkfs)
 {
   return journal_allocator.open(is_mkfs
-  ).safe_then([this](journal_seq_t ret) {
+  ).safe_then([this, store_index](journal_seq_t ret) {
     LOG_PREFIX(RecordSubmitter::open);
     DEBUG("{} register metrics", get_name());
     stats = {};
@@ -400,6 +400,8 @@ RecordSubmitter::open(bool is_mkfs)
     namespace sm = seastar::metrics;
     std::vector<sm::label_instance> label_instances;
     label_instances.push_back(sm::label_instance("submitter", get_name()));
+    label_instances.push_back(sm::label_instance("shard_store_index", std::to_string(store_index)));
+
     metrics.add_group(
       "journal",
       {
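RecordSubmitter only builds its "journal" metric group when the journal is opened, which is why store_index is now passed through open() instead of being fixed at construction. Every counter in the group ends up carrying two labels, the submitter name and the store index, so two journals opened on the same reactor stay distinguishable. A self-contained sketch of that label assembly, assuming Seastar's metrics API; the single counter here is an illustrative placeholder, while the real group registers a much larger set with the same labels:

    // Minimal sketch of the two-label pattern used above.
    // "records_submitted" is a placeholder counter name.
    #include <seastar/core/metrics.hh>
    #include <string>
    #include <vector>

    namespace sm = seastar::metrics;

    struct submitter_metrics_example {
      uint64_t records_submitted = 0;
      sm::metric_group metrics;

      void register_metrics(const std::string &submitter_name,
                            unsigned int store_index)
      {
        std::vector<sm::label_instance> labels;
        labels.push_back(sm::label_instance("submitter", submitter_name));
        labels.push_back(sm::label_instance("shard_store_index",
                                            std::to_string(store_index)));
        metrics.add_group("journal", {
          sm::make_counter("records_submitted",
                           records_submitted,
                           sm::description("records submitted through this journal"),
                           labels),
        });
      }
    };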
diff --git a/src/crimson/os/seastore/journal/record_submitter.h b/src/crimson/os/seastore/journal/record_submitter.h
index 8f0854482797a248eec78b5c59021a6569be737b..0140c38c6abc93e4c4e88b01d452e8f38feaca97 100644 (file)
@@ -289,7 +289,7 @@ public:
   // open for write, generate the correct print name, and register metrics
   using open_ertr = base_ertr;
   using open_ret = open_ertr::future<journal_seq_t>;
-  open_ret open(bool is_mkfs);
+  open_ret open(unsigned int store_index, bool is_mkfs);
 
   using close_ertr = base_ertr;
   close_ertr::future<> close();
diff --git a/src/crimson/os/seastore/journal/segmented_journal.cc b/src/crimson/os/seastore/journal/segmented_journal.cc
index 5ed8595aebfa2462ca18abf0662782c4825129df..9c4fc61bbce070625eb1c8b0011782b112e5ff65 100644 (file)
@@ -27,9 +27,11 @@ SET_SUBSYS(seastore_journal);
 namespace crimson::os::seastore::journal {
 
 SegmentedJournal::SegmentedJournal(
+  unsigned int store_index,
   SegmentProvider &segment_provider,
   JournalTrimmer &trimmer)
-  : segment_seq_allocator(
+  : store_index(store_index),
+    segment_seq_allocator(
       new SegmentSeqAllocator(segment_type_t::JOURNAL)),
     journal_segment_allocator(&trimmer,
                               data_category_t::METADATA,
@@ -53,13 +55,13 @@ SegmentedJournal::SegmentedJournal(
 SegmentedJournal::open_for_mkfs_ret
 SegmentedJournal::open_for_mkfs()
 {
-  return record_submitter.open(true);
+  return record_submitter.open(store_index, true);
 }
 
 SegmentedJournal::open_for_mount_ret
 SegmentedJournal::open_for_mount()
 {
-  return record_submitter.open(false);
+  return record_submitter.open(store_index, false);
 }
 
 SegmentedJournal::close_ertr::future<> SegmentedJournal::close()
diff --git a/src/crimson/os/seastore/journal/segmented_journal.h b/src/crimson/os/seastore/journal/segmented_journal.h
index 6e0cb0ca5dd9ac3f8e0125d127e990479d7f6fc1..ef8d3e0e1bff65f17d25fdcb4e0ead4fa3252a4f 100644 (file)
@@ -26,6 +26,7 @@ namespace crimson::os::seastore::journal {
 class SegmentedJournal : public Journal {
 public:
   SegmentedJournal(
+      unsigned int store_index,
       SegmentProvider &segment_provider,
       JournalTrimmer &trimmer);
   ~SegmentedJournal() {}
@@ -73,6 +74,7 @@ private:
     OrderingHandle &handle,
     on_submission_func_t &&on_submission);
 
+  unsigned int store_index;
   SegmentSeqAllocatorRef segment_seq_allocator;
   SegmentAllocator journal_segment_allocator;
   RecordSubmitter record_submitter;
diff --git a/src/crimson/os/seastore/lba/btree_lba_manager.cc b/src/crimson/os/seastore/lba/btree_lba_manager.cc
index d1d4fc0d61d99fe453142f9679ea48b5d42b265b..4fe4aa1490ea4765b679055e9bf01b7f863f7cd4 100644 (file)
@@ -1027,7 +1027,7 @@ BtreeLBAManager::complete_indirect_lba_mapping(
   });
 }
 
-void BtreeLBAManager::register_metrics()
+void BtreeLBAManager::register_metrics(unsigned int store_index)
 {
   LOG_PREFIX(BtreeLBAManager::register_metrics);
   DEBUG("start");
@@ -1039,12 +1039,14 @@ void BtreeLBAManager::register_metrics()
       sm::make_counter(
         "alloc_extents",
         stats.num_alloc_extents,
-        sm::description("total number of lba alloc_extent operations")
+        sm::description("total number of lba alloc_extent operations"),
+        {sm::label_instance("shard_store_index", std::to_string(store_index))}
       ),
       sm::make_counter(
         "alloc_extents_iter_nexts",
         stats.num_alloc_extents_iter_nexts,
-        sm::description("total number of iterator next operations during extent allocation")
+        sm::description("total number of iterator next operations during extent allocation"),
+        {sm::label_instance("shard_store_index", std::to_string(store_index))}
       ),
     }
   );
diff --git a/src/crimson/os/seastore/lba/btree_lba_manager.h b/src/crimson/os/seastore/lba/btree_lba_manager.h
index d2b460ca62ecc026851f840c368f3c82fc72b672..7b37f81a95ef25f7f4f597df45fb03f05e0ec6a3 100644 (file)
@@ -53,10 +53,10 @@ using LBABtree = FixedKVBtree<
  */
 class BtreeLBAManager : public LBAManager {
 public:
-  BtreeLBAManager(Cache &cache)
+  BtreeLBAManager(Cache &cache, unsigned int store_index)
     : cache(cache)
   {
-    register_metrics();
+    register_metrics(store_index);
   }
 
   mkfs_ret mkfs(
@@ -420,7 +420,7 @@ private:
   }
 
   seastar::metrics::metric_group metrics;
-  void register_metrics();
+  void register_metrics(unsigned int store_index);
 
   struct update_mapping_ret_bare_t {
     update_mapping_ret_bare_t()
index 499416353256028aed521e503b0b62aa9c3a0412..fd7118c2dbea29ffdd2f05ff025a475da96b857f 100644 (file)
@@ -6,8 +6,8 @@
 
 namespace crimson::os::seastore {
 
-LBAManagerRef lba::create_lba_manager(Cache &cache) {
-  return LBAManagerRef(new lba::BtreeLBAManager(cache));
+LBAManagerRef lba::create_lba_manager(Cache &cache, unsigned int store_index) {
+  return LBAManagerRef(new lba::BtreeLBAManager(cache, store_index));
 }
 
 }
diff --git a/src/crimson/os/seastore/lba_manager.h b/src/crimson/os/seastore/lba_manager.h
index 33b9facada13f36153d7f92f3ba829fe4fd59055..ab36e1ba28d16a757e31045ccbb5696e0869c160 100644 (file)
@@ -344,7 +344,7 @@ using LBAManagerRef = std::unique_ptr<LBAManager>;
 
 class Cache;
 namespace lba {
-LBAManagerRef create_lba_manager(Cache &cache);
+LBAManagerRef create_lba_manager(Cache &cache, unsigned int store_index);
 }
 
 }
diff --git a/src/crimson/os/seastore/transaction_manager.cc b/src/crimson/os/seastore/transaction_manager.cc
index ca1a45af9a77431ae68831c2d138406413c31414..f2e960b1ac446c5c956ba8bd0298f69cd48001d1 100644 (file)
@@ -981,6 +981,7 @@ TransactionManagerRef make_transaction_manager(
     Device *primary_device,
     const std::vector<Device*> &secondary_devices,
     shard_stats_t& shard_stats,
+    unsigned int store_index,
     bool is_test)
 {
   rewrite_gen_t hot_tier_generations = crimson::common::get_conf<uint64_t>(
@@ -988,9 +989,9 @@ TransactionManagerRef make_transaction_manager(
   rewrite_gen_t cold_tier_generations = crimson::common::get_conf<uint64_t>(
     "seastore_cold_tier_generations");
   auto epm = std::make_unique<ExtentPlacementManager>(
-    hot_tier_generations, cold_tier_generations);
-  auto cache = std::make_unique<Cache>(*epm);
-  auto lba_manager = lba::create_lba_manager(*cache);
+    hot_tier_generations, cold_tier_generations, store_index);
+  auto cache = std::make_unique<Cache>(*epm, store_index);
+  auto lba_manager = lba::create_lba_manager(*cache, store_index);
   auto sms = std::make_unique<SegmentManagerGroup>();
   auto rbs = std::make_unique<RBMDeviceGroup>();
   auto backref_manager = create_backref_manager(*cache);
@@ -1062,6 +1063,7 @@ TransactionManagerRef make_transaction_manager(
   }
 
   auto journal_trimmer = JournalTrimmerImpl::create(
+      store_index,
       *backref_manager, trimmer_config,
       backend_type, roll_start, roll_size);
 
@@ -1072,6 +1074,7 @@ TransactionManagerRef make_transaction_manager(
 
   if (cold_sms) {
     cold_segment_cleaner = SegmentCleaner::create(
+      store_index,
       cleaner_config,
       std::move(cold_sms),
       *backref_manager,
@@ -1089,6 +1092,7 @@ TransactionManagerRef make_transaction_manager(
 
   if (backend_type == backend_type_t::SEGMENTED) {
     cleaner = SegmentCleaner::create(
+      store_index,
       cleaner_config,
       std::move(sms),
       *backref_manager,
@@ -1102,14 +1106,17 @@ TransactionManagerRef make_transaction_manager(
     }
     segment_cleaner->set_journal_trimmer(*journal_trimmer);
     journal = journal::make_segmented(
+      store_index,
       *segment_cleaner,
       *journal_trimmer);
   } else {
     cleaner = RBMCleaner::create(
+      store_index,
       std::move(rbs),
       *backref_manager,
       cleaner_is_detailed);
     journal = journal::make_circularbounded(
+      store_index,
       *journal_trimmer,
       static_cast<random_block_device::RBMDevice*>(primary_device),
       "");
diff --git a/src/crimson/os/seastore/transaction_manager.h b/src/crimson/os/seastore/transaction_manager.h
index 0a4b0ce36ada6a21a6be49c77d0549ad2eb444b7..d1ce74bdf0686404e74619d43dcfb20f50db02a3 100644 (file)
@@ -1615,5 +1615,6 @@ TransactionManagerRef make_transaction_manager(
     Device *primary_device,
     const std::vector<Device*> &secondary_devices,
     shard_stats_t& shard_stats,
+    unsigned int store_index,
     bool is_test);
 }
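On the creation side, make_transaction_manager() now takes the store index from whoever builds the per-shard stores, and hands the same value to every component it wires up (extent placement manager, cache, LBA manager, journal trimmer, cleaners, and journal). A hedged caller-side sketch for a reactor hosting several store shards; the store count, device containers, and loop are hypothetical stand-ins rather than the actual SeaStore wiring:

    // Hypothetical caller: one reactor, num_local_stores store shards, each
    // TransactionManager tagged with a distinct store_index.  Device and
    // stats plumbing are simplified placeholders.
    std::vector<TransactionManagerRef> tms;
    for (unsigned int store_index = 0;
         store_index < num_local_stores;
         ++store_index) {
      tms.push_back(make_transaction_manager(
          primary_devices[store_index],     // hypothetical per-store device
          secondary_devices[store_index],   // hypothetical secondary set
          shard_stats,
          store_index,
          false /* is_test */));
    }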