]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
crimson/common/options: add
authorchunmei liu <chunmei.liu@ibm.com>
Thu, 19 Feb 2026 23:19:32 +0000 (15:19 -0800)
committerchunmei liu <chunmei.liu@ibm.com>
Mon, 23 Feb 2026 22:16:48 +0000 (14:16 -0800)
 seastore_require_partition_count_match_reactor_count in crimson.yaml.in

Signed-off-by: chunmei liu <chunmei.liu@ibm.com>
src/common/options/crimson.yaml.in
src/crimson/os/futurized_store.h
src/crimson/os/seastore/seastore.cc
src/crimson/os/seastore/segment_manager.h
src/crimson/osd/pg_map.cc

index ab4c5bcc33db2b5df674b0dfc60bce48750e5a56..bce550643fc48474cf49b9ffaab148658bb50f60 100644 (file)
@@ -269,3 +269,10 @@ options:
         internal checksum feature without using server CPU then enable if available,
         set to true to disable unconditionally.
   default: true
+- name: seastore_require_partition_count_match_reactor_count
+  type: bool
+  level: advanced
+  default: true
+  desc: disable OSD shard count changes upon restart.
+  flags:
+  - startup
index 3abc364dac8838ef7d99b49990f652c75d1f600b..6664ac355f2fa324517e7270fe0a9fdd3e2c09dc 100644 (file)
@@ -268,6 +268,11 @@ auto with_store(BackendStore store, Args&&... args)
   constexpr bool is_seastar_future = seastar::is_future<raw_return_type>::value && !is_errorator;
   constexpr bool is_plain = !is_errorator && !is_seastar_future;
   const auto original_core = seastar::this_shard_id();
+  if(crimson::common::get_conf<bool>("seastore_require_partition_count_match_reactor_count")) {
+    if (store.shard_id != GLOBAL_STORE) {
+      ceph_assert(store.shard_id == seastar::this_shard_id());
+    }
+  }
   if (store.shard_id == seastar::this_shard_id() || store.shard_id == GLOBAL_STORE) {
     if constexpr (is_plain) {
       return seastar::make_ready_future<raw_return_type>(
index 04092e89bdd9f492a82fb3bfd9f902d67c6e272e..29330f97760d30cf7b40e3cfab31cf3a17a49066 100644 (file)
@@ -235,6 +235,13 @@ seastar::future<> SeaStore::get_shard_nums()
       });
     INFO("seastore shard nums {}", shard_nums);
     store_shard_nums = shard_nums;
+    if(crimson::common::get_conf<bool>("seastore_require_partition_count_match_reactor_count")) {
+      INFO("seastore doesn't allow shard change");
+      if (store_shard_nums != seastar::smp::count) {
+        INFO("seastore shards {} do not match seastar::smp {}", store_shard_nums, seastar::smp::count);
+        ceph_abort_msg("seastore_require_partition_count_match_reactor_count is true, seastore shards do not match seastar::smp");
+      }
+    }
     co_return;
   }
 }
index 0f1bbaea569ea73a7b02b7e33162c5be36afcdf1..5de78d597a524e873fc25658f3028c6352d082f9 100644 (file)
@@ -57,6 +57,9 @@ struct block_sm_superblock_t {
   }
 
   void validate() const {
+    if(crimson::common::get_conf<bool>("seastore_require_partition_count_match_reactor_count")) {
+      ceph_assert(shard_num == seastar::smp::count);
+    }
     ceph_assert(block_size > 0);
     ceph_assert(segment_size > 0 &&
                 segment_size % block_size == 0);
index 21aa88751ac0f375789be068319f9c651c49daf6..ee81436c3fd7b230ca63e22e30bed40c6360e11a 100644 (file)
@@ -127,38 +127,42 @@ seastar::future<std::pair<core_id_t, unsigned int>> PGShardMapping::get_or_creat
         ceph_assert_always(primary_mapping.core_to_num_pgs.end() != count_iter);
         ++(count_iter->second);
 
-        if (seastar::smp::count > store_shard_nums ) {
-          auto alien_iter = primary_mapping.core_alien_to_num_pgs.find(core_to_update);
-          auto core_iter = std::min_element(
-            alien_iter->second.begin(),
-            alien_iter->second.end(),
-            [](const auto &left, const auto &right) {
-              return left.second < right.second;
-            }
-          );
-          core_iter->second++;
-          core_to_update = core_iter->first;
-        }
-        if (seastar::smp::count >= store_shard_nums) {
-          shard_index_update = 0; // use the first store shard index on this core
+        if(crimson::common::get_conf<bool>("seastore_require_partition_count_match_reactor_count")) {
+          shard_index_update = 0;
         } else {
-          core_shard_iter = primary_mapping.core_shard_to_num_pgs.find(core_to_update);
-          ceph_assert_always(core_shard_iter != primary_mapping.core_shard_to_num_pgs.end());
-          if (shard_index_update == NULL_STORE_INDEX) {
-            // find the store shard index with the least number of pgs
-            // on this core
-            shard_iter = std::min_element(
-              core_shard_iter->second.begin(),
-              core_shard_iter->second.end(),
+          if (seastar::smp::count > store_shard_nums ) {
+            auto alien_iter = primary_mapping.core_alien_to_num_pgs.find(core_to_update);
+            auto core_iter = std::min_element(
+              alien_iter->second.begin(),
+              alien_iter->second.end(),
               [](const auto &left, const auto &right) {
                 return left.second < right.second;
               }
             );
-            shard_index_update = shard_iter->first; //find the store shard index on this core
+            core_iter->second++;
+            core_to_update = core_iter->first;
+          }
+          if (seastar::smp::count >= store_shard_nums) {
+            shard_index_update = 0; // use the first store shard index on this core
           } else {
-            shard_iter = core_shard_iter->second.find(shard_index_update);
+            core_shard_iter = primary_mapping.core_shard_to_num_pgs.find(core_to_update);
+            ceph_assert_always(core_shard_iter != primary_mapping.core_shard_to_num_pgs.end());
+            if (shard_index_update == NULL_STORE_INDEX) {
+              // find the store shard index with the least number of pgs
+              // on this core
+              shard_iter = std::min_element(
+                core_shard_iter->second.begin(),
+                core_shard_iter->second.end(),
+                [](const auto &left, const auto &right) {
+                  return left.second < right.second;
+                }
+              );
+              shard_index_update = shard_iter->first; //find the store shard index on this core
+            }  else {
+              shard_iter = core_shard_iter->second.find(shard_index_update);
+            }
+            ++(shard_iter->second);
           }
-          ++(shard_iter->second);
         }
         [[maybe_unused]] auto [insert_iter, inserted] =
           primary_mapping.pg_to_core.emplace(pgid, std::make_pair(core_to_update, shard_index_update));