rgw: don't use other zone's shard count for forwarded CreateBucket requests
author      Casey Bodley <cbodley@redhat.com>
            Tue, 26 Jul 2022 15:48:05 +0000 (11:48 -0400)
committer   Casey Bodley <cbodley@redhat.com>
            Mon, 6 Nov 2023 16:07:23 +0000 (11:07 -0500)
Multisite no longer requires shard counts to match between zones. Each
zone can configure its own default index shard count, so ignore the shard
count that comes from the metadata master zone.

Signed-off-by: Casey Bodley <cbodley@redhat.com>
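
The substance of the change is the shard-count selection in init_default_bucket_layout():
the count forwarded from the metadata master zone is no longer consulted, and only local
configuration decides the default. Below is a minimal, self-contained sketch of the
resulting selection order. The Conf and Zone structs and the default_num_shards() helper
are hypothetical stand-ins for CephContext and RGWZone, and the final fallback to the
zone's bucket_index_max_shards is an assumption, since that branch is truncated in the
first hunk below.

    // Sketch only: Conf/Zone are stand-ins, not the real CephContext/RGWZone.
    #include <cstdint>
    #include <iostream>

    struct Conf {
      // mirrors rgw_override_bucket_index_max_shards; 0 means "not set"
      uint32_t rgw_override_bucket_index_max_shards = 0;
    };

    struct Zone {
      // assumed per-zone default index shard count (RGWZone::bucket_index_max_shards)
      uint32_t bucket_index_max_shards = 11;
    };

    // Before this commit, a shard count forwarded from the metadata master zone
    // (pmaster_num_shards) could take precedence here; now only local settings do.
    uint32_t default_num_shards(const Conf& conf, const Zone& zone) {
      if (conf.rgw_override_bucket_index_max_shards > 0) {
        return conf.rgw_override_bucket_index_max_shards;  // cluster-wide override
      }
      return zone.bucket_index_max_shards;  // this zone's own default
    }

    int main() {
      Conf conf;
      Zone zone;
      // A forwarded CreateBucket request no longer influences this value.
      std::cout << "num_shards = " << default_num_shards(conf, zone) << "\n";
      return 0;
    }

In effect, a CreateBucket request forwarded from another zone now gets the same index
layout it would have received if it had been issued against this zone directly.
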
src/rgw/driver/rados/rgw_bucket.cc
src/rgw/driver/rados/rgw_bucket.h
src/rgw/driver/rados/rgw_rados.cc
src/rgw/driver/rados/rgw_rados.h
src/rgw/driver/rados/rgw_sal_rados.cc

diff --git a/src/rgw/driver/rados/rgw_bucket.cc b/src/rgw/driver/rados/rgw_bucket.cc
index d6d83eab3dcc698e3c7e49312ec3bf86d6cdbaca..fc16316efa4c1efe33a05937d01eb4feb8006f3d 100644 (file)
@@ -2506,7 +2506,6 @@ int RGWBucketInstanceMetadataHandler::do_put(RGWSI_MetaBackend_Handler::Op *op,
 
 void init_default_bucket_layout(CephContext *cct, rgw::BucketLayout& layout,
                                const RGWZone& zone,
-                               std::optional<uint32_t> shards,
                                std::optional<rgw::BucketIndexType> type) {
   layout.current_index.gen = 0;
   layout.current_index.layout.normal.hash_type = rgw::BucketHashType::Mod;
@@ -2514,9 +2513,7 @@ void init_default_bucket_layout(CephContext *cct, rgw::BucketLayout& layout,
   layout.current_index.layout.type =
     type.value_or(rgw::BucketIndexType::Normal);
 
-  if (shards) {
-    layout.current_index.layout.normal.num_shards = *shards;
-  } else if (cct->_conf->rgw_override_bucket_index_max_shards > 0) {
+  if (cct->_conf->rgw_override_bucket_index_max_shards > 0) {
     layout.current_index.layout.normal.num_shards =
       cct->_conf->rgw_override_bucket_index_max_shards;
   } else {
@@ -2548,7 +2545,7 @@ int RGWMetadataHandlerPut_BucketInstance::put_check(const DoutPrefixProvider *dp
       bci.info.layout = rgw::BucketLayout{};
       init_default_bucket_layout(cct, bci.info.layout,
                                 bihandler->svc.zone->get_zone(),
-                                std::nullopt, std::nullopt);
+                                std::nullopt);
     } else {
       bci.info.layout = old_bci->info.layout;
     }
diff --git a/src/rgw/driver/rados/rgw_bucket.h b/src/rgw/driver/rados/rgw_bucket.h
index ba0d1cf005c8e5aeff9cdb10670094868bf61a02..0110eff334392a75212806b1a7df6df8cbcbb5fb 100644 (file)
@@ -45,7 +45,6 @@ extern bool rgw_bucket_object_check_filter(const std::string& oid);
 
 void init_default_bucket_layout(CephContext *cct, rgw::BucketLayout& layout,
                                const RGWZone& zone,
-                               std::optional<uint32_t> shards,
                                std::optional<rgw::BucketIndexType> type);
 
 struct RGWBucketCompleteInfo {
diff --git a/src/rgw/driver/rados/rgw_rados.cc b/src/rgw/driver/rados/rgw_rados.cc
index b935610f9ad5cd59a00869e578dd8722015d544b..2df4f77b673eb726e185c0a0d35f514a537abdb0 100644 (file)
@@ -2282,8 +2282,7 @@ int RGWRados::create_bucket(const RGWUserInfo& owner, rgw_bucket& bucket,
                             obj_version *pobjv,
                             obj_version *pep_objv,
                             real_time creation_time,
-                            rgw_bucket *pmaster_bucket,
-                            uint32_t *pmaster_num_shards,
+                            const rgw_bucket* pmaster_bucket,
                            optional_yield y,
                             const DoutPrefixProvider *dpp,
                            bool exclusive)
@@ -2325,9 +2324,6 @@ int RGWRados::create_bucket(const RGWUserInfo& owner, rgw_bucket& bucket,
     info.swift_versioning = (!swift_ver_location.empty());
 
     init_default_bucket_layout(cct, info.layout, svc.zone->get_zone(),
-                              pmaster_num_shards ?
-                              std::optional{*pmaster_num_shards} :
-                              std::nullopt,
                               rule_info.index_type);
 
     info.requester_pays = false;
diff --git a/src/rgw/driver/rados/rgw_rados.h b/src/rgw/driver/rados/rgw_rados.h
index 2dca3cfaad4e35efa3d7ef716afe618200c658df..d312e7355e227704fc30105d5134490c560995b7 100644 (file)
@@ -638,8 +638,7 @@ public:
                    obj_version *pobjv,
                    obj_version *pep_objv,
                    ceph::real_time creation_time,
-                   rgw_bucket *master_bucket,
-                   uint32_t *master_num_shards,
+                   const rgw_bucket *master_bucket,
                    optional_yield y,
                     const DoutPrefixProvider *dpp,
                    bool exclusive = true);
diff --git a/src/rgw/driver/rados/rgw_sal_rados.cc b/src/rgw/driver/rados/rgw_sal_rados.cc
index fb684a921e6d8b0d2e56d4cc306acb4592d05d7c..f085afb2beeda8e2884a7173d74ac12f75c33494 100644 (file)
@@ -186,7 +186,6 @@ int RadosUser::create_bucket(const DoutPrefixProvider* dpp,
   bufferlist in_data;
   RGWBucketInfo master_info;
   rgw_bucket* pmaster_bucket;
-  uint32_t* pmaster_num_shards;
   real_time creation_time;
   std::unique_ptr<Bucket> bucket;
   obj_version objv,* pobjv = NULL;
@@ -232,14 +231,12 @@ int RadosUser::create_bucket(const DoutPrefixProvider* dpp,
     ldpp_dout(dpp, 20) << "got creation time: << " << std::put_time(std::localtime(&ctime), "%F %T") << dendl;
     pmaster_bucket= &master_info.bucket;
     creation_time = master_info.creation_time;
-    pmaster_num_shards = &master_info.layout.current_index.layout.normal.num_shards;
     pobjv = &objv;
     if (master_info.obj_lock_enabled()) {
       info.flags = BUCKET_VERSIONED | BUCKET_OBJ_LOCK_ENABLED;
     }
   } else {
     pmaster_bucket = NULL;
-    pmaster_num_shards = NULL;
     if (obj_lock_enabled)
       info.flags = BUCKET_VERSIONED | BUCKET_OBJ_LOCK_ENABLED;
   }
@@ -264,8 +261,7 @@ int RadosUser::create_bucket(const DoutPrefixProvider* dpp,
     ret = store->getRados()->create_bucket(this->get_info(), bucket->get_key(),
                                    zid, placement_rule, swift_ver_location, pquota_info,
                                    attrs, info, pobjv, &ep_objv, creation_time,
-                                   pmaster_bucket, pmaster_num_shards, y, dpp,
-                                   exclusive);
+                                   pmaster_bucket, y, dpp, exclusive);
     if (ret == -EEXIST) {
       *existed = true;
       /* bucket already existed, might have raced with another bucket creation,