From 0f1726d765b1268229b205f7067a5e0547bc41cb Mon Sep 17 00:00:00 2001
From: "J. Eric Ivancich"
Date: Tue, 14 May 2024 15:09:03 -0400
Subject: [PATCH] rgw: add shard reduction ability to dynamic resharding

Previously, dynamic resharding could only *increase* the number of
bucket index shards for a given bucket. This adds the ability to also
*reduce* the number of shards.

So in addition to the existing trigger of 100,000 entries per shard
(the current default value) for an increase, there is a new trigger of
10,000 entries per shard for a decrease.

However, for buckets with object counts that go up and down regularly,
we don't want to keep resharding back and forth to chase the number of
objects. So for a shard reduction to take place there is also a time
delay (default 5 days). Once an entry is added to the reshard queue
(log) for a reduction, processing the queue will not perform that
reduction until the delay has elapsed; only when the entry is processed
after the delay can the shard reduction proceed. If the entry is
processed at any point before the delay expires and the bucket no
longer has few enough entries to justify a shard reduction, the queue
entry is discarded.

So using the defaults, this effectively means the bucket must have few
enough objects to warrant a shard reduction for 5 consecutive days
before the reshard will take place.

Signed-off-by: J. Eric Ivancich
---
 src/common/options/rgw.yaml.in      |  30 ++++
 src/rgw/driver/rados/rgw_rados.cc   |  94 ++++++++----
 src/rgw/driver/rados/rgw_rados.h    |   6 +
 src/rgw/driver/rados/rgw_reshard.cc | 226 ++++++++++++++++++++++++----
 src/rgw/driver/rados/rgw_reshard.h  |  49 +++---
 src/rgw/rgw_quota.cc                |  25 +--
 src/rgw/rgw_quota.h                 |   4 -
 src/test/rgw/test_rgw_reshard.cc    |  26 ++--
 8 files changed, 329 insertions(+), 131 deletions(-)

diff --git a/src/common/options/rgw.yaml.in b/src/common/options/rgw.yaml.in
index 3bb4942a9e3..d34a60ba4d7 100644
--- a/src/common/options/rgw.yaml.in
+++ b/src/common/options/rgw.yaml.in
@@ -3236,6 +3236,36 @@ options:
   see_also:
   - rgw_max_objs_per_shard
   - rgw_max_dynamic_shards
+- name: rgw_dynamic_resharding_may_reduce
+  type: bool
+  level: advanced
+  desc: Whether dynamic resharding can reduce the number of shards
+  long_desc: If true, RGW's dynamic resharding is allowed to reduce
+    the number of shards if it appears there are too many.
+  default: true
+  services:
+  - rgw
+  see_also:
+  - rgw_dynamic_resharding
+- name: rgw_dynamic_resharding_reduction_wait
+  type: uint
+  level: advanced
+  desc: Number of hours to delay bucket index shard reduction.
+  long_desc: >-
+    In order to avoid resharding buckets whose object counts fluctuate
+    up and down regularly, we implement a delay between noting that a
+    shard reduction might be appropriate and actually performing it.
+    This allows us to cancel the reshard operation if the number of
+    objects increases significantly during the delay.
+    WARNING: Setting this value too low could result in significantly
+    reduced cluster performance.
+  default: 120
+  min: 0
+  services:
+  - rgw
+  see_also:
+  - rgw_dynamic_resharding
+  - rgw_dynamic_resharding_may_reduce
 - name: rgw_max_objs_per_shard
   type: uint
   level: basic
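The two new options can be adjusted at runtime through Ceph's central
configuration store. The following lines are illustrative only (the values are
examples, not recommendations, and the client.rgw target can be narrowed to a
specific RGW daemon):

    # allow dynamic resharding to shrink bucket index shard counts (the default)
    ceph config set client.rgw rgw_dynamic_resharding_may_reduce true

    # require a 10-day (240-hour) quiet period instead of the default 120 hours
    ceph config set client.rgw rgw_dynamic_resharding_reduction_wait 240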
diff --git a/src/rgw/driver/rados/rgw_rados.cc b/src/rgw/driver/rados/rgw_rados.cc
index c30caf3f533..bbfedf8ca9a 100644
--- a/src/rgw/driver/rados/rgw_rados.cc
+++ b/src/rgw/driver/rados/rgw_rados.cc
@@ -10324,66 +10324,96 @@ int RGWRados::cls_bucket_head_async(const DoutPrefixProvider *dpp, const RGWBuck
   return 0;
 }
 
+
+// uses information the store has easy access to and hands it to the
+// shard calculation logic
+void RGWRados::calculate_preferred_shards(const DoutPrefixProvider* dpp,
+                                          const uint64_t num_objs,
+                                          const uint32_t num_source_shards,
+                                          bool& need_resharding,
+                                          uint32_t* suggested_num_shards)
+{
+  const uint32_t max_dynamic_shards =
+    uint32_t(cct->_conf.get_val<uint64_t>("rgw_max_dynamic_shards"));
+  const uint64_t max_objs_per_shard =
+    cct->_conf.get_val<uint64_t>("rgw_max_objs_per_shard");
+  const bool is_multisite = svc.zone->need_to_log_data();
+
+  RGWBucketReshard::calculate_preferred_shards(dpp,
+                                               max_dynamic_shards,
+                                               max_objs_per_shard,
+                                               is_multisite,
+                                               num_objs,
+                                               num_source_shards,
+                                               need_resharding,
+                                               suggested_num_shards);
+}
+
+
+// Check whether a bucket is a candidate for dynamic resharding and if
+// so, add it to the reshard queue (log).
+//
+// We implement dynamic reshard reduction (where the number of shards
+// can be reduced) in the following manner. In addition to the maximum
+// number of desired entries per shard, we now also track a minimum;
+// when a bucket's average falls below that minimum (and stays there
+// for the configured delay), the shard count can be reduced.
 int RGWRados::check_bucket_shards(const RGWBucketInfo& bucket_info,
                                   uint64_t num_objs,
-                                  const DoutPrefixProvider *dpp, optional_yield y)
+                                  const DoutPrefixProvider* dpp, optional_yield y)
 {
   if (! cct->_conf.get_val<bool>("rgw_dynamic_resharding")) {
-      return 0;
+    return 0;
   }
 
   if (! is_layout_reshardable(bucket_info.layout)) {
     return 0;
   }
 
-  bool need_resharding = false;
-  uint32_t num_source_shards = rgw::current_num_shards(bucket_info.layout);
-  const uint32_t max_dynamic_shards =
-    uint32_t(cct->_conf.get_val<uint64_t>("rgw_max_dynamic_shards"));
-
-  if (num_source_shards >= max_dynamic_shards) {
-    return 0;
-  }
+  // TODO: consider per-bucket sync policy here?
 
+  bool need_resharding = false;
   uint32_t suggested_num_shards = 0;
-  const uint64_t max_objs_per_shard =
-    cct->_conf.get_val<uint64_t>("rgw_max_objs_per_shard");
+  const uint32_t num_source_shards =
+    rgw::current_num_shards(bucket_info.layout);
 
-  // TODO: consider per-bucket sync policy here?
-  const bool is_multisite = svc.zone->need_to_log_data();
-
-  quota_handler->check_bucket_shards(dpp, max_objs_per_shard, num_source_shards,
-                                     num_objs, is_multisite, need_resharding,
-                                     &suggested_num_shards);
+  calculate_preferred_shards(dpp, num_objs, num_source_shards,
+                             need_resharding, &suggested_num_shards);
   if (! need_resharding) {
     return 0;
   }
 
-  const uint32_t final_num_shards =
-    RGWBucketReshard::get_preferred_shards(suggested_num_shards,
-                                           max_dynamic_shards);
   // final verification, so we don't reduce number of shards
-  if (final_num_shards <= num_source_shards) {
+  const bool may_reduce =
+    cct->_conf.get_val<bool>("rgw_dynamic_resharding_may_reduce");
+  if (! may_reduce && suggested_num_shards <= num_source_shards) {
     return 0;
   }
 
-  ldpp_dout(dpp, 1) << "RGWRados::" << __func__ << " bucket " << bucket_info.bucket.name <<
-    " needs resharding; current num shards " << bucket_info.layout.current_index.layout.normal.num_shards <<
-    "; new num shards " << final_num_shards << " (suggested " <<
-    suggested_num_shards << ")" << dendl;
+  ldpp_dout(dpp, 1) << "RGWRados::" << __func__ <<
+    " bucket " << bucket_info.bucket.name <<
+    " needs resharding; current num shards " << num_source_shards <<
+    "; new num shards " << suggested_num_shards << dendl;
 
-  return add_bucket_to_reshard(dpp, bucket_info, final_num_shards, y);
+  return add_bucket_to_reshard(dpp, bucket_info, suggested_num_shards, y);
 }
 
-int RGWRados::add_bucket_to_reshard(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, uint32_t new_num_shards, optional_yield y)
+int RGWRados::add_bucket_to_reshard(const DoutPrefixProvider *dpp,
+                                    const RGWBucketInfo& bucket_info,
+                                    uint32_t new_num_shards,
+                                    optional_yield y)
 {
   RGWReshard reshard(this->driver, dpp);
 
-  uint32_t num_source_shards = rgw::current_num_shards(bucket_info.layout);
-
+  const uint32_t num_source_shards = rgw::current_num_shards(bucket_info.layout);
+  const bool may_reduce =
+    cct->_conf.get_val<bool>("rgw_dynamic_resharding_may_reduce");
   new_num_shards = std::min(new_num_shards, get_max_bucket_shards());
-  if (new_num_shards <= num_source_shards) {
-    ldpp_dout(dpp, 20) << "not resharding bucket name=" << bucket_info.bucket.name << ", orig_num=" << num_source_shards << ", new_num_shards=" << new_num_shards << dendl;
+
+  if ((! may_reduce && new_num_shards < num_source_shards) ||
+      new_num_shards == num_source_shards) {
+    ldpp_dout(dpp, 10) << "WARNING: " << __func__ <<
+      ": rejecting resharding request for bucket name=" <<
+      bucket_info.bucket.name << ", shard count=" << num_source_shards <<
+      ", new shard count=" << new_num_shards <<
+      ", rgw_dynamic_resharding_may_reduce=" << may_reduce << dendl;
     return 0;
   }
 
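Operators can observe what this enqueueing logic decided without reading the
code; the commands below are existing radosgw-admin subcommands (their exact
output columns vary by release):

    # show per-shard object counts and whether buckets are over/under the limits
    radosgw-admin bucket limit check

    # list entries currently queued for resharding, with old and new shard counts
    radosgw-admin reshard list

    # check the resharding status of a single bucket
    radosgw-admin reshard status --bucket=<bucket-name>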
diff --git a/src/rgw/driver/rados/rgw_rados.h b/src/rgw/driver/rados/rgw_rados.h
index 481a94a140d..10a3b3ad02f 100644
--- a/src/rgw/driver/rados/rgw_rados.h
+++ b/src/rgw/driver/rados/rgw_rados.h
@@ -1586,6 +1586,12 @@ public:
                          RGWQuota& quota, uint64_t obj_size,
                          optional_yield y, bool check_size_only = false);
 
+  void calculate_preferred_shards(const DoutPrefixProvider* dpp,
+                                  const uint64_t num_objs,
+                                  const uint32_t current_shard_count,
+                                  bool& need_resharding,
+                                  uint32_t* suggested_num_shard_count = nullptr);
+
   int check_bucket_shards(const RGWBucketInfo& bucket_info, uint64_t num_objs,
                           const DoutPrefixProvider *dpp, optional_yield y);
 
diff --git a/src/rgw/driver/rados/rgw_reshard.cc b/src/rgw/driver/rados/rgw_reshard.cc
index 62f7e6ff7b2..b6b5ca3c4ad 100644
--- a/src/rgw/driver/rados/rgw_reshard.cc
+++ b/src/rgw/driver/rados/rgw_reshard.cc
@@ -3,6 +3,7 @@
 #include 
 #include 
+#include 
 
 #include "rgw_zone.h"
 #include "driver/rados/rgw_bucket.h"
@@ -30,6 +31,10 @@ const string reshard_oid_prefix = "reshard.";
 const string reshard_lock_name = "reshard_process";
 const string bucket_instance_lock_name = "bucket_instance_lock";
 
+// key reduction values; NB: we may want to expose some of these as options
+constexpr uint64_t min_objs_per_shard = 10000;
+constexpr uint32_t min_dynamic_shards = 11;
+
 /* All primes up to 2000 used to attempt to make dynamic sharding use
  * a prime number of shards. Note: this list also includes 1 for when
  * 1 shard is the most appropriate, even though 1 is not prime.
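A prime shard count tends to spread bucket index keys more evenly when shard
placement is derived from a hash modulo the shard count. The header already
provides get_prime_shards_less_or_equal() and get_prime_shards_greater_or_equal()
over this sorted list; the sketch below is not the actual implementation, only an
illustration of how such a lookup can be done over a sorted prime table, returning
0 when no qualifying prime exists (matching the behavior documented later in the
header):

    #include <algorithm>
    #include <cstdint>
    #include <initializer_list>

    // illustrative only: smallest prime in a sorted list that is >= n, or 0
    static uint32_t prime_greater_or_equal(
        const std::initializer_list<uint32_t>& primes, uint32_t n)
    {
      const auto it = std::lower_bound(primes.begin(), primes.end(), n);
      return it == primes.end() ? 0 : *it;
    }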
@@ -61,6 +66,90 @@ const std::initializer_list<uint32_t> RGWBucketReshard::reshard_primes = {
   1931, 1933, 1949, 1951, 1973, 1979, 1987, 1993, 1997, 1999
 };
 
+
+uint32_t RGWBucketReshard::get_prime_shard_count(
+  uint32_t shard_count,
+  uint32_t max_dynamic_shards,
+  uint32_t min_dynamic_shards)
+{
+  uint32_t prime_shard_count =
+    get_prime_shards_greater_or_equal(shard_count);
+
+  // if we cannot find a larger prime number, then just use what was
+  // passed in
+  if (! prime_shard_count) {
+    prime_shard_count = shard_count;
+  }
+
+  // keep within min/max bounds
+  return std::min(max_dynamic_shards,
+                  std::max(min_dynamic_shards, prime_shard_count));
+}
+
+
+// Given the current number of shards and objects (entries), calculate
+// whether resharding is called for and, if so, how many shards the
+// bucket should have, taking a variety of considerations into
+// account. This is used as part of the dynamic resharding capability.
+void RGWBucketReshard::calculate_preferred_shards(
+  const DoutPrefixProvider* dpp,
+  const uint32_t max_dynamic_shards,
+  const uint64_t max_objs_per_shard,
+  const bool is_multisite,
+  const uint64_t num_objs,
+  const uint32_t current_num_shards,
+  bool& need_resharding,
+  uint32_t* suggested_num_shards,
+  bool prefer_prime)
+{
+  constexpr uint32_t regular_multiplier = 2;
+  // to reduce the number of reshards in multisite, increase the
+  // number of shards more aggressively
+  constexpr uint32_t multisite_multiplier = 8;
+  const char* verb = "n/a";
+
+  if (current_num_shards < max_dynamic_shards &&
+      num_objs > current_num_shards * max_objs_per_shard) {
+    need_resharding = true;
+    verb = "expansion";
+  } else if (current_num_shards > min_dynamic_shards &&
+             num_objs < current_num_shards * min_objs_per_shard) {
+    need_resharding = true;
+    verb = "reduction";
+  } else {
+    need_resharding = false;
+    return;
+  }
+
+  const uint32_t multiplier =
+    is_multisite ? multisite_multiplier : regular_multiplier;
+  uint32_t calculated_num_shards =
+    std::max(min_dynamic_shards,
+             std::min(max_dynamic_shards,
+                      (uint32_t) (num_objs * multiplier / max_objs_per_shard)));
+  if (calculated_num_shards == current_num_shards) {
+    need_resharding = false;
+    return;
+  }
+
+  if (prefer_prime) {
+    calculated_num_shards = get_prime_shard_count(
+      calculated_num_shards, max_dynamic_shards, min_dynamic_shards);
+  }
+
+  ldpp_dout(dpp, 20) << __func__ << ": reshard " << verb <<
+    " suggested; current average (objects/shard) is " <<
+    float(num_objs) / current_num_shards << ", which is not between " <<
+    min_objs_per_shard << " and " << max_objs_per_shard <<
+    "; suggesting " << calculated_num_shards << " shards" << dendl;
+
+  if (suggested_num_shards) {
+    *suggested_num_shards = calculated_num_shards;
+  }
+} // RGWBucketReshard::calculate_preferred_shards
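A quick sanity check of the arithmetic under the shipped defaults
(rgw_max_objs_per_shard = 100000, the new min_objs_per_shard = 10000,
min_dynamic_shards = 11, rgw_max_dynamic_shards = 1999, single-site
multiplier = 2). This standalone sketch merely mirrors the formula above and is
not part of the patch:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    // mirrors calculate_preferred_shards() for a single-site zone with defaults
    static uint32_t suggested(uint64_t num_objs) {
      return std::max<uint32_t>(11u,
               std::min<uint32_t>(1999u, uint32_t(num_objs * 2 / 100000)));
    }

    int main() {
      // reduction: 101 shards holding 200,000 objects averages ~1,980 objects
      // per shard (< 10,000), so a reduction triggers; the raw suggestion is 4,
      // clamped up to 11, which is already prime
      assert(suggested(200000) == 11);

      // expansion: 11 shards holding 5,000,000 objects averages ~454,545 objects
      // per shard (> 100,000); the raw suggestion is 100, and the prime
      // adjustment would then pick 101
      assert(suggested(5000000) == 100);
      return 0;
    }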
 
 class BucketReshardShard {
   rgw::sal::RadosStore* store;
   const RGWBucketInfo& bucket_info;
@@ -989,8 +1078,11 @@ int RGWBucketReshard::execute(int num_shards,
     return ret;
   }
 
-  ldpp_dout(dpp, 1) << __func__ << " INFO: reshard of bucket \""
-    << bucket_info.bucket.name << "\" completed successfully" << dendl;
+  ldpp_dout(dpp, 1) << __func__ << " INFO: reshard of bucket \"" <<
+    bucket_info.bucket.name << "\" from " <<
+    rgw::num_shards(bucket_info.layout.current_index) << " shards to " <<
+    num_shards << " shards completed successfully" << dendl;
+
   return 0;
 } // execute
 
@@ -1204,7 +1296,9 @@ void RGWReshardWait::stop()
 }
 
 int RGWReshard::process_entry(const cls_rgw_reshard_entry& entry,
-                              int max_entries, const DoutPrefixProvider *dpp, optional_yield y)
+                              int max_entries,
+                              const DoutPrefixProvider* dpp,
+                              optional_yield y)
 {
   ldpp_dout(dpp, 20) << __func__ << " resharding " <<
     entry.bucket_name << dendl;
@@ -1213,6 +1307,29 @@ int RGWReshard::process_entry(const cls_rgw_reshard_entry& entry,
   RGWBucketInfo bucket_info;
   std::map<std::string, bufferlist> bucket_attrs;
 
+  // removes the entry and logs a message
+  auto clean_up = [this, &dpp, &entry, &y](const std::string_view& reason = "") -> int {
+    int ret = remove(dpp, entry, y);
+    if (ret < 0) {
+      ldpp_dout(dpp, 0) << "ERROR removing bucket \"" << entry.bucket_name <<
+	"\" from resharding queue, because " <<
+	(reason.empty() ? "resharding complete" : reason) <<
+	"; error is " << cpp_strerror(-ret) << dendl;
+      return ret;
+    }
+
+    if (! reason.empty()) {
+      ldpp_dout(dpp, 10) << "WARNING: processing reshard reduction on bucket \"" <<
+	entry.bucket_name << "\", but cancelling because " << reason << dendl;
+    }
+
+    return 0;
+  };
+
   int ret = store->getRados()->get_bucket_info(store->svc(),
                                                entry.tenant,
                                                entry.bucket_name,
@@ -1228,38 +1345,87 @@ int RGWReshard::process_entry(const cls_rgw_reshard_entry& entry,
       // any error other than ENOENT will abort
       return ret;
     }
+
+      // we've encountered a reshard queue entry for an apparently
+      // non-existent bucket; let's try to recover by cleaning up
+      return clean_up("bucket does not currently exist");
     } else {
-      ldpp_dout(dpp, 0) << __func__ <<
-	": Bucket: " << entry.bucket_name <<
-	" already resharded by someone, skipping " << dendl;
+      return clean_up("bucket already resharded");
     }
+  }
 
-  // we've encountered a reshard queue entry for an apparently
-  // non-existent bucket; let's try to recover by cleaning up
-  ldpp_dout(dpp, 0) << __func__ <<
-    ": removing reshard queue entry for a resharded or non-existent bucket" <<
-    entry.bucket_name << dendl;
-
-  ret = remove(dpp, entry, y);
-  if (ret < 0) {
-    ldpp_dout(dpp, 0) << __func__ <<
-      ": Error removing non-existent bucket " <<
-      entry.bucket_name << " from resharding queue: " <<
-      cpp_strerror(-ret) << dendl;
-    return ret;
-  }
-  // we cleaned up, move on to the next entry
-  return 0;
+  // if this is a reshard reduction, perform extra sanity checks, in
+  // part to avoid chasing a constantly changing entry count
+  if (entry.new_num_shards < entry.old_num_shards) {
+    const bool may_reduce =
+      store->ctx()->_conf.get_val<bool>("rgw_dynamic_resharding_may_reduce");
+    if (! may_reduce) {
+      return clean_up("current configuration does not allow reshard reduction");
+    }
+
+    // determine how many entries there are in the bucket index
+    std::map<RGWObjCategory, RGWStorageStats> stats;
+    ret = store->getRados()->get_bucket_stats(dpp, bucket_info,
+					      bucket_info.layout.current_index,
+					      -1, nullptr, nullptr, stats,
+					      nullptr, nullptr);
+    if (ret < 0) {
+      // without current stats we cannot safely re-verify the
+      // reduction; leave the entry for a later pass
+      ldpp_dout(dpp, 0) << "ERROR: " << __func__ <<
+	": unable to read stats for bucket \"" << entry.bucket_name <<
+	"\": " << cpp_strerror(-ret) << dendl;
+      return ret;
+    }
+
+    // determine current number of bucket entries across shards
+    uint64_t num_entries = 0;
+    for (const auto& s : stats) {
+      num_entries += s.second.num_objects;
+    }
+
+    const uint32_t current_shard_count =
+      rgw::num_shards(bucket_info.get_current_index().layout.normal);
+
+    bool needs_resharding { false };
+    uint32_t suggested_shard_count { 0 };
+
+    // this RGWRados method gathers the configuration values needed for
+    // the calculation and then calls
+    // RGWBucketReshard::calculate_preferred_shards()
+    store->getRados()->calculate_preferred_shards(
+      dpp, num_entries, current_shard_count,
+      needs_resharding, &suggested_shard_count);
+
+    // if we no longer need resharding, or we now need to expand the
+    // number of shards, drop this request
+    if (! needs_resharding || suggested_shard_count > current_shard_count) {
+      return clean_up("reshard reduction no longer appropriate");
+    }
+
+    // see if it's been long enough since this reshard queue entry was
+    // added to actually do the reshard reduction
+    ceph::real_time when_queued = entry.time;
+    ceph::real_time now = real_clock::now();
+
+    const uint32_t reshard_reduction_wait_period_hours =
+      uint32_t(store->ctx()->_conf.get_val<uint64_t>("rgw_dynamic_resharding_reduction_wait"));
+
+    // convert hours to seconds
+    auto timespan =
+      ceph::make_timespan(reshard_reduction_wait_period_hours * 60 * 60);
+    if (now < when_queued + timespan) {
+      // skip for now
+      ldpp_dout(dpp, 20) << __func__ <<
+	": INFO: reshard reduction for bucket \"" <<
+	entry.bucket_name << "\" will not proceed until " <<
+	(when_queued + timespan) << dendl;
+
+      return 0;
+    }
+
+    // all checks passed; we can drop through and proceed
   }
 
   if (!RGWBucketReshard::should_zone_reshard_now(bucket_info, store->svc()->zone)) {
-    ldpp_dout(dpp, 1) << "Bucket " << bucket_info.bucket << " is not "
-        "eligible for resharding until peer zones finish syncing one "
-        "or more of its old log generations" << dendl;
-    return remove(dpp, entry, y);
+    return clean_up("bucket not eligible for resharding until peer "
+		    "zones finish syncing one or more of its old log "
+		    "generations");
   }
 
+  // all checks passed; we can reshard...
+
   RGWBucketReshard br(store, bucket_info, bucket_attrs, nullptr);
 
   ReshardFaultInjector f; // no fault injected
@@ -1276,15 +1442,9 @@ int RGWReshard::process_entry(const cls_rgw_reshard_entry& entry,
     " removing reshard queue entry for bucket " << entry.bucket_name << dendl;
 
-  ret = remove(dpp, entry, y);
-  if (ret < 0) {
-    ldpp_dout(dpp, 0) << __func__ << ": Error removing bucket " <<
-      entry.bucket_name << " from resharding queue: " <<
-      cpp_strerror(-ret) << dendl;
-    return ret;
-  }
-  return 0;
-}
+  return clean_up();
+} // RGWReshard::process_entry
+
 
 int RGWReshard::process_single_logshard(int logshard_num, const DoutPrefixProvider *dpp, optional_yield y)
 {
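Because a queued reduction can sit on the reshard log for days before it is
acted on, it can be useful to inspect or override the queue manually. These
radosgw-admin subcommands already exist (only the common flags are shown;
consult the manpage for the release in use):

    # list queued reshard operations, including reductions still waiting out the delay
    radosgw-admin reshard list

    # run a processing pass immediately instead of waiting for the scheduled one
    radosgw-admin reshard process

    # drop a queued entry, e.g. a reduction the operator does not want
    radosgw-admin reshard cancel --bucket=<bucket-name>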
diff --git a/src/rgw/driver/rados/rgw_reshard.h b/src/rgw/driver/rados/rgw_reshard.h
index 8e37defa1db..0ff01308a64 100644
--- a/src/rgw/driver/rados/rgw_reshard.h
+++ b/src/rgw/driver/rados/rgw_reshard.h
@@ -24,6 +24,8 @@
 
 class RGWReshard;
+
+
 namespace rgw { namespace sal {
   class RadosStore;
 } }
@@ -114,6 +116,10 @@ public:
     return *std::crbegin(reshard_primes);
   }
 
+  static uint32_t get_min_prime_shards() {
+    return *std::cbegin(reshard_primes);
+  }
+
   // returns the prime in our list less than or equal to the
   // parameter; the lowest value that can be returned is 1
   static uint32_t get_prime_shards_less_or_equal(uint32_t requested_shards) {
@@ -142,37 +148,28 @@ public:
 
   // returns a preferred number of shards given a calculated number of
   // shards based on max_dynamic_shards and the list of prime values
-  static uint32_t get_preferred_shards(uint32_t suggested_shards,
-                                       uint32_t max_dynamic_shards) {
-
-    // use a prime if max is within our prime range, otherwise use
-    // specified max
-    const uint32_t absolute_max =
-      max_dynamic_shards >= get_max_prime_shards() ?
-      max_dynamic_shards :
-      get_prime_shards_less_or_equal(max_dynamic_shards);
-
-    // if we can use a prime number, use it, otherwise use suggested;
-    // note get_prime_shards_greater_or_equal will return 0 if no prime in
-    // prime range
-    const uint32_t prime_ish_num_shards =
-      std::max(get_prime_shards_greater_or_equal(suggested_shards),
-               suggested_shards);
-
-    // dynamic sharding cannot reshard more than defined maximum
-    const uint32_t final_num_shards =
-      std::min(prime_ish_num_shards, absolute_max);
-
-    return final_num_shards;
-  }
+  static uint32_t get_prime_shard_count(uint32_t suggested_shards,
+                                        uint32_t max_dynamic_shards,
+                                        uint32_t min_dynamic_shards);
+
+  static void calculate_preferred_shards(const DoutPrefixProvider* dpp,
+                                         const uint32_t max_dynamic_shards,
+                                         const uint64_t max_objs_per_shard,
+                                         const bool is_multisite,
+                                         const uint64_t num_objs,
+                                         const uint32_t current_shard_count,
+                                         bool& need_resharding,
+                                         uint32_t* suggested_shard_count,
+                                         bool prefer_prime = true);
 
   const std::map<std::string, bufferlist>& get_bucket_attrs() const {
     return bucket_attrs;
   }
 
-  // for multisite, the RGWBucketInfo keeps a history of old log generations
-  // until all peers are done with them. prevent this log history from growing
-  // too large by refusing to reshard the bucket until the old logs get trimmed
+  // for multisite, the RGWBucketInfo keeps a history of old log
+  // generations until all peers are done with them. prevent this log
+  // history from growing too large by refusing to reshard the bucket
+  // until the old logs get trimmed
   static constexpr size_t max_bilog_history = 4;
 
   static bool should_zone_reshard_now(const RGWBucketInfo& bucket,
diff --git a/src/rgw/rgw_quota.cc b/src/rgw/rgw_quota.cc
index 01f5c0cffdf..af7d9734627 100644
--- a/src/rgw/rgw_quota.cc
+++ b/src/rgw/rgw_quota.cc
@@ -958,30 +958,7 @@ public:
     bucket_stats_cache.adjust_stats(owner, bucket, obj_delta, added_bytes, removed_bytes);
     owner_stats_cache.adjust_stats(owner, bucket, obj_delta, added_bytes, removed_bytes);
   }
-
-  void check_bucket_shards(const DoutPrefixProvider *dpp, uint64_t max_objs_per_shard,
-                           uint64_t num_shards, uint64_t num_objs, bool is_multisite,
-                           bool& need_resharding, uint32_t *suggested_num_shards) override
-  {
-    if (num_objs > num_shards * max_objs_per_shard) {
-      ldpp_dout(dpp, 0) << __func__ << ": resharding needed: stats.num_objects=" << num_objs
-        << " shard max_objects=" << max_objs_per_shard * num_shards << dendl;
-      need_resharding = true;
-      if (suggested_num_shards) {
-        uint32_t obj_multiplier = 2;
-        if (is_multisite) {
-          // if we're maintaining bilogs for multisite, reshards are significantly
-          // more expensive. scale up the shard count much faster to minimize the
-          // number of reshard events during a write workload
-          obj_multiplier = 8;
-        }
-        *suggested_num_shards = num_objs * obj_multiplier / max_objs_per_shard;
-      }
-    } else {
-      need_resharding = false;
-    }
-  }
-};
+}; // class RGWQuotaHandlerImpl
 
 RGWQuotaHandler *RGWQuotaHandler::generate_handler(const DoutPrefixProvider *dpp,
                                                    rgw::sal::Driver* driver, bool quota_threads)
diff --git a/src/rgw/rgw_quota.h b/src/rgw/rgw_quota.h
index 7332c522d2b..4d5e04d528b 100644
--- a/src/rgw/rgw_quota.h
+++ b/src/rgw/rgw_quota.h
@@ -35,10 +35,6 @@ public:
                                  const rgw_bucket& bucket, const RGWQuota& quota,
                                  uint64_t num_objs, uint64_t size, optional_yield y) = 0;
 
-  virtual void check_bucket_shards(const DoutPrefixProvider *dpp, uint64_t max_objs_per_shard,
-                                   uint64_t num_shards, uint64_t num_objs, bool is_multisite,
-                                   bool& need_resharding, uint32_t *suggested_num_shards) = 0;
-
   virtual void update_stats(const rgw_owner& bucket_owner, rgw_bucket& bucket,
                             int obj_delta, uint64_t added_bytes, uint64_t removed_bytes) = 0;
 
   static RGWQuotaHandler *generate_handler(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, bool quota_threads);
diff --git a/src/test/rgw/test_rgw_reshard.cc b/src/test/rgw/test_rgw_reshard.cc
index da41b967f05..3513e644aa8 100644
--- a/src/test/rgw/test_rgw_reshard.cc
+++ b/src/test/rgw/test_rgw_reshard.cc
@@ -49,20 +49,22 @@ TEST(TestRGWReshard, dynamic_reshard_shard_count)
     "821 is prime";
 
   // tests when max dynamic shards is equal to end of prime list
-  ASSERT_EQ(1999u, RGWBucketReshard::get_preferred_shards(1998, 1999));
-  ASSERT_EQ(1999u, RGWBucketReshard::get_preferred_shards(1999, 1999));
-  ASSERT_EQ(1999u, RGWBucketReshard::get_preferred_shards(2000, 1999));
-  ASSERT_EQ(1999u, RGWBucketReshard::get_preferred_shards(2001, 1999));
+  ASSERT_EQ(1999u, RGWBucketReshard::get_prime_shard_count(1998, 1999, 11));
+  ASSERT_EQ(1999u, RGWBucketReshard::get_prime_shard_count(1999, 1999, 11));
+  ASSERT_EQ(1999u, RGWBucketReshard::get_prime_shard_count(2000, 1999, 11));
 
   // tests when max dynamic shards is above end of prime list
-  ASSERT_EQ(1999u, RGWBucketReshard::get_preferred_shards(1998, 3000));
-  ASSERT_EQ(1999u, RGWBucketReshard::get_preferred_shards(1999, 3000));
-  ASSERT_EQ(2000u, RGWBucketReshard::get_preferred_shards(2000, 3000));
-  ASSERT_EQ(2001u, RGWBucketReshard::get_preferred_shards(2001, 3000));
+  ASSERT_EQ(1999u, RGWBucketReshard::get_prime_shard_count(1998, 3000, 11));
+  ASSERT_EQ(1999u, RGWBucketReshard::get_prime_shard_count(1999, 3000, 11));
+  ASSERT_EQ(2000u, RGWBucketReshard::get_prime_shard_count(2000, 3000, 11));
+  ASSERT_EQ(2001u, RGWBucketReshard::get_prime_shard_count(2001, 3000, 11));
 
   // tests when max dynamic shards is below end of prime list
-  ASSERT_EQ(499u, RGWBucketReshard::get_preferred_shards(1998, 500));
-  ASSERT_EQ(499u, RGWBucketReshard::get_preferred_shards(1999, 500));
-  ASSERT_EQ(499u, RGWBucketReshard::get_preferred_shards(2000, 500));
-  ASSERT_EQ(499u, RGWBucketReshard::get_preferred_shards(2001, 500));
+  ASSERT_EQ(500u, RGWBucketReshard::get_prime_shard_count(1998, 500, 11));
+  ASSERT_EQ(500u, RGWBucketReshard::get_prime_shard_count(2001, 500, 11));
+
+  // tests interaction with the minimum dynamic shard count
+  ASSERT_EQ(499u, RGWBucketReshard::get_prime_shard_count(498, 1999, 499));
+  ASSERT_EQ(499u, RGWBucketReshard::get_prime_shard_count(499, 1999, 499));
+  ASSERT_EQ(503u, RGWBucketReshard::get_prime_shard_count(500, 1999, 499));
 }
-- 
2.39.5
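A follow-up test along the following lines could exercise the new reduction
path as well. This is only a sketch, not part of the patch: it assumes the
file's existing includes, that the defaults described above still apply
(100,000 max and 10,000 min entries per shard, minimum of 11 shards), and that
passing a null DoutPrefixProvider is tolerated by ldpp_dout (otherwise a
NoDoutPrefix instance would be needed):

    TEST(TestRGWReshard, dynamic_reshard_reduction_suggestion)
    {
      bool need_resharding = false;
      uint32_t suggested = 0;

      // 101 shards holding only 200,000 entries averages ~1,980 entries per
      // shard, well under the 10,000-entry reduction trigger; expect a
      // suggestion of 11 shards (the minimum, which is also prime)
      RGWBucketReshard::calculate_preferred_shards(nullptr /* dpp */,
                                                   1999 /* max dynamic shards */,
                                                   100000 /* max objs per shard */,
                                                   false /* is_multisite */,
                                                   200000 /* num objs */,
                                                   101 /* current shards */,
                                                   need_resharding,
                                                   &suggested);
      EXPECT_TRUE(need_resharding);
      EXPECT_EQ(11u, suggested);
    }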