return -EBUSY;
}
- int num_source_shards = ((*bucket)->get_info().layout.current_index.layout.normal.num_shards > 0 ? (*bucket)->get_info().layout.current_index.layout.normal.num_shards : 1);
+ int num_source_shards = rgw::current_num_shards((*bucket)->get_info().layout);
if (num_shards <= num_source_shards && !yes_i_really_mean_it) {
cerr << "num shards is less or equal to current shards count" << std::endl
}
const auto& index = bucket->get_info().layout.current_index;
- int max_shards = index.layout.normal.num_shards;
+ const int max_shards = rgw::num_shards(index);
formatter->open_array_section("entries");
}
const auto& index = bucket->get_info().layout.current_index;
- int max_shards = index.layout.normal.num_shards;
+ if (index.layout.type == rgw::BucketIndexType::Indexless) {
+ cerr << "ERROR: indexless bucket has no index to purge" << std::endl;
+ return EINVAL;
+ }
+ const int max_shards = rgw::num_shards(index);
for (int i = 0; i < max_shards; i++) {
RGWRados::BucketShard bs(static_cast<rgw::sal::RadosStore*>(store)->getRados());
int shard_id = (bucket->get_info().layout.current_index.layout.normal.num_shards > 0 ? i : -1);
return ret;
}
- int num_source_shards = (bucket->get_info().layout.current_index.layout.normal.num_shards > 0 ? bucket->get_info().layout.current_index.layout.normal.num_shards : 1);
+ int num_source_shards = rgw::current_num_shards(bucket->get_info().layout);
RGWReshard reshard(static_cast<rgw::sal::RadosStore*>(store), dpp());
cls_rgw_reshard_entry entry;
static int purge_bucket_instance(rgw::sal::Store* store, const RGWBucketInfo& bucket_info, const DoutPrefixProvider *dpp)
{
const auto& index = bucket_info.layout.current_index;
- int max_shards = index.layout.normal.num_shards;
+ const int max_shards = num_shards(index);
for (int i = 0; i < max_shards; i++) {
RGWRados::BucketShard bs(static_cast<rgw::sal::RadosStore*>(store)->getRados());
int ret = bs.init(bucket_info.bucket, i, index, nullptr, dpp);
void encode(const BucketLayout& l, bufferlist& bl, uint64_t f=0);
void decode(BucketLayout& l, bufferlist::const_iterator& bl);
+
+// Accessor overloads for bucket-index shard counts, so call sites do not
+// have to reach through layout internals (layout.current_index.layout.normal
+// .num_shards) by hand.
+inline uint32_t num_shards(const bucket_index_normal_layout& index) {
+ return index.num_shards;
+}
+// Shard count of a generic index layout. Asserts the index type is Normal:
+// an Indexless layout has no shards, so callers must rule that case out
+// first (e.g. the indexless guard in the bi-purge path of this change).
+inline uint32_t num_shards(const bucket_index_layout& index) {
+ ceph_assert(index.type == BucketIndexType::Normal);
+ return num_shards(index.normal);
+}
+// Shard count of one specific index generation.
+inline uint32_t num_shards(const bucket_index_layout_generation& index) {
+ return num_shards(index.layout);
+}
+// Shard count of the bucket's current (active) index generation.
+// NOTE(review): unlike the old inline ternaries this does not map 0 -> 1;
+// presumably the current layout always records >= 1 shard — confirm.
+inline uint32_t current_num_shards(const BucketLayout& layout) {
+ return num_shards(layout.current_index);
+}
+
} // namespace rgw
}
bool need_resharding = false;
- uint32_t num_source_shards =
- (bucket_info.layout.current_index.layout.normal.num_shards > 0 ? bucket_info.layout.current_index.layout.normal.num_shards : 1);
+ uint32_t num_source_shards = rgw::current_num_shards(bucket_info.layout);
const uint32_t max_dynamic_shards =
uint32_t(cct->_conf.get_val<uint64_t>("rgw_max_dynamic_shards"));
{
RGWReshard reshard(this->store, dpp);
- uint32_t num_source_shards = (bucket_info.layout.current_index.layout.normal.num_shards > 0 ? bucket_info.layout.current_index.layout.normal.num_shards : 1);
+ uint32_t num_source_shards = rgw::current_num_shards(bucket_info.layout);
new_num_shards = std::min(new_num_shards, get_max_bucket_shards());
if (new_num_shards <= num_source_shards) {
bool old_sync_enabled = orig_info.datasync_flag_enabled();
if (old_sync_enabled != new_sync_enabled) {
- int shards_num = info.layout.current_index.layout.normal.num_shards? info.layout.current_index.layout.normal.num_shards : 1;
+ int shards_num = rgw::current_num_shards(info.layout);
int shard_id = info.layout.current_index.layout.normal.num_shards? 0 : -1;
const auto& log_layout = info.layout.logs.back();