With dynamic bucket index resharding, when the average number of
objects per shard exceeds the configured value, that bucket is
scheduled for reshard. That bucket may receive more new objects before
the resharding takes place. As a result, the existing code
re-calculates the number of new shards just prior to resharding,
rather than wasting a resharding opportunity with too low a value.
The same holds true for a user-scheduled resharding.
A user reported confusion that the number reported in `radosgw-admin
reshard list` wasn't the number that the reshard operation ultimately
used. This commit makes it clear that the new number of shards is
"tentative". And test_rgw_reshard.py is updated to reflect this
altered output.
Additionally this commit adds some modernization and efficiency to the
"reshard list" subcommand.
Signed-off-by: J. Eric Ivancich <ivancich@redhat.com>
(cherry picked from commit aa0071ce8b8594b92c0bed2be7a9bf35bfff8cac)
json_op = json.loads(cmd)
log.debug('bucket name %s', json_op[0]['bucket_name'])
assert json_op[0]['bucket_name'] == BUCKET_NAME1
- assert json_op[0]['new_num_shards'] == num_shards_expected
+ assert json_op[0]['tentative_new_num_shards'] == num_shards_expected
# TESTCASE 'reshard-process','reshard','','process bucket resharding','succeeds'
log.debug(' test: reshard process')
json_op = json.loads(cmd)
log.debug('bucket name %s', json_op[0]['bucket_name'])
assert json_op[0]['bucket_name'] == BUCKET_NAME1
- assert json_op[0]['new_num_shards'] == num_shards_expected
+ assert json_op[0]['tentative_new_num_shards'] == num_shards_expected
# TESTCASE 'reshard process ,'reshard','process','reshard non empty bucket','succeeds'
log.debug(' test: reshard process non empty bucket')
encode_json("bucket_id", bucket_id, f);
encode_json("new_instance_id", new_instance_id, f);
encode_json("old_num_shards", old_num_shards, f);
- encode_json("new_num_shards", new_num_shards, f);
-
+ encode_json("tentative_new_num_shards", new_num_shards, f);
}
void cls_rgw_reshard_entry::generate_test_instances(list<cls_rgw_reshard_entry*>& ls)
}
if (opt_cmd == OPT::RESHARD_LIST) {
- list<cls_rgw_reshard_entry> entries;
int ret;
int count = 0;
if (max_entries < 0) {
formatter->open_array_section("reshard");
for (int i = 0; i < num_logshards; i++) {
bool is_truncated = true;
- string marker;
+ std::string marker;
do {
- entries.clear();
+ std::list<cls_rgw_reshard_entry> entries;
ret = reshard.list(dpp(), i, marker, max_entries - count, entries, &is_truncated);
if (ret < 0) {
cerr << "Error listing resharding buckets: " << cpp_strerror(-ret) << std::endl;
return ret;
}
- for (auto iter=entries.begin(); iter != entries.end(); ++iter) {
- cls_rgw_reshard_entry& entry = *iter;
+ for (const auto& entry : entries) {
encode_json("entry", entry, formatter.get());
- entry.get_key(&marker);
}
+ if (is_truncated) {
+ entries.crbegin()->get_key(&marker); // last entry's key becomes marker
+ }
count += entries.size();
formatter->flush(cout);
} while (is_truncated && count < max_entries);
formatter->close_section();
formatter->flush(cout);
+
return 0;
}