formatter->open_array_section("objects");
constexpr uint32_t NUM_ENTRIES = 1000;
- uint16_t attempt = 1;
+ uint16_t expansion_factor = 1;
while (is_truncated) {
map<string, rgw_bucket_dir_entry> result;
int r =
store->cls_bucket_list_ordered(bucket_info, RGW_NO_SHARD, marker,
- prefix, NUM_ENTRIES, true, attempt,
+ prefix, NUM_ENTRIES, true, expansion_factor,
result, &is_truncated, &marker,
bucket_object_check_filter);
if (r < 0 && r != -ENOENT) {
}
if (result.size() < NUM_ENTRIES / 8) {
- ++attempt;
- } else if (result.size() > NUM_ENTRIES * 7 / 8 && attempt > 1) {
- --attempt;
+ ++expansion_factor;
+ } else if (result.size() > NUM_ENTRIES * 7 / 8 &&
+ expansion_factor > 1) {
+ --expansion_factor;
}
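// illustrative note: with NUM_ENTRIES = 1000 the factor grows whenever a
// round returns fewer than 125 entries (heavy filtering) and shrinks once a
// round returns more than 875, so batches converge toward full size without
// over-reading lightly filtered listings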
map<string, rgw_bucket_dir_entry>::iterator iter;
#define BUCKET_TAG_TIMEOUT 30
+// default number of entries to list with each bucket listing call
+// (use marker to bridge between calls)
+static constexpr size_t listing_max_entries = 1000;
+
static RGWMetadataHandler *bucket_meta_handler = NULL;
static RGWMetadataHandler *bucket_instance_meta_handler = NULL;
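// A minimal, self-contained sketch of the pattern the listing loops below
// follow: each call returns at most `max_entries` results and advances the
// marker so the next call resumes where the previous one stopped. ListBatchFn
// is a hypothetical stand-in for a single cls_bucket_list_ordered call.
#include <functional>
#include <map>
#include <string>

using ListBatchFn = std::function<int(const std::string& marker,
                                      size_t max_entries,
                                      std::map<std::string, std::string>& out,
                                      bool* is_truncated,
                                      std::string* next_marker)>;

inline int list_everything(ListBatchFn list_one_batch, size_t max_entries) {
  std::string marker;            // empty marker: start of the bucket
  bool is_truncated = true;
  while (is_truncated) {
    std::map<std::string, std::string> result;
    int r = list_one_batch(marker, max_entries, result, &is_truncated, &marker);
    if (r < 0) {
      return r;                  // listing failed
    }
    // ... process up to max_entries results, then loop with the new marker ...
  }
  return 0;
}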
Formatter *formatter = flusher.get_formatter();
formatter->open_object_section("objects");
- constexpr uint32_t NUM_ENTRIES = 1000;
- uint16_t attempt = 1;
+ uint16_t expansion_factor = 1;
while (is_truncated) {
map<string, rgw_bucket_dir_entry> result;
int r = store->cls_bucket_list_ordered(bucket_info, RGW_NO_SHARD,
- marker, prefix, NUM_ENTRIES, true, attempt,
+ marker, prefix,
+ listing_max_entries, true,
+ expansion_factor,
result, &is_truncated, &marker,
bucket_object_check_filter);
if (r == -ENOENT) {
break;
} else if (r < 0) {
set_err_msg(err_msg, "ERROR: failed operation r=" + cpp_strerror(-r));
}
- if (result.size() < NUM_ENTRIES / 8) {
- ++attempt;
- } else if (result.size() > NUM_ENTRIES * 7 / 8 && attempt > 1) {
- --attempt;
+ if (result.size() < listing_max_entries / 8) {
+ ++expansion_factor;
+ } else if (result.size() > listing_max_entries * 7 / 8 &&
+ expansion_factor > 1) {
+ --expansion_factor;
}
dump_bucket_index(result, formatter);
for (auto eiter = ent_map.begin(); eiter != ent_map.end(); ++eiter) {
rgw_bucket_dir_entry& entry = eiter->second;
rgw_obj_index_key index_key = entry.key;
-
rgw_obj_key obj(index_key);
+ ldout(cct, 20) << "RGWRados::Bucket::List::" << __func__ <<
+ " considering entry " << entry.key << dendl;
+
/* note that parse_raw_oid() here will not set the correct
* object's instance, as rgw_obj_index_key encodes that
* separately. We don't need to set the instance because it's
continue;
}
- bool check_ns = (obj.ns == params.ns);
+ bool matched_ns = (obj.ns == params.ns);
if (!params.list_versions && !entry.is_visible()) {
continue;
}
- if (params.enforce_ns && !check_ns) {
+ if (params.enforce_ns && !matched_ns) {
if (!params.ns.empty()) {
/* we've iterated past the namespace we're searching -- done now */
truncated = false;
const string& prefix,
const uint32_t num_entries,
const bool list_versions,
- const uint16_t attempt,
+ const uint16_t expansion_factor,
map<string, rgw_bucket_dir_entry>& m,
bool *is_truncated,
rgw_obj_index_key *last_entry,
bool (*force_check_filter)(const string& name))
{
+ /* expansion_factor allows the number of entries to read to grow
+ * exponentially; this is used when earlier reads are producing too
+ * few results, perhaps due to filtering or to a series of
+ * namespaced entries */
+
ldout(cct, 10) << "RGWRados::" << __func__ << ": " << bucket_info.bucket <<
" start_after=\"" << start_after.name <<
"[" << start_after.instance <<
"]\", prefix=\"" << prefix <<
"\" num_entries=" << num_entries <<
", list_versions=" << list_versions <<
- ", attempt=" << attempt << dendl;
+ ", expansion_factor=" << expansion_factor << dendl;
librados::IoCtx index_ctx;
// key - oid (for different shards, if there are any)
const uint32_t shard_count = oids.size();
uint32_t num_entries_per_shard;
- if (attempt == 0) {
+ if (expansion_factor == 0) {
num_entries_per_shard =
calc_ordered_bucket_list_per_shard(num_entries, shard_count);
- } else if (attempt <= 11) {
+ } else if (expansion_factor <= 11) {
// we'll max out the exponential multiplication factor at 1024 (1 << 10)
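// (illustrative: expansion_factor 1 -> x1, 2 -> x2, 3 -> x4, ...,
// 11 -> x1024; the result is always capped at num_entries by std::min)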
num_entries_per_shard =
std::min(num_entries,
- (uint32_t(1 << (attempt - 1)) *
+ (uint32_t(1 << (expansion_factor - 1)) *
calc_ordered_bucket_list_per_shard(num_entries, shard_count)));
} else {
num_entries_per_shard = num_entries;
return r;
}
- // Create a list of iterators that are used to iterate each shard
+ // create a list of iterators that are used to iterate each shard
vector<map<string, struct rgw_bucket_dir_entry>::iterator> vcurrents;
vector<map<string, struct rgw_bucket_dir_entry>::iterator> vends;
vector<string> vnames;
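// A minimal, self-contained sketch of the per-shard sizing logic above,
// useful for seeing the numbers in isolation; calc_per_shard is a
// hypothetical stand-in for calc_ordered_bucket_list_per_shard.
#include <algorithm>
#include <cstdint>

static uint32_t calc_per_shard(uint32_t num_entries, uint32_t shard_count) {
  // hypothetical placeholder: an even split with a small floor per shard
  return std::max(num_entries / std::max(shard_count, 1u), 8u);
}

static uint32_t entries_per_shard(uint32_t num_entries, uint32_t shard_count,
                                  uint16_t expansion_factor) {
  if (expansion_factor == 0) {
    // no expansion requested: plain per-shard split
    return calc_per_shard(num_entries, shard_count);
  } else if (expansion_factor <= 11) {
    // double the per-shard read with each factor step, capped at num_entries
    return std::min(num_entries,
                    uint32_t(1 << (expansion_factor - 1)) *
                        calc_per_shard(num_entries, shard_count));
  } else {
    // factor is saturated: just ask every shard for the full count
    return num_entries;
  }
}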