There was an issue when a limit was set: we didn't break
out of the iteration loop once the limit was reached. Also,
S3 does not enforce any limit, so keep that behavior there.
Signed-off-by: Yehuda Sadeh <yehuda@inktank.com>
   do {
     RGWUserBuckets buckets;
-    uint64_t read_count = min(limit - total_count, max_buckets);
+    uint64_t read_count;
+    if (limit > 0)
+      read_count = min(limit - total_count, max_buckets);
+    else
+      read_count = max_buckets;
+
     ret = rgw_read_user_buckets(store, s->user.user_id, buckets,
                                 marker, read_count, should_get_stats());
     total_count += m.size();
-    done = (m.size() < read_count || total_count == limit);
+    done = (m.size() < read_count || (limit > 0 && total_count == limit));
     if (m.size()) {
       send_response_data(buckets);
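
For reference, here is a minimal, self-contained sketch of the paging
logic after this change. read_page() and the plain bucket-name strings
are hypothetical stand-ins for rgw_read_user_buckets() and
RGWUserBuckets; what it models is the limit == 0 ("no limit") case and
the corrected done condition.

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <vector>

    // Hypothetical stand-in for rgw_read_user_buckets(): returns up to
    // `count` bucket names from the sorted list, starting after `marker`.
    static std::vector<std::string> read_page(const std::vector<std::string>& all,
                                              const std::string& marker,
                                              uint64_t count) {
      std::vector<std::string> out;
      auto it = marker.empty() ? all.begin()
                               : std::upper_bound(all.begin(), all.end(), marker);
      for (; it != all.end() && out.size() < count; ++it)
        out.push_back(*it);
      return out;
    }

    int main() {
      std::vector<std::string> all = {"a", "b", "c", "d", "e"};
      const uint64_t max_buckets = 2;  // per-call read size
      uint64_t limit = 3;              // 0 means "no limit" (the S3 case)

      uint64_t total_count = 0;
      std::string marker;
      bool done = false;

      do {
        // Same shape as the patched loop: only cap the read when a
        // positive limit was requested.
        uint64_t read_count;
        if (limit > 0)
          read_count = std::min(limit - total_count, max_buckets);
        else
          read_count = max_buckets;

        std::vector<std::string> m = read_page(all, marker, read_count);
        total_count += m.size();

        // Corrected termination: a short read ends the listing, and
        // hitting the limit ends it too, but only when a limit is in
        // effect.
        done = (m.size() < read_count || (limit > 0 && total_count == limit));

        for (const auto& name : m) {
          std::cout << name << "\n";
          marker = name;
        }
      } while (!done);
    }

Running it with limit = 3 prints a, b, c and stops mid-listing; with
limit = 0 it pages through everything and stops on the short read.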
   RGWListBuckets_ObjStore_S3() {}
   ~RGWListBuckets_ObjStore_S3() {}
-  int get_params() { return 0; }
+  int get_params() {
+    limit = 0; /* no limit */
+    return 0;
+  }
   virtual void send_response_begin(bool has_buckets);
   virtual void send_response_data(RGWUserBuckets& buckets);
   virtual void send_response_end();
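
S3's bucket listing takes no limit parameter, so the S3 subclass pins
limit to 0 and the shared loop above runs until a short read. For
contrast, a frontend that does honor a client-supplied limit (Swift
exposes one via its "limit" query parameter) would set it in
get_params(). The sketch below is purely illustrative; the struct and
its interface are invented for this example and are not Ceph's actual
classes.

    #include <cerrno>
    #include <cstdint>
    #include <cstdlib>
    #include <string>

    // Hypothetical counterpart for a frontend with a client-supplied
    // limit. limit_str is the raw value of the client's "limit"
    // parameter, empty if the parameter was absent.
    struct ListBucketsWithLimit {
      uint64_t limit = 0;  /* 0 means "no limit", as in the patch */

      int get_params(const std::string& limit_str) {
        if (limit_str.empty()) {
          limit = 0;  /* nothing requested: unlimited, like S3 */
          return 0;
        }
        char* end = nullptr;
        unsigned long long v = std::strtoull(limit_str.c_str(), &end, 10);
        if (end == limit_str.c_str() || *end != '\0')
          return -EINVAL;  /* reject non-numeric input */
        limit = v;
        return 0;
      }
    };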