params.list_versions = true;
params.allow_unordered = true;
+ const bool own_bucket = store->get_zone()->get_zonegroup().get_id() == info.zonegroup;
+
ListResults results;
+ results.is_truncated = own_bucket; // if we don't have the index, we're done
// 1. Check if Bucket has objects.
// If bucket contains objects and delete_children is true, delete all objects.
// Else throw error that bucket is not empty.
- do {
+ while (results.is_truncated) {
results.objs.clear();
// Check if bucket has objects.
return ret;
}
}
- } while(results.is_truncated);
+ }
// 2. Abort Mp uploads on the bucket.
- ret = abort_multiparts(dpp, store->ctx());
- if (ret < 0) {
- return ret;
+ if (own_bucket) {
+ ret = abort_multiparts(dpp, store->ctx());
+ if (ret < 0) {
+ return ret;
+ }
}
// 3. Remove mp index??
}
// 4. Sync user stats.
- ret = this->sync_owner_stats(dpp, y);
- if (ret < 0) {
- ldout(store->ctx(), 1) << "WARNING: failed sync user stats before bucket delete. ret=" << ret << dendl;
+ if (own_bucket) {
+ ret = this->sync_owner_stats(dpp, y);
+ if (ret < 0) {
+ ldout(store->ctx(), 1) << "WARNING: failed sync user stats before bucket delete. ret=" << ret << dendl;
+ }
}
// 5. Remove the bucket from user info index. (unlink user)
int RGWRados::check_bucket_empty(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, optional_yield y)
{
+ const auto& zonegroup = svc.site->get_zonegroup();
+ if (bucket_info.zonegroup != zonegroup.id) {
+ // don't check on this zone, the bucket's data belongs to another zonegroup
+ return 0;
+ }
+
constexpr uint NUM_ENTRIES = 1000u;
rgw_obj_index_key marker;
params.list_versions = true;
params.allow_unordered = true;
+ const bool own_bucket = store->get_zone()->get_zonegroup().get_id() == info.zonegroup;
+
ListResults results;
+ results.is_truncated = own_bucket; // if we don't have the index, we're done
- do {
+ while (results.is_truncated) {
results.objs.clear();
ret = list(dpp, params, 1000, results, y);
return ret;
}
}
- } while(results.is_truncated);
+ }
- ret = abort_multiparts(dpp, store->ctx(), y);
- if (ret < 0) {
- return ret;
+ if (own_bucket) {
+ ret = abort_multiparts(dpp, store->ctx(), y);
+ if (ret < 0) {
+ return ret;
+ }
}
// remove lifecycle config, if any (XXX note could be made generic)
}
librados::Rados& rados = *store->getRados()->get_rados_handle();
- ret = store->ctl()->bucket->sync_owner_stats(dpp, rados, info.owner, info, y, nullptr);
- if (ret < 0) {
- ldout(store->ctx(), 1) << "WARNING: failed sync user stats before bucket delete. ret=" << ret << dendl;
+ if (own_bucket) {
+ ret = store->ctl()->bucket->sync_owner_stats(dpp, rados, info.owner, info, y, nullptr);
+ if (ret < 0) {
+ ldout(store->ctx(), 1) << "WARNING: failed sync user stats before bucket delete. ret=" << ret << dendl;
+ }
}
RGWObjVersionTracker ot;
}
}
- op_ret = s->bucket->sync_owner_stats(this, y, nullptr);
- if ( op_ret < 0) {
- ldpp_dout(this, 1) << "WARNING: failed to sync user stats before bucket delete: op_ret= " << op_ret << dendl;
- }
+ const bool own_bucket = s->penv.site->get_zonegroup().get_id() == s->bucket->get_info().zonegroup;
- op_ret = s->bucket->check_empty(this, y);
- if (op_ret < 0) {
- return;
+ if (own_bucket) {
+ // only if we own the bucket
+ op_ret = s->bucket->sync_owner_stats(this, y, nullptr);
+ if (op_ret < 0) {
+ ldpp_dout(this, 1) << "WARNING: failed to sync user stats before bucket delete: op_ret= " << op_ret << dendl;
+ }
+
+ op_ret = s->bucket->check_empty(this, y);
+ if (op_ret < 0) {
+ return;
+ }
}
op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->owner.id,
return;
}
- op_ret = rgw_remove_sse_s3_bucket_key(s, y);
- if (op_ret != 0) {
- // do nothing; it will already have been logged
+ if (own_bucket) {
+ op_ret = rgw_remove_sse_s3_bucket_key(s, y);
+ if (op_ret != 0) {
+ // do nothing; it will already have been logged
+ }
}
op_ret = s->bucket->remove(this, false, y);
assert check_all_buckets_exist(zone, buckets)
def test_bucket_remove():
- zonegroup = realm.master_zonegroup()
- zonegroup_conns = ZonegroupConns(zonegroup)
- buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns)
- zonegroup_meta_checkpoint(zonegroup)
+ for zonegroup in realm.current_period.zonegroups:
+ zonegroup_conns = ZonegroupConns(zonegroup)
+ buckets, zone_buckets = create_bucket_per_zone(zonegroup_conns)
+ zonegroup_meta_checkpoint(zonegroup)
- for zone in zonegroup_conns.zones:
- assert check_all_buckets_exist(zone, buckets)
+ for zone in zonegroup_conns.zones:
+ assert check_all_buckets_exist(zone, buckets)
- for zone, bucket_name in zone_bucket:
- zone.conn.delete_bucket(bucket_name)
+ for zone, bucket_name in zone_buckets:
+ zone.conn.delete_bucket(bucket_name)
- zonegroup_meta_checkpoint(zonegroup)
+ zonegroup_meta_checkpoint(zonegroup)
- for zone in zonegroup_conns.zones:
- assert check_all_buckets_dont_exist(zone, buckets)
+ for zone in zonegroup_conns.zones:
+ assert check_all_buckets_dont_exist(zone, buckets)
def get_bucket(zone, bucket_name):
    # Look up the named bucket through this zone's S3 connection.
    conn = zone.conn
    return conn.get_bucket(bucket_name)