From 9e7385e0762c297dcdf66cb3e06bf99eaa91f4ca Mon Sep 17 00:00:00 2001
From: Shilpa Jagannath
Date: Wed, 8 Jan 2025 12:08:02 -0500
Subject: [PATCH] rgw/multisite: check for both zonegroup and bucket level sync
 policies when returning enotempty during bucket deletion

Signed-off-by: Shilpa Jagannath
---
 src/rgw/driver/rados/rgw_rados.cc |  15 +-
 src/rgw/rgw_sync_policy.h         |  10 ++
 src/test/rgw/rgw_multi/tests.py   | 245 +++++++++++++++++++++++++++++-
 3 files changed, 262 insertions(+), 8 deletions(-)

diff --git a/src/rgw/driver/rados/rgw_rados.cc b/src/rgw/driver/rados/rgw_rados.cc
index d7fd7ae29c4..0bef9ac672d 100644
--- a/src/rgw/driver/rados/rgw_rados.cc
+++ b/src/rgw/driver/rados/rgw_rados.cc
@@ -5682,11 +5682,18 @@ int RGWRados::delete_bucket(RGWBucketInfo& bucket_info, std::map
   if (svc.zone->is_syncing_bucket_meta(bucket)) {
-    auto bs_policy = bucket_info.sync_policy;
-    if (bs_policy) {
-      ldpp_dout(dpp, 10) << "bucket policy exists. listing remote zones" << dendl;
-      const rgw_zone_id source_zone = svc.zone->get_zone_params().get_id();
+    // check if asymmetric replication policy exists either at zonegroup or bucket level
+    auto zg_sync_policy = svc.zone->get_zonegroup().sync_policy;
+    bool is_zg_policy_directional = zg_sync_policy.is_directional();
+    bool is_bucket_policy_directional = false;
+    auto bucket_sync_policy = bucket_info.sync_policy;
+    if (bucket_sync_policy) {
+      is_bucket_policy_directional = bucket_sync_policy->is_directional();
+    }
+    if (is_zg_policy_directional || is_bucket_policy_directional) {
+      ldpp_dout(dpp, 10) << "sync policy exists. listing remote zones" << dendl;
+      const rgw_zone_id source_zone = svc.zone->get_zone_params().get_id();
       r = list_remote_buckets(dpp, driver, source_zone, bucket, y);
       if (r == -ENOTEMPTY) {
         ldpp_dout(dpp, 0) << "ERROR: cannot delete bucket. objects exist in the bucket on another zone " << dendl;
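
Note: the gist of the rgw_rados.cc change is that bucket deletion now consults
remote zones whenever a directional (asymmetric) data flow is configured at
either the zonegroup or the bucket level, instead of only when a bucket-level
policy exists. A rough Python sketch of that decision flow follows; it is not
RGW code, and the helper names (zg_policy, remote_zones_have_objects) are
illustrative placeholders:

    # Sketch of the new gating logic added to delete_bucket() above.
    def bucket_delete_blocked(zg_policy, bucket_policy, remote_zones_have_objects):
        zg_directional = zg_policy.is_directional()
        bucket_directional = bucket_policy.is_directional() if bucket_policy else False
        if zg_directional or bucket_directional:
            # With asymmetric replication another zone may hold objects that were
            # never synced back to this zone, so ask the remote zones first.
            return remote_zones_have_objects()  # maps to -ENOTEMPTY in the C++ change
        return False
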
diff --git a/src/rgw/rgw_sync_policy.h b/src/rgw/rgw_sync_policy.h
index 062fb115324..fe720cb6195 100644
--- a/src/rgw/rgw_sync_policy.h
+++ b/src/rgw/rgw_sync_policy.h
@@ -675,6 +675,16 @@ struct rgw_sync_policy_info {
     return groups.empty();
   }
 
+  bool is_directional() const {
+    for (auto& item : groups) {
+      auto& group = item.second;
+      if (!group.data_flow.directional.empty()) {
+        return true;
+      }
+    }
+    return false;
+  }
+
   void get_potential_related_buckets(const rgw_bucket& bucket,
                                      std::set<rgw_bucket> *sources,
                                      std::set<rgw_bucket> *dests) const;
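
Note: is_directional() reports whether any policy group carries a directional
data flow. For reference, this is roughly the shape of the policy document
returned by `radosgw-admin sync policy get` (abbreviated to the fields relevant
here; zone names and group id are examples), with an equivalent check in Python:

    # Abbreviated sync-policy document; values are illustrative.
    policy = {
        "groups": [
            {
                "id": "sync-group",
                "data_flow": {
                    "directional": [
                        {"source_zone": "zoneA", "dest_zone": "zoneB"},
                    ],
                },
                "status": "enabled",
            },
        ],
    }

    def is_directional(policy):
        # Mirrors rgw_sync_policy_info::is_directional(): true as soon as any
        # group has a non-empty "directional" flow list.
        return any(group.get("data_flow", {}).get("directional")
                   for group in policy.get("groups", []))

    assert is_directional(policy)
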
diff --git a/src/test/rgw/rgw_multi/tests.py b/src/test/rgw/rgw_multi/tests.py
index 34e1d57438f..b914102d137 100644
--- a/src/test/rgw/rgw_multi/tests.py
+++ b/src/test/rgw/rgw_multi/tests.py
@@ -2615,7 +2615,7 @@ def check_objects_not_exist(bucket, obj_arr):
         check_object_not_exists(bucket, objname)
 
 @attr('sync_policy')
-def test_bucket_delete_with_sync_policy():
+def test_bucket_delete_with_bucket_sync_policy_directional():
 
     zonegroup = realm.master_zonegroup()
     zonegroup_conns = ZonegroupConns(zonegroup)
@@ -2651,6 +2651,7 @@ def test_bucket_delete_with_sync_policy():
     buckets.append(bucketA)
     create_sync_policy_group(c1, "sync-bucket", "allowed", bucketA.name)
     create_sync_group_flow_directional(c1, "sync-bucket", "sync-flow-bucket", zoneA.name, zoneB.name, bucketA.name)
+    #create_sync_group_flow_symmetrical(c1, "sync-bucket", "sync-flow-bucket", zones, bucketA.name)
     create_sync_group_pipe(c1, "sync-bucket", "sync-pipe", zoneA.name, zoneB.name, bucketA.name)
     set_sync_policy_group_status(c1, "sync-bucket", "enabled", bucketA.name)
 
@@ -2679,20 +2680,256 @@
     bucket = get_bucket(zcA, bucketA.name)
     check_objects_not_exist(bucket, objnameB)
 
-    log.debug('deleting object')
+    log.debug('deleting object on zone A')
     k = get_key(zcA, bucket, objnameA)
     k.delete()
-    
+
     zone_bucket_checkpoint(zoneA, zoneB, bucketA.name)
-    
+
     # delete bucket on zoneA. it should fail to delete
     log.debug('deleting bucket')
     assert_raises(boto.exception.S3ResponseError, zcA.delete_bucket, bucketA.name)
 
     assert check_all_buckets_exist(zcA, buckets)
     assert check_all_buckets_exist(zcB, buckets)
+
+    log.debug('deleting object on zone B')
+    k = get_key(zcB, bucket, objnameB)
+    k.delete()
+    time.sleep(config.checkpoint_delay)
+
+    # retry deleting bucket after removing the object from zone B. should succeed
+    log.debug('retry deleting bucket')
+    zcA.delete_bucket(bucketA.name)
+
+    zonegroup_meta_checkpoint(zonegroup)
+
+    assert check_all_buckets_dont_exist(zcA, buckets)
+    assert check_all_buckets_dont_exist(zcB, buckets)
+
+    return
+
+@attr('sync_policy')
+def test_bucket_delete_with_bucket_sync_policy_symmetric():
+
+    zonegroup = realm.master_zonegroup()
+    zonegroup_conns = ZonegroupConns(zonegroup)
+
+    zonegroup_meta_checkpoint(zonegroup)
+
+    (zoneA, zoneB) = zonegroup.zones[0:2]
+    (zcA, zcB) = zonegroup_conns.zones[0:2]
+
+    c1 = zoneA.cluster
+
+    # configure sync policy
+    zones = zoneA.name + ',' + zoneB.name
+    c1.admin(['sync', 'policy', 'get'])
+    create_sync_policy_group(c1, "sync-group")
+    create_sync_group_flow_symmetrical(c1, "sync-group", "sync-flow", zones)
+    create_sync_group_pipe(c1, "sync-group", "sync-pipe", zones, zones)
+    set_sync_policy_group_status(c1, "sync-group", "allowed")
+
+    zonegroup.period.update(zoneA, commit=True)
+    get_sync_policy(c1)
+
+    """
+    configure symmetrical policy at bucketA level with src and dest
+    zones specified to zoneA and zoneB resp.
+    """
+
+    # configure sync policy for only bucketA and enable it
+    bucketA = create_zone_bucket(zcA)
+    buckets = []
+    buckets.append(bucketA)
+    create_sync_policy_group(c1, "sync-bucket", "allowed", bucketA.name)
+    create_sync_group_flow_symmetrical(c1, "sync-bucket", "sync-flow-bucket", zones, bucketA.name)
+    create_sync_group_pipe(c1, "sync-bucket", "sync-pipe", zones, zones, bucketA.name)
+    set_sync_policy_group_status(c1, "sync-bucket", "enabled", bucketA.name)
+
+    get_sync_policy(c1, bucketA.name)
+
+    zonegroup_meta_checkpoint(zonegroup)
+
+    # create bucketA and objects in zoneA and zoneB
+    objnameA = 'a'
+    objnameB = 'b'
+
+    # upload object in each zone and wait for sync.
+    k = new_key(zcA, bucketA, objnameA)
+    k.set_contents_from_string('foo')
+    k = new_key(zcB, bucketA, objnameB)
+    k.set_contents_from_string('foo')
+
+    zonegroup_meta_checkpoint(zonegroup)
+    zone_data_checkpoint(zoneB, zoneA)
+
+    log.debug('deleting object A')
+    k = get_key(zcA, bucketA, objnameA)
+    k.delete()
+
+    log.debug('deleting object B')
+    k = get_key(zcA, bucketA, objnameB)
+    k.delete()
+
+    zone_bucket_checkpoint(zoneA, zoneB, bucketA.name)
+    zone_data_checkpoint(zoneB, zoneA)
+
+    # delete bucket on zoneA.
+    log.debug('deleting bucket')
+    zcA.delete_bucket(bucketA.name)
+    zonegroup_meta_checkpoint(zonegroup)
+
+    assert check_all_buckets_dont_exist(zcA, buckets)
+    assert check_all_buckets_dont_exist(zcB, buckets)
+    return
+
+@attr('sync_policy')
+def test_bucket_delete_with_zonegroup_sync_policy_symmetric():
+
+    zonegroup = realm.master_zonegroup()
+    zonegroup_conns = ZonegroupConns(zonegroup)
+
+    zonegroup_meta_checkpoint(zonegroup)
+
+    (zoneA, zoneB) = zonegroup.zones[0:2]
+    (zcA, zcB) = zonegroup_conns.zones[0:2]
+
+    c1 = zoneA.cluster
+
+    # configure symmetric sync policy
+    zones = zoneA.name + ',' + zoneB.name
+    c1.admin(['sync', 'policy', 'get'])
+    create_sync_policy_group(c1, "sync-group")
+    create_sync_group_flow_symmetrical(c1, "sync-group", "sync-flow", zones)
+    create_sync_group_pipe(c1, "sync-group", "sync-pipe", zones, zones)
+    set_sync_policy_group_status(c1, "sync-group", "enabled")
+
+    zonegroup.period.update(zoneA, commit=True)
+    get_sync_policy(c1)
+
+    # configure sync policy for only bucketA and enable it
+    bucketA = create_zone_bucket(zcA)
+    buckets = []
+    buckets.append(bucketA)
+
+    time.sleep(config.checkpoint_delay)
+    zonegroup_meta_checkpoint(zonegroup)
+
+    # create bucketA and objects in zoneA and zoneB
+    objnameA = 'a'
+    objnameB = 'b'
+
+    # upload object in each zone and wait for sync.
+    k = new_key(zcA, bucketA, objnameA)
+    k.set_contents_from_string('foo')
+    k = new_key(zcB, bucketA, objnameB)
+    k.set_contents_from_string('foo')
+
+    zonegroup_meta_checkpoint(zonegroup)
+    zone_data_checkpoint(zoneB, zoneA)
+
+    log.debug('deleting object A')
+    k = get_key(zcA, bucketA, objnameA)
+    k.delete()
+
+    log.debug('deleting object B')
+    k = get_key(zcA, bucketA, objnameB)
+    k.delete()
+
+    zone_bucket_checkpoint(zoneA, zoneB, bucketA.name)
+    zone_data_checkpoint(zoneB, zoneA)
+
+    # delete bucket on zoneA.
+    log.debug('deleting bucket')
+    zcA.delete_bucket(bucketA.name)
+    zonegroup_meta_checkpoint(zonegroup)
+
+    assert check_all_buckets_dont_exist(zcA, buckets)
+    assert check_all_buckets_dont_exist(zcB, buckets)
+    return
+
+@attr('sync_policy')
+def test_bucket_delete_with_zonegroup_sync_policy_directional():
+
+    zonegroup = realm.master_zonegroup()
+    zonegroup_conns = ZonegroupConns(zonegroup)
+
+    zonegroup_meta_checkpoint(zonegroup)
+
+    (zoneA, zoneB) = zonegroup.zones[0:2]
+    (zcA, zcB) = zonegroup_conns.zones[0:2]
+
+    c1 = zoneA.cluster
+
+    # configure sync policy
+    zones = zoneA.name + ',' + zoneB.name
+    c1.admin(['sync', 'policy', 'get'])
+    create_sync_policy_group(c1, "sync-group")
+    create_sync_group_flow_directional(c1, "sync-group", "sync-flow1", zoneA.name, zoneB.name)
+    create_sync_group_pipe(c1, "sync-group", "sync-pipe", zoneA.name, zoneB.name)
+    set_sync_policy_group_status(c1, "sync-group", "enabled")
+
+    zonegroup.period.update(zoneA, commit=True)
+    get_sync_policy(c1)
+
+    # configure sync policy for only bucketA and enable it
+    bucketA = create_zone_bucket(zcA)
+    buckets = []
+    buckets.append(bucketA)
+
+    time.sleep(config.checkpoint_delay)
+    zonegroup_meta_checkpoint(zonegroup)
+
+    # create bucketA and objects in zoneA and zoneB
+    objnameA = 'a'
+    objnameB = 'b'
+
+    # upload object in each zone and wait for sync.
+    k = new_key(zcA, bucketA, objnameA)
+    k.set_contents_from_string('foo')
+    k = new_key(zcB, bucketA, objnameB)
+    k.set_contents_from_string('foo')
+
+    zonegroup_meta_checkpoint(zonegroup)
+    zone_data_checkpoint(zoneB, zoneA)
+
+    # verify that objnameA is synced to bucketA in zoneB
+    bucket = get_bucket(zcB, bucketA.name)
+    check_objects_exist(bucket, objnameA)
+
+    # verify that objnameB is not synced to bucketA in zoneA
+    bucket = get_bucket(zcA, bucketA.name)
+    check_objects_not_exist(bucket, objnameB)
+
+    log.debug('deleting object on zone A')
+    k = get_key(zcA, bucket, objnameA)
+    k.delete()
+
+    zone_bucket_checkpoint(zoneA, zoneB, bucketA.name)
+
+    # delete bucket on zoneA. it should fail to delete
+    log.debug('deleting bucket')
+    assert_raises(boto.exception.S3ResponseError, zcA.delete_bucket, bucketA.name)
+
+    assert check_all_buckets_exist(zcA, buckets)
+    assert check_all_buckets_exist(zcB, buckets)
+
+    # retry deleting bucket after removing the object from zone B. should succeed
+    log.debug('deleting object on zone B')
+    k = get_key(zcB, bucket, objnameB)
+    k.delete()
+    time.sleep(config.checkpoint_delay)
+
+    log.debug('retry deleting bucket')
+    zcA.delete_bucket(bucketA.name)
+
+    zonegroup_meta_checkpoint(zonegroup)
+
+    assert check_all_buckets_dont_exist(zcA, buckets)
+    assert check_all_buckets_dont_exist(zcB, buckets)
+
     return
 
 @attr('fails_with_rgw')
 @attr('sync_policy')
-- 
2.39.5
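
Note: seen from an S3 client, the behavior these tests exercise is that with a
directional policy, deleting the bucket on the source zone is refused while the
destination zone still holds objects; the caller cleans up the destination and
retries. The -ENOTEMPTY above is presumably surfaced as a BucketNotEmpty error
(the tests only assert a generic S3ResponseError). A minimal client-side
sketch, assuming boto3 and placeholder endpoints/credentials for the two zones:

    import boto3
    from botocore.exceptions import ClientError

    # Placeholder endpoints for the two zones of the zonegroup.
    zone_a = boto3.client('s3', endpoint_url='http://zone-a.example:8000')
    zone_b = boto3.client('s3', endpoint_url='http://zone-b.example:8000')

    def delete_bucket_everywhere(name):
        try:
            zone_a.delete_bucket(Bucket=name)
        except ClientError as e:
            if e.response['Error']['Code'] != 'BucketNotEmpty':
                raise
            # Directional replication: objects may exist only on the destination
            # zone. Remove them there, then retry the bucket deletion.
            listing = zone_b.list_objects_v2(Bucket=name)
            for obj in listing.get('Contents', []):
                zone_b.delete_object(Bucket=name, Key=obj['Key'])
            zone_a.delete_bucket(Bucket=name)

In practice the retry may need to wait until the object deletions have synced,
which is why the tests sleep for config.checkpoint_delay before retrying.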