From: Seena Fallah
Date: Mon, 3 Mar 2025 16:18:22 +0000 (+0100)
Subject: rgw: skip replication from lock-enabled buckets to lock-disabled buckets
X-Git-Tag: testing/wip-pdonnell-testing-20250324.181635-debug~28^2
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=826d49368961677f05bc53fd1e4b6cc999057cdd;p=ceph-ci.git

rgw: skip replication from lock-enabled buckets to lock-disabled buckets

Replicating lock-enabled objects requires the destination bucket to
have object lock enabled as well, so skip sync entries when the two
buckets disagree.

Fixes: https://tracker.ceph.com/issues/70486
Signed-off-by: Seena Fallah
---

diff --git a/src/rgw/driver/rados/rgw_data_sync.cc b/src/rgw/driver/rados/rgw_data_sync.cc
index 10fe3b227b3..5da3f8f1183 100644
--- a/src/rgw/driver/rados/rgw_data_sync.cc
+++ b/src/rgw/driver/rados/rgw_data_sync.cc
@@ -4527,6 +4527,12 @@ public:
         tn->log(0, SSTR("skipping entry due to versioning mismatch: " << key));
         goto done;
       }
+      // object lock must be enabled on both buckets, or on neither
+      if (sync_pipe.source_bucket_info.obj_lock_enabled() != sync_pipe.dest_bucket_info.obj_lock_enabled()) {
+        set_status("skipping entry due to object lock mismatch");
+        tn->log(0, SSTR("skipping entry due to object lock mismatch: " << key));
+        goto done;
+      }
 
       if (error_injection &&
           rand() % 10000 < cct->_conf->rgw_sync_data_inject_err_probability * 10000.0) {
diff --git a/src/test/rgw/rgw_multi/tests.py b/src/test/rgw/rgw_multi/tests.py
index a08f36ca4c6..0b6db255a38 100644
--- a/src/test/rgw/rgw_multi/tests.py
+++ b/src/test/rgw/rgw_multi/tests.py
@@ -4042,3 +4042,131 @@ def test_bucket_replication_versioned_to_non_versioned():
     # check that object not exists in destination bucket
     e = assert_raises(ClientError, dest.s3_client.get_object, Bucket=dest_bucket.name, Key=objname)
     assert e.response['Error']['Code'] == 'NoSuchKey'
+
+@allow_bucket_replication
+def test_bucket_replication_lock_enabled_to_lock_disabled():
+    zonegroup = realm.master_zonegroup()
+    zonegroup_conns = ZonegroupConns(zonegroup)
+
+    source = zonegroup_conns.non_account_rw_zones[0]
+    dest = zonegroup_conns.non_account_rw_zones[1]
+
+    source_bucket_name = gen_bucket_name()
+    source.create_bucket(source_bucket_name)
+    # enable versioning
+    source.s3_client.put_bucket_versioning(
+        Bucket=source_bucket_name,
+        VersioningConfiguration={'Status': 'Enabled'}
+    )
+    dest_bucket = dest.create_bucket(gen_bucket_name())
+    # enable versioning
+    dest.s3_client.put_bucket_versioning(
+        Bucket=dest_bucket.name,
+        VersioningConfiguration={'Status': 'Enabled'}
+    )
+    zonegroup_meta_checkpoint(zonegroup)
+
+    # create replication configuration
+    source.s3_client.put_bucket_replication(
+        Bucket=source_bucket_name,
+        ReplicationConfiguration={
+            'Role': '',
+            'Rules': [{
+                'ID': 'rule1',
+                'Status': 'Enabled',
+                'Destination': {
+                    'Bucket': f'arn:aws:s3:::{dest_bucket.name}',
+                }
+            }]
+        }
+    )
+    zonegroup_meta_checkpoint(zonegroup)
+
+    # enable object lock on source bucket
+    source.s3_client.put_object_lock_configuration(
+        Bucket=source_bucket_name,
+        ObjectLockConfiguration={
+            'ObjectLockEnabled': 'Enabled',
+            'Rule': {
+                'DefaultRetention': {
+                    'Mode': 'GOVERNANCE',
+                    'Days': 1
+                }
+            }
+        }
+    )
+    zonegroup_meta_checkpoint(zonegroup)
+
+    # upload an object and wait for sync
+    objname = 'dummy'
+    k = new_key(source, source_bucket_name, objname)
+    k.set_contents_from_string('foo')
+    zone_data_checkpoint(dest.zone, source.zone)
+
+    # check that the object does not exist in the destination bucket
+    e = assert_raises(ClientError, dest.s3_client.get_object, Bucket=dest_bucket.name, Key=objname)
+    assert e.response['Error']['Code'] == 'NoSuchKey'
+
+@allow_bucket_replication
+def test_bucket_replication_lock_disabled_to_lock_enabled():
+    zonegroup = realm.master_zonegroup()
+    zonegroup_conns = ZonegroupConns(zonegroup)
+
+    source = zonegroup_conns.non_account_rw_zones[0]
+    dest = zonegroup_conns.non_account_rw_zones[1]
+
+    source_bucket = source.create_bucket(gen_bucket_name())
+    # enable versioning
+    source.s3_client.put_bucket_versioning(
+        Bucket=source_bucket.name,
+        VersioningConfiguration={'Status': 'Enabled'}
+    )
+    dest_bucket_name = gen_bucket_name()
+    dest.create_bucket(dest_bucket_name)
+    # enable versioning
+    dest.s3_client.put_bucket_versioning(
+        Bucket=dest_bucket_name,
+        VersioningConfiguration={'Status': 'Enabled'}
+    )
+    zonegroup_meta_checkpoint(zonegroup)
+
+    # create replication configuration
+    source.s3_client.put_bucket_replication(
+        Bucket=source_bucket.name,
+        ReplicationConfiguration={
+            'Role': '',
+            'Rules': [{
+                'ID': 'rule1',
+                'Status': 'Enabled',
+                'Destination': {
+                    'Bucket': f'arn:aws:s3:::{dest_bucket_name}',
+                }
+            }]
+        }
+    )
+    zonegroup_meta_checkpoint(zonegroup)
+
+    # enable object lock on destination bucket
+    dest.s3_client.put_object_lock_configuration(
+        Bucket=dest_bucket_name,
+        ObjectLockConfiguration={
+            'ObjectLockEnabled': 'Enabled',
+            'Rule': {
+                'DefaultRetention': {
+                    'Mode': 'GOVERNANCE',
+                    'Days': 1
+                }
+            }
+        }
+    )
+    zonegroup_meta_checkpoint(zonegroup)
+
+    # upload an object and wait for sync
+    objname = 'dummy'
+    k = new_key(source, source_bucket.name, objname)
+    k.set_contents_from_string('foo')
+    zone_data_checkpoint(dest.zone, source.zone)
+
+    # check that the object does not exist in the destination bucket
+    e = assert_raises(ClientError, dest.s3_client.get_object, Bucket=dest_bucket_name, Key=objname)
+    assert e.response['Error']['Code'] == 'NoSuchKey'
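
For reviewers who want to check the same precondition from the client side, below is a minimal standalone sketch (not part of the patch) of the mismatch rule the sync code now enforces. It assumes a boto3 S3 client pointed at the gateway; the helper names object_lock_enabled and lock_compatible are hypothetical:

    from botocore.exceptions import ClientError

    def object_lock_enabled(s3, bucket):
        # Buckets with no object lock configuration raise
        # ObjectLockConfigurationNotFoundError from this call.
        try:
            cfg = s3.get_object_lock_configuration(Bucket=bucket)
        except ClientError as e:
            if e.response['Error']['Code'] == 'ObjectLockConfigurationNotFoundError':
                return False
            raise
        return cfg['ObjectLockConfiguration'].get('ObjectLockEnabled') == 'Enabled'

    def lock_compatible(s3, source_bucket, dest_bucket):
        # Mirrors the obj_lock_enabled() comparison added above: sync
        # entries are skipped unless both buckets agree on object lock.
        return object_lock_enabled(s3, source_bucket) == object_lock_enabled(s3, dest_bucket)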