]> git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
Revert "rgw: skip replication from lock enabled buckets to lock disabled buckets"
authorShilpa Jagannath <smanjara@redhat.com>
Wed, 2 Apr 2025 18:14:16 +0000 (14:14 -0400)
committerShilpa Jagannath <smanjara@redhat.com>
Thu, 3 Apr 2025 18:43:55 +0000 (14:43 -0400)
This reverts commit 826d49368961677f05bc53fd1e4b6cc999057cdd.
Signed-off-by: Shilpa Jagannath <smanjara@redhat.com>
src/rgw/driver/rados/rgw_data_sync.cc
src/test/rgw/rgw_multi/tests.py

index 5da3f8f1183d4285d2ad1f360be2e67e67f563d7..10fe3b227b38b8242334f36c7c2ba7942bb399ce 100644 (file)
@@ -4527,12 +4527,6 @@ public:
             tn->log(0, SSTR("skipping entry due to versioning mismatch: " << key));
             goto done;
           }
-          // if object lock is enabled on either, the other should follow as well
-          if (sync_pipe.source_bucket_info.obj_lock_enabled() != sync_pipe.dest_bucket_info.obj_lock_enabled()) {
-            set_status("skipping entry due to object lock mismatch");
-            tn->log(0, SSTR("skipping entry due to object lock mismatch: " << key));
-            goto done;
-          }
 
           if (error_injection &&
               rand() % 10000 < cct->_conf->rgw_sync_data_inject_err_probability * 10000.0) {
index e08ad319feb5c2002f4e72f4f1e8e20d3bb1f4d5..0206ec378478149b013f8c96a5d67e9af5a5075f 100644 (file)
@@ -4073,131 +4073,3 @@ def test_bucket_replication_versioned_to_non_versioned():
     # check that object does not exist in destination bucket
     e = assert_raises(ClientError, dest.s3_client.get_object, Bucket=dest_bucket.name, Key=objname)
     assert e.response['Error']['Code'] == 'NoSuchKey'
-
-@allow_bucket_replication
-def test_bucket_replication_lock_enabled_to_lock_disabled():
-    zonegroup = realm.master_zonegroup()
-    zonegroup_conns = ZonegroupConns(zonegroup)
-
-    source = zonegroup_conns.non_account_rw_zones[0]
-    dest = zonegroup_conns.non_account_rw_zones[1]
-
-    source_bucket_name = gen_bucket_name()
-    source.create_bucket(source_bucket_name)
-    # enable versioning
-    source.s3_client.put_bucket_versioning(
-        Bucket=source_bucket_name,
-        VersioningConfiguration={'Status': 'Enabled'}
-    )
-    dest_bucket = dest.create_bucket(gen_bucket_name())
-    # enable versioning
-    dest.s3_client.put_bucket_versioning(
-        Bucket=dest_bucket.name,
-        VersioningConfiguration={'Status': 'Enabled'}
-    )
-    zonegroup_meta_checkpoint(zonegroup)
-
-    # create replication configuration
-    source.s3_client.put_bucket_replication(
-        Bucket=source_bucket_name,
-        ReplicationConfiguration={
-            'Role': '',
-            'Rules': [{
-                'ID': 'rule1',
-                'Status': 'Enabled',
-                'Destination': {
-                    'Bucket': f'arn:aws:s3:::{dest_bucket.name}',
-                }
-            }]
-        }
-    )
-    zonegroup_meta_checkpoint(zonegroup)
-
-    # enable object lock on source bucket
-    source.s3_client.put_object_lock_configuration(
-        Bucket=source_bucket_name,
-        ObjectLockConfiguration={
-            'ObjectLockEnabled': 'Enabled',
-            'Rule': {
-                'DefaultRetention': {
-                    'Mode': 'GOVERNANCE',
-                    'Days': 1
-                }
-            }
-        }
-    )
-    zonegroup_meta_checkpoint(zonegroup)
-
-    # upload an object and wait for sync.
-    objname = 'dummy'
-    k = new_key(source, source_bucket_name, objname)
-    k.set_contents_from_string('foo')
-    zone_data_checkpoint(dest.zone, source.zone)
-
-    # check that object does not exist in destination bucket
-    e = assert_raises(ClientError, dest.s3_client.get_object, Bucket=dest_bucket.name, Key=objname)
-    assert e.response['Error']['Code'] == 'NoSuchKey'
-
-@allow_bucket_replication
-def test_bucket_replication_lock_disabled_to_lock_enabled():
-    zonegroup = realm.master_zonegroup()
-    zonegroup_conns = ZonegroupConns(zonegroup)
-
-    source = zonegroup_conns.non_account_rw_zones[0]
-    dest = zonegroup_conns.non_account_rw_zones[1]
-
-    source_bucket = source.create_bucket(gen_bucket_name())
-    # enable versioning
-    source.s3_client.put_bucket_versioning(
-        Bucket=source_bucket.name,
-        VersioningConfiguration={'Status': 'Enabled'}
-    )
-    dest_bucket_name = gen_bucket_name()
-    dest.create_bucket(dest_bucket_name)
-    # enable versioning
-    dest.s3_client.put_bucket_versioning(
-        Bucket=dest_bucket_name,
-        VersioningConfiguration={'Status': 'Enabled'}
-    )
-    zonegroup_meta_checkpoint(zonegroup)
-
-    # create replication configuration
-    source.s3_client.put_bucket_replication(
-        Bucket=source_bucket.name,
-        ReplicationConfiguration={
-            'Role': '',
-            'Rules': [{
-                'ID': 'rule1',
-                'Status': 'Enabled',
-                'Destination': {
-                    'Bucket': f'arn:aws:s3:::{dest_bucket_name}',
-                }
-            }]
-        }
-    )
-    zonegroup_meta_checkpoint(zonegroup)
-
-    # enable object lock on destination bucket
-    dest.s3_client.put_object_lock_configuration(
-        Bucket=dest_bucket_name,
-        ObjectLockConfiguration={
-            'ObjectLockEnabled': 'Enabled',
-            'Rule': {
-                'DefaultRetention': {
-                    'Mode': 'GOVERNANCE',
-                    'Days': 1
-                }
-            }
-        }
-    )
-    zonegroup_meta_checkpoint(zonegroup)
-
-    # upload an object and wait for sync.
-    objname = 'dummy'
-    k = new_key(source, source_bucket.name, objname)
-    k.set_contents_from_string('foo')
-    zone_data_checkpoint(dest.zone, source.zone)
-
-    # check that object does not exist in destination bucket
-    e = assert_raises(ClientError, dest.s3_client.get_object, Bucket=dest_bucket_name, Key=objname)
-    assert e.response['Error']['Code'] == 'NoSuchKey'