rgw: skip replication from lock enabled buckets to lock disabled buckets
Author:     Seena Fallah <seenafallah@gmail.com>
AuthorDate: Mon, 3 Mar 2025 16:18:22 +0000 (17:18 +0100)
Commit:     Seena Fallah <seenafallah@gmail.com>
CommitDate: Sat, 15 Mar 2025 11:21:33 +0000 (12:21 +0100)
Replicating lock-enabled objects requires the destination bucket to
have object lock enabled.

Fixes: https://tracker.ceph.com/issues/70486
Signed-off-by: Seena Fallah <seenafallah@gmail.com>
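For illustration, a minimal boto3 sketch of the parity rule the new sync check enforces; the endpoint and bucket names here are hypothetical, and a bucket without object lock surfaces ObjectLockConfigurationNotFoundError as a ClientError:

    import boto3
    from botocore.exceptions import ClientError

    s3 = boto3.client('s3', endpoint_url='http://localhost:8000')  # hypothetical RGW endpoint

    def obj_lock_enabled(bucket):
        # Buckets created without object lock have no lock configuration;
        # the call then raises a ClientError (ObjectLockConfigurationNotFoundError).
        try:
            conf = s3.get_object_lock_configuration(Bucket=bucket)
            return conf['ObjectLockConfiguration'].get('ObjectLockEnabled') == 'Enabled'
        except ClientError:
            return False

    # Entries are skipped unless both sides of the replication pair agree.
    if obj_lock_enabled('source-bucket') != obj_lock_enabled('dest-bucket'):
        print('object lock mismatch: replication will skip entries for this pair')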
src/rgw/driver/rados/rgw_data_sync.cc
src/test/rgw/rgw_multi/tests.py

diff --git a/src/rgw/driver/rados/rgw_data_sync.cc b/src/rgw/driver/rados/rgw_data_sync.cc
index 10fe3b227b38b8242334f36c7c2ba7942bb399ce..5da3f8f1183d4285d2ad1f360be2e67e67f563d7 100644
--- a/src/rgw/driver/rados/rgw_data_sync.cc
+++ b/src/rgw/driver/rados/rgw_data_sync.cc
@@ -4527,6 +4527,12 @@ public:
             tn->log(0, SSTR("skipping entry due to versioning mismatch: " << key));
             goto done;
           }
+          // object lock must be enabled on both buckets, or on neither; otherwise skip the entry
+          if (sync_pipe.source_bucket_info.obj_lock_enabled() != sync_pipe.dest_bucket_info.obj_lock_enabled()) {
+            set_status("skipping entry due to object lock mismatch");
+            tn->log(0, SSTR("skipping entry due to object lock mismatch: " << key));
+            goto done;
+          }
 
           if (error_injection &&
               rand() % 10000 < cct->_conf->rgw_sync_data_inject_err_probability * 10000.0) {
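The mismatch is handled the same way as the versioning mismatch directly above: set the status, log the key, and skip the entry rather than failing the shard. To clear the mismatch in the lock-enabled-to-lock-disabled direction, an operator could enable object lock on the (already versioned) destination bucket; a hedged sketch reusing the client above, with a hypothetical bucket name:

    # Bring the destination in line with the source; object lock requires
    # that versioning is already enabled on the bucket.
    s3.put_object_lock_configuration(
        Bucket='dest-bucket',
        ObjectLockConfiguration={'ObjectLockEnabled': 'Enabled'},
    )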
diff --git a/src/test/rgw/rgw_multi/tests.py b/src/test/rgw/rgw_multi/tests.py
index a08f36ca4c6c1f9aca6ff7d88756707f341cc65b..0b6db255a387a4379765a144663e58ad2a9bd4f0 100644
--- a/src/test/rgw/rgw_multi/tests.py
+++ b/src/test/rgw/rgw_multi/tests.py
@@ -4042,3 +4042,131 @@ def test_bucket_replication_versioned_to_non_versioned():
     # check that the object does not exist in the destination bucket
     e = assert_raises(ClientError, dest.s3_client.get_object, Bucket=dest_bucket.name, Key=objname)
     assert e.response['Error']['Code'] == 'NoSuchKey'
+
+@allow_bucket_replication
+def test_bucket_replication_lock_enabled_to_lock_disabled():
+    zonegroup = realm.master_zonegroup()
+    zonegroup_conns = ZonegroupConns(zonegroup)
+
+    source = zonegroup_conns.non_account_rw_zones[0]
+    dest = zonegroup_conns.non_account_rw_zones[1]
+
+    source_bucket_name = gen_bucket_name()
+    source.create_bucket(source_bucket_name)
+    # enable versioning
+    source.s3_client.put_bucket_versioning(
+        Bucket=source_bucket_name,
+        VersioningConfiguration={'Status': 'Enabled'}
+    )
+    dest_bucket = dest.create_bucket(gen_bucket_name())
+    # enable versioning
+    dest.s3_client.put_bucket_versioning(
+        Bucket=dest_bucket.name,
+        VersioningConfiguration={'Status': 'Enabled'}
+    )
+    zonegroup_meta_checkpoint(zonegroup)
+
+    # create replication configuration
+    source.s3_client.put_bucket_replication(
+        Bucket=source_bucket_name,
+        ReplicationConfiguration={
+            'Role': '',
+            'Rules': [{
+                'ID': 'rule1',
+                'Status': 'Enabled',
+                'Destination': {
+                    'Bucket': f'arn:aws:s3:::{dest_bucket.name}',
+                }
+            }]
+        }
+    )
+    zonegroup_meta_checkpoint(zonegroup)
+
+    # enable object lock on source bucket
+    source.s3_client.put_object_lock_configuration(
+        Bucket=source_bucket_name,
+        ObjectLockConfiguration={
+            'ObjectLockEnabled': 'Enabled',
+            'Rule': {
+                'DefaultRetention': {
+                    'Mode': 'GOVERNANCE',
+                    'Days': 1
+                }
+            }
+        }
+    )
+    zonegroup_meta_checkpoint(zonegroup)
+
+    # upload an object and wait for sync.
+    objname = 'dummy'
+    k = new_key(source, source_bucket_name, objname)
+    k.set_contents_from_string('foo')
+    zone_data_checkpoint(dest.zone, source.zone)
+
+    # check that object does not exist in destination bucket
+    e = assert_raises(ClientError, dest.s3_client.get_object, Bucket=dest_bucket.name, Key=objname)
+    assert e.response['Error']['Code'] == 'NoSuchKey'
+
+@allow_bucket_replication
+def test_bucket_replication_lock_disabled_to_lock_enabled():
+    zonegroup = realm.master_zonegroup()
+    zonegroup_conns = ZonegroupConns(zonegroup)
+
+    source = zonegroup_conns.non_account_rw_zones[0]
+    dest = zonegroup_conns.non_account_rw_zones[1]
+
+    source_bucket = source.create_bucket(gen_bucket_name())
+    # enable versioning
+    source.s3_client.put_bucket_versioning(
+        Bucket=source_bucket.name,
+        VersioningConfiguration={'Status': 'Enabled'}
+    )
+    dest_bucket_name = gen_bucket_name()
+    dest.create_bucket(dest_bucket_name)
+    # enable versioning
+    dest.s3_client.put_bucket_versioning(
+        Bucket=dest_bucket_name,
+        VersioningConfiguration={'Status': 'Enabled'}
+    )
+    zonegroup_meta_checkpoint(zonegroup)
+
+    # create replication configuration
+    source.s3_client.put_bucket_replication(
+        Bucket=source_bucket.name,
+        ReplicationConfiguration={
+            'Role': '',
+            'Rules': [{
+                'ID': 'rule1',
+                'Status': 'Enabled',
+                'Destination': {
+                    'Bucket': f'arn:aws:s3:::{dest_bucket_name}',
+                }
+            }]
+        }
+    )
+    zonegroup_meta_checkpoint(zonegroup)
+
+    # enable object lock on destination bucket
+    dest.s3_client.put_object_lock_configuration(
+        Bucket=dest_bucket_name,
+        ObjectLockConfiguration={
+            'ObjectLockEnabled': 'Enabled',
+            'Rule': {
+                'DefaultRetention': {
+                    'Mode': 'GOVERNANCE',
+                    'Days': 1
+                }
+            }
+        }
+    )
+    zonegroup_meta_checkpoint(zonegroup)
+
+    # upload an object and wait for sync.
+    objname = 'dummy'
+    k = new_key(source, source_bucket.name, objname)
+    k.set_contents_from_string('foo')
+    zone_data_checkpoint(dest.zone, source.zone)
+
+    # check that object does not exist in destination bucket
+    e = assert_raises(ClientError, dest.s3_client.get_object, Bucket=dest_bucket_name, Key=objname)
+    assert e.response['Error']['Code'] == 'NoSuchKey'
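A stricter variant of the final assertion in either test could also confirm that nothing at all reached the destination; a sketch using the same client objects as the test above (list_object_versions omits the 'Versions' and 'DeleteMarkers' keys when the bucket is empty):

    # Optional extra check: no object versions or delete markers replicated.
    resp = dest.s3_client.list_object_versions(Bucket=dest_bucket_name)
    assert 'Versions' not in resp and 'DeleteMarkers' not in resp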