]> git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
rgw: reject PutBucketReplication on mismatched versioning and lock
authorSeena Fallah <seenafallah@gmail.com>
Thu, 6 Mar 2025 18:36:50 +0000 (19:36 +0100)
committerSeena Fallah <seenafallah@gmail.com>
Sat, 15 Mar 2025 11:21:29 +0000 (12:21 +0100)
Reject PutBucketReplication calls if versioning is not identical
between the source and destination buckets. This check also applies
to object lock configurations to ensure consistency.

Fixes: https://tracker.ceph.com/issues/70486
Signed-off-by: Seena Fallah <seenafallah@gmail.com>
src/rgw/rgw_rest_s3.cc
src/test/rgw/rgw_multi/tests.py

index 97b0be351524d5b31a55bfc7708a9e97cfef2958..9a790f011c6103759794a0e6b1c2a7a572700c9e 100644 (file)
@@ -1339,6 +1339,28 @@ struct ReplicationConfiguration {
       }
       pipe->dest.bucket.emplace(dest_bk);
 
+      std::unique_ptr<rgw::sal::Bucket> dest_bucket;
+      if (int r = driver->load_bucket(s, *pipe->dest.bucket, &dest_bucket, s->yield); r < 0) {
+        if (r == -ENOENT) {
+          s->err.message = "Destination bucket must exist.";
+          return -EINVAL;
+        }
+
+        ldpp_dout(s, 0) << "ERROR: failed to load bucket info for bucket=" << *pipe->dest.bucket << " r=" << r << dendl;
+        return r;
+      }
+
+      // require identical versioning status on source and destination
+      if (dest_bucket->get_info().versioned() != s->bucket->get_info().versioned()) {
+        s->err.message = "Versioning must be identical in source and destination buckets.";
+        return -EINVAL;
+      }
+      // require identical object lock configuration on source and destination
+      if (dest_bucket->get_info().obj_lock_enabled() != s->bucket->get_info().obj_lock_enabled()) {
+        s->err.message = "Object lock must be identical in source and destination buckets.";
+        return -EINVAL;
+      }
+
       if (filter) {
         int r = filter->to_sync_pipe_filter(s->cct, &pipe->params.source.filter);
         if (r < 0) {
index 7c388393250e0e0d0dbfaf828f85729b2ad94ad3..c01937932b72c572edbe550816e5b9826dc785a4 100644 (file)
@@ -3885,3 +3885,64 @@ def test_bucket_replication_alt_user():
     # check that object exists in destination bucket
     k = get_key(dest, dest_bucket, objname)
     assert_equal(k.get_contents_as_string().decode('utf-8'), 'foo')
+
+@allow_bucket_replication
+def test_bucket_replication_reject_versioning_identical():
+    zonegroup = realm.master_zonegroup()
+    zonegroup_conns = ZonegroupConns(zonegroup)
+
+    source = zonegroup_conns.non_account_rw_zones[0]
+    dest = zonegroup_conns.non_account_rw_zones[1]
+
+    source_bucket = source.create_bucket(gen_bucket_name())
+    dest_bucket = dest.create_bucket(gen_bucket_name())
+    source.s3_client.put_bucket_versioning(
+        Bucket=source_bucket.name,
+        VersioningConfiguration={'Status': 'Enabled'}
+    )
+    zonegroup_meta_checkpoint(zonegroup)
+
+    # create replication configuration
+    e = assert_raises(ClientError,
+                      source.s3_client.put_bucket_replication,
+                      Bucket=source_bucket.name,
+                      ReplicationConfiguration={
+                          'Role': '',
+                          'Rules': [{
+                              'ID': 'rule1',
+                              'Status': 'Enabled',
+                              'Destination': {
+                                  'Bucket': f'arn:aws:s3:::{dest_bucket.name}',
+                              }
+                          }]
+                      })
+    assert e.response['ResponseMetadata']['HTTPStatusCode'] == 400
+
+@allow_bucket_replication
+def test_bucket_replication_reject_objectlock_identical():
+    zonegroup = realm.master_zonegroup()
+    zonegroup_conns = ZonegroupConns(zonegroup)
+
+    source = zonegroup_conns.non_account_rw_zones[0]
+    dest = zonegroup_conns.non_account_rw_zones[1]
+
+    source_bucket = source.create_bucket(gen_bucket_name())
+    dest_bucket_name = gen_bucket_name()
+    dest.s3_client.create_bucket(Bucket=dest_bucket_name, ObjectLockEnabledForBucket=True)
+    zonegroup_meta_checkpoint(zonegroup)
+
+    # create replication configuration
+    e = assert_raises(ClientError,
+                      source.s3_client.put_bucket_replication,
+                      Bucket=source_bucket.name,
+                      ReplicationConfiguration={
+                          'Role': '',
+                          'Rules': [{
+                              'ID': 'rule1',
+                              'Status': 'Enabled',
+                              'Destination': {
+                                  'Bucket': f'arn:aws:s3:::{dest_bucket_name}',
+                              }
+                          }]
+                      })
+    assert e.response['ResponseMetadata']['HTTPStatusCode'] == 400