git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
test/rgw/notifications: fixes needed to run the tests in a multisite environment
authorYuval Lifshitz <ylifshit@ibm.com>
Wed, 18 Feb 2026 13:50:52 +0000 (13:50 +0000)
committerYuval Lifshitz <ylifshit@ibm.com>
Mon, 23 Feb 2026 17:48:53 +0000 (17:48 +0000)
The main issue was that a system user would get a JSON reply when
creating a bucket, which caused the boto3 client to fail.
The solution is therefore to use a non-system user in the tests.

Signed-off-by: Yuval Lifshitz <ylifshit@ibm.com>
src/test/rgw/bucket_notification/README.rst
src/test/rgw/bucket_notification/api.py
src/test/rgw/bucket_notification/bntests.conf.multisite [new file with mode: 0644]
src/test/rgw/bucket_notification/test_bn.py
src/test/rgw/test-rgw-multisite.sh

index 9e553cea432da9986a37bd3ed38b00129e7c34f1..02d42a2abd6fa361d396d7582ddb25d3492641e1 100644 (file)
@@ -22,8 +22,8 @@ we would need the following configuration file::
                                version = v1
 
                                [s3 main]
-                               access_key = 1234567890
-                               secret_key = pencil
+                               access_key = 0987654321
+                               secret_key = crayon
 
 Add boto3 extension to the standard client: https://github.com/ceph/ceph/tree/main/examples/rgw/boto3#introduction.
 
index ce9495d8b091f17e09492ce242c96c4f992149c5..e4f9851ad72317d28b7db4c11a37875b24f94174 100644 (file)
@@ -10,6 +10,7 @@ from urllib import parse as urlparse
 from time import gmtime, strftime
 import boto3
 from botocore.client import Config
+from botocore.exceptions import ClientError
 import os
 import subprocess
 import json
@@ -210,19 +211,24 @@ class S3Connection:
         self.endpoint_url = f'{protocol}://{host}:{port}'
 
         # Create boto3 client and resource
+        # Use path-style addressing to match the old boto OrdinaryCallingFormat
         self._s3_client = boto3.client(
             's3',
             endpoint_url=self.endpoint_url,
             aws_access_key_id=aws_access_key_id,
             aws_secret_access_key=aws_secret_access_key,
-            config=boto3.session.Config(retries={'max_attempts': self.num_retries})
+            config=Config(
+                retries={'max_attempts': self.num_retries},
+                s3={'addressing_style': 'path'}
+            )
         )
 
         self._s3_resource = boto3.resource(
             's3',
             endpoint_url=self.endpoint_url,
             aws_access_key_id=aws_access_key_id,
-            aws_secret_access_key=aws_secret_access_key
+            aws_secret_access_key=aws_secret_access_key,
+            config=Config(s3={'addressing_style': 'path'})
         )
 
         # For SSL connections
@@ -230,9 +236,8 @@ class S3Connection:
 
     def create_bucket(self, bucket_name, **kwargs):
         """Create a bucket"""
-        acl = kwargs.get('ACL', 'private')
         try:
-            self._s3_client.create_bucket(Bucket=bucket_name, ACL=acl)
+            self._s3_client.create_bucket(Bucket=bucket_name)
         except ClientError as e:
             # Bucket might already exist
             if e.response['Error']['Code'] != 'BucketAlreadyOwnedByYou':
@@ -255,7 +260,8 @@ def put_object_tagging(conn, bucket_name, key, tags):
     client = boto3.client('s3',
             endpoint_url='http://'+conn.host+':'+str(conn.port),
             aws_access_key_id=conn.aws_access_key_id,
-            aws_secret_access_key=conn.aws_secret_access_key)
+            aws_secret_access_key=conn.aws_secret_access_key,
+            config=Config(s3={'addressing_style': 'path'}))
     return client.put_object(Body='aaaaaaaaaaa', Bucket=bucket_name, Key=key, Tagging=tags)
 
 def make_request(conn, method, resource, parameters=None, sign_parameters=False, extra_parameters=None):
@@ -295,7 +301,8 @@ def delete_all_objects(conn, bucket_name):
     client = boto3.client('s3',
                       endpoint_url='http://'+conn.host+':'+str(conn.port),
                       aws_access_key_id=conn.aws_access_key_id,
-                      aws_secret_access_key=conn.aws_secret_access_key)
+                      aws_secret_access_key=conn.aws_secret_access_key,
+                      config=Config(s3={'addressing_style': 'path'}))
 
     objects = []
     for key in client.list_objects(Bucket=bucket_name)['Contents']:
@@ -426,7 +433,8 @@ class PSNotificationS3:
         self.client = boto3.client('s3',
                                    endpoint_url='http://'+conn.host+':'+str(conn.port),
                                    aws_access_key_id=conn.aws_access_key_id,
-                                   aws_secret_access_key=conn.aws_secret_access_key)
+                                   aws_secret_access_key=conn.aws_secret_access_key,
+                                   config=Config(s3={'addressing_style': 'path'}))
 
     def send_request(self, method, parameters=None):
         """send request to radosgw"""
diff --git a/src/test/rgw/bucket_notification/bntests.conf.multisite b/src/test/rgw/bucket_notification/bntests.conf.multisite
new file mode 100644 (file)
index 0000000..f09e538
--- /dev/null
@@ -0,0 +1,13 @@
+[DEFAULT]
+port = 8101
+host = localhost
+zonegroup = zg1
+cluster = c1
+version = v2
+
+[s3 main]
+access_key = 0987654321
+secret_key = crayon
+display_name = RegularUser
+user_id = regular.user
+email =
index 9168841e2883e1c2b2b1e0a9091740290158bd1c..81879ef76f02b69ad8906efc0404db3d587ea92b 100644 (file)
@@ -11,6 +11,7 @@ import os
 import io
 import string
 import sys
+from botocore.client import Config
 from botocore.exceptions import ClientError
 from http.server import ThreadingHTTPServer, BaseHTTPRequestHandler
 from random import randint
@@ -1960,7 +1961,8 @@ def lifecycle(endpoint_type, conn, number_of_objects, topic_events, create_threa
     client = boto3.client('s3',
                           endpoint_url='http://'+conn.host+':'+str(conn.port),
                           aws_access_key_id=conn.aws_access_key_id,
-                          aws_secret_access_key=conn.aws_secret_access_key)
+                          aws_secret_access_key=conn.aws_secret_access_key,
+                          config=Config(s3={'addressing_style': 'path'}))
     yesterday = datetime.date.today() - datetime.timedelta(days=1)
     response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name,
                                                          LifecycleConfiguration={'Rules': rules_creator(yesterday, obj_prefix)}
@@ -2330,6 +2332,7 @@ def test_post_object_upload_amqp():
                          aws_access_key_id=get_access_key(),
                          aws_secret_access_key=get_secret_key(),
                          endpoint_url=endpoint,
+                         config=Config(s3={'addressing_style': 'path'})
                         )
 
     bucket_name = gen_bucket_name()
@@ -5617,7 +5620,7 @@ def persistent_notification_shard_config_change(endpoint_type, conn, new_num_sha
     default_num_shards = 11
     rgw_client = f'client.rgw.{get_config_port()}'
     if (old_num_shards != default_num_shards):
-        set_rgw_config_option(rgw_client, 'rgw_bucket_persistent_notif_num_shards', old_num_shards)
+        set_rgw_config_option(rgw_client, 'rgw_bucket_persistent_notif_num_shards', old_num_shards, get_config_cluster())
 
     bucket_name = gen_bucket_name()
     bucket = conn.create_bucket(bucket_name)
@@ -5663,7 +5666,7 @@ def persistent_notification_shard_config_change(endpoint_type, conn, new_num_sha
     create_object_and_verify_events(bucket, 'foo', topic_name, receiver, expected_keys, deletions=True)
 
     ## change config value for num_shards to new_num_shards
-    set_rgw_config_option(rgw_client, 'rgw_bucket_persistent_notif_num_shards', new_num_shards)
+    set_rgw_config_option(rgw_client, 'rgw_bucket_persistent_notif_num_shards', new_num_shards, get_config_cluster())
     
     ## create objects in the bucket (async)
     expected_keys = []
@@ -5678,7 +5681,7 @@ def persistent_notification_shard_config_change(endpoint_type, conn, new_num_sha
 
     ##revert config value for num_shards to default
     if (new_num_shards != default_num_shards):
-        set_rgw_config_option(rgw_client, 'rgw_bucket_persistent_notif_num_shards', default_num_shards)
+        set_rgw_config_option(rgw_client, 'rgw_bucket_persistent_notif_num_shards', default_num_shards, get_config_cluster())
 
 
 def create_object_and_verify_events(bucket, key_name, topic_name, receiver, expected_keys, deletions=False):
index d3a1b265ca61135163e0c89d935aafa3d0b1c173..7718c1ec03a2a2489b1a24492a8ac770e2ee2332 100755 (executable)
@@ -18,6 +18,8 @@ zg=zg1
 
 system_access_key="1234567890"
 system_secret="pencil"
+regular_access_key="0987654321"
+regular_secret="crayon"
 
 # bring up first cluster
 x $(start_ceph_cluster c1) -n $(get_mstart_parameters 1)
@@ -39,6 +41,8 @@ done
 
 # create realm, zonegroup, zone, start rgws
 init_first_zone c1 $realm_name $zg ${zg}-1 $endpoints $system_access_key $system_secret
+# create a regular (non-system) user for S3 operations
+x $(rgw_admin c1) user create --uid=regular.user --display-name=RegularUser --access-key=${regular_access_key} --secret=${regular_secret}
 i=1
 while [ $i -le $rgws ]; do
   port=$((8100+i))