]> git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
qa/multisite: disable sync policy, endpoint and data_sync_init tests until fixed.
authorShilpa Jagannath <smanjara@redhat.com>
Fri, 12 Apr 2024 20:46:11 +0000 (16:46 -0400)
committerShilpa Jagannath <smanjara@redhat.com>
Fri, 18 Oct 2024 18:17:20 +0000 (11:17 -0700)
Signed-off-by: Shilpa Jagannath <smanjara@redhat.com>
(cherry picked from commit 79d520698d4dc3f06535d3c8fc7ad119fd52ada1)
(cherry picked from commit 724b3d43af74ab3a23c5de58534dc6266a54d81c)

src/test/rgw/rgw_multi/tests.py

index fee0c9a3a4940b78f2faaf0ad9b3981448e43498..b540e439e2789b7e2287c2e2fda5bebeb0149a80 100644 (file)
@@ -605,6 +605,38 @@ def check_role_eq(zone_conn1, zone_conn2, role):
     if zone_conn2.zone.has_roles():
         zone_conn2.check_role_eq(zone_conn1, role['create_role_response']['create_role_result']['role']['role_name'])
 
+def create_zone_bucket(zone):
+    b_name = gen_bucket_name()
+    log.info('create bucket zone=%s name=%s', zone.name, b_name)
+    bucket = zone.create_bucket(b_name)
+    return bucket
+
+def create_object(zone_conn, bucket, objname, content):
+    k = new_key(zone_conn, bucket.name, objname)
+    k.set_contents_from_string(content)
+
+def create_objects(zone_conn, bucket, obj_arr, content):
+    for objname in obj_arr:
+        create_object(zone_conn, bucket, objname, content)
+
+def check_object_exists(bucket, objname, content = None):
+    k = bucket.get_key(objname)
+    assert_not_equal(k, None)
+    if (content != None):
+        assert_equal(k.get_contents_as_string(encoding='ascii'), content)
+
+def check_objects_exist(bucket, obj_arr, content = None):
+    for objname in obj_arr:
+        check_object_exists(bucket, objname, content)
+
+def check_object_not_exists(bucket, objname):
+    k = bucket.get_key(objname)
+    assert_equal(k, None)
+
+def check_objects_not_exist(bucket, obj_arr):
+    for objname in obj_arr:
+        check_object_not_exists(bucket, objname)
+
 def test_object_sync():
     zonegroup = realm.master_zonegroup()
     zonegroup_conns = ZonegroupConns(zonegroup)
@@ -1375,93 +1407,6 @@ def test_bucket_index_log_trim():
     cold_bilog = bilog_list(zone.zone, cold_bucket.name)
     assert(len(cold_bilog) == 0)
 
-def test_bucket_reshard_index_log_trim():
-    zonegroup = realm.master_zonegroup()
-    zonegroup_conns = ZonegroupConns(zonegroup)
-
-    zone = zonegroup_conns.rw_zones[0]
-
-    # create a test bucket, upload some objects, and wait for sync
-    def make_test_bucket():
-        name = gen_bucket_name()
-        log.info('create bucket zone=%s name=%s', zone.name, name)
-        bucket = zone.conn.create_bucket(name)
-        for objname in ('a', 'b', 'c', 'd'):
-            k = new_key(zone, name, objname)
-            k.set_contents_from_string('foo')
-        zonegroup_meta_checkpoint(zonegroup)
-        zonegroup_bucket_checkpoint(zonegroup_conns, name)
-        return bucket
-
-    # create a 'test' bucket
-    test_bucket = make_test_bucket()
-
-    # checking bucket layout before resharding
-    json_obj_1 = bucket_layout(zone.zone, test_bucket.name)
-    assert(len(json_obj_1['layout']['logs']) == 1)
-
-    first_gen = json_obj_1['layout']['current_index']['gen']
-
-    before_reshard_bilog = bilog_list(zone.zone, test_bucket.name, ['--gen', str(first_gen)])
-    assert(len(before_reshard_bilog) == 4)
-
-    # Resharding the bucket
-    zone.zone.cluster.admin(['bucket', 'reshard',
-        '--bucket', test_bucket.name,
-        '--num-shards', '3',
-        '--yes-i-really-mean-it'])
-
-    # checking bucket layout after 1st resharding
-    json_obj_2 = bucket_layout(zone.zone, test_bucket.name)
-    assert(len(json_obj_2['layout']['logs']) == 2)
-
-    second_gen = json_obj_2['layout']['current_index']['gen']
-
-    after_reshard_bilog = bilog_list(zone.zone, test_bucket.name, ['--gen', str(second_gen)])
-    assert(len(after_reshard_bilog) == 0)
-
-    # upload more objects
-    for objname in ('e', 'f', 'g', 'h'):
-        k = new_key(zone, test_bucket.name, objname)
-        k.set_contents_from_string('foo')
-    zonegroup_bucket_checkpoint(zonegroup_conns, test_bucket.name)
-
-    # Resharding the bucket again
-    zone.zone.cluster.admin(['bucket', 'reshard',
-        '--bucket', test_bucket.name,
-        '--num-shards', '3',
-        '--yes-i-really-mean-it'])
-
-    # checking bucket layout after 2nd resharding
-    json_obj_3 = bucket_layout(zone.zone, test_bucket.name)
-    assert(len(json_obj_3['layout']['logs']) == 3)
-
-    zonegroup_bucket_checkpoint(zonegroup_conns, test_bucket.name)
-
-    bilog_autotrim(zone.zone)
-
-    # checking bucket layout after 1st bilog autotrim
-    json_obj_4 = bucket_layout(zone.zone, test_bucket.name)
-    assert(len(json_obj_4['layout']['logs']) == 2)
-
-    bilog_autotrim(zone.zone)
-
-    # checking bucket layout after 2nd bilog autotrim
-    json_obj_5 = bucket_layout(zone.zone, test_bucket.name)
-    assert(len(json_obj_5['layout']['logs']) == 1)
-
-    bilog_autotrim(zone.zone)
-
-    # upload more objects
-    for objname in ('i', 'j', 'k', 'l'):
-        k = new_key(zone, test_bucket.name, objname)
-        k.set_contents_from_string('foo')
-    zonegroup_bucket_checkpoint(zonegroup_conns, test_bucket.name)
-
-    # verify the bucket has non-empty bilog
-    test_bilog = bilog_list(zone.zone, test_bucket.name)
-    assert(len(test_bilog) > 0)
-
 @attr('bucket_reshard')
 def test_bucket_reshard_incremental():
     zonegroup = realm.master_zonegroup()
@@ -1605,6 +1550,301 @@ def bucket_keys_eq(zone1, zone2, bucket_name):
                          zone2.name)
             assert False
 
+def test_role_sync():
+    zonegroup = realm.master_zonegroup()
+    zonegroup_conns = ZonegroupConns(zonegroup)
+    roles, zone_role = create_role_per_zone(zonegroup_conns)
+
+    zonegroup_meta_checkpoint(zonegroup)
+
+    for source_conn, target_conn in combinations(zonegroup_conns.zones, 2):
+        if target_conn.zone.has_roles():
+            check_roles_eq(source_conn, target_conn)
+
+def test_role_delete_sync():
+    zonegroup = realm.master_zonegroup()
+    zonegroup_conns = ZonegroupConns(zonegroup)
+    role_name = gen_role_name()
+    log.info('create role zone=%s name=%s', zonegroup_conns.master_zone.name, role_name)
+    policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":[\"arn:aws:iam:::user/testuser\"]},\"Action\":[\"sts:AssumeRole\"]}]}"
+    zonegroup_conns.master_zone.iam_conn.create_role(RoleName=role_name, AssumeRolePolicyDocument=policy_document)
+
+    zonegroup_meta_checkpoint(zonegroup)
+
+    for zone in zonegroup_conns.zones:
+        log.info(f'checking if zone: {zone.name} has role: {role_name}')
+        zone.iam_conn.get_role(RoleName=role_name)
+        log.info(f'success, zone: {zone.name} has role: {role_name}')
+
+    log.info(f"deleting role: {role_name}")
+    zonegroup_conns.master_zone.iam_conn.delete_role(RoleName=role_name)
+    zonegroup_meta_checkpoint(zonegroup)
+
+    for zone in zonegroup_conns.zones:
+        log.info(f'checking if zone: {zone.name} does not have role: {role_name}')
+        assert_raises(zone.iam_conn.exceptions.NoSuchEntityException,
+                      zone.iam_conn.get_role, RoleName=role_name)
+        log.info(f'success, zone: {zone.name} does not have role: {role_name}')
+
+@attr('topic notification')
+def test_topic_notification_sync():
+    zonegroup = realm.master_zonegroup()
+    zonegroup_meta_checkpoint(zonegroup)
+    # let wait for users and other settings to sync across all zones.
+    time.sleep(config.checkpoint_delay)
+    # create topics in each zone.
+    zonegroup_conns = ZonegroupConns(zonegroup)
+    topic_arns, zone_topic = create_topic_per_zone(zonegroup_conns)
+    log.debug("topic_arns: %s", topic_arns)
+
+    zonegroup_meta_checkpoint(zonegroup)
+
+    # verify topics exists in all zones
+    for conn in zonegroup_conns.zones:
+        topic_list = conn.list_topics()
+        log.debug("topics for zone=%s = %s", conn.name, topic_list)
+        assert_equal(len(topic_list), len(topic_arns))
+        for topic_arn_map in topic_list:
+            assert_true(topic_arn_map['TopicArn'] in topic_arns)
+
+    # create a bucket
+    bucket = zonegroup_conns.rw_zones[0].create_bucket(gen_bucket_name())
+    log.debug('created bucket=%s', bucket.name)
+    zonegroup_meta_checkpoint(zonegroup)
+
+    # create bucket_notification in each zone.
+    notification_ids = []
+    num = 1
+    for zone_conn, topic_arn in zone_topic:
+        noti_id = "bn" + '-' + run_prefix + '-' + str(num)
+        notification_ids.append(noti_id)
+        topic_conf = {'Id': noti_id,
+                      'TopicArn': topic_arn,
+                      'Events': ['s3:ObjectCreated:*']
+                     }
+        num += 1
+        log.info('creating bucket notification for zone=%s name=%s', zone_conn.name, noti_id)
+        zone_conn.create_notification(bucket.name, [topic_conf])
+    zonegroup_meta_checkpoint(zonegroup)
+
+    # verify notifications exists in all zones
+    for conn in zonegroup_conns.zones:
+        notification_list = conn.list_notifications(bucket.name)
+        log.debug("notifications for zone=%s = %s", conn.name, notification_list)
+        assert_equal(len(notification_list), len(topic_arns))
+        for notification in notification_list:
+            assert_true(notification['Id'] in notification_ids)
+
+    # verify bucket_topic mapping
+    # create a new bucket and subcribe it to first topic.
+    bucket_2 = zonegroup_conns.rw_zones[0].create_bucket(gen_bucket_name())
+    notif_id = "bn-2" + '-' + run_prefix
+    topic_conf = {'Id': notif_id,
+                  'TopicArn': topic_arns[0],
+                  'Events': ['s3:ObjectCreated:*']
+                  }
+    zonegroup_conns.rw_zones[0].create_notification(bucket_2.name, [topic_conf])
+    zonegroup_meta_checkpoint(zonegroup)
+    for conn in zonegroup_conns.zones:
+        topics = get_topics(conn.zone)
+        for topic in topics:
+            if topic['arn'] == topic_arns[0]:
+                assert_equal(len(topic['subscribed_buckets']), 2)
+                assert_true(bucket_2.name in topic['subscribed_buckets'])
+            else:
+                assert_equal(len(topic['subscribed_buckets']), 1)
+            assert_true(bucket.name in topic['subscribed_buckets'])
+
+    # delete the 2nd bucket and verify the mapping is removed.
+    zonegroup_conns.rw_zones[0].delete_bucket(bucket_2.name)
+    zonegroup_meta_checkpoint(zonegroup)
+    for conn in zonegroup_conns.zones:
+        topics = get_topics(conn.zone)
+        for topic in topics:
+            assert_equal(len(topic['subscribed_buckets']), 1)
+        '''TODO(Remove the break once the https://tracker.ceph.com/issues/20802
+           is fixed, as the secondary site bucket instance info is currently not
+           getting deleted coz of the bug hence the bucket-topic mapping
+           deletion is not invoked on secondary sites.)'''
+        break
+
+    # delete notifications
+    zonegroup_conns.rw_zones[0].delete_notifications(bucket.name)
+    log.debug('Deleting all notifications for  bucket=%s', bucket.name)
+    zonegroup_meta_checkpoint(zonegroup)
+
+    # verify notification deleted in all zones
+    for conn in zonegroup_conns.zones:
+        notification_list = conn.list_notifications(bucket.name)
+        assert_equal(len(notification_list), 0)
+
+    # delete topics
+    for zone_conn, topic_arn in zone_topic:
+        log.debug('deleting topic zone=%s arn=%s', zone_conn.name, topic_arn)
+        zone_conn.delete_topic(topic_arn)
+    zonegroup_meta_checkpoint(zonegroup)
+
+    # verify topics deleted in all zones
+    for conn in zonegroup_conns.zones:
+        topic_list = conn.list_topics()
+        assert_equal(len(topic_list), 0)
+
+def test_account_metadata_sync():
+    zonegroup = realm.master_zonegroup()
+    zonegroup_conns = ZonegroupConns(zonegroup)
+
+    inline_policy = json.dumps({'Version': '2012-10-17', 'Statement': [{'Effect': 'Allow', 'Action': 's3:*', 'Resource': '*'}]})
+    managed_policy_arn = 'arn:aws:iam::aws:policy/AmazonS3FullAccess'
+
+    for source_conn in zonegroup_conns.rw_zones:
+        iam = source_conn.iam_conn
+        name = source_conn.name
+        # create user, add access key, user policy, managed policy
+        iam.create_user(UserName=name)
+        iam.create_access_key(UserName=name)
+        iam.put_user_policy(UserName=name, PolicyName='Allow', PolicyDocument=inline_policy)
+        iam.attach_user_policy(UserName=name, PolicyArn=managed_policy_arn)
+        # create group, group policy, managed policy, add user to group
+        iam.create_group(GroupName=name)
+        iam.put_group_policy(GroupName=name, PolicyName='Allow', PolicyDocument=inline_policy)
+        iam.attach_group_policy(GroupName=name, PolicyArn=managed_policy_arn)
+        iam.add_user_to_group(GroupName=name, UserName=name)
+        # create role, role policy, managed policy
+        iam.create_role(RoleName=name, AssumeRolePolicyDocument=json.dumps({'Version': '2012-10-17', 'Statement': [{'Effect': 'Allow', 'Principal': {'AWS': 'arn:aws:iam:::user/testuser'}, 'Action': ['sts:AssumeRole']}]}))
+        iam.put_role_policy(RoleName=name, PolicyName='Allow', PolicyDocument=inline_policy)
+        iam.attach_role_policy(RoleName=name, PolicyArn=managed_policy_arn)
+        # TODO: test oidc provider
+        #iam.create_open_id_connect_provider(ClientIDList=['clientid'], ThumbprintList=['3768084dfb3d2b68b7897bf5f565da8efEXAMPLE'], Url=f'http://{name}.example.com')
+
+    realm_meta_checkpoint(realm)
+
+    # check that all users/groups/roles are equal across all zones
+    for source_conn, target_conn in combinations(zonegroup_conns.zones, 2):
+        if target_conn.zone.has_roles():
+            check_roles_eq(source_conn, target_conn)
+            check_users_eq(source_conn, target_conn)
+            check_groups_eq(source_conn, target_conn)
+            check_oidc_providers_eq(source_conn, target_conn)
+
+    for source_conn in zonegroup_conns.rw_zones:
+        iam = source_conn.iam_conn
+        name = source_conn.name
+
+        #iam.delete_open_id_connect_provider(OpenIDConnectProviderArn=f'arn:aws:iam::RGW11111111111111111:oidc-provider/{name}.example.com')
+
+        iam.detach_role_policy(RoleName=name, PolicyArn=managed_policy_arn)
+        iam.delete_role_policy(RoleName=name, PolicyName='Allow')
+        iam.delete_role(RoleName=name)
+
+        iam.remove_user_from_group(GroupName=name, UserName=name)
+        iam.detach_group_policy(GroupName=name, PolicyArn=managed_policy_arn)
+        iam.delete_group_policy(GroupName=name, PolicyName='Allow')
+        iam.delete_group(GroupName=name)
+
+        iam.detach_user_policy(UserName=name, PolicyArn=managed_policy_arn)
+        iam.delete_user_policy(UserName=name, PolicyName='Allow')
+        key_id = iam.list_access_keys(UserName=name)['AccessKeyMetadata'][0]['AccessKeyId']
+        iam.delete_access_key(UserName=name, AccessKeyId=key_id)
+        iam.delete_user(UserName=name)
+
+    realm_meta_checkpoint(realm)
+
+    # check that all users/groups/roles are equal across all zones
+    for source_conn, target_conn in combinations(zonegroup_conns.zones, 2):
+        if target_conn.zone.has_roles():
+            check_roles_eq(source_conn, target_conn)
+            check_users_eq(source_conn, target_conn)
+            check_groups_eq(source_conn, target_conn)
+            check_oidc_providers_eq(source_conn, target_conn)
+
+
+# TODO: move sync policy and endpoint tests into separate test file
+# fix data sync init tests
+'''
+def test_bucket_reshard_index_log_trim():
+    zonegroup = realm.master_zonegroup()
+    zonegroup_conns = ZonegroupConns(zonegroup)
+
+    zone = zonegroup_conns.rw_zones[0]
+
+    # create a test bucket, upload some objects, and wait for sync
+    def make_test_bucket():
+        name = gen_bucket_name()
+        log.info('create bucket zone=%s name=%s', zone.name, name)
+        bucket = zone.conn.create_bucket(name)
+        for objname in ('a', 'b', 'c', 'd'):
+            k = new_key(zone, name, objname)
+            k.set_contents_from_string('foo')
+        zonegroup_meta_checkpoint(zonegroup)
+        zonegroup_bucket_checkpoint(zonegroup_conns, name)
+        return bucket
+
+    # create a 'test' bucket
+    test_bucket = make_test_bucket()
+
+    # checking bucket layout before resharding
+    json_obj_1 = bucket_layout(zone.zone, test_bucket.name)
+    assert(len(json_obj_1['layout']['logs']) == 1)
+
+    first_gen = json_obj_1['layout']['current_index']['gen']
+
+    before_reshard_bilog = bilog_list(zone.zone, test_bucket.name, ['--gen', str(first_gen)])
+    assert(len(before_reshard_bilog) == 4)
+
+    # Resharding the bucket
+    zone.zone.cluster.admin(['bucket', 'reshard',
+        '--bucket', test_bucket.name,
+        '--num-shards', '13'])
+
+    # checking bucket layout after 1st resharding
+    json_obj_2 = bucket_layout(zone.zone, test_bucket.name)
+    assert(len(json_obj_2['layout']['logs']) == 2)
+
+    second_gen = json_obj_2['layout']['current_index']['gen']
+
+    after_reshard_bilog = bilog_list(zone.zone, test_bucket.name, ['--gen', str(second_gen)])
+    assert(len(after_reshard_bilog) == 0)
+
+    # upload more objects
+    for objname in ('e', 'f', 'g', 'h'):
+        k = new_key(zone, test_bucket.name, objname)
+        k.set_contents_from_string('foo')
+    zonegroup_bucket_checkpoint(zonegroup_conns, test_bucket.name)
+
+    # Resharding the bucket again
+    zone.zone.cluster.admin(['bucket', 'reshard',
+        '--bucket', test_bucket.name,
+        '--num-shards', '15'])
+
+    # checking bucket layout after 2nd resharding
+    json_obj_3 = bucket_layout(zone.zone, test_bucket.name)
+    assert(len(json_obj_3['layout']['logs']) == 3)
+
+    zonegroup_bucket_checkpoint(zonegroup_conns, test_bucket.name)
+
+    bilog_autotrim(zone.zone)
+
+    # checking bucket layout after 1st bilog autotrim
+    json_obj_4 = bucket_layout(zone.zone, test_bucket.name)
+    assert(len(json_obj_4['layout']['logs']) == 2)
+
+    bilog_autotrim(zone.zone)
+
+    # checking bucket layout after 2nd bilog autotrim
+    json_obj_5 = bucket_layout(zone.zone, test_bucket.name)
+    time.sleep(config.checkpoint_delay)
+    assert(len(json_obj_5['layout']['logs']) == 1)
+
+    # upload more objects
+    for objname in ('i', 'j', 'k', 'l'):
+        k = new_key(zone, test_bucket.name, objname)
+        k.set_contents_from_string('foo')
+    zonegroup_bucket_checkpoint(zonegroup_conns, test_bucket.name)
+
+    # verify the bucket has non-empty bilog
+    test_bilog = bilog_list(zone.zone, test_bucket.name)
+    assert(len(test_bilog) > 0)
+
 @attr('bucket_reshard')
 def test_bucket_sync_run_basic_incremental():
     """
@@ -2055,38 +2295,6 @@ def remove_sync_group_pipe(cluster, group, pipe_id, bucket = None, args = None):
         assert False, 'failed to remove sync group pipe groupid=%s, pipe_id=%s, src_zones=%s, dest_zones=%s, bucket=%s' % (group, pipe_id, src_zones, dest_zones, bucket)
     return json.loads(result_json)
 
-def create_zone_bucket(zone):
-    b_name = gen_bucket_name()
-    log.info('create bucket zone=%s name=%s', zone.name, b_name)
-    bucket = zone.create_bucket(b_name)
-    return bucket
-
-def create_object(zone_conn, bucket, objname, content):
-    k = new_key(zone_conn, bucket.name, objname)
-    k.set_contents_from_string(content)
-
-def create_objects(zone_conn, bucket, obj_arr, content):
-    for objname in obj_arr:
-        create_object(zone_conn, bucket, objname, content)
-
-def check_object_exists(bucket, objname, content = None):
-    k = bucket.get_key(objname)
-    assert_not_equal(k, None)
-    if (content != None):
-        assert_equal(k.get_contents_as_string(encoding='ascii'), content)
-
-def check_objects_exist(bucket, obj_arr, content = None):
-    for objname in obj_arr:
-        check_object_exists(bucket, objname, content)
-
-def check_object_not_exists(bucket, objname):
-    k = bucket.get_key(objname)
-    assert_equal(k, None)
-
-def check_objects_not_exist(bucket, obj_arr):
-    for objname in obj_arr:
-        check_object_not_exists(bucket, objname)
-
 @attr('sync_policy')
 def test_sync_policy_config_zonegroup():
     """