From c5956790e6db4df0bb5e327f10c1dbd9f333e33b Mon Sep 17 00:00:00 2001
From: Ali Maredia
Date: Fri, 16 Dec 2016 13:23:09 -0500
Subject: [PATCH] rgw: multisite enabled over multiple clusters

Added '--cluster' to all necessary commands (e.g. radosgw-admin, rados,
ceph), and made sure the necessary checks are in place so that client
roles can be read with or without a cluster_name preceding them.

Made master_client defined in the config for the radosgw-admin task.

Signed-off-by: Ali Maredia
---
 .../all/radosgw-admin-data-sync.yaml           |   1 +
 .../all/radosgw-admin-multi-region.yaml        |   1 +
 .../rgw/singleton/all/radosgw-admin.yaml       |   1 +
 qa/tasks/radosgw_admin.py                      |  28 ++-
 qa/tasks/radosgw_agent.py                      |   5 +-
 qa/tasks/rgw.py                                | 184 +++++++++++-------
 qa/tasks/s3tests.py                            |  13 +-
 qa/tasks/util/rados.py                         |  16 +-
 qa/tasks/util/rgw.py                           |  38 ++--
 9 files changed, 176 insertions(+), 111 deletions(-)
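With this change the radosgw-admin task no longer picks its client automatically; the job yaml names it explicitly through the new master_client key. For illustration, a two-cluster job fragment could look like the sketch below (cluster and role names here are hypothetical; unprefixed roles such as client.0 are still accepted and treated as the default 'ceph' cluster):

    roles:
    - [cluster1.mon.a, cluster1.osd.0, cluster1.osd.1, cluster1.client.0]
    - [cluster2.mon.a, cluster2.osd.0, cluster2.osd.1, cluster2.client.0]
    tasks:
    - rgw:
        cluster1.client.0:
        cluster2.client.0:
    - radosgw-admin:
        master_client: cluster1.client.0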
""" global log - assert config is None or isinstance(config, list) \ - or isinstance(config, dict), \ - "task s3tests only supports a list or dictionary for configuration" - all_clients = ['client.{id}'.format(id=id_) - for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] - if config is None: - config = all_clients - if isinstance(config, list): - config = dict.fromkeys(config) - clients = config.keys() + + assert isinstance(config, dict), \ + "task radosgw-admin only supports a dictionary with a master_client for configuration" + + # regions found just like in the rgw task + regions = ctx.rgw.regions + + log.debug('regions are: %r', regions) + log.debug('config is: %r', config) + + client = config["master_client"] multi_region_run = rgw_utils.multi_region_enabled(ctx) - client = clients[0]; # default choice, multi-region code may overwrite this - if multi_region_run: - client = rgw_utils.get_master_client(ctx, clients) + log.debug('multi_region_run is: %r', multi_region_run) + log.debug('master_client is: %r', client) # once the client is chosen, pull the host name and assigned port out of # the role_endpoints that were assigned by the rgw task diff --git a/qa/tasks/radosgw_agent.py b/qa/tasks/radosgw_agent.py index 0254805d2af9d..d6c7f77e7aa6d 100644 --- a/qa/tasks/radosgw_agent.py +++ b/qa/tasks/radosgw_agent.py @@ -18,8 +18,9 @@ def run_radosgw_agent(ctx, config): return_list = list() for (client, cconf) in config.items(): # don't process entries that are not clients - if not client.startswith('client.'): - log.debug('key {data} does not start with \'client.\', moving on'.format( + log.debug("client is %r", client) + if not 'client.' in client: + log.debug('key {data} does not contain \'client.\', moving on'.format( data=client)) continue diff --git a/qa/tasks/rgw.py b/qa/tasks/rgw.py index 254c5294c847f..80b2524866643 100644 --- a/qa/tasks/rgw.py +++ b/qa/tasks/rgw.py @@ -62,19 +62,21 @@ def create_apache_dirs(ctx, config, on_client = None, except_client = None): for client in clients_to_create_as: if client == except_client: continue + cluster_name, daemon_type, client_id = teuthology.split_role(client) + client_with_cluster = cluster_name + '.' + daemon_type + '.' 
diff --git a/qa/tasks/rgw.py b/qa/tasks/rgw.py
index 254c5294c847f..80b2524866643 100644
--- a/qa/tasks/rgw.py
+++ b/qa/tasks/rgw.py
@@ -62,19 +62,21 @@ def create_apache_dirs(ctx, config, on_client = None, except_client = None):
     for client in clients_to_create_as:
         if client == except_client:
             continue
+        cluster_name, daemon_type, client_id = teuthology.split_role(client)
+        client_with_cluster = cluster_name + '.' + daemon_type + '.' + client_id
         ctx.cluster.only(client).run(
             args=[
                 'mkdir',
                 '-p',
-                '{tdir}/apache/htdocs.{client}'.format(tdir=testdir,
-                                                       client=client),
-                '{tdir}/apache/tmp.{client}/fastcgi_sock'.format(
+                '{tdir}/apache/htdocs.{client_with_cluster}'.format(tdir=testdir,
+                                                       client_with_cluster=client_with_cluster),
+                '{tdir}/apache/tmp.{client_with_cluster}/fastcgi_sock'.format(
                     tdir=testdir,
-                    client=client),
+                    client_with_cluster=client_with_cluster),
                 run.Raw('&&'),
                 'mkdir',
-                '{tdir}/archive/apache.{client}'.format(tdir=testdir,
-                                                        client=client),
+                '{tdir}/archive/apache.{client_with_cluster}'.format(tdir=testdir,
+                                                        client_with_cluster=client_with_cluster),
             ],
         )
     try:
@@ -86,12 +88,12 @@ def create_apache_dirs(ctx, config, on_client = None, except_client = None):
                 args=[
                     'rm',
                     '-rf',
-                    '{tdir}/apache/tmp.{client}'.format(tdir=testdir,
-                                                        client=client),
+                    '{tdir}/apache/tmp.{client_with_cluster}'.format(tdir=testdir,
+                                                        client_with_cluster=client_with_cluster),
                     run.Raw('&&'),
                     'rmdir',
-                    '{tdir}/apache/htdocs.{client}'.format(tdir=testdir,
-                                                           client=client),
+                    '{tdir}/apache/htdocs.{client_with_cluster}'.format(tdir=testdir,
+                                                           client_with_cluster=client_with_cluster),
                 ],
             )
         for client in clients_to_create_as:
@@ -134,6 +136,9 @@ def ship_apache_configs(ctx, config, role_endpoints, on_client = None,
     for client in clients_to_create_as:
         if client == except_client:
             continue
+        cluster_name, daemon_type, client_id = teuthology.split_role(client)
+        client_with_id = daemon_type + '.' + client_id
+        client_with_cluster = cluster_name + '.' + client_with_id
         (remote,) = ctx.cluster.only(client).remotes.keys()
         system_type = teuthology.get_system_type(remote)
         conf = config.get(client)
@@ -183,7 +188,7 @@ def ship_apache_configs(ctx, config, role_endpoints, on_client = None,
             print_continue=print_continue,
             host=host,
             port=port,
-            client=client,
+            client=client_with_cluster,
             idle_timeout=idle_timeout,
             user=user,
             group=group,
@@ -191,18 +196,18 @@ def ship_apache_configs(ctx, config, role_endpoints, on_client = None,
             )
         teuthology.write_file(
             remote=remote,
-            path='{tdir}/apache/apache.{client}.conf'.format(
+            path='{tdir}/apache/apache.{client_with_cluster}.conf'.format(
                 tdir=testdir,
-                client=client),
+                client_with_cluster=client_with_cluster),
             data=conf,
             )
         rgw_options = []
         if ctx.rgw.use_fastcgi or _use_uds_with_fcgi(remote):
             rgw_options = [
                 '--rgw-socket-path',
-                '{tdir}/apache/tmp.{client}/fastcgi_sock/rgw_sock'.format(
+                '{tdir}/apache/tmp.{client_with_cluster}/fastcgi_sock/rgw_sock'.format(
                     tdir=testdir,
-                    client=client
+                    client_with_cluster=client_with_cluster
                 ),
                 '--rgw-frontends',
                 'fastcgi',
@@ -217,21 +222,21 @@ def ship_apache_configs(ctx, config, role_endpoints, on_client = None,
 
         teuthology.write_file(
             remote=remote,
-            path='{tdir}/apache/htdocs.{client}/rgw.fcgi'.format(
+            path='{tdir}/apache/htdocs.{client_with_cluster}/rgw.fcgi'.format(
                 tdir=testdir,
-                client=client),
+                client_with_cluster=client_with_cluster),
             data="""#!/bin/sh
 ulimit -c unlimited
-exec radosgw -f -n {client} -k /etc/ceph/ceph.{client}.keyring {rgw_options}
+exec radosgw -f -n {client_with_id} --cluster {cluster_name} -k /etc/ceph/{client_with_cluster}.keyring {rgw_options}
 
-""".format(tdir=testdir, client=client, rgw_options=" ".join(rgw_options))
+""".format(tdir=testdir, client_with_id=client_with_id, client_with_cluster=client_with_cluster, cluster_name=cluster_name, rgw_options=" ".join(rgw_options))
             )
         remote.run(
             args=[
                 'chmod',
                 'a=rx',
-                '{tdir}/apache/htdocs.{client}/rgw.fcgi'.format(tdir=testdir,
-                                                                client=client),
+                '{tdir}/apache/htdocs.{client_with_cluster}/rgw.fcgi'.format(tdir=testdir,
+                                                                client_with_cluster=client_with_cluster),
             ],
         )
     try:
@@ -243,14 +248,14 @@ exec radosgw -f -n {client} -k /etc/ceph/ceph.{client}.keyring {rgw_options}
                 args=[
                     'rm',
                     '-f',
-                    '{tdir}/apache/apache.{client}.conf'.format(tdir=testdir,
-                                                                client=client),
+                    '{tdir}/apache/apache.{client_with_cluster}.conf'.format(tdir=testdir,
+                                                                client_with_cluster=client_with_cluster),
                     run.Raw('&&'),
                     'rm',
                     '-f',
-                    '{tdir}/apache/htdocs.{client}/rgw.fcgi'.format(
+                    '{tdir}/apache/htdocs.{client_with_cluster}/rgw.fcgi'.format(
                         tdir=testdir,
-                        client=client),
+                        client_with_cluster=client_with_cluster),
                 ],
             )
@@ -265,13 +270,18 @@ def start_rgw(ctx, config, on_client = None, except_client = None):
     clients_to_run = [on_client]
     if on_client is None:
         clients_to_run = config.keys()
+    log.debug('client %r', clients_to_run)
     testdir = teuthology.get_testdir(ctx)
     for client in clients_to_run:
         if client == except_client:
             continue
         (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+        cluster_name, daemon_type, client_id = teuthology.split_role(client)
+        client_with_id = daemon_type + '.' + client_id
+        client_with_cluster = cluster_name + '.' + client_with_id
         zone = rgw_utils.zone_for_client(ctx, client)
         log.debug('zone %s', zone)
+
         client_config = config.get(client)
         if client_config is None:
             client_config = {}
@@ -293,9 +303,9 @@ def start_rgw(ctx, config, on_client = None, except_client = None):
         if ctx.rgw.use_fastcgi or _use_uds_with_fcgi(remote):
             rgw_cmd.extend([
                 '--rgw-socket-path',
-                '{tdir}/apache/tmp.{client}/fastcgi_sock/rgw_sock'.format(
+                '{tdir}/apache/tmp.{client_with_cluster}/fastcgi_sock/rgw_sock'.format(
                     tdir=testdir,
-                    client=client,
+                    client_with_cluster=client_with_cluster,
                 ),
                 '--rgw-frontends',
                 'fastcgi',
@@ -320,19 +330,20 @@ def start_rgw(ctx, config, on_client = None, except_client = None):
             rgw_cmd.extend(['--rgw-zone', zone])
 
         rgw_cmd.extend([
-            '-n', client,
-            '-k', '/etc/ceph/ceph.{client}.keyring'.format(client=client),
+            '-n', client_with_id,
+            '--cluster', cluster_name,
+            '-k', '/etc/ceph/{client_with_cluster}.keyring'.format(client_with_cluster=client_with_cluster),
             '--log-file',
-            '/var/log/ceph/rgw.{client}.log'.format(client=client),
+            '/var/log/ceph/rgw.{client_with_cluster}.log'.format(client_with_cluster=client_with_cluster),
             '--rgw_ops_log_socket_path',
-            '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir,
-                                                     client=client),
+            '{tdir}/rgw.opslog.{client_with_cluster}.sock'.format(tdir=testdir,
+                                                     client_with_cluster=client_with_cluster),
             '--foreground',
             run.Raw('|'),
             'sudo',
             'tee',
-            '/var/log/ceph/rgw.{client}.stdout'.format(tdir=testdir,
-                                                       client=client),
+            '/var/log/ceph/rgw.{client_with_cluster}.stdout'.format(tdir=testdir,
+                                                       client_with_cluster=client_with_cluster),
             run.Raw('2>&1'),
             ])
@@ -349,6 +360,7 @@ def start_rgw(ctx, config, on_client = None, except_client = None):
         ctx.daemons.add_daemon(
             remote, 'rgw', client,
+            cluster=cluster_name,
             args=run_cmd,
             logger=log.getChild(client),
             stdin=run.PIPE,
@@ -375,8 +387,8 @@ def start_rgw(ctx, config, on_client = None, except_client = None):
             args=[
                 'rm',
                 '-f',
-                '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir,
-                                                         client=client),
+                '{tdir}/rgw.opslog.{client_with_cluster}.sock'.format(tdir=testdir,
+                                                         client_with_cluster=client_with_cluster),
             ],
         )
@@ -393,6 +405,8 @@ def start_apache(ctx, config, on_client = None, except_client = None):
     if on_client is None:
         clients_to_run = config.keys()
     for client in clients_to_run:
+        cluster_name, daemon_type, client_id = teuthology.split_role(client)
+        client_with_cluster = cluster_name + '.' + daemon_type + '.' + client_id
         if client == except_client:
             continue
         (remote,) = ctx.cluster.only(client).remotes.keys()
@@ -419,14 +433,14 @@ def start_apache(ctx, config, on_client = None, except_client = None):
                 apache_name,
                 '-X',
                 '-f',
-                '{tdir}/apache/apache.{client}.conf'.format(tdir=testdir,
-                                                            client=client),
+                '{tdir}/apache/apache.{client_with_cluster}.conf'.format(tdir=testdir,
+                                                            client_with_cluster=client_with_cluster),
             ],
             logger=log.getChild(client),
             stdin=run.PIPE,
             wait=False,
             )
-        apaches[client] = proc
+        apaches[client_with_cluster] = proc
 
     try:
         yield
@@ -437,7 +451,6 @@ def start_apache(ctx, config, on_client = None, except_client = None):
         run.wait(apaches.itervalues())
 
-
 def extract_user_info(client_config):
     """
     Extract user info from the client config specified. Returns a dict
@@ -465,9 +478,11 @@ def extract_zone_info(ctx, client, client_config):
     :param client_config: dictionary of client configuration information
     :returns: zone extracted from client and client_config information
     """
-    ceph_config = ctx.ceph['ceph'].conf.get('global', {})
-    ceph_config.update(ctx.ceph['ceph'].conf.get('client', {}))
-    ceph_config.update(ctx.ceph['ceph'].conf.get(client, {}))
+    cluster_name, daemon_type, client_id = teuthology.split_role(client)
+    client_with_id = daemon_type + '.' + client_id
+    ceph_config = ctx.ceph[cluster_name].conf.get('global', {})
+    ceph_config.update(ctx.ceph[cluster_name].conf.get('client', {}))
+    ceph_config.update(ctx.ceph[cluster_name].conf.get(client_with_id, {}))
     for key in ['rgw zone', 'rgw region', 'rgw zone root pool']:
         assert key in ceph_config, \
             'ceph conf must contain {key} for {client}'.format(key=key,
@@ -703,14 +718,16 @@ def create_nonregion_pools(ctx, config, regions):
     for client in config.keys():
         (remote,) = ctx.cluster.only(client).remotes.iterkeys()
         data_pool = '.rgw.buckets'
+        cluster_name, daemon_type, client_id = teuthology.split_role(client)
+
         if ctx.rgw.ec_data_pool:
             create_ec_pool(remote, data_pool, client, 64,
-                           ctx.rgw.erasure_code_profile)
+                           ctx.rgw.erasure_code_profile, cluster_name)
         else:
-            create_replicated_pool(remote, data_pool, 64)
+            create_replicated_pool(remote, data_pool, 64, cluster_name)
         if ctx.rgw.cache_pools:
             create_cache_pool(remote, data_pool, data_pool + '.cache', 64,
-                              64*1024*1024)
+                              64*1024*1024, cluster_name)
     yield
 
 @contextlib.contextmanager
@@ -738,6 +755,7 @@ def configure_multisite_regions_and_zones(ctx, config, regions, role_endpoints,
     log.debug('regions are %r', regions)
     log.debug('role_endpoints = %r', role_endpoints)
     log.debug('realm is %r', realm)
+
     # extract the zone info
     role_zones = dict([(client, extract_zone_info(ctx, client, c_config))
                        for client, c_config in config.iteritems()])
@@ -761,7 +779,8 @@ def configure_multisite_regions_and_zones(ctx, config, regions, role_endpoints,
     fill_in_endpoints(region_info, role_zones, role_endpoints)
 
     # clear out the old defaults
-    first_mon = teuthology.get_first_mon(ctx, config)
+    cluster_name, daemon_type, client_id = teuthology.split_role(master_client)
+    first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
     (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
 
     # read master zonegroup and master_zone
@@ -795,12 +814,12 @@ def configure_multisite_regions_and_zones(ctx, config, regions, role_endpoints,
         (remote,) = ctx.cluster.only(role).remotes.keys()
         for pool_info in zone_info['placement_pools']:
             remote.run(args=['sudo', 'ceph', 'osd', 'pool', 'create',
-                             pool_info['val']['index_pool'], '64', '64'])
+                             pool_info['val']['index_pool'], '64', '64', '--cluster', cluster_name])
             if ctx.rgw.ec_data_pool:
                 create_ec_pool(remote, pool_info['val']['data_pool'],
-                               zone, 64, ctx.rgw.erasure_code_profile)
+                               zone, 64, ctx.rgw.erasure_code_profile, cluster_name)
             else:
-                create_replicated_pool(remote, pool_info['val']['data_pool'], 64)
+                create_replicated_pool(remote, pool_info['val']['data_pool'], 64, cluster_name)
 
     (zonegroup, zone, zone_info, user_info) = role_zones[master_client]
     zone_json = json.dumps(dict(zone_info.items() + user_info.items()))
@@ -812,11 +831,11 @@ def configure_multisite_regions_and_zones(ctx, config, regions, role_endpoints,
              check_status=True)
 
     rgwadmin(ctx, master_client,
-             cmd=['-n', master_client, 'zone', 'default', zone],
+             cmd=['zone', 'default', '--rgw-zone', zone],
              check_status=True)
 
     rgwadmin(ctx, master_client,
-             cmd=['-n', master_client, 'period', 'update', '--commit'],
+             cmd=['period', 'update', '--commit'],
              check_status=True)
 
     yield
@@ -870,6 +889,7 @@ def configure_regions_and_zones(ctx, config, regions, role_endpoints, realm):
     log.debug('regions are %r', regions)
     log.debug('role_endpoints = %r', role_endpoints)
     log.debug('realm is %r', realm)
+
     # extract the zone info
     role_zones = dict([(client, extract_zone_info(ctx, client, c_config))
                        for client, c_config in config.iteritems()])
@@ -893,14 +913,15 @@ def configure_regions_and_zones(ctx, config, regions, role_endpoints, realm):
     fill_in_endpoints(region_info, role_zones, role_endpoints)
 
     # clear out the old defaults
-    first_mon = teuthology.get_first_mon(ctx, config)
+    cluster_name, daemon_type, client_id = teuthology.split_role(client)
+    first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
     (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
 
     # removing these objects from .rgw.root and the per-zone root pools
     # may or may not matter
     rados(ctx, mon,
-          cmd=['-p', '.rgw.root', 'rm', 'region_info.default'])
+          cmd=['-p', '.rgw.root', 'rm', 'region_info.default', '--cluster', cluster_name])
     rados(ctx, mon,
-          cmd=['-p', '.rgw.root', 'rm', 'zone_info.default'])
+          cmd=['-p', '.rgw.root', 'rm', 'zone_info.default', '--cluster', cluster_name])
 
     # read master zonegroup and master_zone
     for zonegroup, zg_info in region_info.iteritems():
@@ -920,7 +941,7 @@ def configure_regions_and_zones(ctx, config, regions, role_endpoints, realm):
     log.debug('master client = %r', master_client)
     log.debug('config %r ', config)
 
-    (ret, out)=rgwadmin(ctx, master_client,
+    (ret, out)=rgwadmin(ctx, client,
                         cmd=['realm', 'create', '--rgw-realm', realm, '--default'])
     log.debug('realm create ret %r exists %r', -ret, errno.EEXIST)
     assert ret == 0 or ret != -errno.EEXIST
@@ -931,22 +952,22 @@ def configure_regions_and_zones(ctx, config, regions, role_endpoints, realm):
     for role, (zonegroup, zone, zone_info, user_info) in role_zones.iteritems():
         rados(ctx, mon,
               cmd=['-p', zone_info['domain_root'],
-                   'rm', 'region_info.default'])
+                   'rm', 'region_info.default', '--cluster', cluster_name])
         rados(ctx, mon,
               cmd=['-p', zone_info['domain_root'],
-                   'rm', 'zone_info.default'])
+                   'rm', 'zone_info.default', '--cluster', cluster_name])
 
         (remote,) = ctx.cluster.only(role).remotes.keys()
         for pool_info in zone_info['placement_pools']:
             remote.run(args=['sudo', 'ceph', 'osd', 'pool', 'create',
-                             pool_info['val']['index_pool'], '64', '64'])
+                             pool_info['val']['index_pool'], '64', '64', '--cluster', cluster_name])
             if ctx.rgw.ec_data_pool:
                 create_ec_pool(remote, pool_info['val']['data_pool'],
-                               zone, 64, ctx.rgw.erasure_code_profile)
+                               zone, 64, ctx.rgw.erasure_code_profile, cluster_name)
             else:
                 create_replicated_pool(
                     remote, pool_info['val']['data_pool'],
-                    64)
+                    64, cluster_name)
         zone_json = json.dumps(dict(zone_info.items() + user_info.items()))
         log.debug('zone info is: %r', zone_json)
         rgwadmin(ctx, client,
@@ -969,11 +990,12 @@ def configure_regions_and_zones(ctx, config, regions, role_endpoints, realm):
 
     (zonegroup, zone, zone_info, user_info) = role_zones[client]
     rgwadmin(ctx, client,
-             cmd=['zone', 'default', zone],
+             cmd=['zone', 'default', '--rgw-zone', zone],
             check_status=True)
 
-    rgwadmin(ctx, master_client,
-             cmd=['-n', master_client, 'period', 'update', '--commit'],
+    # this used to take master_client; revisit if that needs to change
+    rgwadmin(ctx, client,
+             cmd=['period', 'update', '--commit'],
              check_status=True)
 
     yield
@@ -1029,11 +1051,12 @@ def pull_configuration(ctx, config, regions, role_endpoints, realm, master_clien
 
     for client in config.iterkeys():
         if client != master_client:
+            cluster_name, daemon_type, client_id = teuthology.split_role(client)
            host, port = role_endpoints[master_client]
             endpoint = 'http://{host}:{port}/'.format(host=host, port=port)
             log.debug("endpoint: %s", endpoint)
             rgwadmin(ctx, client,
-                     cmd=['-n', client, 'realm', 'pull', '--rgw-realm', realm, '--default', '--url',
+                     cmd=['realm', 'pull', '--rgw-realm', realm, '--default', '--url',
                           endpoint, '--access_key',
                           user_info['system_key']['access_key'], '--secret',
                           user_info['system_key']['secret_key']],
                      check_status=True)
 
             (zonegroup, zone, zone_info, zone_user_info) = role_zones[client]
             zone_json = json.dumps(dict(zone_info.items() + zone_user_info.items()))
-            log.debug("zone info is: %r"), zone_json
+            log.debug("zone info is: %r", zone_json)
             rgwadmin(ctx, client,
-                     cmd=['zone', 'set', '--rgw-zonegroup', zonegroup,
+                     cmd=['zone', 'set', '--default',
                           '--rgw-zone', zone],
                      stdin=StringIO(zone_json),
                      check_status=True)
 
+            rgwadmin(ctx, client,
+                     cmd=['zonegroup', 'add', '--rgw-zonegroup', zonegroup, '--rgw-zone', zone],
+                     check_status=True)
+
+            rgwadmin(ctx, client,
+                     cmd=['zonegroup', 'default', '--rgw-zonegroup', zonegroup],
+                     check_status=True)
+
             rgwadmin(ctx, client,
                      cmd=['period', 'update', '--commit', '--url',
                           endpoint, '--access_key',
@@ -1256,6 +1287,7 @@ def task(ctx, config):
         lambda: create_nonregion_pools(
             ctx=ctx, config=config, regions=regions),
         ]
+    log.debug('Nonregion pools created')
 
     multisite = len(regions) > 1
 
@@ -1267,8 +1299,24 @@ def task(ctx, config):
             break
 
     log.debug('multisite %s', multisite)
-    multi_cluster = multisite and len(ctx.config['roles']) > 1
+
+    multi_cluster = False
+    if multisite:
+        prev_cluster_name = None
+        roles = ctx.config['roles']
+        # check if any roles have a different cluster_name from each other
+        for lst in roles:
+            for role in lst:
+                cluster_name, daemon_type, client_id = teuthology.split_role(role)
+                if cluster_name != prev_cluster_name and prev_cluster_name is not None:
+                    multi_cluster = True
+                    break
+                prev_cluster_name = cluster_name
+            if multi_cluster:
+                break
+
     log.debug('multi_cluster %s', multi_cluster)
+    ctx.rgw.config = config
     master_client = None
     if multi_cluster:
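The rgw.py changes above all derive the same three name forms from a role, so it may help to see them side by side. A sketch, assuming teuthology.misc.split_role() falls back to the default 'ceph' cluster for unprefixed roles (values illustrative):

    from teuthology.misc import split_role

    cluster_name, daemon_type, client_id = split_role('cluster1.client.0')
    # -> ('cluster1', 'client', '0')
    client_with_id = daemon_type + '.' + client_id             # 'client.0': passed to radosgw -n
    client_with_cluster = cluster_name + '.' + client_with_id  # 'cluster1.client.0': used for
                                                               # keyring, log and socket names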
diff --git a/qa/tasks/s3tests.py b/qa/tasks/s3tests.py
index 20f328b17994f..305025ff545b0 100644
--- a/qa/tasks/s3tests.py
+++ b/qa/tasks/s3tests.py
@@ -29,7 +29,8 @@ def extract_sync_client_data(ctx, client_name):
     """
     return_region_name = None
     return_dict = None
-    client = ctx.ceph['ceph'].conf.get(client_name, None)
+    cluster_name, daemon_type, client_id = teuthology.split_role(client_name)
+    client = ctx.ceph[cluster_name].conf.get(client_name, None)
     if client:
         current_client_zone = client.get('rgw zone', None)
         if current_client_zone:
@@ -199,19 +200,22 @@ def create_users(ctx, config):
         for section, user in users.iteritems():
             _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
             log.debug('Creating user {user} on {host}'.format(user=s3tests_conf[section]['user_id'], host=client))
+            cluster_name, daemon_type, client_id = teuthology.split_role(client)
+            client_with_id = daemon_type + '.' + client_id
             ctx.cluster.only(client).run(
                 args=[
                     'adjust-ulimits',
                     'ceph-coverage',
                     '{tdir}/archive/coverage'.format(tdir=testdir),
                     'radosgw-admin',
-                    '-n', client,
+                    '-n', client_with_id,
                     'user', 'create',
                     '--uid', s3tests_conf[section]['user_id'],
                     '--display-name', s3tests_conf[section]['display_name'],
                     '--access-key', s3tests_conf[section]['access_key'],
                     '--secret', s3tests_conf[section]['secret_key'],
                     '--email', s3tests_conf[section]['email'],
+                    '--cluster', cluster_name,
                 ],
             )
@@ -220,16 +224,19 @@ def create_users(ctx, config):
     try:
         yield
     finally:
         for client in config['clients']:
             for user in users.itervalues():
                 uid = '{user}.{client}'.format(user=user, client=client)
+                cluster_name, daemon_type, client_id = teuthology.split_role(client)
+                client_with_id = daemon_type + '.' + client_id
                 ctx.cluster.only(client).run(
                     args=[
                         'adjust-ulimits',
                         'ceph-coverage',
                         '{tdir}/archive/coverage'.format(tdir=testdir),
                         'radosgw-admin',
-                        '-n', client,
+                        '-n', client_with_id,
                         'user', 'rm',
                         '--uid', uid,
                         '--purge-data',
+                        '--cluster', cluster_name,
                     ],
                 )
diff --git a/qa/tasks/util/rados.py b/qa/tasks/util/rados.py
index a5b27d5b1c782..2d9d263d45f09 100644
--- a/qa/tasks/util/rados.py
+++ b/qa/tasks/util/rados.py
@@ -24,26 +24,26 @@ def rados(ctx, remote, cmd, wait=True, check_status=False):
     else:
         return proc
 
-def create_ec_pool(remote, name, profile_name, pgnum, profile={}):
+def create_ec_pool(remote, name, profile_name, pgnum, profile={}, cluster_name="ceph"):
     remote.run(args=['sudo', 'ceph'] +
-               cmd_erasure_code_profile(profile_name, profile))
+               cmd_erasure_code_profile(profile_name, profile) + ['--cluster', cluster_name])
     remote.run(args=[
         'sudo', 'ceph', 'osd', 'pool', 'create', name,
-        str(pgnum), str(pgnum), 'erasure', profile_name,
+        str(pgnum), str(pgnum), 'erasure', profile_name, '--cluster', cluster_name
         ])
 
-def create_replicated_pool(remote, name, pgnum):
+def create_replicated_pool(remote, name, pgnum, cluster_name="ceph"):
     remote.run(args=[
-        'sudo', 'ceph', 'osd', 'pool', 'create', name, str(pgnum), str(pgnum),
+        'sudo', 'ceph', 'osd', 'pool', 'create', name, str(pgnum), str(pgnum), '--cluster', cluster_name
         ])
 
-def create_cache_pool(remote, base_name, cache_name, pgnum, size):
+def create_cache_pool(remote, base_name, cache_name, pgnum, size, cluster_name="ceph"):
     remote.run(args=[
-        'sudo', 'ceph', 'osd', 'pool', 'create', cache_name, str(pgnum)
+        'sudo', 'ceph', 'osd', 'pool', 'create', cache_name, str(pgnum), '--cluster', cluster_name
         ])
     remote.run(args=[
         'sudo', 'ceph', 'osd', 'tier', 'add-cache', base_name, cache_name,
-        str(size),
+        str(size), '--cluster', cluster_name
         ])
 
 def cmd_erasure_code_profile(profile_name, profile):
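Because cluster_name defaults to "ceph" in the util/rados.py helpers above, existing single-cluster callers keep working unchanged and multi-cluster callers opt in explicitly. A usage sketch (remote is a teuthology remote handle; the second cluster name is hypothetical):

    create_replicated_pool(remote, '.rgw.buckets', 64)              # implies --cluster ceph
    create_replicated_pool(remote, '.rgw.buckets', 64, 'cluster2')  # targets cluster2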
diff --git a/qa/tasks/util/rgw.py b/qa/tasks/util/rgw.py
index 8abc5022f9d2a..02215b0ed48b2 100644
--- a/qa/tasks/util/rgw.py
+++ b/qa/tasks/util/rgw.py
@@ -10,7 +10,6 @@ from teuthology import misc as teuthology
 
 log = logging.getLogger(__name__)
 
-# simple test to indicate if multi-region testing should occur
 def multi_region_enabled(ctx):
     # this is populated by the radosgw-agent task, seems reasonable to
     # use that as an indicator that we're testing multi-region sync
@@ -20,6 +19,8 @@ def rgwadmin(ctx, client, cmd, stdin=StringIO(), check_status=False,
              format='json'):
     log.info('rgwadmin: {client} : {cmd}'.format(client=client,cmd=cmd))
     testdir = teuthology.get_testdir(ctx)
+    cluster_name, daemon_type, client_id = teuthology.split_role(client)
+    client_with_id = daemon_type + '.' + client_id
     pre = [
         'adjust-ulimits',
         'ceph-coverage'.format(tdir=testdir),
@@ -27,7 +28,8 @@ def rgwadmin(ctx, client, cmd, stdin=StringIO(), check_status=False,
         'radosgw-admin'.format(tdir=testdir),
         '--log-to-stderr',
         '--format', format,
-        '-n', client,
+        '-n', client_with_id,
+        '--cluster', cluster_name,
         ]
     pre.extend(cmd)
     log.info('rgwadmin: cmd=%s' % pre)
@@ -70,8 +72,10 @@ def get_user_successful_ops(out, user):
     return get_user_summary(out, user)['total']['successful_ops']
 
 def get_zone_host_and_port(ctx, client, zone):
+    cluster_name, daemon_type, client_id = teuthology.split_role(client)
+    client_with_id = daemon_type + '.' + client_id
     _, period = rgwadmin(ctx, client, check_status=True,
-                         cmd=['-n', client, 'period', 'get'])
+                         cmd=['period', 'get'])
     period_map = period['period_map']
     zonegroups = period_map['zonegroups']
     for zonegroup in zonegroups:
@@ -85,8 +89,10 @@ def get_zone_host_and_port(ctx, client, zone):
     assert False, 'no endpoint for zone {zone} found'.format(zone=zone)
 
 def get_master_zone(ctx, client):
+    cluster_name, daemon_type, client_id = teuthology.split_role(client)
+    client_with_id = daemon_type + '.' + client_id
     _, period = rgwadmin(ctx, client, check_status=True,
-                         cmd=['-n', client, 'period', 'get'])
+                         cmd=['period', 'get'])
     period_map = period['period_map']
     zonegroups = period_map['zonegroups']
     for zonegroup in zonegroups:
@@ -116,27 +122,29 @@ def get_master_client(ctx, clients):
 
 def get_zone_system_keys(ctx, client, zone):
     _, zone_info = rgwadmin(ctx, client, check_status=True,
-                            cmd=['-n', client,
-                                 'zone', 'get', '--rgw-zone', zone])
+                            cmd=['zone', 'get', '--rgw-zone', zone])
     system_key = zone_info['system_key']
     return system_key['access_key'], system_key['secret_key']
 
 def zone_for_client(ctx, client):
-    ceph_config = ctx.ceph['ceph'].conf.get('global', {})
-    ceph_config.update(ctx.ceph['ceph'].conf.get('client', {}))
-    ceph_config.update(ctx.ceph['ceph'].conf.get(client, {}))
+    cluster_name, daemon_type, client_id = teuthology.split_role(client)
+    ceph_config = ctx.ceph[cluster_name].conf.get('global', {})
+    ceph_config.update(ctx.ceph[cluster_name].conf.get('client', {}))
+    ceph_config.update(ctx.ceph[cluster_name].conf.get(client, {}))
     return ceph_config.get('rgw zone')
 
 def region_for_client(ctx, client):
-    ceph_config = ctx.ceph['ceph'].conf.get('global', {})
-    ceph_config.update(ctx.ceph['ceph'].conf.get('client', {}))
-    ceph_config.update(ctx.ceph['ceph'].conf.get(client, {}))
+    cluster_name, daemon_type, client_id = teuthology.split_role(client)
+    ceph_config = ctx.ceph[cluster_name].conf.get('global', {})
+    ceph_config.update(ctx.ceph[cluster_name].conf.get('client', {}))
+    ceph_config.update(ctx.ceph[cluster_name].conf.get(client, {}))
     return ceph_config.get('rgw region')
 
 def radosgw_data_log_window(ctx, client):
-    ceph_config = ctx.ceph['ceph'].conf.get('global', {})
-    ceph_config.update(ctx.ceph['ceph'].conf.get('client', {}))
-    ceph_config.update(ctx.ceph['ceph'].conf.get(client, {}))
+    cluster_name, daemon_type, client_id = teuthology.split_role(client)
+    ceph_config = ctx.ceph[cluster_name].conf.get('global', {})
+    ceph_config.update(ctx.ceph[cluster_name].conf.get('client', {}))
+    ceph_config.update(ctx.ceph[cluster_name].conf.get(client, {}))
     return ceph_config.get('rgw data log window', 30)
 
 def radosgw_agent_sync_data(ctx, agent_host, agent_port, full=False):
-- 
2.39.5
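Net effect on the admin wrapper in util/rgw.py: for a role such as 'cluster1.client.0', rgwadmin() now builds a command prefix along these lines (testdir path abbreviated, values illustrative):

    pre = ['adjust-ulimits',
           'ceph-coverage', '<tdir>/archive/coverage',
           'radosgw-admin', '--log-to-stderr', '--format', 'json',
           '-n', 'client.0', '--cluster', 'cluster1']

so radosgw-admin authenticates as client.0 against the cluster1 cluster rather than assuming the default ceph cluster.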