for client in clients_to_create_as:
if client == except_client:
continue
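+ # teuthology.split_role('ceph.client.0') -> ('ceph', 'client', '0');
+ # qualify per-client paths with the cluster name so clusters don't collide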
+ cluster_name, daemon_type, client_id = teuthology.split_role(client)
+ client_with_cluster = cluster_name + '.' + daemon_type + '.' + client_id
ctx.cluster.only(client).run(
args=[
'mkdir',
'-p',
- '{tdir}/apache/htdocs.{client}'.format(tdir=testdir,
- client=client),
- '{tdir}/apache/tmp.{client}/fastcgi_sock'.format(
+ '{tdir}/apache/htdocs.{client_with_cluster}'.format(tdir=testdir,
+ client_with_cluster=client_with_cluster),
+ '{tdir}/apache/tmp.{client_with_cluster}/fastcgi_sock'.format(
tdir=testdir,
- client=client),
+ client_with_cluster=client_with_cluster),
run.Raw('&&'),
'mkdir',
- '{tdir}/archive/apache.{client}'.format(tdir=testdir,
- client=client),
+ '{tdir}/archive/apache.{client_with_cluster}'.format(tdir=testdir,
+ client_with_cluster=client_with_cluster),
],
)
try:
args=[
'rm',
'-rf',
- '{tdir}/apache/tmp.{client}'.format(tdir=testdir,
- client=client),
+ '{tdir}/apache/tmp.{client_with_cluster}'.format(tdir=testdir,
+ client_with_cluster=client_with_cluster),
run.Raw('&&'),
'rmdir',
- '{tdir}/apache/htdocs.{client}'.format(tdir=testdir,
- client=client),
+ '{tdir}/apache/htdocs.{client_with_cluster}'.format(tdir=testdir,
+ client_with_cluster=client_with_cluster),
],
)
for client in clients_to_create_as:
if client == except_client:
continue
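+ # client_with_id ('client.0') names the daemon within a cluster;
+ # client_with_cluster ('ceph.client.0') is unique across clusters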
+ cluster_name, daemon_type, client_id = teuthology.split_role(client)
+ client_with_id = daemon_type + '.' + client_id
+ client_with_cluster = cluster_name + '.' + client_with_id
(remote,) = ctx.cluster.only(client).remotes.keys()
system_type = teuthology.get_system_type(remote)
conf = config.get(client)
print_continue=print_continue,
host=host,
port=port,
- client=client,
+ client=client_with_cluster,
idle_timeout=idle_timeout,
user=user,
group=group,
)
teuthology.write_file(
remote=remote,
- path='{tdir}/apache/apache.{client}.conf'.format(
+ path='{tdir}/apache/apache.{client_with_cluster}.conf'.format(
tdir=testdir,
- client=client),
+ client_with_cluster=client_with_cluster),
data=conf,
)
rgw_options = []
if ctx.rgw.use_fastcgi or _use_uds_with_fcgi(remote):
rgw_options = [
'--rgw-socket-path',
- '{tdir}/apache/tmp.{client}/fastcgi_sock/rgw_sock'.format(
+ '{tdir}/apache/tmp.{client_with_cluster}/fastcgi_sock/rgw_sock'.format(
tdir=testdir,
- client=client
+ client_with_cluster=client_with_cluster
),
'--rgw-frontends',
'fastcgi',
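+ # the rgw.fcgi wrapper now passes --cluster so the radosgw it execs
+ # picks up that cluster's conf and keyring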
teuthology.write_file(
remote=remote,
- path='{tdir}/apache/htdocs.{client}/rgw.fcgi'.format(
+ path='{tdir}/apache/htdocs.{client_with_cluster}/rgw.fcgi'.format(
tdir=testdir,
- client=client),
+ client_with_cluster=client_with_cluster),
data="""#!/bin/sh
ulimit -c unlimited
-exec radosgw -f -n {client} -k /etc/ceph/ceph.{client}.keyring {rgw_options}
+exec radosgw -f -n {client_with_id} --cluster {cluster_name} -k /etc/ceph/{client_with_cluster}.keyring {rgw_options}
-""".format(tdir=testdir, client=client, rgw_options=" ".join(rgw_options))
+""".format(tdir=testdir, client_with_id=client_with_id, client_with_cluster=client_with_cluster, cluster_name=cluster_name, rgw_options=" ".join(rgw_options))
)
remote.run(
args=[
'chmod',
'a=rx',
- '{tdir}/apache/htdocs.{client}/rgw.fcgi'.format(tdir=testdir,
- client=client),
+ '{tdir}/apache/htdocs.{client_with_cluster}/rgw.fcgi'.format(tdir=testdir,
+ client_with_cluster=client_with_cluster),
],
)
try:
args=[
'rm',
'-f',
- '{tdir}/apache/apache.{client}.conf'.format(tdir=testdir,
- client=client),
+ '{tdir}/apache/apache.{client_with_cluster}.conf'.format(tdir=testdir,
+ client_with_cluster=client_with_cluster),
run.Raw('&&'),
'rm',
'-f',
- '{tdir}/apache/htdocs.{client}/rgw.fcgi'.format(
+ '{tdir}/apache/htdocs.{client_with_cluster}/rgw.fcgi'.format(
tdir=testdir,
- client=client),
+ client_with_cluster=client_with_cluster),
],
)
clients_to_run = [on_client]
if on_client is None:
clients_to_run = config.keys()
+ log.debug('clients to run: %r', clients_to_run)
testdir = teuthology.get_testdir(ctx)
for client in clients_to_run:
if client == except_client:
continue
(remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ cluster_name, daemon_type, client_id = teuthology.split_role(client)
+ client_with_id = daemon_type + '.' + client_id
+ client_with_cluster = cluster_name + '.' + client_with_id
zone = rgw_utils.zone_for_client(ctx, client)
log.debug('zone %s', zone)
+
client_config = config.get(client)
if client_config is None:
client_config = {}
if ctx.rgw.use_fastcgi or _use_uds_with_fcgi(remote):
rgw_cmd.extend([
'--rgw-socket-path',
- '{tdir}/apache/tmp.{client}/fastcgi_sock/rgw_sock'.format(
+ '{tdir}/apache/tmp.{client_with_cluster}/fastcgi_sock/rgw_sock'.format(
tdir=testdir,
- client=client,
+ client_with_cluster=client_with_cluster,
),
'--rgw-frontends',
'fastcgi',
rgw_cmd.extend(['--rgw-zone', zone])
rgw_cmd.extend([
- '-n', client,
- '-k', '/etc/ceph/ceph.{client}.keyring'.format(client=client),
+ '-n', client_with_id,
+ '--cluster', cluster_name,
+ '-k', '/etc/ceph/{client_with_cluster}.keyring'.format(client_with_cluster=client_with_cluster),
'--log-file',
- '/var/log/ceph/rgw.{client}.log'.format(client=client),
+ '/var/log/ceph/rgw.{client_with_cluster}.log'.format(client_with_cluster=client_with_cluster),
'--rgw_ops_log_socket_path',
- '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir,
- client=client),
+ '{tdir}/rgw.opslog.{client_with_cluster}.sock'.format(tdir=testdir,
+ client_with_cluster=client_with_cluster),
'--foreground',
run.Raw('|'),
'sudo',
'tee',
- '/var/log/ceph/rgw.{client}.stdout'.format(tdir=testdir,
- client=client),
+ '/var/log/ceph/rgw.{client_with_cluster}.stdout'.format(
+ client_with_cluster=client_with_cluster),
run.Raw('2>&1'),
])
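+ # register the daemon under its cluster so teardown stops the rgw
+ # belonging to the right cluster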
ctx.daemons.add_daemon(
remote, 'rgw', client,
+ cluster=cluster_name,
args=run_cmd,
logger=log.getChild(client),
stdin=run.PIPE,
args=[
'rm',
'-f',
- '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir,
- client=client),
+ '{tdir}/rgw.opslog.{client_with_cluster}.sock'.format(tdir=testdir,
+ client_with_cluster=client_with_cluster),
],
)
if on_client is None:
clients_to_run = config.keys()
for client in clients_to_run:
+ cluster_name, daemon_type, client_id = teuthology.split_role(client)
+ client_with_cluster = cluster_name + '.' + daemon_type + '.' + client_id
if client == except_client:
continue
(remote,) = ctx.cluster.only(client).remotes.keys()
apache_name,
'-X',
'-f',
- '{tdir}/apache/apache.{client}.conf'.format(tdir=testdir,
- client=client),
+ '{tdir}/apache/apache.{client_with_cluster}.conf'.format(tdir=testdir,
+ client_with_cluster=client_with_cluster),
],
logger=log.getChild(client),
stdin=run.PIPE,
wait=False,
)
- apaches[client] = proc
+ apaches[client_with_cluster] = proc
try:
yield
run.wait(apaches.itervalues())
-
def extract_user_info(client_config):
"""
Extract user info from the specified client config.
:param client_config: dictionary of client configuration information
:returns: dict of user info extracted from the client_config
"""
- ceph_config = ctx.ceph['ceph'].conf.get('global', {})
- ceph_config.update(ctx.ceph['ceph'].conf.get('client', {}))
- ceph_config.update(ctx.ceph['ceph'].conf.get(client, {}))
+ cluster_name, daemon_type, client_id = teuthology.split_role(client)
+ client_with_id = daemon_type + '.' + client_id
+ ceph_config = ctx.ceph[cluster_name].conf.get('global', {})
+ ceph_config.update(ctx.ceph[cluster_name].conf.get('client', {}))
+ ceph_config.update(ctx.ceph[cluster_name].conf.get(client_with_id, {}))
for key in ['rgw zone', 'rgw region', 'rgw zone root pool']:
assert key in ceph_config, \
'ceph conf must contain {key} for {client}'.format(key=key,
for client in config.keys():
(remote,) = ctx.cluster.only(client).remotes.iterkeys()
data_pool = '.rgw.buckets'
+ cluster_name, daemon_type, client_id = teuthology.split_role(client)
+
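+ # the pool helpers take the cluster name so the data pools are
+ # created in the client's own cluster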
if ctx.rgw.ec_data_pool:
create_ec_pool(remote, data_pool, client, 64,
- ctx.rgw.erasure_code_profile)
+ ctx.rgw.erasure_code_profile, cluster_name)
else:
- create_replicated_pool(remote, data_pool, 64)
+ create_replicated_pool(remote, data_pool, 64, cluster_name)
if ctx.rgw.cache_pools:
create_cache_pool(remote, data_pool, data_pool + '.cache', 64,
- 64*1024*1024)
+ 64*1024*1024, cluster_name)
yield
@contextlib.contextmanager
log.debug('regions are %r', regions)
log.debug('role_endpoints = %r', role_endpoints)
log.debug('realm is %r', realm)
+
# extract the zone info
role_zones = dict([(client, extract_zone_info(ctx, client, c_config))
for client, c_config in config.iteritems()])
fill_in_endpoints(region_info, role_zones, role_endpoints)
# clear out the old defaults
- first_mon = teuthology.get_first_mon(ctx, config)
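+ # the master client's cluster determines which mon we talk to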
+ cluster_name, daemon_type, client_id = teuthology.split_role(master_client)
+ first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
(mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
# read master zonegroup and master_zone
(remote,) = ctx.cluster.only(role).remotes.keys()
for pool_info in zone_info['placement_pools']:
remote.run(args=['sudo', 'ceph', 'osd', 'pool', 'create',
- pool_info['val']['index_pool'], '64', '64'])
+ pool_info['val']['index_pool'], '64', '64', '--cluster', cluster_name])
if ctx.rgw.ec_data_pool:
create_ec_pool(remote, pool_info['val']['data_pool'],
- zone, 64, ctx.rgw.erasure_code_profile)
+ zone, 64, ctx.rgw.erasure_code_profile, cluster_name)
else:
- create_replicated_pool(remote, pool_info['val']['data_pool'], 64)
+ create_replicated_pool(remote, pool_info['val']['data_pool'], 64, cluster_name)
(zonegroup, zone, zone_info, user_info) = role_zones[master_client]
zone_json = json.dumps(dict(zone_info.items() + user_info.items()))
check_status=True)
rgwadmin(ctx, master_client,
- cmd=['-n', master_client, 'zone', 'default', zone],
+ cmd=['zone', 'default', '--rgw-zone', zone],
check_status=True)
rgwadmin(ctx, master_client,
- cmd=['-n', master_client, 'period', 'update', '--commit'],
+ cmd=['period', 'update', '--commit'],
check_status=True)
yield
log.debug('regions are %r', regions)
log.debug('role_endpoints = %r', role_endpoints)
log.debug('realm is %r', realm)
+
# extract the zone info
role_zones = dict([(client, extract_zone_info(ctx, client, c_config))
for client, c_config in config.iteritems()])
fill_in_endpoints(region_info, role_zones, role_endpoints)
# clear out the old defaults
- first_mon = teuthology.get_first_mon(ctx, config)
+ cluster_name, daemon_type, client_id = teuthology.split_role(client)
+ first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
(mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
# removing these objects from .rgw.root and the per-zone root pools
# may or may not matter
rados(ctx, mon,
- cmd=['-p', '.rgw.root', 'rm', 'region_info.default'])
+ cmd=['-p', '.rgw.root', 'rm', 'region_info.default', '--cluster', cluster_name])
rados(ctx, mon,
- cmd=['-p', '.rgw.root', 'rm', 'zone_info.default'])
+ cmd=['-p', '.rgw.root', 'rm', 'zone_info.default', '--cluster', cluster_name])
# read master zonegroup and master_zone
for zonegroup, zg_info in region_info.iteritems():
log.debug('master client = %r', master_client)
log.debug('config %r ', config)
- (ret, out)=rgwadmin(ctx, master_client,
+ (ret, out) = rgwadmin(ctx, client,
cmd=['realm', 'create', '--rgw-realm', realm, '--default'])
log.debug('realm create ret %r exists %r', -ret, errno.EEXIST)
assert ret == 0 or ret == -errno.EEXIST
for role, (zonegroup, zone, zone_info, user_info) in role_zones.iteritems():
rados(ctx, mon,
cmd=['-p', zone_info['domain_root'],
- 'rm', 'region_info.default'])
+ 'rm', 'region_info.default', '--cluster', cluster_name])
rados(ctx, mon,
cmd=['-p', zone_info['domain_root'],
- 'rm', 'zone_info.default'])
+ 'rm', 'zone_info.default', '--cluster', cluster_name])
(remote,) = ctx.cluster.only(role).remotes.keys()
for pool_info in zone_info['placement_pools']:
remote.run(args=['sudo', 'ceph', 'osd', 'pool', 'create',
- pool_info['val']['index_pool'], '64', '64'])
+ pool_info['val']['index_pool'], '64', '64', '--cluster', cluster_name])
if ctx.rgw.ec_data_pool:
create_ec_pool(remote, pool_info['val']['data_pool'],
- zone, 64, ctx.rgw.erasure_code_profile)
+ zone, 64, ctx.rgw.erasure_code_profile, cluster_name)
else:
create_replicated_pool(
remote, pool_info['val']['data_pool'],
- 64)
+ 64, cluster_name)
zone_json = json.dumps(dict(zone_info.items() + user_info.items()))
log.debug('zone info is: %r', zone_json)
rgwadmin(ctx, client,
(zonegroup, zone, zone_info, user_info) = role_zones[client]
rgwadmin(ctx, client,
- cmd=['zone', 'default', zone],
+ cmd=['zone', 'default', '--rgw-zone', zone],
check_status=True)
- rgwadmin(ctx, master_client,
- cmd=['-n', master_client, 'period', 'update', '--commit'],
+ # this used to run against master_client; revisit if that needs to change
+ rgwadmin(ctx, client,
+ cmd=['period', 'update', '--commit'],
check_status=True)
yield
for client in config.iterkeys():
if client != master_client:
+ cluster_name, daemon_type, client_id = teuthology.split_role(client)
host, port = role_endpoints[master_client]
endpoint = 'http://{host}:{port}/'.format(host=host, port=port)
log.debug("endpoint: %s", endpoint)
rgwadmin(ctx, client,
- cmd=['-n', client, 'realm', 'pull', '--rgw-realm', realm, '--default', '--url',
+ cmd=['realm', 'pull', '--rgw-realm', realm, '--default', '--url',
endpoint, '--access_key',
user_info['system_key']['access_key'], '--secret',
user_info['system_key']['secret_key']],
(zonegroup, zone, zone_info, zone_user_info) = role_zones[client]
zone_json = json.dumps(dict(zone_info.items() + zone_user_info.items()))
- log.debug("zone info is: %r"), zone_json
+ log.debug("zone info is: %r", zone_json)
rgwadmin(ctx, client,
- cmd=['zone', 'set', '--rgw-zonegroup', zonegroup,
+ cmd=['zone', 'set', '--default',
'--rgw-zone', zone],
stdin=StringIO(zone_json),
check_status=True)
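+ # attach the zone to its zonegroup and make that zonegroup the
+ # default before committing the period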
+ rgwadmin(ctx, client,
+ cmd=['zonegroup', 'add', '--rgw-zonegroup', zonegroup, '--rgw-zone', zone],
+ check_status=True)
+
+ rgwadmin(ctx, client,
+ cmd=['zonegroup', 'default', '--rgw-zonegroup', zonegroup],
+ check_status=True)
+
rgwadmin(ctx, client,
cmd=['period', 'update', '--commit', '--url',
endpoint, '--access_key',
lambda: create_nonregion_pools(
ctx=ctx, config=config, regions=regions),
]
+ log.debug('Nonregion pools created')
multisite = len(regions) > 1
break
log.debug('multisite %s', multisite)
- multi_cluster = multisite and len(ctx.config['roles']) > 1
+
+ multi_cluster = False
+ if multisite:
+ prev_cluster_name = None
+ roles = ctx.config['roles']
+ # check whether any two roles have different cluster names
+ for lst in roles:
+ for role in lst:
+ cluster_name, daemon_type, client_id = teuthology.split_role(role)
+ if cluster_name != prev_cluster_name and prev_cluster_name is not None:
+ multi_cluster = True
+ break
+ prev_cluster_name = cluster_name
+ if multi_cluster:
+ break
+
log.debug('multi_cluster %s', multi_cluster)
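+ # stash the raw task config on ctx.rgw for later use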
+ ctx.rgw.config = config
master_client = None
if multi_cluster:
log = logging.getLogger(__name__)
-# simple test to indicate if multi-region testing should occur
def multi_region_enabled(ctx):
# this is populated by the radosgw-agent task, seems reasonable to
# use that as an indicator that we're testing multi-region sync
format='json'):
log.info('rgwadmin: {client} : {cmd}'.format(client=client, cmd=cmd))
testdir = teuthology.get_testdir(ctx)
+ cluster_name, daemon_type, client_id = teuthology.split_role(client)
+ client_with_id = daemon_type + '.' + client_id
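+ # e.g. role 'ceph.client.0' is invoked below as '-n client.0 --cluster ceph'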
pre = [
'adjust-ulimits',
'ceph-coverage',
'radosgw-admin',
'--log-to-stderr',
'--format', format,
- '-n', client,
+ '-n', client_with_id,
+ '--cluster', cluster_name,
]
pre.extend(cmd)
log.info('rgwadmin: cmd=%s' % pre)
return get_user_summary(out, user)['total']['successful_ops']
def get_zone_host_and_port(ctx, client, zone):
+ cluster_name, daemon_type, client_id = teuthology.split_role(client)
+ client_with_id = daemon_type + '.' + client_id
_, period = rgwadmin(ctx, client, check_status=True,
- cmd=['-n', client, 'period', 'get'])
+ cmd=['period', 'get'])
period_map = period['period_map']
zonegroups = period_map['zonegroups']
for zonegroup in zonegroups:
assert False, 'no endpoint for zone {zone} found'.format(zone=zone)
def get_master_zone(ctx, client):
+ cluster_name, daemon_type, client_id = teuthology.split_role(client)
+ client_with_id = daemon_type + '.' + client_id
_, period = rgwadmin(ctx, client, check_status=True,
- cmd=['-n', client, 'period', 'get'])
+ cmd=['period', 'get'])
period_map = period['period_map']
zonegroups = period_map['zonegroups']
for zonegroup in zonegroups:
def get_zone_system_keys(ctx, client, zone):
_, zone_info = rgwadmin(ctx, client, check_status=True,
- cmd=['-n', client,
- 'zone', 'get', '--rgw-zone', zone])
+ cmd=['zone', 'get', '--rgw-zone', zone])
system_key = zone_info['system_key']
return system_key['access_key'], system_key['secret_key']
def zone_for_client(ctx, client):
- ceph_config = ctx.ceph['ceph'].conf.get('global', {})
- ceph_config.update(ctx.ceph['ceph'].conf.get('client', {}))
- ceph_config.update(ctx.ceph['ceph'].conf.get(client, {}))
+ cluster_name, daemon_type, client_id = teuthology.split_role(client)
+ client_with_id = daemon_type + '.' + client_id
+ ceph_config = ctx.ceph[cluster_name].conf.get('global', {})
+ ceph_config.update(ctx.ceph[cluster_name].conf.get('client', {}))
+ ceph_config.update(ctx.ceph[cluster_name].conf.get(client_with_id, {}))
return ceph_config.get('rgw zone')
def region_for_client(ctx, client):
- ceph_config = ctx.ceph['ceph'].conf.get('global', {})
- ceph_config.update(ctx.ceph['ceph'].conf.get('client', {}))
- ceph_config.update(ctx.ceph['ceph'].conf.get(client, {}))
+ cluster_name, daemon_type, client_id = teuthology.split_role(client)
+ client_with_id = daemon_type + '.' + client_id
+ ceph_config = ctx.ceph[cluster_name].conf.get('global', {})
+ ceph_config.update(ctx.ceph[cluster_name].conf.get('client', {}))
+ ceph_config.update(ctx.ceph[cluster_name].conf.get(client_with_id, {}))
return ceph_config.get('rgw region')
def radosgw_data_log_window(ctx, client):
- ceph_config = ctx.ceph['ceph'].conf.get('global', {})
- ceph_config.update(ctx.ceph['ceph'].conf.get('client', {}))
- ceph_config.update(ctx.ceph['ceph'].conf.get(client, {}))
+ cluster_name, daemon_type, client_id = teuthology.split_role(client)
+ client_with_id = daemon_type + '.' + client_id
+ ceph_config = ctx.ceph[cluster_name].conf.get('global', {})
+ ceph_config.update(ctx.ceph[cluster_name].conf.get('client', {}))
+ ceph_config.update(ctx.ceph[cluster_name].conf.get(client_with_id, {}))
return ceph_config.get('rgw data log window', 30)
def radosgw_agent_sync_data(ctx, agent_host, agent_port, full=False):