PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ remote = get_single_remote_value(ctx, role)
yield (id_, remote)
if not machinetypes:
machinetypes.append(machinetype)
return machinetypes
+
+
+def get_single_remote_value(ctx, role):
+ """
+ Return the first (and hopefully only) remotes value for this role.
+ Added log.errors so that error conditions are not as confusing as
+ they used to be. This code still throws a value error so that the
+ stack is dumped and the location of the fault can be found easily.
+ """
+ keyz = ctx.cluster.only(role).remotes.keys()
+ if len(keyz) == 0:
+ log.error("Role list for %s is empty" % role)
+ if len(keyz) > 1:
+ bad_keys = ", ".join(keyz)
+ log.error("Only one remote value should exist for %s -- %s found" %
+ role, bad_keys)
+ (remote,) = keyz
+ return remote
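+
+# Illustrative sketch of the intended call pattern (not taken from any caller
+# below; 'client.0' is just a hypothetical role that maps to a single host):
+#
+#     remote = get_single_remote_value(ctx, 'client.0')
+#     remote.run(args=['uname', '-r'])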
"""
testdir = teuthology.get_testdir(ctx)
log.debug('Running admin socket tests on %s', client)
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, client)
socket_path = '/var/run/ceph/ceph-{name}.asok'.format(name=client)
overrides = ctx.config.get('overrides', {}).get('admin_socket', {})
testdir = teuthology.get_testdir(ctx)
with parallel() as p:
for role in config.iterkeys():
- (remote,) = ctx.cluster.only(role).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, role)
p.spawn(_download, testdir, remote)
log.info('Making a separate scratch dir for every client...')
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, role)
mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
scratch = os.path.join(mnt, 'client.{id}'.format(id=id_))
remote.run(
with parallel() as p:
for role, tests in config.iteritems():
- (remote,) = ctx.cluster.only(role).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, role)
p.spawn(_run_tests, testdir, remote, role, tests)
def _download(testdir, remote):
def _remotes(ctx, selector):
return ctx.cluster.only(selector).remotes.keys()
"""
Tasks
"""Wait until a Ceph cluster is healthy."""
testdir = teuthology.get_testdir(ctx)
ceph_admin = teuthology.get_first_mon(ctx, config)
- (remote,) = ctx.cluster.only(ceph_admin).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, ceph_admin)
max_tries = 90 # 90 tries * 10 secs --> 15 minutes
tries = 0
while True:
testdir = teuthology.get_testdir(ctx)
ceph_admin = teuthology.get_first_mon(ctx, config)
exec_cmd = cmd
- (remote,) = ctx.cluster.only(ceph_admin).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, ceph_admin)
proc = remote.run(
args = [
'cd',
testdir = teuthology.get_testdir(ctx)
conf_path = '{tdir}/ceph-deploy/ceph.conf'.format(tdir=testdir)
first_mon = teuthology.get_first_mon(ctx, config)
- (remote,) = ctx.cluster.only(first_mon).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, first_mon)
lines = None
if config.get('conf') is not None:
conf_path = '/etc/ceph/ceph.conf'
admin_keyring_path = '/etc/ceph/ceph.client.admin.keyring'
first_mon = teuthology.get_first_mon(ctx, config)
- (mon0_remote,) = ctx.cluster.only(first_mon).remotes.keys()
+ mon0_remote = teuthology.get_single_remote_value(ctx, first_mon)
conf_data = teuthology.get_file(
remote=mon0_remote,
path=conf_path,
keyring_path,
],
)
- (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
+ mon0_remote = teuthology.get_single_remote_value(ctx, firstmon)
fsid = teuthology.create_simple_monmap(
ctx,
remote=mon0_remote,
ctx.summary['success'] = False
raise
finally:
- (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
+ mon0_remote = teuthology.get_single_remote_value(ctx, firstmon)
log.info('Checking cluster log for badness...')
def first_in_ceph_log(pattern, excludes):
if type_ == 'mds':
firstmon = teuthology.get_first_mon(ctx, config)
- (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
+ mon0_remote = teuthology.get_single_remote_value(ctx, firstmon)
mon0_remote.run(args=[
'adjust-ulimits',
"""
log.info('Waiting until ceph is healthy...')
firstmon = teuthology.get_first_mon(ctx, config)
- (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
+ mon0_remote = teuthology.get_single_remote_value(ctx, firstmon)
teuthology.wait_until_osds_up(
ctx,
cluster=ctx.cluster,
"""
log.info('Waiting until ceph osds are all up...')
firstmon = teuthology.get_first_mon(ctx, config)
- (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
+ mon0_remote = teuthology.get_single_remote_value(ctx, firstmon)
teuthology.wait_until_osds_up(
ctx,
cluster=ctx.cluster,
assert isinstance(config, list)
firstmon = teuthology.get_first_mon(ctx, config)
- (remote,) = ctx.cluster.only(firstmon).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, firstmon)
while True:
r = remote.run(
args=[
or by stopping.
"""
if self.config.get('powercycle'):
- (remote,) = self.ctx.cluster.only('osd.{o}'.format(o=osd)).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(self.ctx,
+ 'osd.{o}'.format(o=osd))
self.log('kill_osd on osd.{o} doing powercycle of {s}'.format(o=osd, s=remote.name))
assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config."
remote.console.power_off()
or by restarting.
"""
if self.config.get('powercycle'):
- (remote,) = self.ctx.cluster.only('osd.{o}'.format(o=osd)).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(self.ctx,
+ 'osd.{o}'.format(o=osd))
self.log('kill_osd on osd.{o} doing powercycle of {s}'.format(o=osd, s=remote.name))
assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config."
remote.console.power_on()
or by doing a stop.
"""
if self.config.get('powercycle'):
- (remote,) = self.ctx.cluster.only('mon.{m}'.format(m=mon)).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(self.ctx,
+ 'mon.{m}'.format(m=mon))
self.log('kill_mon on mon.{m} doing powercycle of {s}'.format(m=mon, s=remote.name))
assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config."
remote.console.power_off()
or by doing a normal restart.
"""
if self.config.get('powercycle'):
- (remote,) = self.ctx.cluster.only('mon.{m}'.format(m=mon)).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(self.ctx,
+ 'mon.{m}'.format(m=mon))
self.log('revive_mon on mon.{m} doing powercycle of {s}'.format(m=mon, s=remote.name))
assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config."
remote.console.power_on()
Powercycle if set in config, otherwise just stop.
"""
if self.config.get('powercycle'):
- (remote,) = self.ctx.cluster.only('mds.{m}'.format(m=mds)).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(self.ctx,
+ 'mds.{m}'.format(m=mds))
self.log('kill_mds on mds.{m} doing powercycle of {s}'.format(m=mds, s=remote.name))
assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config."
remote.console.power_off()
and then restart (using --hot-standby if specified).
"""
if self.config.get('powercycle'):
- (remote,) = self.ctx.cluster.only('mds.{m}'.format(m=mds)).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(self.ctx,
+ 'mds.{m}'.format(m=mds))
self.log('revive_mds on mds.{m} doing powercycle of {s}'.format(m=mds, s=remote.name))
assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config."
remote.console.power_on()
for role, properties in images:
if properties is None:
properties = {}
- (remote,) = ctx.cluster.only(role).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, role)
image = properties.get('image_name', default_image_name(role))
fs_type = properties.get('fs_type', 'ext3')
remote.run(
for role, image in role_images:
if image is None:
image = default_image_name(role)
- (remote,) = ctx.cluster.only(role).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, role)
id_ = strip_client_prefix(role)
mnt = mnt_template.format(tdir=testdir, id=id_)
mounted.append((remote, mnt))
try:
for client, tests in clients.iteritems():
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, client)
client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
remote.run(
args=[
p.spawn(_run_tests, ctx, role)
finally:
for client, tests in clients.iteritems():
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, client)
client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
test_files = set([test.rsplit('/', 1)[1] for test in tests])
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, role)
ceph_ref = ctx.summary.get('ceph-sha1', 'master')
testdir = teuthology.get_testdir(ctx)
config = {}
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ mon = teuthology.get_single_remote_value(ctx, first_mon)
num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
log.info('num_osds is %s' % num_osds)
while True:
for i in range(num_osds):
- (osd_remote,) = ctx.cluster.only('osd.%d' % i).remotes.iterkeys()
+ osd_remote = teuthology.get_single_remote_value(ctx, 'osd.%d' % i)
p = osd_remote.run(
args = [ 'test', '-e', '{tdir}/err'.format(tdir=testdir) ],
wait=True,
assert isinstance(config, dict), \
'divergent_priors task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ mon = teuthology.get_single_remote_value(ctx, first_mon)
manager = ceph_manager.CephManager(
mon,
timeout = 60
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ mon = teuthology.get_single_remote_value(ctx, first_mon)
manager = ceph_manager.CephManager(
mon,
config = dict((id_, a) for id_ in roles)
for role, ls in config.iteritems():
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, role)
log.info('Running commands on role %s host %s', role, remote.name)
for c in ls:
c.replace('$TESTDIR', testdir)
# just use the first client...
client = clients[0];
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, client)
testdir = teuthology.get_testdir(ctx)
remotes[remote] = config.get('all')
else:
for role in config.keys():
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, role)
if remote in remotes:
log.warn('remote %s came up twice (role %s)', remote, role)
continue
if config[role].find('distro') >= 0:
log.info('Skipping firmware on distro kernel');
return
- (role_remote,) = ctx.cluster.only(role).remotes.keys()
+ role_remote = teuthology.get_single_remote_value(ctx, role)
log.info('Installing linux-firmware on {role}...'.format(role=role))
role_remote.run(
args=[
procs = {}
#Don't need to download distro kernels
for role, src in config.iteritems():
- (role_remote,) = ctx.cluster.only(role).remotes.keys()
+ role_remote = teuthology.get_single_remote_value(ctx, role)
if src.find('distro') >= 0:
log.info('Installing newest kernel distro');
return
procs = {}
kernel_title = ''
for role, src in config.iteritems():
- (role_remote,) = ctx.cluster.only(role).remotes.keys()
+ role_remote = teuthology.get_single_remote_value(ctx, role)
if src.find('distro') >= 0:
log.info('Installing distro kernel on {role}...'.format(role=role))
install_distro_kernel(role_remote)
:param config: Configuration
"""
for role, enable in config.iteritems():
- (role_remote,) = ctx.cluster.only(role).remotes.keys()
+ role_remote = teuthology.get_single_remote_value(ctx, role)
if "mira" in role_remote.name:
serialdev = "ttyS2"
else:
and compares against current (uname -r) and returns true if newest != current.
Similar check for deb.
"""
- (role_remote,) = ctx.cluster.only(role).remotes.keys()
+ role_remote = teuthology.get_single_remote_value(ctx, role)
system_type = teuthology.get_system_type(role_remote)
output, err_mess = StringIO(), StringIO()
role_remote.run(args=['uname', '-r' ], stdout=output, stderr=err_mess )
files = set(files)
lock_procs = list()
for client in clients:
- (client_remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ client_remote = teuthology.get_single_remote_value(ctx, client)
log.info("got a client remote")
(_, _, client_id) = client.partition('.')
filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), op["lockfile"])
# create the files to run these locks on
client = clients.pop()
clients.add(client)
- (client_remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ client_remote = teuthology.get_single_remote_value(ctx, client)
(_, _, client_id) = client.partition('.')
file_procs = list()
for lockfile in files:
greenlet.kill(block=True)
for client in clients:
- (client_remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ client_remote = teuthology.get_single_remote_value(ctx, client)
(_, _, client_id) = client.partition('.')
filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), op["lockfile"])
proc = client_remote.run(
timeout = None
proc = None
result = None
- (client_remote,) = ctx.cluster.only(op['client']).remotes.iterkeys()
+ client_remote = teuthology.get_single_remote_value(ctx, op['client'])
(_, _, client_id) = op['client'].partition('.')
testdir = teuthology.get_testdir(ctx)
filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), op["lockfile"])
assert isinstance(config, dict), \
'lost_unfound task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ mon = teuthology.get_single_remote_value(ctx, first_mon)
manager = ceph_manager.CephManager(
mon,
log.info('got client_roles={client_roles_}'.format(client_roles_=client_roles))
for role in client_roles:
log.info('role={role_}'.format(role_=role))
- (creator_remote, ) = ctx.cluster.only('client.{id}'.format(id=role)).remotes.iterkeys()
+ creator_remote = teuthology.get_single_remote_value(ctx,
+ 'client.{id}'.format(id=role))
creator_remotes.append((creator_remote, 'client.{id}'.format(id=role)))
remaining_pools = poolnum
raise RuntimeError("This task requires exactly one MDS")
mds_id = mdslist[0]
- (mds_remote,) = ctx.cluster.only('mds.{_id}'.format(_id=mds_id)).remotes.iterkeys()
+ mds_remote = misc.get_single_remote_value(ctx,
+ 'mds.{_id}'.format(_id=mds_id))
manager = ceph_manager.CephManager(
mds_remote, ctx=ctx, logger=log.getChild('ceph_manager'),
)
max_thrashers = config.get('max_thrash', 1)
thrashers = {}
- (first,) = ctx.cluster.only('mds.{_id}'.format(_id=mdslist[0])).remotes.iterkeys()
+ first = teuthology.get_single_remote_value(ctx,
+ 'mds.{_id}'.format(_id=mdslist[0]))
manager = ceph_manager.CephManager(
first, ctx=ctx, logger=log.getChild('ceph_manager'),
)
'mon_clock_skew_check task only accepts a dict for configuration'
log.info('Beginning mon_clock_skew_check...')
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ mon = teuthology.get_single_remote_value(ctx, first_mon)
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
assert isinstance(config, dict), \
'task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ mon = teuthology.get_single_remote_value(ctx, first_mon)
manager = ceph_manager.CephManager(
mon,
'mon_thrash task requires at least 3 monitors'
log.info('Beginning mon_thrash...')
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ mon = teuthology.get_single_remote_value(ctx, first_mon)
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
if 'nodes' in config:
if isinstance(config['nodes'], basestring) and config['nodes'] == 'all':
for role in teuthology.all_roles(ctx.cluster):
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, role)
ip,port = remote.ssh.get_transport().getpeername()
hosts.append(ip)
remotes.append(remote)
- (master_remote,) = ctx.cluster.only(config['nodes'][0]).remotes.iterkeys()
+ master_remote = teuthology.get_single_remote_value(ctx,
+ config['nodes'][0])
elif isinstance(config['nodes'], list):
for role in config['nodes']:
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, role)
ip,port = remote.ssh.get_transport().getpeername()
hosts.append(ip)
remotes.append(remote)
- (master_remote,) = ctx.cluster.only(config['nodes'][0]).remotes.iterkeys()
+ master_remote = teuthology.get_single_remote_value(ctx,
+ config['nodes'][0])
else:
roles = ['client.{id}'.format(id=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
- (master_remote,) = ctx.cluster.only(roles[0]).remotes.iterkeys()
+ master_remote = teuthology.get_single_remote_value(ctx, roles[0])
for role in roles:
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, role)
ip,port = remote.ssh.get_transport().getpeername()
hosts.append(ip)
remotes.append(remote)
assert isinstance(config, dict), \
'lost_unfound task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ mon = teuthology.get_single_remote_value(ctx, first_mon)
manager = ceph_manager.CephManager(
mon,
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, role)
proc = remote.run(
args=[
"/bin/sh", "-c",
assert isinstance(config, dict), \
'thrashosds task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ mon = teuthology.get_single_remote_value(ctx, first_mon)
num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
log.info('num_osds is %s' % num_osds)
assert isinstance(config, dict), \
'osd_failsafe_enospc task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ mon = teuthology.get_single_remote_value(ctx, first_mon)
manager = ceph_manager.CephManager(
mon,
'task only accepts a dict for configuration'
testdir = teuthology.get_testdir(ctx)
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ mon = teuthology.get_single_remote_value(ctx, first_mon)
num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
log.info('num_osds is %s' % num_osds)
assert isinstance(config, dict), \
'task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ mon = teuthology.get_single_remote_value(ctx, first_mon)
num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
log.info('num_osds is %s' % num_osds)
assert isinstance(config, dict), \
'peer task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ mon = teuthology.get_single_remote_value(ctx, first_mon)
manager = ceph_manager.CephManager(
mon,
Setup peering test on remotes.
"""
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ mon = teuthology.get_single_remote_value(ctx, first_mon)
ctx.manager = ceph_manager.CephManager(
mon,
ctx=ctx,
elif 'clients' in config:
ls = config['clients']
for role in teuthology.all_roles_of_type(ctx.cluster, 'client'):
- (remote,) = ctx.cluster.only('client.{r}'.format(r=role)).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx,
+ 'client.{r}'.format(r=role))
yield (remote, ls)
del config['clients']
for role, ls in config.iteritems():
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, role)
yield (remote, ls)
else:
for role, ls in config.iteritems():
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, role)
yield (remote, ls)
def task(ctx, config):
testdir = teuthology.get_testdir(ctx)
for client, client_config in config.iteritems():
assert 'test' in client_config, 'You must specify a test to run'
- (remote,) = ctx.cluster.only(client).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, client)
remote.run(
args=[
'install', '-d', '-m0755', '--',
finally:
for client, client_config in config.iteritems():
assert 'test' in client_config, 'You must specify a test to run'
- (remote,) = ctx.cluster.only(client).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, client)
remote.run(
args=[
'rmdir', '{tdir}/qemu'.format(tdir=testdir), run.Raw('||'), 'true',
/mnt/cdrom/test.sh > /mnt/log/test.log 2>&1 && touch /mnt/log/success
""" + test_teardown
- (remote,) = ctx.cluster.only(client).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, client)
teuthology.write_file(remote, userdata_path, StringIO(user_data))
with file(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f:
yield
finally:
for client in config.iterkeys():
- (remote,) = ctx.cluster.only(client).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, client)
remote.run(
args=[
'rm', '-f',
log.info('downloading base image')
testdir = teuthology.get_testdir(ctx)
for client, client_config in config.iteritems():
- (remote,) = ctx.cluster.only(client).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, client)
base_file = '{tdir}/qemu/base.{client}.qcow2'.format(tdir=testdir, client=client)
remote.run(
args=[
tdir=testdir,
client=client,
)
- (remote,) = ctx.cluster.only(client).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, client)
remote.run(
args=[
'rm', '-f', base_file,
procs = []
testdir = teuthology.get_testdir(ctx)
for client, client_config in config.iteritems():
- (remote,) = ctx.cluster.only(client).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, client)
log_dir = '{tdir}/archive/qemu/{client}'.format(tdir=testdir, client=client)
remote.run(
args=[
log.debug('checking that qemu tests succeeded...')
for client in config.iterkeys():
- (remote,) = ctx.cluster.only(client).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, client)
remote.run(
args=[
'test', '-f',
"""Thread spawned by gevent"""
if not hasattr(ctx, 'manager'):
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ mon = teuthology.get_single_remote_value(ctx, first_mon)
ctx.manager = CephManager(
mon,
ctx=ctx,
pool = ctx.manager.create_pool_with_unique_name(ec_pool=config.get('ec_pool', False))
created_pools.append(pool)
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, role)
proc = remote.run(
args=["CEPH_CLIENT_ID={id_}".format(id_=id_)] + args +
["--pool", pool],
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, role)
pool = 'data'
if config.get('pool'):
'--format', 'json',
]
pre.extend(cmd)
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, client)
proc = remote.run(
args=pre,
check_status=False,
logging.error(err)
assert not err
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, client)
remote_host = remote.name.split('@')[1]
admin_conn = boto.s3.connection.S3Connection(
aws_access_key_id=admin_access_key,
log.info("dest is %s", dest_zone)
testdir = teuthology.get_testdir(ctx)
- (remote,) = ctx.cluster.only(client).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, client)
# figure out which branch to pull from
branch = cconf.get('force-branch', None)
if not branch:
name = properties.get('image_name', default_image_name(role))
size = properties.get('image_size', 10240)
fmt = properties.get('image_format', 1)
- (remote,) = ctx.cluster.only(role).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, role)
log.info('Creating image {name} with size {size}'.format(name=name,
size=size))
args = [
if properties is None:
properties = {}
name = properties.get('image_name', default_image_name(role))
- (remote,) = ctx.cluster.only(role).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, role)
remote.run(
args=[
'adjust-ulimits',
"""
log.info('Loading rbd kernel module...')
for role in config:
- (remote,) = ctx.cluster.only(role).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, role)
remote.run(
args=[
'sudo',
finally:
log.info('Unloading rbd kernel module...')
for role in config:
- (remote,) = ctx.cluster.only(role).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, role)
remote.run(
args=[
'sudo',
for role, image in role_images:
if image is None:
image = default_image_name(role)
- (remote,) = ctx.cluster.only(role).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, role)
remote.run(
args=[
for role, image in role_images:
if image is None:
image = default_image_name(role)
- (remote,) = ctx.cluster.only(role).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, role)
remote.run(
args=[
'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
fs_type = properties.get('fs_type')
tests = properties.get('tests')
- (remote,) = ctx.cluster.only(role).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, role)
# Fetch the test script
test_root = teuthology.get_testdir(ctx)
def _run_one_client(ctx, config, role):
"""Spawned task that runs the client"""
testdir = teuthology.get_testdir(ctx)
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, role)
remote.run(
args=[
'adjust-ulimits',
log.info('Beginning recovery bench...')
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ mon = teuthology.get_single_remote_value(ctx, first_mon)
manager = ceph_manager.CephManager(
mon,
io_size = self.config.get("io_size", 4096)
osd = str(random.choice(self.osds))
- (osd_remote,) = self.ceph_manager.ctx.cluster.only('osd.%s' % osd).remotes.iterkeys()
+ osd_remote = teuthology.get_single_remote_value(self.ceph_manager.ctx,
+ 'osd.%s' % osd)
testdir = teuthology.get_testdir(self.ceph_manager.ctx)
log.info("starting repair test type 2")
victim_osd = chooser(pool, 0)
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ mon = teuthology.get_single_remote_value(ctx, first_mon)
# create object
log.info("doing put and setomapval")
if not hasattr(ctx, 'manager'):
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ mon = teuthology.get_single_remote_value(ctx, first_mon)
ctx.manager = ceph_manager.CephManager(
mon,
ctx=ctx,
assert 'exec' in config, "config requires exec key with <role>: <command> entries"
for role, task in config['exec'].iteritems():
log.info('restart for role {r}'.format(r=role))
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, role)
srcdir, restarts = get_tests(ctx, config, role, remote, testdir)
log.info('Running command on role %s host %s', role, remote.name)
spec = '{spec}'.format(spec=task[0])
log.info('Shipping apache config and rgw.fcgi...')
src = os.path.join(os.path.dirname(__file__), 'apache.conf.template')
for client, conf in config.iteritems():
- (remote,) = ctx.cluster.only(client).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, client)
system_type = teuthology.get_system_type(remote)
if not conf:
conf = {}
log.info('Starting rgw...')
testdir = teuthology.get_testdir(ctx)
for client in config.iterkeys():
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, client)
client_config = config.get(client)
if client_config is None:
testdir = teuthology.get_testdir(ctx)
apaches = {}
for client in config.iterkeys():
- (remote,) = ctx.cluster.only(client).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, client)
system_type = teuthology.get_system_type(remote)
if system_type == 'deb':
apache_name = 'apache2'
log.info('creating data pools')
for client in config.keys():
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, client)
data_pool = '.rgw.buckets'
if ctx.rgw.ec_data_pool:
create_ec_pool(remote, data_pool, client, 64)
# clear out the old defaults
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ mon = teuthology.get_single_remote_value(ctx, first_mon)
# removing these objects from .rgw.root and the per-zone root pools
# may or may not matter
rados(ctx, mon,
cmd=['-p', zone_info['domain_root'],
'rm', 'zone_info.default'])
- (remote,) = ctx.cluster.only(role).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, role)
for pool_info in zone_info['placement_pools']:
remote.run(args=['ceph', 'osd', 'pool', 'create',
pool_info['val']['index_pool'], '64', '64'])
s3tests_conf['s3'].setdefault('port', def_conf['port'])
s3tests_conf['s3'].setdefault('is_secure', def_conf['is_secure'])
- (remote,) = ctx.cluster.only(client).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, client)
remote.run(
args=[
'cd',
assert isinstance(config, dict)
testdir = teuthology.get_testdir(ctx)
for client, client_config in config.iteritems():
- (remote,) = ctx.cluster.only(client).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, client)
conf = teuthology.get_file(remote, '{tdir}/archive/s3readwrite.{client}.config.yaml'.format(tdir=testdir, client=client))
args = [
'{tdir}/s3-tests/virtualenv/bin/s3tests-test-readwrite'.format(tdir=testdir),
s3tests_conf['s3'].setdefault('port', def_conf['port'])
s3tests_conf['s3'].setdefault('is_secure', def_conf['is_secure'])
- (remote,) = ctx.cluster.only(client).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, client)
remote.run(
args=[
'cd',
assert isinstance(config, dict)
testdir = teuthology.get_testdir(ctx)
for client, client_config in config.iteritems():
- (remote,) = ctx.cluster.only(client).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, client)
conf = teuthology.get_file(remote, '{tdir}/archive/s3roundtrip.{client}.config.yaml'.format(tdir=testdir, client=client))
args = [
'{tdir}/s3-tests/virtualenv/bin/s3tests-test-roundtrip'.format(tdir=testdir),
else:
s3tests_conf['DEFAULT']['host'] = 'localhost'
- (remote,) = ctx.cluster.only(client).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, client)
remote.run(
args=[
'cd',
PREFIX = 'samba.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, role)
yield (id_, remote)
@contextlib.contextmanager
log.info('Beginning scrub...')
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ mon = teuthology.get_single_remote_value(ctx, first_mon)
manager = ceph_manager.CephManager(
mon,
assert isinstance(config, dict), \
'scrub_test task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ mon = teuthology.get_single_remote_value(ctx, first_mon)
num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
log.info('num_osds is %s' % num_osds)
log.info('messing with PG %s on osd %d' % (victim, osd))
- (osd_remote,) = ctx.cluster.only('osd.%d' % osd).remotes.iterkeys()
+ osd_remote = teuthology.get_single_remote_value(ctx, 'osd.%d' % osd)
data_path = os.path.join(
'/var/lib/ceph/osd',
'ceph-{id}'.format(id=osd),
testswift_conf['func_test']['auth_host'] = 'localhost'
log.info(client)
- (remote,) = ctx.cluster.only(client).remotes.keys()
+ remote = teuthology.get_single_remote_value(ctx, client)
remote.run(
args=[
'cd',
log.info('Beginning thrashosds...')
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ mon = teuthology.get_single_remote_value(ctx, first_mon)
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
import proc_thrasher
from ..orchestra import run
+from teuthology import misc as teuthology
log = logging.getLogger(__name__)
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, role)
remotes.append(remote)
args =['CEPH_CLIENT_ID={id_}'.format(id_=id_),
PREFIX = 'client.'
testdir = teuthology.get_testdir(ctx)
id_ = role[len(PREFIX):]
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, role)
mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
# Is there any reason why this is not: join(mnt, role) ?
client = os.path.join(mnt, 'client.{id}'.format(id=id_))
PREFIX = 'client.'
id_ = role[len(PREFIX):]
log.debug("getting remote for {id} role {role_}".format(id=id_, role_=role))
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, role)
dir_owner = remote.shortname.split('@', 1)[0]
mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
# if neither kclient nor ceph-fuse are required for a workunit,
client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client')
client_remotes = list()
for client in client_generator:
- (client_remote,) = ctx.cluster.only('client.{id}'.format(id=client)).remotes.iterkeys()
+ client_remote = teuthology.get_single_remote_value(ctx,
+ 'client.{id}'.format(id=client))
client_remotes.append((client_remote, 'client.{id}'.format(id=client)))
_make_scratch_dir(ctx, "client.{id}".format(id=client), subdir)
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, role)
mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
# subdir so we can remove and recreate this a lot without sudo
if subdir is None:
]
pre.extend(cmd)
log.info('rgwadmin: cmd=%s' % pre)
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ remote = teuthology.get_single_remote_value(ctx, client)
proc = remote.run(
args=pre,
check_status=check_status,