From d693b3f8950ffd1f2492a4db0f8234fee31f00f0 Mon Sep 17 00:00:00 2001 From: Warren Usui Date: Fri, 28 Feb 2014 19:13:40 -0800 Subject: [PATCH] Lines formerly of the form '(remote,) = ctx.cluster.only(role).remotes.keys()' and '(remote,) = ctx.cluster.only(role).remotes.iterkeys()' would fail with a bare ValueError and no message if there were zero keys or more than one key. Now a new function, get_single_remote_value(), is called, which logs more understandable messages. Fixes: 7510 Reviewed-by: Josh Durgin Signed-off-by: Warren Usui
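For illustration, a minimal sketch of the difference (Python 2 semantics, as used by teuthology; 'client.0' is a hypothetical role name):

    # Old idiom: with zero matching remotes this fails with a bare
    #   ValueError: need more than 0 values to unpack
    # giving no hint about which role list was empty.
    (remote,) = ctx.cluster.only('client.0').remotes.iterkeys()

    # New helper: logs "Role list for client.0 is empty" (or the
    # too-many-remotes message) via log.error, then still raises
    # ValueError so the traceback points at the caller.
    remote = teuthology.get_single_remote_value(ctx, 'client.0')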
--- teuthology/misc.py | 20 +++++++++++++++++++- teuthology/task/admin_socket.py | 2 +- teuthology/task/autotest.py | 6 +++--- teuthology/task/calamari.py | 2 +- teuthology/task/ceph-deploy.py | 8 ++++---- teuthology/task/ceph.py | 12 ++++++------ teuthology/task/ceph_manager.py | 18 ++++++++++++------ teuthology/task/common_fs_utils.py | 4 ++-- teuthology/task/cram.py | 6 +++--- teuthology/task/die_on_err.py | 4 ++-- teuthology/task/divergent_priors.py | 2 +- teuthology/task/dump_stuck.py | 2 +- teuthology/task/exec.py | 2 +- teuthology/task/filestore_idempotent.py | 2 +- teuthology/task/install.py | 2 +- teuthology/task/kernel.py | 10 +++++----- teuthology/task/lockfile.py | 8 ++++---- teuthology/task/lost_unfound.py | 2 +- teuthology/task/manypools.py | 3 ++- teuthology/task/mds_creation_failure.py | 3 ++- teuthology/task/mds_thrash.py | 3 ++- teuthology/task/mon_clock_skew_check.py | 2 +- teuthology/task/mon_recovery.py | 2 +- teuthology/task/mon_thrash.py | 2 +- teuthology/task/mpi.py | 14 ++++++++------ teuthology/task/object_source_down.py | 2 +- teuthology/task/omapbench.py | 2 +- teuthology/task/osd_backfill.py | 2 +- teuthology/task/osd_failsafe_enospc.py | 2 +- teuthology/task/osd_recovery.py | 4 ++-- teuthology/task/peer.py | 2 +- teuthology/task/peering_speed_test.py | 2 +- teuthology/task/pexec.py | 7 ++++--- teuthology/task/qemu.py | 16 ++++++++-------- teuthology/task/rados.py | 4 ++-- teuthology/task/radosbench.py | 2 +- teuthology/task/radosgw-admin-rest.py | 4 ++-- teuthology/task/radosgw-agent.py | 2 +- teuthology/task/rbd.py | 14 +++++++------- teuthology/task/rbd_fsx.py | 2 +- teuthology/task/recovery_bench.py | 5 +++-- teuthology/task/repair_test.py | 4 ++-- teuthology/task/restart.py | 2 +- teuthology/task/rgw.py | 12 ++++++------ teuthology/task/s3readwrite.py | 4 ++-- teuthology/task/s3roundtrip.py | 4 ++-- teuthology/task/s3tests.py | 2 +- teuthology/task/samba.py | 2 +- teuthology/task/scrub.py | 2 +- teuthology/task/scrub_test.py | 4 ++-- teuthology/task/swift.py | 2 +- teuthology/task/thrashosds.py | 2 +- teuthology/task/watch_notify_stress.py | 3 ++- teuthology/task/workunit.py | 9 +++++---- teuthology/task_util/rgw.py | 2 +- 55 files changed, 150 insertions(+), 117 deletions(-) diff --git a/teuthology/misc.py b/teuthology/misc.py index c075e56d6dd51..49220ee41e428 100644 --- a/teuthology/misc.py +++ b/teuthology/misc.py @@ -987,7 +987,7 @@ def get_clients(ctx, roles): PREFIX = 'client.' assert role.startswith(PREFIX) id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + remote = get_single_remote_value(ctx, role) yield (id_, remote) @@ -1211,3 +1211,21 @@ def get_multi_machine_types(machinetype): if not machinetypes: machinetypes.append(machinetype) return machinetypes + + +def get_single_remote_value(ctx, role): + """ + Return the first (and hopefully only) remotes value for this role. + Errors are now logged so that failure conditions are less confusing + than they used to be. This code still raises a ValueError so that the + stack is dumped and the location of the fault can be found easily. + """ + keyz = ctx.cluster.only(role).remotes.keys() + if len(keyz) == 0: + log.error("Role list for %s is empty" % role) + if len(keyz) > 1: + bad_keys = ", ".join([str(key) for key in keyz]) + log.error("Only one remote value should exist for %s -- %s found" % + (role, bad_keys)) + (remote,) = keyz + return remote diff --git a/teuthology/task/admin_socket.py b/teuthology/task/admin_socket.py index 20a670122a9b6..4b01fae8c0afe 100644 --- a/teuthology/task/admin_socket.py +++ b/teuthology/task/admin_socket.py @@ -123,7 +123,7 @@ def _run_tests(ctx, client, tests): """ testdir = teuthology.get_testdir(ctx) log.debug('Running admin socket tests on %s', client) - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, client) socket_path = '/var/run/ceph/ceph-{name}.asok'.format(name=client) overrides = ctx.config.get('overrides', {}).get('admin_socket', {}) diff --git a/teuthology/task/autotest.py b/teuthology/task/autotest.py index 24a7675df277f..dde2950bfedbb 100644 --- a/teuthology/task/autotest.py +++ b/teuthology/task/autotest.py @@ -43,7 +43,7 @@ def task(ctx, config): testdir = teuthology.get_testdir(ctx) with parallel() as p: for role in config.iterkeys(): - (remote,) = ctx.cluster.only(role).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, role) p.spawn(_download, testdir, remote) log.info('Making a separate scratch dir for every client...') @@ -52,7 +52,7 @@ def task(ctx, config): PREFIX = 'client.' assert role.startswith(PREFIX) id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, role) mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) scratch = os.path.join(mnt, 'client.{id}'.format(id=id_)) remote.run( @@ -69,7 +69,7 @@ def task(ctx, config): with parallel() as p: for role, tests in config.iteritems(): - (remote,) = ctx.cluster.only(role).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, role) p.spawn(_run_tests, testdir, remote, role, tests) def _download(testdir, remote): diff --git a/teuthology/task/calamari.py b/teuthology/task/calamari.py index f3c1495f699e5..b5b1d44ddd0e0 100644 --- a/teuthology/task/calamari.py +++ b/teuthology/task/calamari.py @@ -108,7 +108,7 @@ def _setup_calamari_cluster(remote, restapi_remote): def _remotes(ctx, selector): - return ctx.cluster.only(selector).remotes.keys() + return teuthology.get_single_remote_value(ctx, selector) """ Tasks diff --git a/teuthology/task/ceph-deploy.py b/teuthology/task/ceph-deploy.py index aec9d1b2c4327..3a676c0c5a3d2 100644 --- a/teuthology/task/ceph-deploy.py +++ b/teuthology/task/ceph-deploy.py @@ -65,7 +65,7 @@ def is_healthy(ctx, config): """Wait until a Ceph cluster is healthy.""" testdir = teuthology.get_testdir(ctx) ceph_admin = teuthology.get_first_mon(ctx, config) - (remote,) = ctx.cluster.only(ceph_admin).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, ceph_admin) max_tries = 90 # 90 tries * 10 secs --> 15 minutes tries = 0 while True: @@ -135,7 +135,7 @@ def execute_ceph_deploy(ctx, config, cmd): testdir = teuthology.get_testdir(ctx) ceph_admin = teuthology.get_first_mon(ctx, config) exec_cmd = cmd - (remote,) = ctx.cluster.only(ceph_admin).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, ceph_admin) proc = remote.run( args = [ 'cd', @@ -190,7 +190,7 @@ def build_ceph_cluster(ctx, config): testdir =
teuthology.get_testdir(ctx) conf_path = '{tdir}/ceph-deploy/ceph.conf'.format(tdir=testdir) first_mon = teuthology.get_first_mon(ctx, config) - (remote,) = ctx.cluster.only(first_mon).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, first_mon) lines = None if config.get('conf') is not None: @@ -279,7 +279,7 @@ def build_ceph_cluster(ctx, config): conf_path = '/etc/ceph/ceph.conf' admin_keyring_path = '/etc/ceph/ceph.client.admin.keyring' first_mon = teuthology.get_first_mon(ctx, config) - (mon0_remote,) = ctx.cluster.only(first_mon).remotes.keys() + mon0_remote = teuthology.get_single_remote_value(ctx, first_mon) conf_data = teuthology.get_file( remote=mon0_remote, path=conf_path, diff --git a/teuthology/task/ceph.py b/teuthology/task/ceph.py index 5a695b57081dc..c1d7f6ed791ea 100644 --- a/teuthology/task/ceph.py +++ b/teuthology/task/ceph.py @@ -565,7 +565,7 @@ def cluster(ctx, config): keyring_path, ], ) - (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys() + mon0_remote = teuthology.get_single_remote_value(ctx, firstmon) fsid = teuthology.create_simple_monmap( ctx, remote=mon0_remote, @@ -904,7 +904,7 @@ def cluster(ctx, config): ctx.summary['success'] = False raise finally: - (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys() + mon0_remote = teuthology.get_single_remote_value(ctx, firstmon) log.info('Checking cluster log for badness...') def first_in_ceph_log(pattern, excludes): @@ -1185,7 +1185,7 @@ def run_daemon(ctx, config, type_): if type_ == 'mds': firstmon = teuthology.get_first_mon(ctx, config) - (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys() + mon0_remote = teuthology.get_single_remote_value(ctx, firstmon) mon0_remote.run(args=[ 'adjust-ulimits', @@ -1208,7 +1208,7 @@ def healthy(ctx, config): """ log.info('Waiting until ceph is healthy...') firstmon = teuthology.get_first_mon(ctx, config) - (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys() + mon0_remote = teuthology.get_single_remote_value(ctx, firstmon) teuthology.wait_until_osds_up( ctx, cluster=ctx.cluster, @@ -1228,7 +1228,7 @@ def wait_for_osds_up(ctx, config): """ log.info('Waiting until ceph osds are all up...') firstmon = teuthology.get_first_mon(ctx, config) - (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys() + mon0_remote = teuthology.get_single_remote_value(ctx, firstmon) teuthology.wait_until_osds_up( ctx, cluster=ctx.cluster, @@ -1245,7 +1245,7 @@ def wait_for_mon_quorum(ctx, config): assert isinstance(config, list) firstmon = teuthology.get_first_mon(ctx, config) - (remote,) = ctx.cluster.only(firstmon).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, firstmon) while True: r = remote.run( args=[ diff --git a/teuthology/task/ceph_manager.py b/teuthology/task/ceph_manager.py index 3952950b2d2ed..c1973852fc6b6 100644 --- a/teuthology/task/ceph_manager.py +++ b/teuthology/task/ceph_manager.py @@ -1190,7 +1190,8 @@ class CephManager: or by stopping. """ if self.config.get('powercycle'): - (remote,) = self.ctx.cluster.only('osd.{o}'.format(o=osd)).remotes.iterkeys() + remote = teuthology.get_single_remote_value(self.ctx, + 'osd.{o}'.format(o=osd)) self.log('kill_osd on osd.{o} doing powercycle of {s}'.format(o=osd, s=remote.name)) assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config." remote.console.power_off() @@ -1212,7 +1213,8 @@ class CephManager: or by restarting.
""" if self.config.get('powercycle'): - (remote,) = self.ctx.cluster.only('osd.{o}'.format(o=osd)).remotes.iterkeys() + remote = teuthology.get_single_remotes_value(self.ctx, + 'osd.{o}'.format(o=osd)) self.log('kill_osd on osd.{o} doing powercycle of {s}'.format(o=osd, s=remote.name)) assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config." remote.console.power_on() @@ -1258,7 +1260,8 @@ class CephManager: or by doing a stop. """ if self.config.get('powercycle'): - (remote,) = self.ctx.cluster.only('mon.{m}'.format(m=mon)).remotes.iterkeys() + remote = teuthology.get_single_remote_value(self.ctx, + 'mon.{m}'.format(m=mon)) self.log('kill_mon on mon.{m} doing powercycle of {s}'.format(m=mon, s=remote.name)) assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config." remote.console.power_off() @@ -1271,7 +1274,8 @@ class CephManager: or by doing a normal restart. """ if self.config.get('powercycle'): - (remote,) = self.ctx.cluster.only('mon.{m}'.format(m=mon)).remotes.iterkeys() + remote = teuthology.get_single_remote_value(self.ctx, + 'mon.{m}'.format(m=mon)) self.log('revive_mon on mon.{m} doing powercycle of {s}'.format(m=mon, s=remote.name)) assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config." remote.console.power_on() @@ -1324,7 +1328,8 @@ class CephManager: Powercyle if set in config, otherwise just stop. """ if self.config.get('powercycle'): - (remote,) = self.ctx.cluster.only('mds.{m}'.format(m=mds)).remotes.iterkeys() + remote = teuthology.get_single_remote_value(self.ctx, + 'mds.{m}'.format(m=mds)) self.log('kill_mds on mds.{m} doing powercycle of {s}'.format(m=mds, s=remote.name)) assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config." remote.console.power_off() @@ -1344,7 +1349,8 @@ class CephManager: and then restart (using --hot-standby if specified. """ if self.config.get('powercycle'): - (remote,) = self.ctx.cluster.only('mds.{m}'.format(m=mds)).remotes.iterkeys() + remote = teuthology.get_single_remote_value(self.ctx, + 'mds.{m}'.format(m=mds)) self.log('revive_mds on mds.{m} doing powercycle of {s}'.format(m=mds, s=remote.name)) assert remote.console is not None, "powercycling requested but RemoteConsole is not initialized. Check ipmi config." 
remote.console.power_on() diff --git a/teuthology/task/common_fs_utils.py b/teuthology/task/common_fs_utils.py index b963e98d429a1..3c79a26429781 100644 --- a/teuthology/task/common_fs_utils.py +++ b/teuthology/task/common_fs_utils.py @@ -40,7 +40,7 @@ def generic_mkfs(ctx, config, devname_rtn): for role, properties in images: if properties is None: properties = {} - (remote,) = ctx.cluster.only(role).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, role) image = properties.get('image_name', default_image_name(role)) fs_type = properties.get('fs_type', 'ext3') remote.run( @@ -90,7 +90,7 @@ def generic_mount(ctx, config, devname_rtn): for role, image in role_images: if image is None: image = default_image_name(role) - (remote,) = ctx.cluster.only(role).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, role) id_ = strip_client_prefix(role) mnt = mnt_template.format(tdir=testdir, id=id_) mounted.append((remote, mnt)) diff --git a/teuthology/task/cram.py b/teuthology/task/cram.py index 05824d26ab0e6..8627e02a67465 100644 --- a/teuthology/task/cram.py +++ b/teuthology/task/cram.py @@ -51,7 +51,7 @@ def task(ctx, config): try: for client, tests in clients.iteritems(): - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, client) client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client) remote.run( args=[ @@ -77,7 +77,7 @@ def task(ctx, config): p.spawn(_run_tests, ctx, role) finally: for client, tests in clients.iteritems(): - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, client) client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client) test_files = set([test.rsplit('/', 1)[1] for test in tests]) @@ -115,7 +115,7 @@ def _run_tests(ctx, role): PREFIX = 'client.' 
assert role.startswith(PREFIX) id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, role) ceph_ref = ctx.summary.get('ceph-sha1', 'master') testdir = teuthology.get_testdir(ctx) diff --git a/teuthology/task/die_on_err.py b/teuthology/task/die_on_err.py index 1dfd37073628d..c1c198f8f7807 100644 --- a/teuthology/task/die_on_err.py +++ b/teuthology/task/die_on_err.py @@ -20,7 +20,7 @@ def task(ctx, config): config = {} first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + mon = teuthology.get_single_remote_value(ctx, first_mon) num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') log.info('num_osds is %s' % num_osds) @@ -38,7 +38,7 @@ def task(ctx, config): while True: for i in range(num_osds): - (osd_remote,) = ctx.cluster.only('osd.%d' % i).remotes.iterkeys() + osd_remote = teuthology.get_single_remote_value(ctx, 'osd.%d' % i) p = osd_remote.run( args = [ 'test', '-e', '{tdir}/err'.format(tdir=testdir) ], wait=True, diff --git a/teuthology/task/divergent_priors.py b/teuthology/task/divergent_priors.py index 432614f233cb1..9767a02b124b1 100644 --- a/teuthology/task/divergent_priors.py +++ b/teuthology/task/divergent_priors.py @@ -25,7 +25,7 @@ def task(ctx, config): assert isinstance(config, dict), \ 'divergent_priors task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + mon = teuthology.get_single_remote_value(ctx, first_mon) manager = ceph_manager.CephManager( mon, diff --git a/teuthology/task/dump_stuck.py b/teuthology/task/dump_stuck.py index 9e1780f01565a..0b41021eee927 100644 --- a/teuthology/task/dump_stuck.py +++ b/teuthology/task/dump_stuck.py @@ -57,7 +57,7 @@ def task(ctx, config): timeout = 60 first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + mon = teuthology.get_single_remote_value(ctx, first_mon) manager = ceph_manager.CephManager( mon, diff --git a/teuthology/task/exec.py b/teuthology/task/exec.py index f951f77a8a379..af67eb9d15f99 100644 --- a/teuthology/task/exec.py +++ b/teuthology/task/exec.py @@ -34,7 +34,7 @@ def task(ctx, config): config = dict((id_, a) for id_ in roles) for role, ls in config.iteritems(): - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, role) log.info('Running commands on role %s host %s', role, remote.name) for c in ls: c.replace('$TESTDIR', testdir) diff --git a/teuthology/task/filestore_idempotent.py b/teuthology/task/filestore_idempotent.py index d33ad6458413b..3621fd4cf0857 100644 --- a/teuthology/task/filestore_idempotent.py +++ b/teuthology/task/filestore_idempotent.py @@ -32,7 +32,7 @@ def task(ctx, config): # just use the first client... 
client = clients[0]; - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, client) testdir = teuthology.get_testdir(ctx) diff --git a/teuthology/task/install.py b/teuthology/task/install.py index eaf8de9d5bde6..6459a2834da27 100644 --- a/teuthology/task/install.py +++ b/teuthology/task/install.py @@ -1049,7 +1049,7 @@ def upgrade(ctx, config): remotes[remote] = config.get('all') else: for role in config.keys(): - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, role) if remote in remotes: log.warn('remote %s came up twice (role %s)', remote, role) continue diff --git a/teuthology/task/kernel.py b/teuthology/task/kernel.py index 7d63190dcc04f..b9b0acb40968d 100644 --- a/teuthology/task/kernel.py +++ b/teuthology/task/kernel.py @@ -189,7 +189,7 @@ def install_firmware(ctx, config): if config[role].find('distro') >= 0: log.info('Skipping firmware on distro kernel'); return - (role_remote,) = ctx.cluster.only(role).remotes.keys() + role_remote = teuthology.get_single_remote_value(ctx, role) log.info('Installing linux-firmware on {role}...'.format(role=role)) role_remote.run( args=[ @@ -239,7 +239,7 @@ def download_deb(ctx, config): procs = {} #Don't need to download distro kernels for role, src in config.iteritems(): - (role_remote,) = ctx.cluster.only(role).remotes.keys() + role_remote = teuthology.get_single_remote_value(ctx, role) if src.find('distro') >= 0: log.info('Installing newest kernel distro'); return @@ -331,7 +331,7 @@ def install_and_reboot(ctx, config): procs = {} kernel_title = '' for role, src in config.iteritems(): - (role_remote,) = ctx.cluster.only(role).remotes.keys() + role_remote = teuthology.get_single_remote_value(ctx, role) if src.find('distro') >= 0: log.info('Installing distro kernel on {role}...'.format(role=role)) install_distro_kernel(role_remote) @@ -480,7 +480,7 @@ def enable_disable_kdb(ctx, config): :param config: Configuration """ for role, enable in config.iteritems(): - (role_remote,) = ctx.cluster.only(role).remotes.keys() + role_remote = teuthology.get_single_remote_value(ctx, role) if "mira" in role_remote.name: serialdev = "ttyS2" else: @@ -552,7 +552,7 @@ def need_to_install_distro(ctx, role): and compares against current (uname -r) and returns true if newest != current. Similar check for deb. 
""" - (role_remote,) = ctx.cluster.only(role).remotes.keys() + role_remote = teuthology.get_single_remote_value(ctx, role) system_type = teuthology.get_system_type(role_remote) output, err_mess = StringIO(), StringIO() role_remote.run(args=['uname', '-r' ], stdout=output, stderr=err_mess ) diff --git a/teuthology/task/lockfile.py b/teuthology/task/lockfile.py index 10ac1e8e91b29..68144d0031904 100644 --- a/teuthology/task/lockfile.py +++ b/teuthology/task/lockfile.py @@ -78,7 +78,7 @@ def task(ctx, config): files = set(files) lock_procs = list() for client in clients: - (client_remote,) = ctx.cluster.only(client).remotes.iterkeys() + client_remote = teuthology.get_single_remote_value(ctx, client) log.info("got a client remote") (_, _, client_id) = client.partition('.') filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), op["lockfile"]) @@ -111,7 +111,7 @@ def task(ctx, config): # create the files to run these locks on client = clients.pop() clients.add(client) - (client_remote,) = ctx.cluster.only(client).remotes.iterkeys() + client_remote = teuthology.get_single_remote_value(ctx, client) (_, _, client_id) = client.partition('.') file_procs = list() for lockfile in files: @@ -168,7 +168,7 @@ def task(ctx, config): greenlet.kill(block=True) for client in clients: - (client_remote,) = ctx.cluster.only(client).remotes.iterkeys() + client_remote = teuthology.get_single_remote_value(ctx, client) (_, _, client_id) = client.partition('.') filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), op["lockfile"]) proc = client_remote.run( @@ -190,7 +190,7 @@ def lock_one(op, ctx): timeout = None proc = None result = None - (client_remote,) = ctx.cluster.only(op['client']).remotes.iterkeys() + client_remote = teuthology.get_single_remote_value(ctx, op['client']) (_, _, client_id) = op['client'].partition('.') testdir = teuthology.get_testdir(ctx) filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), op["lockfile"]) diff --git a/teuthology/task/lost_unfound.py b/teuthology/task/lost_unfound.py index 700a300bf033e..379e759065b86 100644 --- a/teuthology/task/lost_unfound.py +++ b/teuthology/task/lost_unfound.py @@ -19,7 +19,7 @@ def task(ctx, config): assert isinstance(config, dict), \ 'lost_unfound task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + mon = teuthology.get_single_remote_value(ctx, first_mon) manager = ceph_manager.CephManager( mon, diff --git a/teuthology/task/manypools.py b/teuthology/task/manypools.py index 32b9d562bf46a..699c41ae927ac 100644 --- a/teuthology/task/manypools.py +++ b/teuthology/task/manypools.py @@ -39,7 +39,8 @@ def task(ctx, config): log.info('got client_roles={client_roles_}'.format(client_roles_=client_roles)) for role in client_roles: log.info('role={role_}'.format(role_=role)) - (creator_remote, ) = ctx.cluster.only('client.{id}'.format(id=role)).remotes.iterkeys() + creator_remote = teuthology.get_single_remote_value(ctx, + 'client.{id}'.format(id=role)) creator_remotes.append((creator_remote, 'client.{id}'.format(id=role))) remaining_pools = poolnum diff --git a/teuthology/task/mds_creation_failure.py b/teuthology/task/mds_creation_failure.py index a3d052fb95cdf..b2ba6b34277c2 100644 --- a/teuthology/task/mds_creation_failure.py +++ b/teuthology/task/mds_creation_failure.py @@ -23,7 +23,8 @@ def task(ctx, config): raise RuntimeError("This task requires exactly one MDS") mds_id = mdslist[0] - (mds_remote,) = 
ctx.cluster.only('mds.{_id}'.format(_id=mds_id)).remotes.iterkeys() + mds_remote = misc.get_single_remote_value(ctx, + 'mds.{_id}'.format(_id=mds_id)) manager = ceph_manager.CephManager( mds_remote, ctx=ctx, logger=log.getChild('ceph_manager'), ) diff --git a/teuthology/task/mds_thrash.py b/teuthology/task/mds_thrash.py index c60b741a49e12..54f9a545e0dff 100644 --- a/teuthology/task/mds_thrash.py +++ b/teuthology/task/mds_thrash.py @@ -276,7 +276,8 @@ def task(ctx, config): max_thrashers = config.get('max_thrash', 1) thrashers = {} - (first,) = ctx.cluster.only('mds.{_id}'.format(_id=mdslist[0])).remotes.iterkeys() + first = teuthology.get_single_remote_value(ctx, + 'mds.{_id}'.format(_id=mdslist[0])) manager = ceph_manager.CephManager( first, ctx=ctx, logger=log.getChild('ceph_manager'), ) diff --git a/teuthology/task/mon_clock_skew_check.py b/teuthology/task/mon_clock_skew_check.py index 891e6ec484ede..e71f75fe8af18 100644 --- a/teuthology/task/mon_clock_skew_check.py +++ b/teuthology/task/mon_clock_skew_check.py @@ -240,7 +240,7 @@ def task(ctx, config): 'mon_clock_skew_check task only accepts a dict for configuration' log.info('Beginning mon_clock_skew_check...') first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + mon = teuthology.get_single_remote_value(ctx, first_mon) manager = ceph_manager.CephManager( mon, ctx=ctx, diff --git a/teuthology/task/mon_recovery.py b/teuthology/task/mon_recovery.py index bfa2cdf78f15d..a593f262c7979 100644 --- a/teuthology/task/mon_recovery.py +++ b/teuthology/task/mon_recovery.py @@ -17,7 +17,7 @@ def task(ctx, config): assert isinstance(config, dict), \ 'task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + mon = teuthology.get_single_remote_value(ctx, first_mon) manager = ceph_manager.CephManager( mon, diff --git a/teuthology/task/mon_thrash.py b/teuthology/task/mon_thrash.py index 7dc7caad8463d..c75d20bb95d6e 100644 --- a/teuthology/task/mon_thrash.py +++ b/teuthology/task/mon_thrash.py @@ -324,7 +324,7 @@ def task(ctx, config): 'mon_thrash task requires at least 3 monitors' log.info('Beginning mon_thrash...') first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + mon = teuthology.get_single_remote_value(ctx, first_mon) manager = ceph_manager.CephManager( mon, ctx=ctx, diff --git a/teuthology/task/mpi.py b/teuthology/task/mpi.py index 6d2381ee58e09..22d048c85b3b5 100644 --- a/teuthology/task/mpi.py +++ b/teuthology/task/mpi.py @@ -70,23 +70,25 @@ def task(ctx, config): if 'nodes' in config: if isinstance(config['nodes'], basestring) and config['nodes'] == 'all': for role in teuthology.all_roles(ctx.cluster): - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, role) ip,port = remote.ssh.get_transport().getpeername() hosts.append(ip) remotes.append(remote) - (master_remote,) = ctx.cluster.only(config['nodes'][0]).remotes.iterkeys() + master_remote = teuthology.get_single_remote_value(ctx, + config['nodes'][0]) elif isinstance(config['nodes'], list): for role in config['nodes']: - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, role) ip,port = remote.ssh.get_transport().getpeername() hosts.append(ip) remotes.append(remote) - (master_remote,) = ctx.cluster.only(config['nodes'][0]).remotes.iterkeys() + master_remote =
teuthology.get_single_remote_value(ctx, + config['nodes'][0]) else: roles = ['client.{id}'.format(id=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] - (master_remote,) = ctx.cluster.only(roles[0]).remotes.iterkeys() + master_remote = teuthology.get_single_remote_value(ctx, roles[0]) for role in roles: - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, role) ip,port = remote.ssh.get_transport().getpeername() hosts.append(ip) remotes.append(remote) diff --git a/teuthology/task/object_source_down.py b/teuthology/task/object_source_down.py index 1696c55214aac..26e936d18b08b 100644 --- a/teuthology/task/object_source_down.py +++ b/teuthology/task/object_source_down.py @@ -17,7 +17,7 @@ def task(ctx, config): assert isinstance(config, dict), \ 'lost_unfound task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + mon = teuthology.get_single_remote_value(ctx, first_mon) manager = ceph_manager.CephManager( mon, diff --git a/teuthology/task/omapbench.py b/teuthology/task/omapbench.py index 7d2535453231a..0d07c5d905059 100644 --- a/teuthology/task/omapbench.py +++ b/teuthology/task/omapbench.py @@ -52,7 +52,7 @@ def task(ctx, config): PREFIX = 'client.' assert role.startswith(PREFIX) id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, role) proc = remote.run( args=[ "/bin/sh", "-c", diff --git a/teuthology/task/osd_backfill.py b/teuthology/task/osd_backfill.py index d80ea22ef2248..4c5ad7ad2a326 100644 --- a/teuthology/task/osd_backfill.py +++ b/teuthology/task/osd_backfill.py @@ -38,7 +38,7 @@ def task(ctx, config): assert isinstance(config, dict), \ 'thrashosds task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + mon = teuthology.get_single_remote_value(ctx, first_mon) num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') log.info('num_osds is %s' % num_osds) diff --git a/teuthology/task/osd_failsafe_enospc.py b/teuthology/task/osd_failsafe_enospc.py index 39b5b5c530096..cf33cadfbc59f 100644 --- a/teuthology/task/osd_failsafe_enospc.py +++ b/teuthology/task/osd_failsafe_enospc.py @@ -32,7 +32,7 @@ def task(ctx, config): assert isinstance(config, dict), \ 'osd_failsafe_enospc task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + mon = teuthology.get_single_remote_value(ctx, first_mon) manager = ceph_manager.CephManager( mon, diff --git a/teuthology/task/osd_recovery.py b/teuthology/task/osd_recovery.py index 1ff17335b15e7..f7c4b838c9f44 100644 --- a/teuthology/task/osd_recovery.py +++ b/teuthology/task/osd_recovery.py @@ -38,7 +38,7 @@ def task(ctx, config): 'task only accepts a dict for configuration' testdir = teuthology.get_testdir(ctx) first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + mon = teuthology.get_single_remote_value(ctx, first_mon) num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') log.info('num_osds is %s' % num_osds) @@ -119,7 +119,7 @@ def test_incomplete_pgs(ctx, config): assert isinstance(config, dict), \ 'task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = 
ctx.cluster.only(first_mon).remotes.iterkeys() + mon = teuthology.get_single_remote_value(ctx, first_mon) num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') log.info('num_osds is %s' % num_osds) diff --git a/teuthology/task/peer.py b/teuthology/task/peer.py index 8006c3812ad7a..9b233a75556f6 100644 --- a/teuthology/task/peer.py +++ b/teuthology/task/peer.py @@ -19,7 +19,7 @@ def task(ctx, config): assert isinstance(config, dict), \ 'peer task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + mon = teuthology.get_single_remote_value(ctx, first_mon) manager = ceph_manager.CephManager( mon, diff --git a/teuthology/task/peering_speed_test.py b/teuthology/task/peering_speed_test.py index 6c885f1c961b7..8db4a5dba784f 100644 --- a/teuthology/task/peering_speed_test.py +++ b/teuthology/task/peering_speed_test.py @@ -26,7 +26,7 @@ def setup(ctx, config): Setup peering test on remotes. """ first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + mon = teuthology.get_single_remote_value(ctx, first_mon) ctx.manager = ceph_manager.CephManager( mon, ctx=ctx, diff --git a/teuthology/task/pexec.py b/teuthology/task/pexec.py index 742ac0010bc22..02d84a1c0086e 100644 --- a/teuthology/task/pexec.py +++ b/teuthology/task/pexec.py @@ -68,15 +68,16 @@ def _generate_remotes(ctx, config): elif 'clients' in config: ls = config['clients'] for role in teuthology.all_roles_of_type(ctx.cluster, 'client'): - (remote,) = ctx.cluster.only('client.{r}'.format(r=role)).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, + 'client.{r}'.format(r=role)) yield (remote, ls) del config['clients'] for role, ls in config.iteritems(): - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, role) yield (remote, ls) else: for role, ls in config.iteritems(): - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, role) yield (remote, ls) def task(ctx, config): diff --git a/teuthology/task/qemu.py b/teuthology/task/qemu.py index db93107a9871a..423e04af80a42 100644 --- a/teuthology/task/qemu.py +++ b/teuthology/task/qemu.py @@ -26,7 +26,7 @@ def create_dirs(ctx, config): testdir = teuthology.get_testdir(ctx) for client, client_config in config.iteritems(): assert 'test' in client_config, 'You must specify a test to run' - (remote,) = ctx.cluster.only(client).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, client) remote.run( args=[ 'install', '-d', '-m0755', '--', @@ -39,7 +39,7 @@ def create_dirs(ctx, config): finally: for client, client_config in config.iteritems(): assert 'test' in client_config, 'You must specify a test to run' - (remote,) = ctx.cluster.only(client).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, client) remote.run( args=[ 'rmdir', '{tdir}/qemu'.format(tdir=testdir), run.Raw('||'), 'true', @@ -84,7 +84,7 @@ def generate_iso(ctx, config): /mnt/cdrom/test.sh > /mnt/log/test.log 2>&1 && touch /mnt/log/success """ + test_teardown - (remote,) = ctx.cluster.only(client).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, client) teuthology.write_file(remote, userdata_path, StringIO(user_data)) with file(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f: @@ -114,7 +114,7 @@ def generate_iso(ctx, config): yield finally: for client in config.iterkeys(): - (remote,) = 
ctx.cluster.only(client).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, client) remote.run( args=[ 'rm', '-f', @@ -131,7 +131,7 @@ def download_image(ctx, config): log.info('downloading base image') testdir = teuthology.get_testdir(ctx) for client, client_config in config.iteritems(): - (remote,) = ctx.cluster.only(client).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, client) base_file = '{tdir}/qemu/base.{client}.qcow2'.format(tdir=testdir, client=client) remote.run( args=[ @@ -147,7 +147,7 @@ def download_image(ctx, config): tdir=testdir, client=client, ) - (remote,) = ctx.cluster.only(client).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, client) remote.run( args=[ 'rm', '-f', base_file, @@ -160,7 +160,7 @@ def run_qemu(ctx, config): procs = [] testdir = teuthology.get_testdir(ctx) for client, client_config in config.iteritems(): - (remote,) = ctx.cluster.only(client).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, client) log_dir = '{tdir}/archive/qemu/{client}'.format(tdir=testdir, client=client) remote.run( args=[ @@ -228,7 +228,7 @@ def run_qemu(ctx, config): log.debug('checking that qemu tests succeeded...') for client in config.iterkeys(): - (remote,) = ctx.cluster.only(client).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, client) remote.run( args=[ 'test', '-f', diff --git a/teuthology/task/rados.py b/teuthology/task/rados.py index 0897726a6bcf9..ffb33b4c1aa8c 100644 --- a/teuthology/task/rados.py +++ b/teuthology/task/rados.py @@ -120,7 +120,7 @@ def task(ctx, config): """Thread spawned by gevent""" if not hasattr(ctx, 'manager'): first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + mon = teuthology.get_single_remote_value(ctx, first_mon) ctx.manager = CephManager( mon, ctx=ctx, @@ -147,7 +147,7 @@ def task(ctx, config): pool = ctx.manager.create_pool_with_unique_name(ec_pool=config.get('ec_pool', False)) created_pools.append(pool) - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, role) proc = remote.run( args=["CEPH_CLIENT_ID={id_}".format(id_=id_)] + args + ["--pool", pool], diff --git a/teuthology/task/radosbench.py b/teuthology/task/radosbench.py index d2e75716e9194..b3e4c8b0fdb9c 100644 --- a/teuthology/task/radosbench.py +++ b/teuthology/task/radosbench.py @@ -44,7 +44,7 @@ def task(ctx, config): PREFIX = 'client.' 
assert role.startswith(PREFIX) id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, role) pool = 'data' if config.get('pool'): diff --git a/teuthology/task/radosgw-admin-rest.py b/teuthology/task/radosgw-admin-rest.py index 43cf735394c4c..1d31f1dc567ca 100644 --- a/teuthology/task/radosgw-admin-rest.py +++ b/teuthology/task/radosgw-admin-rest.py @@ -53,7 +53,7 @@ def rgwadmin(ctx, client, cmd): '--format', 'json', ] pre.extend(cmd) - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, client) proc = remote.run( args=pre, check_status=False, @@ -217,7 +217,7 @@ def task(ctx, config): logging.error(err) assert not err - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, client) remote_host = remote.name.split('@')[1] admin_conn = boto.s3.connection.S3Connection( aws_access_key_id=admin_access_key, diff --git a/teuthology/task/radosgw-agent.py b/teuthology/task/radosgw-agent.py index 6d48e0df0510c..ae0c4f87e7ff6 100644 --- a/teuthology/task/radosgw-agent.py +++ b/teuthology/task/radosgw-agent.py @@ -33,7 +33,7 @@ def run_radosgw_agent(ctx, config): log.info("dest is %s", dest_zone) testdir = teuthology.get_testdir(ctx) - (remote,) = ctx.cluster.only(client).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, client) # figure out which branch to pull from branch = cconf.get('force-branch', None) if not branch: diff --git a/teuthology/task/rbd.py b/teuthology/task/rbd.py index 7d07a6115af24..16a91f61e2a04 100644 --- a/teuthology/task/rbd.py +++ b/teuthology/task/rbd.py @@ -53,7 +53,7 @@ def create_image(ctx, config): name = properties.get('image_name', default_image_name(role)) size = properties.get('image_size', 10240) fmt = properties.get('image_format', 1) - (remote,) = ctx.cluster.only(role).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, role) log.info('Creating image {name} with size {size}'.format(name=name, size=size)) args = [ @@ -79,7 +79,7 @@ def create_image(ctx, config): if properties is None: properties = {} name = properties.get('image_name', default_image_name(role)) - (remote,) = ctx.cluster.only(role).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, role) remote.run( args=[ 'adjust-ulimits', @@ -106,7 +106,7 @@ def modprobe(ctx, config): """ log.info('Loading rbd kernel module...') for role in config: - (remote,) = ctx.cluster.only(role).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, role) remote.run( args=[ 'sudo', @@ -119,7 +119,7 @@ def modprobe(ctx, config): finally: log.info('Unloading rbd kernel module...') for role in config: - (remote,) = ctx.cluster.only(role).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, role) remote.run( args=[ 'sudo', @@ -164,7 +164,7 @@ def dev_create(ctx, config): for role, image in role_images: if image is None: image = default_image_name(role) - (remote,) = ctx.cluster.only(role).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, role) remote.run( args=[ @@ -191,7 +191,7 @@ def dev_create(ctx, config): for role, image in role_images: if image is None: image = default_image_name(role) - (remote,) = ctx.cluster.only(role).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, role) remote.run( args=[ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir), @@ -287,7 +287,7 @@ def run_xfstests_one_client(ctx, role, properties): fs_type = 
properties.get('fs_type') tests = properties.get('tests') - (remote,) = ctx.cluster.only(role).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, role) # Fetch the test script test_root = teuthology.get_testdir(ctx) diff --git a/teuthology/task/rbd_fsx.py b/teuthology/task/rbd_fsx.py index 6d55b5cf45759..8cb5101fec9d3 100644 --- a/teuthology/task/rbd_fsx.py +++ b/teuthology/task/rbd_fsx.py @@ -43,7 +43,7 @@ def task(ctx, config): def _run_one_client(ctx, config, role): """Spawned task that runs the client""" testdir = teuthology.get_testdir(ctx) - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, role) remote.run( args=[ 'adjust-ulimits', diff --git a/teuthology/task/recovery_bench.py b/teuthology/task/recovery_bench.py index 1984b97d31eff..5c12bdbc9b9d5 100644 --- a/teuthology/task/recovery_bench.py +++ b/teuthology/task/recovery_bench.py @@ -48,7 +48,7 @@ def task(ctx, config): log.info('Beginning recovery bench...') first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + mon = teuthology.get_single_remote_value(ctx, first_mon) manager = ceph_manager.CephManager( mon, @@ -113,7 +113,8 @@ class RecoveryBencher: io_size = self.config.get("io_size", 4096) osd = str(random.choice(self.osds)) - (osd_remote,) = self.ceph_manager.ctx.cluster.only('osd.%s' % osd).remotes.iterkeys() + osd_remote = teuthology.get_single_remote_value(self.ceph_manager.ctx, + 'osd.%s' % osd) testdir = teuthology.get_testdir(self.ceph_manager.ctx) diff --git a/teuthology/task/repair_test.py b/teuthology/task/repair_test.py index 1dd8f2fdefa6c..f1063a186dc20 100644 --- a/teuthology/task/repair_test.py +++ b/teuthology/task/repair_test.py @@ -98,7 +98,7 @@ def gen_repair_test_2(chooser): log.info("starting repair test type 2") victim_osd = chooser(pool, 0) first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + mon = teuthology.get_single_remote_value(ctx, first_mon) # create object log.info("doing put and setomapval") @@ -191,7 +191,7 @@ def task(ctx, config): if not hasattr(ctx, 'manager'): first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + mon = teuthology.get_single_remote_value(ctx, first_mon) ctx.manager = ceph_manager.CephManager( mon, ctx=ctx, diff --git a/teuthology/task/restart.py b/teuthology/task/restart.py index 87ca2b099e322..b508df0f16dca 100644 --- a/teuthology/task/restart.py +++ b/teuthology/task/restart.py @@ -98,7 +98,7 @@ def task(ctx, config): assert 'exec' in config, "config requires exec key with : entries" for role, task in config['exec'].iteritems(): log.info('restart for role {r}'.format(r=role)) - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, role) srcdir, restarts = get_tests(ctx, config, role, remote, testdir) log.info('Running command on role %s host %s', role, remote.name) spec = '{spec}'.format(spec=task[0]) diff --git a/teuthology/task/rgw.py b/teuthology/task/rgw.py index 674683a636a54..18b89279c8a7f 100644 --- a/teuthology/task/rgw.py +++ b/teuthology/task/rgw.py @@ -78,7 +78,7 @@ def ship_config(ctx, config, role_endpoints): log.info('Shipping apache config and rgw.fcgi...') src = os.path.join(os.path.dirname(__file__), 'apache.conf.template') for client, conf in config.iteritems(): - (remote,) = ctx.cluster.only(client).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, 
client) system_type = teuthology.get_system_type(remote) if not conf: conf = {} @@ -152,7 +152,7 @@ def start_rgw(ctx, config): log.info('Starting rgw...') testdir = teuthology.get_testdir(ctx) for client in config.iterkeys(): - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, client) client_config = config.get(client) if client_config is None: @@ -235,7 +235,7 @@ def start_apache(ctx, config): testdir = teuthology.get_testdir(ctx) apaches = {} for client in config.iterkeys(): - (remote,) = ctx.cluster.only(client).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, client) system_type = teuthology.get_system_type(remote) if system_type == 'deb': apache_name = 'apache2' @@ -471,7 +471,7 @@ def create_nonregion_pools(ctx, config, regions): log.info('creating data pools') for client in config.keys(): - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, client) data_pool = '.rgw.buckets' if ctx.rgw.ec_data_pool: create_ec_pool(remote, data_pool, client, 64) @@ -516,7 +516,7 @@ def configure_regions_and_zones(ctx, config, regions, role_endpoints): # clear out the old defaults first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + mon = teuthology.get_single_remote_value(ctx, first_mon) # removing these objects from .rgw.root and the per-zone root pools # may or may not matter rados(ctx, mon, @@ -533,7 +533,7 @@ def configure_regions_and_zones(ctx, config, regions, role_endpoints): cmd=['-p', zone_info['domain_root'], 'rm', 'zone_info.default']) - (remote,) = ctx.cluster.only(role).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, role) for pool_info in zone_info['placement_pools']: remote.run(args=['ceph', 'osd', 'pool', 'create', pool_info['val']['index_pool'], '64', '64']) diff --git a/teuthology/task/s3readwrite.py b/teuthology/task/s3readwrite.py index 476015d76b59c..4c29b1314d150 100644 --- a/teuthology/task/s3readwrite.py +++ b/teuthology/task/s3readwrite.py @@ -182,7 +182,7 @@ def configure(ctx, config): s3tests_conf['s3'].setdefault('port', def_conf['port']) s3tests_conf['s3'].setdefault('is_secure', def_conf['is_secure']) - (remote,) = ctx.cluster.only(client).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, client) remote.run( args=[ 'cd', @@ -216,7 +216,7 @@ def run_tests(ctx, config): assert isinstance(config, dict) testdir = teuthology.get_testdir(ctx) for client, client_config in config.iteritems(): - (remote,) = ctx.cluster.only(client).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, client) conf = teuthology.get_file(remote, '{tdir}/archive/s3readwrite.{client}.config.yaml'.format(tdir=testdir, client=client)) args = [ '{tdir}/s3-tests/virtualenv/bin/s3tests-test-readwrite'.format(tdir=testdir), diff --git a/teuthology/task/s3roundtrip.py b/teuthology/task/s3roundtrip.py index 5a7093d6f4304..38b5705d28e18 100644 --- a/teuthology/task/s3roundtrip.py +++ b/teuthology/task/s3roundtrip.py @@ -147,7 +147,7 @@ def configure(ctx, config): s3tests_conf['s3'].setdefault('port', def_conf['port']) s3tests_conf['s3'].setdefault('is_secure', def_conf['is_secure']) - (remote,) = ctx.cluster.only(client).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, client) remote.run( args=[ 'cd', @@ -181,7 +181,7 @@ def run_tests(ctx, config): assert isinstance(config, dict) testdir = teuthology.get_testdir(ctx) for client, client_config in 
config.iteritems(): - (remote,) = ctx.cluster.only(client).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, client) conf = teuthology.get_file(remote, '{tdir}/archive/s3roundtrip.{client}.config.yaml'.format(tdir=testdir, client=client)) args = [ '{tdir}/s3-tests/virtualenv/bin/s3tests-test-roundtrip'.format(tdir=testdir), diff --git a/teuthology/task/s3tests.py b/teuthology/task/s3tests.py index abbacb9bfaa3a..f2c2b07284865 100644 --- a/teuthology/task/s3tests.py +++ b/teuthology/task/s3tests.py @@ -246,7 +246,7 @@ def configure(ctx, config): else: s3tests_conf['DEFAULT']['host'] = 'localhost' - (remote,) = ctx.cluster.only(client).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, client) remote.run( args=[ 'cd', diff --git a/teuthology/task/samba.py b/teuthology/task/samba.py index c2e6e6a21747b..079120cd25d08 100644 --- a/teuthology/task/samba.py +++ b/teuthology/task/samba.py @@ -23,7 +23,7 @@ def get_sambas(ctx, roles): PREFIX = 'samba.' assert role.startswith(PREFIX) id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, role) yield (id_, remote) @contextlib.contextmanager diff --git a/teuthology/task/scrub.py b/teuthology/task/scrub.py index 7a25300a677dc..e7285dfe74d6d 100644 --- a/teuthology/task/scrub.py +++ b/teuthology/task/scrub.py @@ -39,7 +39,7 @@ def task(ctx, config): log.info('Beginning scrub...') first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + mon = teuthology.get_single_remote_value(ctx, first_mon) manager = ceph_manager.CephManager( mon, diff --git a/teuthology/task/scrub_test.py b/teuthology/task/scrub_test.py index 3443ae9f45e92..f5249666b39d2 100644 --- a/teuthology/task/scrub_test.py +++ b/teuthology/task/scrub_test.py @@ -33,7 +33,7 @@ def task(ctx, config): assert isinstance(config, dict), \ 'scrub_test task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + mon = teuthology.get_single_remote_value(ctx, first_mon) num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') log.info('num_osds is %s' % num_osds) @@ -73,7 +73,7 @@ def task(ctx, config): log.info('messing with PG %s on osd %d' % (victim, osd)) - (osd_remote,) = ctx.cluster.only('osd.%d' % osd).remotes.iterkeys() + osd_remote = teuthology.get_single_remote_value(ctx, 'osd.%d' % osd) data_path = os.path.join( '/var/lib/ceph/osd', 'ceph-{id}'.format(id=osd), diff --git a/teuthology/task/swift.py b/teuthology/task/swift.py index 6285eb6b1c510..efc8587765923 100644 --- a/teuthology/task/swift.py +++ b/teuthology/task/swift.py @@ -133,7 +133,7 @@ def configure(ctx, config): testswift_conf['func_test']['auth_host'] = 'localhost' log.info(client) - (remote,) = ctx.cluster.only(client).remotes.keys() + remote = teuthology.get_single_remote_value(ctx, client) remote.run( args=[ 'cd', diff --git a/teuthology/task/thrashosds.py b/teuthology/task/thrashosds.py index c5f26dcb740c8..37ac037657423 100644 --- a/teuthology/task/thrashosds.py +++ b/teuthology/task/thrashosds.py @@ -152,7 +152,7 @@ def task(ctx, config): log.info('Beginning thrashosds...') first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + mon = teuthology.get_single_remote_value(ctx, first_mon) manager = ceph_manager.CephManager( mon, ctx=ctx, diff --git a/teuthology/task/watch_notify_stress.py 
b/teuthology/task/watch_notify_stress.py index ab611c3dd4a70..2402840e09e10 100644 --- a/teuthology/task/watch_notify_stress.py +++ b/teuthology/task/watch_notify_stress.py @@ -6,6 +6,7 @@ import logging import proc_thrasher from ..orchestra import run +from teuthology import misc as teuthology log = logging.getLogger(__name__) @@ -40,7 +41,7 @@ def task(ctx, config): PREFIX = 'client.' assert role.startswith(PREFIX) id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, role) remotes.append(remote) args =['CEPH_CLIENT_ID={id_}'.format(id_=id_), diff --git a/teuthology/task/workunit.py b/teuthology/task/workunit.py index b504eeb50c58d..412d800e57bb6 100644 --- a/teuthology/task/workunit.py +++ b/teuthology/task/workunit.py @@ -127,7 +127,7 @@ def _delete_dir(ctx, role): PREFIX = 'client.' testdir = teuthology.get_testdir(ctx) id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, role) mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) # Is there any reason why this is not: join(mnt, role) ? client = os.path.join(mnt, 'client.{id}'.format(id=id_)) @@ -169,7 +169,7 @@ def _make_scratch_dir(ctx, role, subdir): PREFIX = 'client.' id_ = role[len(PREFIX):] log.debug("getting remote for {id} role {role_}".format(id=id_, role_=role)) - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, role) dir_owner = remote.shortname.split('@', 1)[0] mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_)) # if neither kclient nor ceph-fuse are required for a workunit, @@ -240,7 +240,8 @@ def _spawn_on_all_clients(ctx, refspec, tests, env, subdir, timeout=None): client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client') client_remotes = list() for client in client_generator: - (client_remote,) = ctx.cluster.only('client.{id}'.format(id=client)).remotes.iterkeys() + client_remote = teuthology.get_single_remote_value(ctx, + 'client.{id}'.format(id=client)) client_remotes.append((client_remote, 'client.{id}'.format(id=client))) _make_scratch_dir(ctx, "client.{id}".format(id=client), subdir) @@ -279,7 +280,7 @@ def _run_tests(ctx, refspec, role, tests, env, subdir=None, timeout=None): PREFIX = 'client.' assert role.startswith(PREFIX) id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, role) mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) # subdir so we can remove and recreate this a lot without sudo if subdir is None: diff --git a/teuthology/task_util/rgw.py b/teuthology/task_util/rgw.py index cbe3071fbe5f4..7650c4cdfa531 100644 --- a/teuthology/task_util/rgw.py +++ b/teuthology/task_util/rgw.py @@ -29,7 +29,7 @@ def rgwadmin(ctx, client, cmd, stdin=StringIO(), check_status=False): ] pre.extend(cmd) log.info('rgwadmin: cmd=%s' % pre) - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + remote = teuthology.get_single_remote_value(ctx, client) proc = remote.run( args=pre, check_status=check_status, -- 2.39.5