From 5f95b532aa26249cecea4586166399a0bf10ad47 Mon Sep 17 00:00:00 2001 From: Kyr Shatskyy Date: Fri, 11 Oct 2019 17:57:47 +0200 Subject: [PATCH] qa: get rid of iterkeys for py3 compatibility Fixes: https://tracker.ceph.com/issues/42287 Signed-off-by: Kyr Shatskyy --- qa/tasks/admin_socket.py | 2 +- qa/tasks/autotest.py | 6 +++--- qa/tasks/barbican.py | 4 ++-- qa/tasks/ceph.py | 18 +++++++++--------- qa/tasks/ceph_deploy.py | 12 ++++++------ qa/tasks/ceph_objectstore_tool.py | 16 ++++++++-------- qa/tasks/cephfs/filesystem.py | 2 +- qa/tasks/cram.py | 8 ++++---- qa/tasks/die_on_err.py | 4 ++-- qa/tasks/divergent_priors.py | 2 +- qa/tasks/divergent_priors2.py | 4 ++-- qa/tasks/dump_stuck.py | 2 +- qa/tasks/ec_lost_unfound.py | 2 +- qa/tasks/exec_on_cleanup.py | 2 +- qa/tasks/filestore_idempotent.py | 2 +- qa/tasks/keystone.py | 6 +++--- qa/tasks/lost_unfound.py | 2 +- qa/tasks/manypools.py | 2 +- qa/tasks/mds_creation_failure.py | 2 +- qa/tasks/mds_thrash.py | 6 +++--- qa/tasks/mon_clock_skew_check.py | 2 +- qa/tasks/mon_recovery.py | 2 +- qa/tasks/mon_thrash.py | 2 +- qa/tasks/netem.py | 6 +++--- qa/tasks/object_source_down.py | 2 +- qa/tasks/omapbench.py | 2 +- qa/tasks/osd_backfill.py | 2 +- qa/tasks/osd_failsafe_enospc.py | 2 +- qa/tasks/osd_recovery.py | 4 ++-- qa/tasks/peer.py | 2 +- qa/tasks/populate_rbd_pool.py | 2 +- qa/tasks/qemu.py | 6 +++--- qa/tasks/rados.py | 2 +- qa/tasks/radosbench.py | 2 +- qa/tasks/radosbenchsweep.py | 4 ++-- qa/tasks/ragweed.py | 2 +- qa/tasks/rbd_fsx.py | 2 +- qa/tasks/rebuild_mondb.py | 2 +- qa/tasks/reg11184.py | 6 +++--- qa/tasks/rep_lost_unfound_delete.py | 2 +- qa/tasks/repair_test.py | 2 +- qa/tasks/resolve_stuck_peering.py | 2 +- qa/tasks/restart.py | 2 +- qa/tasks/rgw.py | 6 +++--- qa/tasks/s3readwrite.py | 4 ++-- qa/tasks/s3roundtrip.py | 2 +- qa/tasks/s3tests.py | 2 +- qa/tasks/samba.py | 2 +- qa/tasks/scrub.py | 2 +- qa/tasks/scrub_test.py | 4 ++-- qa/tasks/swift.py | 2 +- qa/tasks/systemd.py | 2 +- qa/tasks/tempest.py | 2 +- qa/tasks/util/rgw.py | 2 +- qa/tasks/util/workunit.py | 4 ++-- qa/tasks/vault.py | 4 ++-- qa/tasks/watch_notify_same_primary.py | 2 +- qa/tasks/watch_notify_stress.py | 2 +- qa/tasks/workunit.py | 2 +- qa/workunits/mon/caps.py | 2 +- 60 files changed, 106 insertions(+), 106 deletions(-) diff --git a/qa/tasks/admin_socket.py b/qa/tasks/admin_socket.py index 3301372756be..ed134e7d2f6d 100644 --- a/qa/tasks/admin_socket.py +++ b/qa/tasks/admin_socket.py @@ -124,7 +124,7 @@ def _run_tests(ctx, client, tests): """ testdir = teuthology.get_testdir(ctx) log.debug('Running admin socket tests on %s', client) - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + (remote,) = ctx.cluster.only(client).remotes.keys() socket_path = '/var/run/ceph/ceph-{name}.asok'.format(name=client) overrides = ctx.config.get('overrides', {}).get('admin_socket', {}) diff --git a/qa/tasks/autotest.py b/qa/tasks/autotest.py index efa972123d25..1735f677380d 100644 --- a/qa/tasks/autotest.py +++ b/qa/tasks/autotest.py @@ -42,17 +42,17 @@ def task(ctx, config): log.info('Setting up autotest...') testdir = teuthology.get_testdir(ctx) with parallel() as p: - for role in config.iterkeys(): + for role in config.keys(): (remote,) = ctx.cluster.only(role).remotes.keys() p.spawn(_download, testdir, remote) log.info('Making a separate scratch dir for every client...') - for role in config.iterkeys(): + for role in config.keys(): assert isinstance(role, basestring) PREFIX = 'client.' 
assert role.startswith(PREFIX) id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + (remote,) = ctx.cluster.only(role).remotes.keys() mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) scratch = os.path.join(mnt, 'client.{id}'.format(id=id_)) remote.run( diff --git a/qa/tasks/barbican.py b/qa/tasks/barbican.py index 360b11cfb7ef..0b54cbddc702 100644 --- a/qa/tasks/barbican.py +++ b/qa/tasks/barbican.py @@ -196,7 +196,7 @@ def run_barbican(ctx, config): log.info('Running barbican...') for (client, _) in config.items(): - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + (remote,) = ctx.cluster.only(client).remotes.keys() cluster_name, _, client_id = teuthology.split_role(client) # start the public endpoint @@ -491,7 +491,7 @@ def task(ctx, config): overrides = ctx.config.get('overrides', {}) # merge each client section, not the top level. - for client in config.iterkeys(): + for client in config.keys(): if not config[client]: config[client] = {} teuthology.deep_merge(config[client], overrides.get('barbican', {})) diff --git a/qa/tasks/ceph.py b/qa/tasks/ceph.py index 499df3e3e689..287946bd7230 100644 --- a/qa/tasks/ceph.py +++ b/qa/tasks/ceph.py @@ -87,7 +87,7 @@ def ceph_crash(ctx, config): os.makedirs(path) except OSError as e: pass - for remote in ctx.cluster.remotes.iterkeys(): + for remote in ctx.cluster.remotes.keys(): sub = os.path.join(path, remote.shortname) try: os.makedirs(sub) @@ -197,7 +197,7 @@ def ceph_log(ctx, config): conf += f.read().format(daemon_type=daemon, max_size=size) f.seek(0, 0) - for remote in ctx.cluster.remotes.iterkeys(): + for remote in ctx.cluster.remotes.keys(): teuthology.write_file(remote=remote, path='{tdir}/logrotate.ceph-test.conf'.format(tdir=testdir), data=StringIO(conf) @@ -272,7 +272,7 @@ def ceph_log(ctx, config): os.makedirs(path) except OSError as e: pass - for remote in ctx.cluster.remotes.iterkeys(): + for remote in ctx.cluster.remotes.keys(): sub = os.path.join(path, remote.shortname) try: os.makedirs(sub) @@ -308,7 +308,7 @@ def valgrind_post(ctx, config): finally: lookup_procs = list() log.info('Checking for errors in any valgrind logs...') - for remote in ctx.cluster.remotes.iterkeys(): + for remote in ctx.cluster.remotes.keys(): # look at valgrind logs for each node proc = remote.run( args=[ @@ -358,7 +358,7 @@ def valgrind_post(ctx, config): def crush_setup(ctx, config): cluster_name = config['cluster'] first_mon = teuthology.get_first_mon(ctx, config, cluster_name) - (mon_remote,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys() profile = config.get('crush_tunables', 'default') log.info('Setting crush tunables to %s', profile) @@ -372,7 +372,7 @@ def crush_setup(ctx, config): def create_rbd_pool(ctx, config): cluster_name = config['cluster'] first_mon = teuthology.get_first_mon(ctx, config, cluster_name) - (mon_remote,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys() log.info('Waiting for OSDs to come up') teuthology.wait_until_osds_up( ctx, @@ -401,7 +401,7 @@ def cephfs_setup(ctx, config): coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir) first_mon = teuthology.get_first_mon(ctx, config, cluster_name) - (mon_remote,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys() mdss = ctx.cluster.only(teuthology.is_type('mds', cluster_name)) # If there are any MDSs, then create a filesystem for them to use # Do 
this last because requires mon cluster to be up and running @@ -770,7 +770,7 @@ def cluster(ctx, config): path=monmap_path, ) - for rem in ctx.cluster.remotes.iterkeys(): + for rem in ctx.cluster.remotes.keys(): # copy mon key and initial monmap log.info('Sending monmap to node {remote}'.format(remote=rem)) teuthology.sudo_write_file( @@ -1923,7 +1923,7 @@ def task(ctx, config): with contextutil.nested(*subtasks): first_mon = teuthology.get_first_mon(ctx, config, config['cluster']) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() if not hasattr(ctx, 'managers'): ctx.managers = {} ctx.managers[config['cluster']] = CephManager( diff --git a/qa/tasks/ceph_deploy.py b/qa/tasks/ceph_deploy.py index a4e5921775ef..09b54902f7aa 100644 --- a/qa/tasks/ceph_deploy.py +++ b/qa/tasks/ceph_deploy.py @@ -29,7 +29,7 @@ def download_ceph_deploy(ctx, config): obtained from `python_version`, if specified. """ # use mon.a for ceph_admin - (ceph_admin,) = ctx.cluster.only('mon.a').remotes.iterkeys() + (ceph_admin,) = ctx.cluster.only('mon.a').remotes.keys() try: py_ver = str(config['python_version']) @@ -222,7 +222,7 @@ def build_ceph_cluster(ctx, config): # puts it. Remember this here, because subsequently IDs will change from those in # the test config to those that ceph-deploy invents. - (ceph_admin,) = ctx.cluster.only('mon.a').remotes.iterkeys() + (ceph_admin,) = ctx.cluster.only('mon.a').remotes.keys() def execute_ceph_deploy(cmd): """Remotely execute a ceph_deploy command""" @@ -266,7 +266,7 @@ def build_ceph_cluster(ctx, config): def ceph_volume_osd_create(ctx, config): osds = ctx.cluster.only(teuthology.is_type('osd')) no_of_osds = 0 - for remote in osds.remotes.iterkeys(): + for remote in osds.remotes.keys(): # all devs should be lvm osd_create_cmd = './ceph-deploy osd create --debug ' + remote.shortname + ' ' # default is bluestore so we just need config item for filestore @@ -387,7 +387,7 @@ def build_ceph_cluster(ctx, config): # install admin key on mons (ceph-create-keys doesn't do this any more) mons = ctx.cluster.only(teuthology.is_type('mon')) - for remote in mons.remotes.iterkeys(): + for remote in mons.remotes.keys(): execute_ceph_deploy('./ceph-deploy admin ' + remote.shortname) # create osd's @@ -563,7 +563,7 @@ def build_ceph_cluster(ctx, config): log.info('Archiving logs...') path = os.path.join(ctx.archive, 'remote') os.makedirs(path) - for remote in ctx.cluster.remotes.iterkeys(): + for remote in ctx.cluster.remotes.keys(): sub = os.path.join(path, remote.shortname) os.makedirs(sub) teuthology.pull_directory(remote, '/var/log/ceph', @@ -786,7 +786,7 @@ def upgrade(ctx, config): ceph_branch = '--dev={branch}'.format(branch=dev_branch) # get the node used for initial deployment which is mon.a mon_a = mapped_role.get('mon.a') - (ceph_admin,) = ctx.cluster.only(mon_a).remotes.iterkeys() + (ceph_admin,) = ctx.cluster.only(mon_a).remotes.keys() testdir = teuthology.get_testdir(ctx) cmd = './ceph-deploy install ' + ceph_branch for role in roles: diff --git a/qa/tasks/ceph_objectstore_tool.py b/qa/tasks/ceph_objectstore_tool.py index 2caa6cdbce46..72e367f65d17 100644 --- a/qa/tasks/ceph_objectstore_tool.py +++ b/qa/tasks/ceph_objectstore_tool.py @@ -279,7 +279,7 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False): prefix = ("sudo ceph-objectstore-tool " "--data-path {fpath} " "--journal-path {jpath} ").format(fpath=FSPATH, jpath=JPATH) - for remote in osds.remotes.iterkeys(): + for remote in 
osds.remotes.keys(): log.debug(remote) log.debug(osds.remotes[remote]) for role in osds.remotes[remote]: @@ -319,7 +319,7 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False): GETNAME = os.path.join(DATADIR, "get") SETNAME = os.path.join(DATADIR, "set") - for remote in osds.remotes.iterkeys(): + for remote in osds.remotes.keys(): for role in osds.remotes[remote]: if string.find(role, "osd.") != 0: continue @@ -411,7 +411,7 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False): GETNAME = os.path.join(DATADIR, "get") SETNAME = os.path.join(DATADIR, "set") - for remote in osds.remotes.iterkeys(): + for remote in osds.remotes.keys(): for role in osds.remotes[remote]: if string.find(role, "osd.") != 0: continue @@ -498,7 +498,7 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False): log.error(values) log.info("Test pg info") - for remote in osds.remotes.iterkeys(): + for remote in osds.remotes.keys(): for role in osds.remotes[remote]: if string.find(role, "osd.") != 0: continue @@ -523,7 +523,7 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False): ERRORS += 1 log.info("Test pg logging") - for remote in osds.remotes.iterkeys(): + for remote in osds.remotes.keys(): for role in osds.remotes[remote]: if string.find(role, "osd.") != 0: continue @@ -555,7 +555,7 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False): log.info("Test pg export") EXP_ERRORS = 0 - for remote in osds.remotes.iterkeys(): + for remote in osds.remotes.keys(): for role in osds.remotes[remote]: if string.find(role, "osd.") != 0: continue @@ -582,7 +582,7 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False): log.info("Test pg removal") RM_ERRORS = 0 - for remote in osds.remotes.iterkeys(): + for remote in osds.remotes.keys(): for role in osds.remotes[remote]: if string.find(role, "osd.") != 0: continue @@ -608,7 +608,7 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False): if EXP_ERRORS == 0 and RM_ERRORS == 0: log.info("Test pg import") - for remote in osds.remotes.iterkeys(): + for remote in osds.remotes.keys(): for role in osds.remotes[remote]: if string.find(role, "osd.") != 0: continue diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py index 8d72ba380d71..c3f0e6ca309e 100644 --- a/qa/tasks/cephfs/filesystem.py +++ b/qa/tasks/cephfs/filesystem.py @@ -166,7 +166,7 @@ class CephCluster(object): @property def admin_remote(self): first_mon = misc.get_first_mon(self._ctx, None) - (result,) = self._ctx.cluster.only(first_mon).remotes.iterkeys() + (result,) = self._ctx.cluster.only(first_mon).remotes.keys() return result def __init__(self, ctx): diff --git a/qa/tasks/cram.py b/qa/tasks/cram.py index 40f8253eec24..8f2f28cc64df 100644 --- a/qa/tasks/cram.py +++ b/qa/tasks/cram.py @@ -61,7 +61,7 @@ def task(ctx, config): try: for client, tests in clients.iteritems(): - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + (remote,) = ctx.cluster.only(client).remotes.keys() client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client) remote.run( args=[ @@ -85,11 +85,11 @@ def task(ctx, config): ) with parallel() as p: - for role in clients.iterkeys(): + for role in clients.keys(): p.spawn(_run_tests, ctx, role) finally: for client, tests in clients.iteritems(): - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + (remote,) = ctx.cluster.only(client).remotes.keys() client_dir = 
'{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client) test_files = set([test.rsplit('/', 1)[1] for test in tests]) @@ -128,7 +128,7 @@ def _run_tests(ctx, role): PREFIX = 'client.' assert role.startswith(PREFIX) id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + (remote,) = ctx.cluster.only(role).remotes.keys() ceph_ref = ctx.summary.get('ceph-sha1', 'master') testdir = teuthology.get_testdir(ctx) diff --git a/qa/tasks/die_on_err.py b/qa/tasks/die_on_err.py index bf422ae547d7..ee157f4afe46 100644 --- a/qa/tasks/die_on_err.py +++ b/qa/tasks/die_on_err.py @@ -20,7 +20,7 @@ def task(ctx, config): config = {} first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') log.info('num_osds is %s' % num_osds) @@ -38,7 +38,7 @@ def task(ctx, config): while True: for i in range(num_osds): - (osd_remote,) = ctx.cluster.only('osd.%d' % i).remotes.iterkeys() + (osd_remote,) = ctx.cluster.only('osd.%d' % i).remotes.keys() p = osd_remote.run( args = [ 'test', '-e', '{tdir}/err'.format(tdir=testdir) ], wait=True, diff --git a/qa/tasks/divergent_priors.py b/qa/tasks/divergent_priors.py index 12ea93365bf3..7a4d1327020a 100644 --- a/qa/tasks/divergent_priors.py +++ b/qa/tasks/divergent_priors.py @@ -61,7 +61,7 @@ def task(ctx, config): log.info('writing initial objects') first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() # write 100 objects for i in range(100): rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile]) diff --git a/qa/tasks/divergent_priors2.py b/qa/tasks/divergent_priors2.py index 0ed753278b41..fa2fae9e7dc3 100644 --- a/qa/tasks/divergent_priors2.py +++ b/qa/tasks/divergent_priors2.py @@ -64,7 +64,7 @@ def task(ctx, config): log.info('writing initial objects') first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() # write 100 objects for i in range(100): rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile]) @@ -146,7 +146,7 @@ def task(ctx, config): # Export a pg (exp_remote,) = ctx.\ - cluster.only('osd.{o}'.format(o=divergent)).remotes.iterkeys() + cluster.only('osd.{o}'.format(o=divergent)).remotes.keys() FSPATH = manager.get_filepath() JPATH = os.path.join(FSPATH, "journal") prefix = ("sudo adjust-ulimits ceph-objectstore-tool " diff --git a/qa/tasks/dump_stuck.py b/qa/tasks/dump_stuck.py index 8409778b92fd..8b6d2c7d5404 100644 --- a/qa/tasks/dump_stuck.py +++ b/qa/tasks/dump_stuck.py @@ -48,7 +48,7 @@ def task(ctx, config): timeout = 60 first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() manager = ceph_manager.CephManager( mon, diff --git a/qa/tasks/ec_lost_unfound.py b/qa/tasks/ec_lost_unfound.py index 2412cbd583ac..2360ea92ba49 100644 --- a/qa/tasks/ec_lost_unfound.py +++ b/qa/tasks/ec_lost_unfound.py @@ -21,7 +21,7 @@ def task(ctx, config): assert isinstance(config, dict), \ 'lost_unfound task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() manager = ceph_manager.CephManager( mon, diff 
--git a/qa/tasks/exec_on_cleanup.py b/qa/tasks/exec_on_cleanup.py index e3c09d5ec158..6e40e4044b03 100644 --- a/qa/tasks/exec_on_cleanup.py +++ b/qa/tasks/exec_on_cleanup.py @@ -47,7 +47,7 @@ def task(ctx, config): config = dict((id_, a) for id_ in roles) for role, ls in config.iteritems(): - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + (remote,) = ctx.cluster.only(role).remotes.keys() log.info('Running commands on role %s host %s', role, remote.name) for c in ls: c.replace('$TESTDIR', testdir) diff --git a/qa/tasks/filestore_idempotent.py b/qa/tasks/filestore_idempotent.py index 75c38a04f46a..01b562905b21 100644 --- a/qa/tasks/filestore_idempotent.py +++ b/qa/tasks/filestore_idempotent.py @@ -32,7 +32,7 @@ def task(ctx, config): # just use the first client... client = clients[0]; - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + (remote,) = ctx.cluster.only(client).remotes.keys() testdir = teuthology.get_testdir(ctx) diff --git a/qa/tasks/keystone.py b/qa/tasks/keystone.py index 2d998b917540..9dbf49dc7556 100644 --- a/qa/tasks/keystone.py +++ b/qa/tasks/keystone.py @@ -106,7 +106,7 @@ def install_packages(ctx, config): packages = {} for (client, _) in config.items(): - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + (remote,) = ctx.cluster.only(client).remotes.keys() # use bindep to read which dependencies we need from keystone/bindep.txt run_in_tox_venv(ctx, remote, ['pip', 'install', 'bindep']) r = run_in_tox_venv(ctx, remote, @@ -122,7 +122,7 @@ def install_packages(ctx, config): log.info('Removing packaged dependencies of Keystone...') for (client, _) in config.items(): - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + (remote,) = ctx.cluster.only(client).remotes.keys() for dep in packages[client]: remove_package(dep, remote) @@ -204,7 +204,7 @@ def run_keystone(ctx, config): log.info('Configuring keystone...') for (client, _) in config.items(): - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + (remote,) = ctx.cluster.only(client).remotes.keys() cluster_name, _, client_id = teuthology.split_role(client) # start the public endpoint diff --git a/qa/tasks/lost_unfound.py b/qa/tasks/lost_unfound.py index c50082863d7f..d51b96693063 100644 --- a/qa/tasks/lost_unfound.py +++ b/qa/tasks/lost_unfound.py @@ -22,7 +22,7 @@ def task(ctx, config): assert isinstance(config, dict), \ 'lost_unfound task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() manager = ceph_manager.CephManager( mon, diff --git a/qa/tasks/manypools.py b/qa/tasks/manypools.py index d8577c03ff6d..1f508a56fc20 100644 --- a/qa/tasks/manypools.py +++ b/qa/tasks/manypools.py @@ -39,7 +39,7 @@ def task(ctx, config): log.info('got client_roles={client_roles_}'.format(client_roles_=client_roles)) for role in client_roles: log.info('role={role_}'.format(role_=role)) - (creator_remote, ) = ctx.cluster.only('client.{id}'.format(id=role)).remotes.iterkeys() + (creator_remote, ) = ctx.cluster.only('client.{id}'.format(id=role)).remotes.keys() creator_remotes.append((creator_remote, 'client.{id}'.format(id=role))) remaining_pools = poolnum diff --git a/qa/tasks/mds_creation_failure.py b/qa/tasks/mds_creation_failure.py index aa2d6dbf2c06..2647eba761c8 100644 --- a/qa/tasks/mds_creation_failure.py +++ b/qa/tasks/mds_creation_failure.py @@ -23,7 +23,7 @@ def task(ctx, config): raise RuntimeError("This task requires exactly one MDS") mds_id = 
mdslist[0] - (mds_remote,) = ctx.cluster.only('mds.{_id}'.format(_id=mds_id)).remotes.iterkeys() + (mds_remote,) = ctx.cluster.only('mds.{_id}'.format(_id=mds_id)).remotes.keys() manager = ceph_manager.CephManager( mds_remote, ctx=ctx, logger=log.getChild('ceph_manager'), ) diff --git a/qa/tasks/mds_thrash.py b/qa/tasks/mds_thrash.py index e5d86e65b5d3..0b23cba602ca 100644 --- a/qa/tasks/mds_thrash.py +++ b/qa/tasks/mds_thrash.py @@ -150,7 +150,7 @@ class MDSThrasher(Greenlet, Thrasher): def kill_mds(self, mds): if self.config.get('powercycle'): (remote,) = (self.ctx.cluster.only('mds.{m}'.format(m=mds)). - remotes.iterkeys()) + remotes.keys()) self.log('kill_mds on mds.{m} doing powercycle of {s}'. format(m=mds, s=remote.name)) self._assert_ipmi(remote) @@ -171,7 +171,7 @@ class MDSThrasher(Greenlet, Thrasher): """ if self.config.get('powercycle'): (remote,) = (self.ctx.cluster.only('mds.{m}'.format(m=mds)). - remotes.iterkeys()) + remotes.keys()) self.log('revive_mds on mds.{m} doing powercycle of {s}'. format(m=mds, s=remote.name)) self._assert_ipmi(remote) @@ -387,7 +387,7 @@ def task(ctx, config): log.info('mds thrasher using random seed: {seed}'.format(seed=seed)) random.seed(seed) - (first,) = ctx.cluster.only('mds.{_id}'.format(_id=mdslist[0])).remotes.iterkeys() + (first,) = ctx.cluster.only('mds.{_id}'.format(_id=mdslist[0])).remotes.keys() manager = ceph_manager.CephManager( first, ctx=ctx, logger=log.getChild('ceph_manager'), ) diff --git a/qa/tasks/mon_clock_skew_check.py b/qa/tasks/mon_clock_skew_check.py index 547339f79a11..5c4088c7369c 100644 --- a/qa/tasks/mon_clock_skew_check.py +++ b/qa/tasks/mon_clock_skew_check.py @@ -50,7 +50,7 @@ def task(ctx, config): log.info('Beginning mon_clock_skew_check...') first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() manager = ceph_manager.CephManager( mon, ctx=ctx, diff --git a/qa/tasks/mon_recovery.py b/qa/tasks/mon_recovery.py index bfa2cdf78f15..e09e9877b571 100644 --- a/qa/tasks/mon_recovery.py +++ b/qa/tasks/mon_recovery.py @@ -17,7 +17,7 @@ def task(ctx, config): assert isinstance(config, dict), \ 'task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() manager = ceph_manager.CephManager( mon, diff --git a/qa/tasks/mon_thrash.py b/qa/tasks/mon_thrash.py index ed0940c4ebf2..0591ff7eb69c 100644 --- a/qa/tasks/mon_thrash.py +++ b/qa/tasks/mon_thrash.py @@ -365,7 +365,7 @@ def task(ctx, config): log.info('Beginning mon_thrash...') first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() manager = ceph_manager.CephManager( mon, ctx=ctx, diff --git a/qa/tasks/netem.py b/qa/tasks/netem.py index 572f9dc7585e..95018150da93 100644 --- a/qa/tasks/netem.py +++ b/qa/tasks/netem.py @@ -238,10 +238,10 @@ def task(ctx, config): if config.get('dst_client') is not None: dst = config.get('dst_client') - (host,) = ctx.cluster.only(dst).remotes.iterkeys() + (host,) = ctx.cluster.only(dst).remotes.keys() for role in config.get('clients', None): - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + (remote,) = ctx.cluster.only(role).remotes.keys() ctx.netem.remote = remote if config.get('delay', False): static_delay(remote, host, config.get('iface'), config.get('delay')) @@ -267,6 +267,6 
@@ def task(ctx, config): if ctx.netem.names: toggle.cleanup() for role in config.get('clients'): - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + (remote,) = ctx.cluster.only(role).remotes.keys() delete_dev(remote, config.get('iface')) diff --git a/qa/tasks/object_source_down.py b/qa/tasks/object_source_down.py index 9705d7c7375d..82ce43263174 100644 --- a/qa/tasks/object_source_down.py +++ b/qa/tasks/object_source_down.py @@ -18,7 +18,7 @@ def task(ctx, config): assert isinstance(config, dict), \ 'lost_unfound task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() manager = ceph_manager.CephManager( mon, diff --git a/qa/tasks/omapbench.py b/qa/tasks/omapbench.py index e026c74dbc0a..a6372a3bda97 100644 --- a/qa/tasks/omapbench.py +++ b/qa/tasks/omapbench.py @@ -52,7 +52,7 @@ def task(ctx, config): PREFIX = 'client.' assert role.startswith(PREFIX) id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + (remote,) = ctx.cluster.only(role).remotes.keys() proc = remote.run( args=[ "/bin/sh", "-c", diff --git a/qa/tasks/osd_backfill.py b/qa/tasks/osd_backfill.py index 04658d20569f..5ad5b7998314 100644 --- a/qa/tasks/osd_backfill.py +++ b/qa/tasks/osd_backfill.py @@ -38,7 +38,7 @@ def task(ctx, config): assert isinstance(config, dict), \ 'thrashosds task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') log.info('num_osds is %s' % num_osds) diff --git a/qa/tasks/osd_failsafe_enospc.py b/qa/tasks/osd_failsafe_enospc.py index 691085427393..8d89919035dc 100644 --- a/qa/tasks/osd_failsafe_enospc.py +++ b/qa/tasks/osd_failsafe_enospc.py @@ -54,7 +54,7 @@ def task(ctx, config): log.info('1. 
Verify warning messages when exceeding nearfull_ratio') first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() proc = mon.run( args=[ diff --git a/qa/tasks/osd_recovery.py b/qa/tasks/osd_recovery.py index 41e86d6555bf..a01fe8fea6e8 100644 --- a/qa/tasks/osd_recovery.py +++ b/qa/tasks/osd_recovery.py @@ -38,7 +38,7 @@ def task(ctx, config): 'task only accepts a dict for configuration' testdir = teuthology.get_testdir(ctx) first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') log.info('num_osds is %s' % num_osds) @@ -114,7 +114,7 @@ def test_incomplete_pgs(ctx, config): assert isinstance(config, dict), \ 'task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') log.info('num_osds is %s' % num_osds) diff --git a/qa/tasks/peer.py b/qa/tasks/peer.py index 9850da18e55d..7ec82f8f6a37 100644 --- a/qa/tasks/peer.py +++ b/qa/tasks/peer.py @@ -20,7 +20,7 @@ def task(ctx, config): assert isinstance(config, dict), \ 'peer task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() manager = ceph_manager.CephManager( mon, diff --git a/qa/tasks/populate_rbd_pool.py b/qa/tasks/populate_rbd_pool.py index db67d607cd64..76395eb688f1 100644 --- a/qa/tasks/populate_rbd_pool.py +++ b/qa/tasks/populate_rbd_pool.py @@ -34,7 +34,7 @@ def task(ctx, config): write_threads = config.get("write_threads", 10) write_total_per_snap = config.get("write_total_per_snap", 1024*1024*30) - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + (remote,) = ctx.cluster.only(client).remotes.keys() for poolid in range(num_pools): poolname = "%s-%s" % (pool_prefix, str(poolid)) diff --git a/qa/tasks/qemu.py b/qa/tasks/qemu.py index 548110edf0e0..050eef49daa0 100644 --- a/qa/tasks/qemu.py +++ b/qa/tasks/qemu.py @@ -203,7 +203,7 @@ def generate_iso(ctx, config): try: yield finally: - for client in config.iterkeys(): + for client in config.keys(): (remote,) = ctx.cluster.only(client).remotes.keys() remote.run( args=[ @@ -253,7 +253,7 @@ def download_image(ctx, config): yield finally: log.debug('cleaning up base image files') - for client in config.iterkeys(): + for client in config.keys(): base_file = '{tdir}/qemu/base.{client}.qcow2'.format( tdir=testdir, client=client, @@ -430,7 +430,7 @@ def run_qemu(ctx, config): time.sleep(time_wait) log.debug('checking that qemu tests succeeded...') - for client in config.iterkeys(): + for client in config.keys(): (remote,) = ctx.cluster.only(client).remotes.keys() # ensure we have permissions to all the logs diff --git a/qa/tasks/rados.py b/qa/tasks/rados.py index 64d21d11e90b..5432ddfa02f1 100644 --- a/qa/tasks/rados.py +++ b/qa/tasks/rados.py @@ -248,7 +248,7 @@ def task(ctx, config): manager.raw_cluster_cmd( 'osd', 'pool', 'set', pool, 'min_size', str(min_size)) - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + (remote,) = ctx.cluster.only(role).remotes.keys() proc = remote.run( args=["CEPH_CLIENT_ID={id_}".format(id_=id_)] + args + ["--pool", pool], diff 
--git a/qa/tasks/radosbench.py b/qa/tasks/radosbench.py index 01b36fdbaaeb..1feb9e9ffa20 100644 --- a/qa/tasks/radosbench.py +++ b/qa/tasks/radosbench.py @@ -56,7 +56,7 @@ def task(ctx, config): PREFIX = 'client.' assert role.startswith(PREFIX) id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + (remote,) = ctx.cluster.only(role).remotes.keys() if config.get('ec_pool', False): profile = config.get('erasure_code_profile', {}) diff --git a/qa/tasks/radosbenchsweep.py b/qa/tasks/radosbenchsweep.py index cda106ac66a9..f008dee60ab3 100644 --- a/qa/tasks/radosbenchsweep.py +++ b/qa/tasks/radosbenchsweep.py @@ -171,7 +171,7 @@ def run_radosbench(ctx, config, f, num_osds, size, replica, rep): PREFIX = 'client.' assert role.startswith(PREFIX) id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + (remote,) = ctx.cluster.only(role).remotes.keys() proc = remote.run( args=[ @@ -217,5 +217,5 @@ def run_radosbench(ctx, config, f, num_osds, size, replica, rep): def wait_until_healthy(ctx, config): first_mon = teuthology.get_first_mon(ctx, config) - (mon_remote,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys() teuthology.wait_until_healthy(ctx, mon_remote) diff --git a/qa/tasks/ragweed.py b/qa/tasks/ragweed.py index ad424f3b568d..1ed1de8af4c8 100644 --- a/qa/tasks/ragweed.py +++ b/qa/tasks/ragweed.py @@ -326,7 +326,7 @@ def task(ctx, config): overrides = ctx.config.get('overrides', {}) # merge each client section, not the top level. - for client in config.iterkeys(): + for client in config.keys(): if not config[client]: config[client] = {} teuthology.deep_merge(config[client], overrides.get('ragweed', {})) diff --git a/qa/tasks/rbd_fsx.py b/qa/tasks/rbd_fsx.py index d32475ecd03c..12e50d98b05b 100644 --- a/qa/tasks/rbd_fsx.py +++ b/qa/tasks/rbd_fsx.py @@ -47,7 +47,7 @@ def _run_one_client(ctx, config, role): krbd = config.get('krbd', False) nbd = config.get('nbd', False) testdir = teuthology.get_testdir(ctx) - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + (remote,) = ctx.cluster.only(role).remotes.keys() args = [] if krbd or nbd: diff --git a/qa/tasks/rebuild_mondb.py b/qa/tasks/rebuild_mondb.py index 020487874ad7..e68e5c83e9e9 100644 --- a/qa/tasks/rebuild_mondb.py +++ b/qa/tasks/rebuild_mondb.py @@ -200,7 +200,7 @@ def task(ctx, config): 'task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() # stash a monmap for later mon.run(args=['ceph', 'mon', 'getmap', '-o', '/tmp/monmap']) diff --git a/qa/tasks/reg11184.py b/qa/tasks/reg11184.py index f24862384495..7bb304608b0b 100644 --- a/qa/tasks/reg11184.py +++ b/qa/tasks/reg11184.py @@ -76,7 +76,7 @@ def task(ctx, config): log.info('writing initial objects') first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() # write 100 objects for i in range(100): rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile]) @@ -164,7 +164,7 @@ def task(ctx, config): # Export a pg (exp_remote,) = ctx.\ - cluster.only('osd.{o}'.format(o=divergent)).remotes.iterkeys() + cluster.only('osd.{o}'.format(o=divergent)).remotes.keys() FSPATH = manager.get_filepath() JPATH = os.path.join(FSPATH, "journal") prefix = ("sudo adjust-ulimits ceph-objectstore-tool " @@ -235,7 +235,7 @@ def task(ctx, 
config): assert exit_status is 0 (remote,) = ctx.\ - cluster.only('osd.{o}'.format(o=divergent)).remotes.iterkeys() + cluster.only('osd.{o}'.format(o=divergent)).remotes.keys() cmd = 'rm {file}'.format(file=expfile) remote.run(args=cmd, wait=True) log.info("success") diff --git a/qa/tasks/rep_lost_unfound_delete.py b/qa/tasks/rep_lost_unfound_delete.py index 6051c0720c1f..8ed55145b7ba 100644 --- a/qa/tasks/rep_lost_unfound_delete.py +++ b/qa/tasks/rep_lost_unfound_delete.py @@ -22,7 +22,7 @@ def task(ctx, config): assert isinstance(config, dict), \ 'lost_unfound task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() manager = ceph_manager.CephManager( mon, diff --git a/qa/tasks/repair_test.py b/qa/tasks/repair_test.py index 8ad4d02be2dd..973273bbeacf 100644 --- a/qa/tasks/repair_test.py +++ b/qa/tasks/repair_test.py @@ -124,7 +124,7 @@ def repair_test_2(ctx, manager, config, chooser): log.info("starting repair test type 2") victim_osd = chooser(manager, pool, 0) first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() # create object log.info("doing put and setomapval") diff --git a/qa/tasks/resolve_stuck_peering.py b/qa/tasks/resolve_stuck_peering.py index bdf86e9242e4..9122cb667614 100644 --- a/qa/tasks/resolve_stuck_peering.py +++ b/qa/tasks/resolve_stuck_peering.py @@ -51,7 +51,7 @@ def task(ctx, config): log.info('writing initial objects') first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() #create few objects for i in range(100): rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile]) diff --git a/qa/tasks/restart.py b/qa/tasks/restart.py index 697345a975b0..fc38af1d7ab3 100644 --- a/qa/tasks/restart.py +++ b/qa/tasks/restart.py @@ -98,7 +98,7 @@ def task(ctx, config): assert 'exec' in config, "config requires exec key with : entries" for role, task in config['exec'].iteritems(): log.info('restart for role {r}'.format(r=role)) - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + (remote,) = ctx.cluster.only(role).remotes.keys() srcdir, restarts = get_tests(ctx, config, role, remote, testdir) log.info('Running command on role %s host %s', role, remote.name) spec = '{spec}'.format(spec=task[0]) diff --git a/qa/tasks/rgw.py b/qa/tasks/rgw.py index 37bc4bc4dee0..b3a1c3164cc4 100644 --- a/qa/tasks/rgw.py +++ b/qa/tasks/rgw.py @@ -41,7 +41,7 @@ def start_rgw(ctx, config, clients): log.info('Starting rgw...') testdir = teuthology.get_testdir(ctx) for client in clients: - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + (remote,) = ctx.cluster.only(client).remotes.keys() cluster_name, daemon_type, client_id = teuthology.split_role(client) client_with_id = daemon_type + '.' + client_id client_with_cluster = cluster_name + '.' 
+ client_with_id @@ -196,7 +196,7 @@ def start_rgw(ctx, config, clients): endpoint = ctx.rgw.role_endpoints[client] url = endpoint.url() log.info('Polling {client} until it starts accepting connections on {url}'.format(client=client, url=url)) - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + (remote,) = ctx.cluster.only(client).remotes.keys() wait_for_radosgw(url, remote) try: @@ -257,7 +257,7 @@ def create_pools(ctx, clients): log.info('Creating data pools') for client in clients: log.debug("Obtaining remote for client {}".format(client)) - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + (remote,) = ctx.cluster.only(client).remotes.keys() data_pool = 'default.rgw.buckets.data' cluster_name, daemon_type, client_id = teuthology.split_role(client) diff --git a/qa/tasks/s3readwrite.py b/qa/tasks/s3readwrite.py index 6f98e59b0733..d03882b42821 100644 --- a/qa/tasks/s3readwrite.py +++ b/qa/tasks/s3readwrite.py @@ -168,7 +168,7 @@ def configure(ctx, config): s3tests_conf = config['s3tests_conf'][client] if properties is not None and 'rgw_server' in properties: host = None - for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']): + for target, roles in zip(ctx.config['targets'].keys(), ctx.config['roles']): log.info('roles: ' + str(roles)) log.info('target: ' + str(target)) if properties['rgw_server'] in roles: @@ -307,7 +307,7 @@ def task(ctx, config): overrides = ctx.config.get('overrides', {}) # merge each client section, not the top level. - for client in config.iterkeys(): + for client in config.keys(): if not config[client]: config[client] = {} teuthology.deep_merge(config[client], overrides.get('s3readwrite', {})) diff --git a/qa/tasks/s3roundtrip.py b/qa/tasks/s3roundtrip.py index f2632c9b1e50..d21981335aa0 100644 --- a/qa/tasks/s3roundtrip.py +++ b/qa/tasks/s3roundtrip.py @@ -137,7 +137,7 @@ def configure(ctx, config): s3tests_conf = config['s3tests_conf'][client] if properties is not None and 'rgw_server' in properties: host = None - for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']): + for target, roles in zip(ctx.config['targets'].keys(), ctx.config['roles']): log.info('roles: ' + str(roles)) log.info('target: ' + str(target)) if properties['rgw_server'] in roles: diff --git a/qa/tasks/s3tests.py b/qa/tasks/s3tests.py index 5a5b0e623303..61ddbafa781c 100644 --- a/qa/tasks/s3tests.py +++ b/qa/tasks/s3tests.py @@ -416,7 +416,7 @@ def task(ctx, config): overrides = ctx.config.get('overrides', {}) # merge each client section, not the top level. - for client in config.iterkeys(): + for client in config.keys(): if not config[client]: config[client] = {} teuthology.deep_merge(config[client], overrides.get('s3tests', {})) diff --git a/qa/tasks/samba.py b/qa/tasks/samba.py index 8272e8b9539c..319c6d5e5bf2 100644 --- a/qa/tasks/samba.py +++ b/qa/tasks/samba.py @@ -26,7 +26,7 @@ def get_sambas(ctx, roles): PREFIX = 'samba.' 
assert role.startswith(PREFIX) id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + (remote,) = ctx.cluster.only(role).remotes.keys() yield (id_, remote) diff --git a/qa/tasks/scrub.py b/qa/tasks/scrub.py index a6194c2c7ce5..14d5103e19f2 100644 --- a/qa/tasks/scrub.py +++ b/qa/tasks/scrub.py @@ -39,7 +39,7 @@ def task(ctx, config): log.info('Beginning scrub...') first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() manager = ceph_manager.CephManager( mon, diff --git a/qa/tasks/scrub_test.py b/qa/tasks/scrub_test.py index 377158b7e21e..e6532578c708 100644 --- a/qa/tasks/scrub_test.py +++ b/qa/tasks/scrub_test.py @@ -31,7 +31,7 @@ def wait_for_victim_pg(manager): def find_victim_object(ctx, pg, osd): """Return a file to be fuzzed""" - (osd_remote,) = ctx.cluster.only('osd.%d' % osd).remotes.iterkeys() + (osd_remote,) = ctx.cluster.only('osd.%d' % osd).remotes.keys() data_path = os.path.join( '/var/lib/ceph/osd', 'ceph-{id}'.format(id=osd), @@ -359,7 +359,7 @@ def task(ctx, config): assert isinstance(config, dict), \ 'scrub_test task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') log.info('num_osds is %s' % num_osds) diff --git a/qa/tasks/swift.py b/qa/tasks/swift.py index a84fcf46849d..dac11dd619ca 100644 --- a/qa/tasks/swift.py +++ b/qa/tasks/swift.py @@ -96,7 +96,7 @@ def create_users(ctx, config): try: yield finally: - for client in config.iterkeys(): + for client in config.keys(): for user in users.itervalues(): uid = '{user}.{client}'.format(user=user, client=client) cluster_name, daemon_type, client_id = teuthology.split_role(client) diff --git a/qa/tasks/systemd.py b/qa/tasks/systemd.py index d5e7b255dfd7..b661c11df1cb 100644 --- a/qa/tasks/systemd.py +++ b/qa/tasks/systemd.py @@ -137,6 +137,6 @@ def task(ctx, config): 'grep', 'ceph']) # wait for HEALTH_OK mon = get_first_mon(ctx, config) - (mon_remote,) = ctx.cluster.only(mon).remotes.iterkeys() + (mon_remote,) = ctx.cluster.only(mon).remotes.keys() wait_until_healthy(ctx, mon_remote, use_sudo=True) yield diff --git a/qa/tasks/tempest.py b/qa/tasks/tempest.py index 474b5b981e37..4dfaa44d4451 100644 --- a/qa/tasks/tempest.py +++ b/qa/tasks/tempest.py @@ -251,7 +251,7 @@ def task(ctx, config): overrides = ctx.config.get('overrides', {}) # merge each client section, not the top level. 
- for client in config.iterkeys(): + for client in config.keys(): if not config[client]: config[client] = {} teuthology.deep_merge(config[client], overrides.get('keystone', {})) diff --git a/qa/tasks/util/rgw.py b/qa/tasks/util/rgw.py index ee79208cc5bd..d3abf1ced3b9 100644 --- a/qa/tasks/util/rgw.py +++ b/qa/tasks/util/rgw.py @@ -31,7 +31,7 @@ def rgwadmin(ctx, client, cmd, stdin=StringIO(), check_status=False, ] pre.extend(cmd) log.log(log_level, 'rgwadmin: cmd=%s' % pre) - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + (remote,) = ctx.cluster.only(client).remotes.keys() proc = remote.run( args=pre, check_status=check_status, diff --git a/qa/tasks/util/workunit.py b/qa/tasks/util/workunit.py index c5314330ffbf..91b0e0f8e19f 100644 --- a/qa/tasks/util/workunit.py +++ b/qa/tasks/util/workunit.py @@ -63,8 +63,8 @@ def get_refspec_after_overrides(config, overrides): overrides = copy.deepcopy(overrides.get('workunit', {})) refspecs = {'suite_sha1': Refspec, 'suite_branch': Branch, 'sha1': Refspec, 'tag': Refspec, 'branch': Branch} - if any(map(lambda i: i in config, refspecs.iterkeys())): - for i in refspecs.iterkeys(): + if any(map(lambda i: i in config, refspecs.keys())): + for i in refspecs.keys(): overrides.pop(i, None) misc.deep_merge(config, overrides) diff --git a/qa/tasks/vault.py b/qa/tasks/vault.py index 47e7a785a70a..8da2254c848c 100644 --- a/qa/tasks/vault.py +++ b/qa/tasks/vault.py @@ -89,7 +89,7 @@ def run_vault(ctx, config): assert isinstance(config, dict) for (client, cconf) in config.items(): - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + (remote,) = ctx.cluster.only(client).remotes.keys() cluster_name, _, client_id = teuthology.split_role(client) _, port = ctx.vault.endpoints[client] @@ -210,7 +210,7 @@ def task(ctx, config): overrides = ctx.config.get('overrides', {}) # merge each client section, not the top level. - for client in config.iterkeys(): + for client in config.keys(): if not config[client]: config[client] = {} teuthology.deep_merge(config[client], overrides.get('vault', {})) diff --git a/qa/tasks/watch_notify_same_primary.py b/qa/tasks/watch_notify_same_primary.py index 8f6d33b89af3..b462e5e4d76e 100644 --- a/qa/tasks/watch_notify_same_primary.py +++ b/qa/tasks/watch_notify_same_primary.py @@ -44,7 +44,7 @@ def task(ctx, config): assert isinstance(role, basestring) PREFIX = 'client.' assert role.startswith(PREFIX) - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + (remote,) = ctx.cluster.only(role).remotes.keys() manager = ctx.managers['ceph'] manager.raw_cluster_cmd('osd', 'set', 'noout') diff --git a/qa/tasks/watch_notify_stress.py b/qa/tasks/watch_notify_stress.py index 6db313fea6de..e54d4b6e0724 100644 --- a/qa/tasks/watch_notify_stress.py +++ b/qa/tasks/watch_notify_stress.py @@ -40,7 +40,7 @@ def task(ctx, config): PREFIX = 'client.' 
assert role.startswith(PREFIX) id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + (remote,) = ctx.cluster.only(role).remotes.keys() remotes.append(remote) args =['CEPH_CLIENT_ID={id_}'.format(id_=id_), diff --git a/qa/tasks/workunit.py b/qa/tasks/workunit.py index 5ffc04758d33..cf0e33ab8216 100644 --- a/qa/tasks/workunit.py +++ b/qa/tasks/workunit.py @@ -102,7 +102,7 @@ def task(ctx, config): # Create scratch dirs for any non-all workunits log.info('Making a separate scratch dir for every client...') - for role in clients.iterkeys(): + for role in clients.keys(): assert isinstance(role, basestring) if role == "all": continue diff --git a/qa/workunits/mon/caps.py b/qa/workunits/mon/caps.py index 7bc8c923920f..cca170ac4edc 100644 --- a/qa/workunits/mon/caps.py +++ b/qa/workunits/mon/caps.py @@ -307,7 +307,7 @@ def test_all(): print 'testing {m}/{c}'.format(m=module,c=cmd_cmd) # test - for good_bad in perms.iterkeys(): + for good_bad in perms.keys(): for (kind,lst) in perms[good_bad].iteritems(): for (perm,_) in lst: cname = 'client.{gb}-{k}-{p}'.format(gb=good_bad,k=kind,p=perm) -- 2.47.3
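
Note (not part of the patch itself): dict.iterkeys() was removed in Python 3, while dict.keys() exists under both interpreters — it returns a list on Python 2 and a view object on Python 3. The sketch below is a minimal illustration of why the substitution is behavior-preserving for the two idioms this patch relies on: single-element tuple unpacking and plain iteration. The `remotes` dict and hostname are made up for the example; in the qa tasks the real mapping is ctx.cluster.only(role).remotes (Remote object -> list of roles).

    # Stand-in for ctx.cluster.only(role).remotes (hypothetical hostname).
    remotes = {'smithi001.front.sepia.ceph.com': ['client.0']}

    # Python 2: remotes.iterkeys() -> iterator; gone in Python 3 (AttributeError).
    # Python 2: remotes.keys() -> list; Python 3: dict view. Both are iterable,
    # so the single-element unpacking used throughout qa/tasks is unchanged:
    (remote,) = remotes.keys()
    assert remote == 'smithi001.front.sepia.ceph.com'

    # Iteration over keys() is likewise equivalent on both interpreters:
    for remote in remotes.keys():
        print(remote)

The iteritems()/itervalues() calls still visible in the surrounding context lines (e.g. cram.py, swift.py, mon/caps.py) are deliberately untouched here; this change covers only iterkeys().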