From: Kyr Shatskyy
Date: Fri, 11 Oct 2019 15:57:47 +0000 (+0200)
Subject: qa: get rid of iterkeys for py3 compatibility
X-Git-Tag: v13.2.9~13^2
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=99bd80bad8c1a098919acf6464bf2acc9998d548;p=ceph.git

qa: get rid of iterkeys for py3 compatibility

Fixes: https://tracker.ceph.com/issues/42287
Signed-off-by: Kyr Shatskyy
(cherry picked from commit 5f95b532aa26249cecea4586166399a0bf10ad47)

Ensured all iterkeys -> keys under qa/tasks
Removed ceph_crash / install_packages methods by PR comments
---
diff --git a/qa/tasks/admin_socket.py b/qa/tasks/admin_socket.py index 3301372756be2..ed134e7d2f6d1 100644 --- a/qa/tasks/admin_socket.py +++ b/qa/tasks/admin_socket.py @@ -124,7 +124,7 @@ def _run_tests(ctx, client, tests): """ testdir = teuthology.get_testdir(ctx) log.debug('Running admin socket tests on %s', client) - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + (remote,) = ctx.cluster.only(client).remotes.keys() socket_path = '/var/run/ceph/ceph-{name}.asok'.format(name=client) overrides = ctx.config.get('overrides', {}).get('admin_socket', {})
diff --git a/qa/tasks/autotest.py b/qa/tasks/autotest.py index efa972123d250..1735f677380db 100644 --- a/qa/tasks/autotest.py +++ b/qa/tasks/autotest.py @@ -42,17 +42,17 @@ def task(ctx, config): log.info('Setting up autotest...') testdir = teuthology.get_testdir(ctx) with parallel() as p: - for role in config.iterkeys(): + for role in config.keys(): (remote,) = ctx.cluster.only(role).remotes.keys() p.spawn(_download, testdir, remote) log.info('Making a separate scratch dir for every client...') - for role in config.iterkeys(): + for role in config.keys(): assert isinstance(role, basestring) PREFIX = 'client.' assert role.startswith(PREFIX) id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + (remote,) = ctx.cluster.only(role).remotes.keys() mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) scratch = os.path.join(mnt, 'client.{id}'.format(id=id_)) remote.run(
diff --git a/qa/tasks/ceph.py b/qa/tasks/ceph.py index aaba777121045..57ed91f559c52 100644 --- a/qa/tasks/ceph.py +++ b/qa/tasks/ceph.py @@ -166,7 +166,7 @@ def ceph_log(ctx, config): conf += f.read().format(daemon_type=daemon, max_size=size) f.seek(0, 0) - for remote in ctx.cluster.remotes.iterkeys(): + for remote in ctx.cluster.remotes.keys(): teuthology.write_file(remote=remote, path='{tdir}/logrotate.ceph-test.conf'.format(tdir=testdir), data=StringIO(conf) @@ -237,8 +237,11 @@ def ceph_log(ctx, config): log.info('Archiving logs...') path = os.path.join(ctx.archive, 'remote') - os.makedirs(path) - for remote in ctx.cluster.remotes.iterkeys(): + try: + os.makedirs(path) + except OSError as e: + pass + for remote in ctx.cluster.remotes.keys(): sub = os.path.join(path, remote.shortname) os.makedirs(sub) teuthology.pull_directory(remote, '/var/log/ceph', @@ -271,7 +274,7 @@ def valgrind_post(ctx, config): finally: lookup_procs = list() log.info('Checking for errors in any valgrind logs...') - for remote in ctx.cluster.remotes.iterkeys(): + for remote in ctx.cluster.remotes.keys(): # look at valgrind logs for each node proc = remote.run( args=[ @@ -321,7 +324,7 @@ def valgrind_post(ctx, config): def crush_setup(ctx, config): cluster_name = config['cluster'] first_mon = teuthology.get_first_mon(ctx, config, cluster_name) - (mon_remote,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys() profile = config.get('crush_tunables', 'default')
log.info('Setting crush tunables to %s', profile) @@ -335,7 +338,7 @@ def crush_setup(ctx, config): def create_rbd_pool(ctx, config): cluster_name = config['cluster'] first_mon = teuthology.get_first_mon(ctx, config, cluster_name) - (mon_remote,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys() log.info('Waiting for OSDs to come up') teuthology.wait_until_osds_up( ctx, @@ -364,7 +367,7 @@ def cephfs_setup(ctx, config): coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir) first_mon = teuthology.get_first_mon(ctx, config, cluster_name) - (mon_remote,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys() mdss = ctx.cluster.only(teuthology.is_type('mds', cluster_name)) # If there are any MDSs, then create a filesystem for them to use # Do this last because requires mon cluster to be up and running @@ -699,7 +702,7 @@ def cluster(ctx, config): path=monmap_path, ) - for rem in ctx.cluster.remotes.iterkeys(): + for rem in ctx.cluster.remotes.keys(): # copy mon key and initial monmap log.info('Sending monmap to node {remote}'.format(remote=rem)) teuthology.sudo_write_file( @@ -1840,7 +1843,7 @@ def task(ctx, config): with contextutil.nested(*subtasks): first_mon = teuthology.get_first_mon(ctx, config, config['cluster']) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() if not hasattr(ctx, 'managers'): ctx.managers = {} ctx.managers[config['cluster']] = CephManager( diff --git a/qa/tasks/ceph_deploy.py b/qa/tasks/ceph_deploy.py index 56868f16fa0a9..3d29a2ebe64b1 100644 --- a/qa/tasks/ceph_deploy.py +++ b/qa/tasks/ceph_deploy.py @@ -29,7 +29,7 @@ def download_ceph_deploy(ctx, config): obtained from `python_version`, if specified. """ # use mon.a for ceph_admin - (ceph_admin,) = ctx.cluster.only('mon.a').remotes.iterkeys() + (ceph_admin,) = ctx.cluster.only('mon.a').remotes.keys() try: py_ver = str(config['python_version']) @@ -222,7 +222,7 @@ def build_ceph_cluster(ctx, config): # puts it. Remember this here, because subsequently IDs will change from those in # the test config to those that ceph-deploy invents. 
- (ceph_admin,) = ctx.cluster.only('mon.a').remotes.iterkeys() + (ceph_admin,) = ctx.cluster.only('mon.a').remotes.keys() def execute_ceph_deploy(cmd): """Remotely execute a ceph_deploy command""" @@ -266,7 +266,7 @@ def build_ceph_cluster(ctx, config): def ceph_volume_osd_create(ctx, config): osds = ctx.cluster.only(teuthology.is_type('osd')) no_of_osds = 0 - for remote in osds.remotes.iterkeys(): + for remote in osds.remotes.keys(): # all devs should be lvm osd_create_cmd = './ceph-deploy osd create --debug ' + remote.shortname + ' ' # default is bluestore so we just need config item for filestore @@ -384,9 +384,8 @@ def build_ceph_cluster(ctx, config): # create-keys is explicit now # http://tracker.ceph.com/issues/16036 mons = ctx.cluster.only(teuthology.is_type('mon')) - for remote in mons.remotes.iterkeys(): - remote.run(args=['sudo', 'ceph-create-keys', '--cluster', 'ceph', - '--id', remote.shortname]) + for remote in mons.remotes.keys(): + execute_ceph_deploy('./ceph-deploy admin ' + remote.shortname) estatus_gather = execute_ceph_deploy(gather_keys) if estatus_gather != 0: @@ -572,7 +571,7 @@ def build_ceph_cluster(ctx, config): log.info('Archiving logs...') path = os.path.join(ctx.archive, 'remote') os.makedirs(path) - for remote in ctx.cluster.remotes.iterkeys(): + for remote in ctx.cluster.remotes.keys(): sub = os.path.join(path, remote.shortname) os.makedirs(sub) teuthology.pull_directory(remote, '/var/log/ceph', @@ -797,7 +796,7 @@ def upgrade(ctx, config): ceph_branch = '--dev={branch}'.format(branch=dev_branch) # get the node used for initial deployment which is mon.a mon_a = mapped_role.get('mon.a') - (ceph_admin,) = ctx.cluster.only(mon_a).remotes.iterkeys() + (ceph_admin,) = ctx.cluster.only(mon_a).remotes.keys() testdir = teuthology.get_testdir(ctx) cmd = './ceph-deploy install ' + ceph_branch for role in roles: diff --git a/qa/tasks/ceph_objectstore_tool.py b/qa/tasks/ceph_objectstore_tool.py index 912577317b402..f2ff86ef0020e 100644 --- a/qa/tasks/ceph_objectstore_tool.py +++ b/qa/tasks/ceph_objectstore_tool.py @@ -279,7 +279,7 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False): prefix = ("sudo ceph-objectstore-tool " "--data-path {fpath} " "--journal-path {jpath} ").format(fpath=FSPATH, jpath=JPATH) - for remote in osds.remotes.iterkeys(): + for remote in osds.remotes.keys(): log.debug(remote) log.debug(osds.remotes[remote]) for role in osds.remotes[remote]: @@ -319,7 +319,7 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False): GETNAME = os.path.join(DATADIR, "get") SETNAME = os.path.join(DATADIR, "set") - for remote in osds.remotes.iterkeys(): + for remote in osds.remotes.keys(): for role in osds.remotes[remote]: if string.find(role, "osd.") != 0: continue @@ -411,7 +411,7 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False): GETNAME = os.path.join(DATADIR, "get") SETNAME = os.path.join(DATADIR, "set") - for remote in osds.remotes.iterkeys(): + for remote in osds.remotes.keys(): for role in osds.remotes[remote]: if string.find(role, "osd.") != 0: continue @@ -498,7 +498,7 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False): log.error(values) log.info("Test pg info") - for remote in osds.remotes.iterkeys(): + for remote in osds.remotes.keys(): for role in osds.remotes[remote]: if string.find(role, "osd.") != 0: continue @@ -523,7 +523,7 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False): ERRORS += 1 log.info("Test pg logging") - for 
remote in osds.remotes.iterkeys(): + for remote in osds.remotes.keys(): for role in osds.remotes[remote]: if string.find(role, "osd.") != 0: continue @@ -555,7 +555,7 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False): log.info("Test pg export") EXP_ERRORS = 0 - for remote in osds.remotes.iterkeys(): + for remote in osds.remotes.keys(): for role in osds.remotes[remote]: if string.find(role, "osd.") != 0: continue @@ -582,7 +582,7 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False): log.info("Test pg removal") RM_ERRORS = 0 - for remote in osds.remotes.iterkeys(): + for remote in osds.remotes.keys(): for role in osds.remotes[remote]: if string.find(role, "osd.") != 0: continue @@ -608,7 +608,7 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False): if EXP_ERRORS == 0 and RM_ERRORS == 0: log.info("Test pg import") - for remote in osds.remotes.iterkeys(): + for remote in osds.remotes.keys(): for role in osds.remotes[remote]: if string.find(role, "osd.") != 0: continue diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py index 8764753c21c41..c05e3a8807d7d 100644 --- a/qa/tasks/cephfs/filesystem.py +++ b/qa/tasks/cephfs/filesystem.py @@ -144,7 +144,7 @@ class CephCluster(object): @property def admin_remote(self): first_mon = misc.get_first_mon(self._ctx, None) - (result,) = self._ctx.cluster.only(first_mon).remotes.iterkeys() + (result,) = self._ctx.cluster.only(first_mon).remotes.keys() return result def __init__(self, ctx): diff --git a/qa/tasks/cram.py b/qa/tasks/cram.py index 40f8253eec245..8f2f28cc64dff 100644 --- a/qa/tasks/cram.py +++ b/qa/tasks/cram.py @@ -61,7 +61,7 @@ def task(ctx, config): try: for client, tests in clients.iteritems(): - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + (remote,) = ctx.cluster.only(client).remotes.keys() client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client) remote.run( args=[ @@ -85,11 +85,11 @@ def task(ctx, config): ) with parallel() as p: - for role in clients.iterkeys(): + for role in clients.keys(): p.spawn(_run_tests, ctx, role) finally: for client, tests in clients.iteritems(): - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + (remote,) = ctx.cluster.only(client).remotes.keys() client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client) test_files = set([test.rsplit('/', 1)[1] for test in tests]) @@ -128,7 +128,7 @@ def _run_tests(ctx, role): PREFIX = 'client.' 
assert role.startswith(PREFIX) id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + (remote,) = ctx.cluster.only(role).remotes.keys() ceph_ref = ctx.summary.get('ceph-sha1', 'master') testdir = teuthology.get_testdir(ctx) diff --git a/qa/tasks/die_on_err.py b/qa/tasks/die_on_err.py index bf422ae547d7a..ee157f4afe464 100644 --- a/qa/tasks/die_on_err.py +++ b/qa/tasks/die_on_err.py @@ -20,7 +20,7 @@ def task(ctx, config): config = {} first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') log.info('num_osds is %s' % num_osds) @@ -38,7 +38,7 @@ def task(ctx, config): while True: for i in range(num_osds): - (osd_remote,) = ctx.cluster.only('osd.%d' % i).remotes.iterkeys() + (osd_remote,) = ctx.cluster.only('osd.%d' % i).remotes.keys() p = osd_remote.run( args = [ 'test', '-e', '{tdir}/err'.format(tdir=testdir) ], wait=True, diff --git a/qa/tasks/divergent_priors.py b/qa/tasks/divergent_priors.py index 12ea93365bf39..7a4d1327020a8 100644 --- a/qa/tasks/divergent_priors.py +++ b/qa/tasks/divergent_priors.py @@ -61,7 +61,7 @@ def task(ctx, config): log.info('writing initial objects') first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() # write 100 objects for i in range(100): rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile]) diff --git a/qa/tasks/divergent_priors2.py b/qa/tasks/divergent_priors2.py index 0ed753278b411..fa2fae9e7dc32 100644 --- a/qa/tasks/divergent_priors2.py +++ b/qa/tasks/divergent_priors2.py @@ -64,7 +64,7 @@ def task(ctx, config): log.info('writing initial objects') first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() # write 100 objects for i in range(100): rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile]) @@ -146,7 +146,7 @@ def task(ctx, config): # Export a pg (exp_remote,) = ctx.\ - cluster.only('osd.{o}'.format(o=divergent)).remotes.iterkeys() + cluster.only('osd.{o}'.format(o=divergent)).remotes.keys() FSPATH = manager.get_filepath() JPATH = os.path.join(FSPATH, "journal") prefix = ("sudo adjust-ulimits ceph-objectstore-tool " diff --git a/qa/tasks/dump_stuck.py b/qa/tasks/dump_stuck.py index 39429d2c34819..ad4213aaed3b4 100644 --- a/qa/tasks/dump_stuck.py +++ b/qa/tasks/dump_stuck.py @@ -48,7 +48,7 @@ def task(ctx, config): timeout = 60 first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() manager = ceph_manager.CephManager( mon, diff --git a/qa/tasks/ec_lost_unfound.py b/qa/tasks/ec_lost_unfound.py index cc0bdb25850d2..625812ee9270a 100644 --- a/qa/tasks/ec_lost_unfound.py +++ b/qa/tasks/ec_lost_unfound.py @@ -21,7 +21,7 @@ def task(ctx, config): assert isinstance(config, dict), \ 'lost_unfound task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() manager = ceph_manager.CephManager( mon, diff --git a/qa/tasks/exec_on_cleanup.py b/qa/tasks/exec_on_cleanup.py index e3c09d5ec158d..6e40e4044b036 100644 --- a/qa/tasks/exec_on_cleanup.py +++ b/qa/tasks/exec_on_cleanup.py @@ 
-47,7 +47,7 @@ def task(ctx, config): config = dict((id_, a) for id_ in roles) for role, ls in config.iteritems(): - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + (remote,) = ctx.cluster.only(role).remotes.keys() log.info('Running commands on role %s host %s', role, remote.name) for c in ls: c.replace('$TESTDIR', testdir) diff --git a/qa/tasks/filestore_idempotent.py b/qa/tasks/filestore_idempotent.py index e091148da43c8..32d9dae584d06 100644 --- a/qa/tasks/filestore_idempotent.py +++ b/qa/tasks/filestore_idempotent.py @@ -32,7 +32,7 @@ def task(ctx, config): # just use the first client... client = clients[0]; - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + (remote,) = ctx.cluster.only(client).remotes.keys() testdir = teuthology.get_testdir(ctx) diff --git a/qa/tasks/keystone.py b/qa/tasks/keystone.py index 755df5da13968..ae24ee636dceb 100644 --- a/qa/tasks/keystone.py +++ b/qa/tasks/keystone.py @@ -32,7 +32,7 @@ def install_packages(ctx, config): 'rpm': [ 'libffi-devel', 'openssl-devel' ], } for (client, _) in config.items(): - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + (remote,) = ctx.cluster.only(client).remotes.keys() for dep in deps[remote.os.package_type]: install_package(dep, remote) try: @@ -41,7 +41,7 @@ def install_packages(ctx, config): log.info('Removing packaged dependencies of Keystone...') for (client, _) in config.items(): - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + (remote,) = ctx.cluster.only(client).remotes.keys() for dep in deps[remote.os.package_type]: remove_package(dep, remote) @@ -176,7 +176,7 @@ def run_keystone(ctx, config): log.info('Configuring keystone...') for (client, _) in config.items(): - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + (remote,) = ctx.cluster.only(client).remotes.keys() cluster_name, _, client_id = teuthology.split_role(client) # start the public endpoint diff --git a/qa/tasks/lost_unfound.py b/qa/tasks/lost_unfound.py index 64872743b25f7..575c4c5844e63 100644 --- a/qa/tasks/lost_unfound.py +++ b/qa/tasks/lost_unfound.py @@ -22,7 +22,7 @@ def task(ctx, config): assert isinstance(config, dict), \ 'lost_unfound task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() manager = ceph_manager.CephManager( mon, diff --git a/qa/tasks/manypools.py b/qa/tasks/manypools.py index 1ddcba5c8a90c..8bbf8418dc8ad 100644 --- a/qa/tasks/manypools.py +++ b/qa/tasks/manypools.py @@ -39,7 +39,7 @@ def task(ctx, config): log.info('got client_roles={client_roles_}'.format(client_roles_=client_roles)) for role in client_roles: log.info('role={role_}'.format(role_=role)) - (creator_remote, ) = ctx.cluster.only('client.{id}'.format(id=role)).remotes.iterkeys() + (creator_remote, ) = ctx.cluster.only('client.{id}'.format(id=role)).remotes.keys() creator_remotes.append((creator_remote, 'client.{id}'.format(id=role))) remaining_pools = poolnum diff --git a/qa/tasks/mds_creation_failure.py b/qa/tasks/mds_creation_failure.py index aa2d6dbf2c062..2647eba761c87 100644 --- a/qa/tasks/mds_creation_failure.py +++ b/qa/tasks/mds_creation_failure.py @@ -23,7 +23,7 @@ def task(ctx, config): raise RuntimeError("This task requires exactly one MDS") mds_id = mdslist[0] - (mds_remote,) = ctx.cluster.only('mds.{_id}'.format(_id=mds_id)).remotes.iterkeys() + (mds_remote,) = ctx.cluster.only('mds.{_id}'.format(_id=mds_id)).remotes.keys() manager = ceph_manager.CephManager( 
mds_remote, ctx=ctx, logger=log.getChild('ceph_manager'), ) diff --git a/qa/tasks/mds_thrash.py b/qa/tasks/mds_thrash.py index af5fe784261b2..da030fc8ffe68 100644 --- a/qa/tasks/mds_thrash.py +++ b/qa/tasks/mds_thrash.py @@ -255,7 +255,7 @@ class MDSThrasher(Greenlet): def kill_mds(self, mds): if self.config.get('powercycle'): (remote,) = (self.ctx.cluster.only('mds.{m}'.format(m=mds)). - remotes.iterkeys()) + remotes.keys()) self.log('kill_mds on mds.{m} doing powercycle of {s}'. format(m=mds, s=remote.name)) self._assert_ipmi(remote) @@ -276,7 +276,7 @@ class MDSThrasher(Greenlet): """ if self.config.get('powercycle'): (remote,) = (self.ctx.cluster.only('mds.{m}'.format(m=mds)). - remotes.iterkeys()) + remotes.keys()) self.log('revive_mds on mds.{m} doing powercycle of {s}'. format(m=mds, s=remote.name)) self._assert_ipmi(remote) @@ -494,7 +494,7 @@ def task(ctx, config): log.info('mds thrasher using random seed: {seed}'.format(seed=seed)) random.seed(seed) - (first,) = ctx.cluster.only('mds.{_id}'.format(_id=mdslist[0])).remotes.iterkeys() + (first,) = ctx.cluster.only('mds.{_id}'.format(_id=mdslist[0])).remotes.keys() manager = ceph_manager.CephManager( first, ctx=ctx, logger=log.getChild('ceph_manager'), ) diff --git a/qa/tasks/mon_clock_skew_check.py b/qa/tasks/mon_clock_skew_check.py index 547339f79a11b..5c4088c7369cd 100644 --- a/qa/tasks/mon_clock_skew_check.py +++ b/qa/tasks/mon_clock_skew_check.py @@ -50,7 +50,7 @@ def task(ctx, config): log.info('Beginning mon_clock_skew_check...') first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() manager = ceph_manager.CephManager( mon, ctx=ctx, diff --git a/qa/tasks/mon_recovery.py b/qa/tasks/mon_recovery.py index bfa2cdf78f15d..e09e9877b571f 100644 --- a/qa/tasks/mon_recovery.py +++ b/qa/tasks/mon_recovery.py @@ -17,7 +17,7 @@ def task(ctx, config): assert isinstance(config, dict), \ 'task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() manager = ceph_manager.CephManager( mon, diff --git a/qa/tasks/mon_seesaw.py b/qa/tasks/mon_seesaw.py index 1a70c1d47c582..f0fc7deaea819 100644 --- a/qa/tasks/mon_seesaw.py +++ b/qa/tasks/mon_seesaw.py @@ -145,7 +145,7 @@ def task(ctx, config): replacer the id of the new mon (use "${victim}_prime" if not specified) """ first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() manager = CephManager(mon, ctx=ctx, logger=log.getChild('ceph_manager')) if config is None: diff --git a/qa/tasks/mon_thrash.py b/qa/tasks/mon_thrash.py index 0754bcdd4e97f..1bf981b71426b 100644 --- a/qa/tasks/mon_thrash.py +++ b/qa/tasks/mon_thrash.py @@ -324,7 +324,7 @@ def task(ctx, config): 'mon_thrash task requires at least 3 monitors' log.info('Beginning mon_thrash...') first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() manager = ceph_manager.CephManager( mon, ctx=ctx, diff --git a/qa/tasks/object_source_down.py b/qa/tasks/object_source_down.py index 9705d7c7375d6..82ce432631745 100644 --- a/qa/tasks/object_source_down.py +++ b/qa/tasks/object_source_down.py @@ -18,7 +18,7 @@ def task(ctx, config): assert isinstance(config, dict), \ 'lost_unfound task only 
accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() manager = ceph_manager.CephManager( mon, diff --git a/qa/tasks/omapbench.py b/qa/tasks/omapbench.py index e026c74dbc0a2..a6372a3bda970 100644 --- a/qa/tasks/omapbench.py +++ b/qa/tasks/omapbench.py @@ -52,7 +52,7 @@ def task(ctx, config): PREFIX = 'client.' assert role.startswith(PREFIX) id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + (remote,) = ctx.cluster.only(role).remotes.keys() proc = remote.run( args=[ "/bin/sh", "-c", diff --git a/qa/tasks/osd_backfill.py b/qa/tasks/osd_backfill.py index 04658d20569fc..5ad5b79983144 100644 --- a/qa/tasks/osd_backfill.py +++ b/qa/tasks/osd_backfill.py @@ -38,7 +38,7 @@ def task(ctx, config): assert isinstance(config, dict), \ 'thrashosds task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') log.info('num_osds is %s' % num_osds) diff --git a/qa/tasks/osd_failsafe_enospc.py b/qa/tasks/osd_failsafe_enospc.py index 691085427393f..8d89919035dce 100644 --- a/qa/tasks/osd_failsafe_enospc.py +++ b/qa/tasks/osd_failsafe_enospc.py @@ -54,7 +54,7 @@ def task(ctx, config): log.info('1. Verify warning messages when exceeding nearfull_ratio') first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() proc = mon.run( args=[ diff --git a/qa/tasks/osd_recovery.py b/qa/tasks/osd_recovery.py index 41e86d6555bf5..a01fe8fea6e89 100644 --- a/qa/tasks/osd_recovery.py +++ b/qa/tasks/osd_recovery.py @@ -38,7 +38,7 @@ def task(ctx, config): 'task only accepts a dict for configuration' testdir = teuthology.get_testdir(ctx) first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') log.info('num_osds is %s' % num_osds) @@ -114,7 +114,7 @@ def test_incomplete_pgs(ctx, config): assert isinstance(config, dict), \ 'task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') log.info('num_osds is %s' % num_osds) diff --git a/qa/tasks/peer.py b/qa/tasks/peer.py index 9850da18e55d2..7ec82f8f6a373 100644 --- a/qa/tasks/peer.py +++ b/qa/tasks/peer.py @@ -20,7 +20,7 @@ def task(ctx, config): assert isinstance(config, dict), \ 'peer task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() manager = ceph_manager.CephManager( mon, diff --git a/qa/tasks/populate_rbd_pool.py b/qa/tasks/populate_rbd_pool.py index db67d607cd64c..76395eb688f16 100644 --- a/qa/tasks/populate_rbd_pool.py +++ b/qa/tasks/populate_rbd_pool.py @@ -34,7 +34,7 @@ def task(ctx, config): write_threads = config.get("write_threads", 10) write_total_per_snap = config.get("write_total_per_snap", 1024*1024*30) - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + 
(remote,) = ctx.cluster.only(client).remotes.keys() for poolid in range(num_pools): poolname = "%s-%s" % (pool_prefix, str(poolid)) diff --git a/qa/tasks/qemu.py b/qa/tasks/qemu.py index b2bca00d8d1b2..b507b76229e66 100644 --- a/qa/tasks/qemu.py +++ b/qa/tasks/qemu.py @@ -203,7 +203,7 @@ def generate_iso(ctx, config): try: yield finally: - for client in config.iterkeys(): + for client in config.keys(): (remote,) = ctx.cluster.only(client).remotes.keys() remote.run( args=[ @@ -253,7 +253,7 @@ def download_image(ctx, config): yield finally: log.debug('cleaning up base image files') - for client in config.iterkeys(): + for client in config.keys(): base_file = '{tdir}/qemu/base.{client}.qcow2'.format( tdir=testdir, client=client, @@ -430,7 +430,7 @@ def run_qemu(ctx, config): time.sleep(time_wait) log.debug('checking that qemu tests succeeded...') - for client in config.iterkeys(): + for client in config.keys(): (remote,) = ctx.cluster.only(client).remotes.keys() # ensure we have permissions to all the logs diff --git a/qa/tasks/rados.py b/qa/tasks/rados.py index 26a84d734af1b..d4872fb11570a 100644 --- a/qa/tasks/rados.py +++ b/qa/tasks/rados.py @@ -246,7 +246,7 @@ def task(ctx, config): manager.raw_cluster_cmd( 'osd', 'pool', 'set', pool, 'min_size', str(min_size)) - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + (remote,) = ctx.cluster.only(role).remotes.keys() proc = remote.run( args=["CEPH_CLIENT_ID={id_}".format(id_=id_)] + args + ["--pool", pool], diff --git a/qa/tasks/radosbench.py b/qa/tasks/radosbench.py index dd1f85dee7af0..0ae705134913c 100644 --- a/qa/tasks/radosbench.py +++ b/qa/tasks/radosbench.py @@ -56,7 +56,7 @@ def task(ctx, config): PREFIX = 'client.' assert role.startswith(PREFIX) id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + (remote,) = ctx.cluster.only(role).remotes.keys() if config.get('ec_pool', False): profile = config.get('erasure_code_profile', {}) diff --git a/qa/tasks/radosbenchsweep.py b/qa/tasks/radosbenchsweep.py index cda106ac66a9c..f008dee60ab31 100644 --- a/qa/tasks/radosbenchsweep.py +++ b/qa/tasks/radosbenchsweep.py @@ -171,7 +171,7 @@ def run_radosbench(ctx, config, f, num_osds, size, replica, rep): PREFIX = 'client.' 
assert role.startswith(PREFIX) id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + (remote,) = ctx.cluster.only(role).remotes.keys() proc = remote.run( args=[ @@ -217,5 +217,5 @@ def run_radosbench(ctx, config, f, num_osds, size, replica, rep): def wait_until_healthy(ctx, config): first_mon = teuthology.get_first_mon(ctx, config) - (mon_remote,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys() teuthology.wait_until_healthy(ctx, mon_remote) diff --git a/qa/tasks/radosgw_admin_rest.py b/qa/tasks/radosgw_admin_rest.py index 3a35f0429328e..f7396ef866cc0 100644 --- a/qa/tasks/radosgw_admin_rest.py +++ b/qa/tasks/radosgw_admin_rest.py @@ -43,7 +43,7 @@ def rgwadmin(ctx, client, cmd): '--format', 'json', ] pre.extend(cmd) - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + (remote,) = ctx.cluster.only(client).remotes.keys() proc = remote.run( args=pre, check_status=False, @@ -207,7 +207,7 @@ def task(ctx, config): logging.error(err) assert not err - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + (remote,) = ctx.cluster.only(client).remotes.keys() remote_host = remote.name.split('@')[1] admin_conn = boto.s3.connection.S3Connection( aws_access_key_id=admin_access_key, @@ -694,4 +694,3 @@ def task(ctx, config): # TESTCASE 'rm-user3','user','info','deleted user','fails' (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) assert ret == 404 - diff --git a/qa/tasks/ragweed.py b/qa/tasks/ragweed.py index b3c69f7b261f7..a3a7d3b0af4d4 100644 --- a/qa/tasks/ragweed.py +++ b/qa/tasks/ragweed.py @@ -186,7 +186,7 @@ def configure(ctx, config, run_stages): ragweed_conf = config['ragweed_conf'][client] if properties is not None and 'rgw_server' in properties: host = None - for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']): + for target, roles in zip(ctx.config['targets'].keys(), ctx.config['roles']): log.info('roles: ' + str(roles)) log.info('target: ' + str(target)) if properties['rgw_server'] in roles: @@ -322,7 +322,7 @@ def task(ctx, config): overrides = ctx.config.get('overrides', {}) # merge each client section, not the top level. 
- for client in config.iterkeys(): + for client in config.keys(): if not config[client]: config[client] = {} teuthology.deep_merge(config[client], overrides.get('ragweed', {})) diff --git a/qa/tasks/rbd_fsx.py b/qa/tasks/rbd_fsx.py index d32475ecd03c9..12e50d98b05b3 100644 --- a/qa/tasks/rbd_fsx.py +++ b/qa/tasks/rbd_fsx.py @@ -47,7 +47,7 @@ def _run_one_client(ctx, config, role): krbd = config.get('krbd', False) nbd = config.get('nbd', False) testdir = teuthology.get_testdir(ctx) - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + (remote,) = ctx.cluster.only(role).remotes.keys() args = [] if krbd or nbd: diff --git a/qa/tasks/rebuild_mondb.py b/qa/tasks/rebuild_mondb.py index 739627b33aae1..e2ee017eb308a 100644 --- a/qa/tasks/rebuild_mondb.py +++ b/qa/tasks/rebuild_mondb.py @@ -197,7 +197,11 @@ def task(ctx, config): 'task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() + + # stash a monmap for later + mon.run(args=['ceph', 'mon', 'getmap', '-o', '/tmp/monmap']) + manager = ceph_manager.CephManager( mon, ctx=ctx, diff --git a/qa/tasks/reg11184.py b/qa/tasks/reg11184.py index f248623844954..7bb304608b0b5 100644 --- a/qa/tasks/reg11184.py +++ b/qa/tasks/reg11184.py @@ -76,7 +76,7 @@ def task(ctx, config): log.info('writing initial objects') first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() # write 100 objects for i in range(100): rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile]) @@ -164,7 +164,7 @@ def task(ctx, config): # Export a pg (exp_remote,) = ctx.\ - cluster.only('osd.{o}'.format(o=divergent)).remotes.iterkeys() + cluster.only('osd.{o}'.format(o=divergent)).remotes.keys() FSPATH = manager.get_filepath() JPATH = os.path.join(FSPATH, "journal") prefix = ("sudo adjust-ulimits ceph-objectstore-tool " @@ -235,7 +235,7 @@ def task(ctx, config): assert exit_status is 0 (remote,) = ctx.\ - cluster.only('osd.{o}'.format(o=divergent)).remotes.iterkeys() + cluster.only('osd.{o}'.format(o=divergent)).remotes.keys() cmd = 'rm {file}'.format(file=expfile) remote.run(args=cmd, wait=True) log.info("success") diff --git a/qa/tasks/rep_lost_unfound_delete.py b/qa/tasks/rep_lost_unfound_delete.py index d24360083861f..d98517691be66 100644 --- a/qa/tasks/rep_lost_unfound_delete.py +++ b/qa/tasks/rep_lost_unfound_delete.py @@ -22,7 +22,7 @@ def task(ctx, config): assert isinstance(config, dict), \ 'lost_unfound task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() manager = ceph_manager.CephManager( mon, diff --git a/qa/tasks/repair_test.py b/qa/tasks/repair_test.py index 8ad4d02be2dd1..973273bbeacf9 100644 --- a/qa/tasks/repair_test.py +++ b/qa/tasks/repair_test.py @@ -124,7 +124,7 @@ def repair_test_2(ctx, manager, config, chooser): log.info("starting repair test type 2") victim_osd = chooser(manager, pool, 0) first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() # create object log.info("doing put and setomapval") diff --git a/qa/tasks/resolve_stuck_peering.py b/qa/tasks/resolve_stuck_peering.py index bdf86e9242e49..9122cb6676142 100644 --- 
a/qa/tasks/resolve_stuck_peering.py +++ b/qa/tasks/resolve_stuck_peering.py @@ -51,7 +51,7 @@ def task(ctx, config): log.info('writing initial objects') first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() #create few objects for i in range(100): rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile]) diff --git a/qa/tasks/restart.py b/qa/tasks/restart.py index 697345a975b07..fc38af1d7ab38 100644 --- a/qa/tasks/restart.py +++ b/qa/tasks/restart.py @@ -98,7 +98,7 @@ def task(ctx, config): assert 'exec' in config, "config requires exec key with : entries" for role, task in config['exec'].iteritems(): log.info('restart for role {r}'.format(r=role)) - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + (remote,) = ctx.cluster.only(role).remotes.keys() srcdir, restarts = get_tests(ctx, config, role, remote, testdir) log.info('Running command on role %s host %s', role, remote.name) spec = '{spec}'.format(spec=task[0]) diff --git a/qa/tasks/rgw.py b/qa/tasks/rgw.py index 0b493411546b3..24652146965b0 100644 --- a/qa/tasks/rgw.py +++ b/qa/tasks/rgw.py @@ -41,7 +41,7 @@ def start_rgw(ctx, config, clients): log.info('Starting rgw...') testdir = teuthology.get_testdir(ctx) for client in clients: - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + (remote,) = ctx.cluster.only(client).remotes.keys() cluster_name, daemon_type, client_id = teuthology.split_role(client) client_with_id = daemon_type + '.' + client_id client_with_cluster = cluster_name + '.' + client_with_id @@ -147,7 +147,7 @@ def start_rgw(ctx, config, clients): endpoint = ctx.rgw.role_endpoints[client] url = endpoint.url() log.info('Polling {client} until it starts accepting connections on {url}'.format(client=client, url=url)) - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + (remote,) = ctx.cluster.only(client).remotes.keys() wait_for_radosgw(url, remote) try: @@ -207,7 +207,7 @@ def create_pools(ctx, clients): log.info('Creating data pools') for client in clients: log.debug("Obtaining remote for client {}".format(client)) - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + (remote,) = ctx.cluster.only(client).remotes.keys() data_pool = 'default.rgw.buckets.data' cluster_name, daemon_type, client_id = teuthology.split_role(client) diff --git a/qa/tasks/s3readwrite.py b/qa/tasks/s3readwrite.py index 1a6b2b6d56bb4..6c64a9ce9736d 100644 --- a/qa/tasks/s3readwrite.py +++ b/qa/tasks/s3readwrite.py @@ -171,7 +171,7 @@ def configure(ctx, config): s3tests_conf = config['s3tests_conf'][client] if properties is not None and 'rgw_server' in properties: host = None - for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']): + for target, roles in zip(ctx.config['targets'].keys(), ctx.config['roles']): log.info('roles: ' + str(roles)) log.info('target: ' + str(target)) if properties['rgw_server'] in roles: @@ -310,7 +310,7 @@ def task(ctx, config): overrides = ctx.config.get('overrides', {}) # merge each client section, not the top level. 
- for client in config.iterkeys(): + for client in config.keys(): if not config[client]: config[client] = {} teuthology.deep_merge(config[client], overrides.get('s3readwrite', {})) diff --git a/qa/tasks/s3roundtrip.py b/qa/tasks/s3roundtrip.py index 9ed3ed44b0c6c..d71a83c07da4d 100644 --- a/qa/tasks/s3roundtrip.py +++ b/qa/tasks/s3roundtrip.py @@ -151,7 +151,7 @@ def configure(ctx, config): s3tests_conf = config['s3tests_conf'][client] if properties is not None and 'rgw_server' in properties: host = None - for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']): + for target, roles in zip(ctx.config['targets'].keys(), ctx.config['roles']): log.info('roles: ' + str(roles)) log.info('target: ' + str(target)) if properties['rgw_server'] in roles: diff --git a/qa/tasks/s3tests.py b/qa/tasks/s3tests.py index 305de1e6e15e8..c2844394b51f5 100644 --- a/qa/tasks/s3tests.py +++ b/qa/tasks/s3tests.py @@ -172,7 +172,7 @@ def configure(ctx, config): s3tests_conf = config['s3tests_conf'][client] if properties is not None and 'rgw_server' in properties: host = None - for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']): + for target, roles in zip(ctx.config['targets'].keys(), ctx.config['roles']): log.info('roles: ' + str(roles)) log.info('target: ' + str(target)) if properties['rgw_server'] in roles: @@ -374,7 +374,7 @@ def task(ctx, config): overrides = ctx.config.get('overrides', {}) # merge each client section, not the top level. - for client in config.iterkeys(): + for client in config.keys(): if not config[client]: config[client] = {} teuthology.deep_merge(config[client], overrides.get('s3tests', {})) diff --git a/qa/tasks/samba.py b/qa/tasks/samba.py index 8272e8b9539c4..319c6d5e5bf2e 100644 --- a/qa/tasks/samba.py +++ b/qa/tasks/samba.py @@ -26,7 +26,7 @@ def get_sambas(ctx, roles): PREFIX = 'samba.' 
assert role.startswith(PREFIX) id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + (remote,) = ctx.cluster.only(role).remotes.keys() yield (id_, remote) diff --git a/qa/tasks/scrub.py b/qa/tasks/scrub.py index 9800d1e98a55d..cae0e7a8e1889 100644 --- a/qa/tasks/scrub.py +++ b/qa/tasks/scrub.py @@ -39,7 +39,7 @@ def task(ctx, config): log.info('Beginning scrub...') first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() manager = ceph_manager.CephManager( mon, diff --git a/qa/tasks/scrub_test.py b/qa/tasks/scrub_test.py index 377158b7e21e2..e6532578c7084 100644 --- a/qa/tasks/scrub_test.py +++ b/qa/tasks/scrub_test.py @@ -31,7 +31,7 @@ def wait_for_victim_pg(manager): def find_victim_object(ctx, pg, osd): """Return a file to be fuzzed""" - (osd_remote,) = ctx.cluster.only('osd.%d' % osd).remotes.iterkeys() + (osd_remote,) = ctx.cluster.only('osd.%d' % osd).remotes.keys() data_path = os.path.join( '/var/lib/ceph/osd', 'ceph-{id}'.format(id=osd), @@ -359,7 +359,7 @@ def task(ctx, config): assert isinstance(config, dict), \ 'scrub_test task only accepts a dict for configuration' first_mon = teuthology.get_first_mon(ctx, config) - (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + (mon,) = ctx.cluster.only(first_mon).remotes.keys() num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') log.info('num_osds is %s' % num_osds) diff --git a/qa/tasks/swift.py b/qa/tasks/swift.py index 1966ced3407b0..08b1464d9e1fd 100644 --- a/qa/tasks/swift.py +++ b/qa/tasks/swift.py @@ -96,7 +96,7 @@ def create_users(ctx, config): try: yield finally: - for client in config['clients']: + for client in config.keys(): for user in users.itervalues(): uid = '{user}.{client}'.format(user=user, client=client) cluster_name, daemon_type, client_id = teuthology.split_role(client) @@ -128,7 +128,7 @@ def configure(ctx, config): testswift_conf = config['testswift_conf'][client] if properties is not None and 'rgw_server' in properties: host = None - for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']): + for target, roles in zip(ctx.config['targets'].keys(), ctx.config['roles']): log.info('roles: ' + str(roles)) log.info('target: ' + str(target)) if properties['rgw_server'] in roles: diff --git a/qa/tasks/systemd.py b/qa/tasks/systemd.py index 50471db31ea80..09322c960fee5 100644 --- a/qa/tasks/systemd.py +++ b/qa/tasks/systemd.py @@ -137,6 +137,6 @@ def task(ctx, config): 'grep', 'ceph']) # wait for HEALTH_OK mon = get_first_mon(ctx, config) - (mon_remote,) = ctx.cluster.only(mon).remotes.iterkeys() + (mon_remote,) = ctx.cluster.only(mon).remotes.keys() wait_until_healthy(ctx, mon_remote, use_sudo=True) yield diff --git a/qa/tasks/tempest.py b/qa/tasks/tempest.py index 14cd2ed4a90b1..6d4a38ad0baec 100644 --- a/qa/tasks/tempest.py +++ b/qa/tasks/tempest.py @@ -242,7 +242,7 @@ def task(ctx, config): overrides = ctx.config.get('overrides', {}) # merge each client section, not the top level. 
- for client in config.iterkeys(): + for client in config.keys(): if not config[client]: config[client] = {} teuthology.deep_merge(config[client], overrides.get('keystone', {})) diff --git a/qa/tasks/util/rgw.py b/qa/tasks/util/rgw.py index ee79208cc5bd7..d3abf1ced3b97 100644 --- a/qa/tasks/util/rgw.py +++ b/qa/tasks/util/rgw.py @@ -31,7 +31,7 @@ def rgwadmin(ctx, client, cmd, stdin=StringIO(), check_status=False, ] pre.extend(cmd) log.log(log_level, 'rgwadmin: cmd=%s' % pre) - (remote,) = ctx.cluster.only(client).remotes.iterkeys() + (remote,) = ctx.cluster.only(client).remotes.keys() proc = remote.run( args=pre, check_status=check_status, diff --git a/qa/tasks/util/workunit.py b/qa/tasks/util/workunit.py index c5314330ffbfe..91b0e0f8e19f0 100644 --- a/qa/tasks/util/workunit.py +++ b/qa/tasks/util/workunit.py @@ -63,8 +63,8 @@ def get_refspec_after_overrides(config, overrides): overrides = copy.deepcopy(overrides.get('workunit', {})) refspecs = {'suite_sha1': Refspec, 'suite_branch': Branch, 'sha1': Refspec, 'tag': Refspec, 'branch': Branch} - if any(map(lambda i: i in config, refspecs.iterkeys())): - for i in refspecs.iterkeys(): + if any(map(lambda i: i in config, refspecs.keys())): + for i in refspecs.keys(): overrides.pop(i, None) misc.deep_merge(config, overrides) diff --git a/qa/tasks/watch_notify_same_primary.py b/qa/tasks/watch_notify_same_primary.py index 8f6d33b89af38..b462e5e4d76ed 100644 --- a/qa/tasks/watch_notify_same_primary.py +++ b/qa/tasks/watch_notify_same_primary.py @@ -44,7 +44,7 @@ def task(ctx, config): assert isinstance(role, basestring) PREFIX = 'client.' assert role.startswith(PREFIX) - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + (remote,) = ctx.cluster.only(role).remotes.keys() manager = ctx.managers['ceph'] manager.raw_cluster_cmd('osd', 'set', 'noout') diff --git a/qa/tasks/watch_notify_stress.py b/qa/tasks/watch_notify_stress.py index 6db313fea6de6..e54d4b6e07249 100644 --- a/qa/tasks/watch_notify_stress.py +++ b/qa/tasks/watch_notify_stress.py @@ -40,7 +40,7 @@ def task(ctx, config): PREFIX = 'client.' assert role.startswith(PREFIX) id_ = role[len(PREFIX):] - (remote,) = ctx.cluster.only(role).remotes.iterkeys() + (remote,) = ctx.cluster.only(role).remotes.keys() remotes.append(remote) args =['CEPH_CLIENT_ID={id_}'.format(id_=id_), diff --git a/qa/tasks/workunit.py b/qa/tasks/workunit.py index 5ffc04758d33b..cf0e33ab82167 100644 --- a/qa/tasks/workunit.py +++ b/qa/tasks/workunit.py @@ -102,7 +102,7 @@ def task(ctx, config): # Create scratch dirs for any non-all workunits log.info('Making a separate scratch dir for every client...') - for role in clients.iterkeys(): + for role in clients.keys(): assert isinstance(role, basestring) if role == "all": continue diff --git a/qa/workunits/mon/caps.py b/qa/workunits/mon/caps.py index 7bc8c923920fc..cca170ac4edce 100644 --- a/qa/workunits/mon/caps.py +++ b/qa/workunits/mon/caps.py @@ -307,7 +307,7 @@ def test_all(): print 'testing {m}/{c}'.format(m=module,c=cmd_cmd) # test - for good_bad in perms.iterkeys(): + for good_bad in perms.keys(): for (kind,lst) in perms[good_bad].iteritems(): for (perm,_) in lst: cname = 'client.{gb}-{k}-{p}'.format(gb=good_bad,k=kind,p=perm)
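
For reference, the whole patch applies one mechanical substitution: Python 2's dict.iterkeys()/iteritems()/itervalues() do not exist on Python 3 dictionaries, while keys()/items()/values() are available on both interpreters (lists on Python 2, views on Python 3) and still support the iteration and single-element unpacking that the teuthology tasks depend on. A minimal sketch of the pattern follows; the remotes dict is a hypothetical stand-in for teuthology's Cluster.remotes mapping, not code from this patch.

# Minimal sketch (not part of the patch) of the Python 2 / Python 3 behaviour
# the conversion relies on. The `remotes` dict below is a hypothetical
# stand-in for teuthology's Cluster.remotes mapping, not real test code.

remotes = {'smithi001': ['mon.a', 'osd.0'], 'smithi002': ['client.0']}

# Removed spelling: remotes.iterkeys() raises AttributeError on Python 3.
# Portable spelling: keys() returns a list on py2 and a view on py3; both
# iterate the same way.
for host in remotes.keys():
    print(host)

# The single-remote unpacking idiom used throughout qa/tasks keeps working,
# because a one-element view (or list) unpacks into a one-element tuple.
only_one = {'smithi002': ['client.0']}
(remote,) = only_one.keys()
assert remote == 'smithi002'

# iteritems()/itervalues() follow the same rule: items()/values() work on both.
for host, roles in remotes.items():
    print('%s runs %s' % (host, roles))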