From: Kyr Shatskyy
Date: Wed, 9 Oct 2019 12:36:58 +0000 (+0200)
Subject: qa: get rid of iteritems for python3 compatibility
X-Git-Tag: v14.2.10~17^2~83
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=a1b8f43195e6fe2148d8ccde4f91ee0db4485f77;p=ceph.git

qa: get rid of iteritems for python3 compatibility

Fixes: https://tracker.ceph.com/issues/42267
Signed-off-by: Kyr Shatskyy

(cherry picked from commit 50b1823faca0e73fe81a3c2aca12dae3e250abb6)

Conflicts:
	qa/tasks/barbican.py
	qa/tasks/ragweed.py
	qa/tasks/s3roundtrip.py
	qa/tasks/s3tests.py
	qa/tasks/s3tests_java.py
	qa/tasks/vault.py: trivial resolution
---
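Note for backport reviewers (commentary below the '---' fold, not part of the
commit message): Python 3 removed dict.iteritems(); dict.items() exists on both
major versions (returning a list on Python 2 and a view on Python 3), so it is a
drop-in replacement for the read-only loops patched here. A minimal sketch of
the pattern, using a hypothetical teuthology-style config dict:

    config = {'client.0': ['rbd'], 'client.1': ['cls']}

    # Python 2 only -- raises AttributeError under Python 3:
    #   for client, tests in config.iteritems(): ...

    # Works on both Python 2 and Python 3:
    for client, tests in config.items():
        print(client, tests)
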
diff --git a/qa/tasks/admin_socket.py b/qa/tasks/admin_socket.py
index c54eb7a72b46..c454d3d0c03e 100644
--- a/qa/tasks/admin_socket.py
+++ b/qa/tasks/admin_socket.py
@@ -68,7 +68,7 @@ def task(ctx, config):
     teuthology.replace_all_with_clients(ctx.cluster, config)

     with parallel() as ptask:
-        for client, tests in config.iteritems():
+        for client, tests in config.items():
             ptask.spawn(_run_tests, ctx, client, tests)


@@ -140,7 +140,7 @@ def _run_tests(ctx, client, tests):
         ],
     )

-    for command, config in tests.iteritems():
+    for command, config in tests.items():
         if config is None:
             config = {}
         teuthology.deep_merge(config, overrides)
diff --git a/qa/tasks/autotest.py b/qa/tasks/autotest.py
index 2f96373d0a9f..a78987dca70e 100644
--- a/qa/tasks/autotest.py
+++ b/qa/tasks/autotest.py
@@ -70,7 +70,7 @@ def task(ctx, config):
         )

     with parallel() as p:
-        for role, tests in config.iteritems():
+        for role, tests in config.items():
             (remote,) = ctx.cluster.only(role).remotes.keys()
             p.spawn(_run_tests, testdir, remote, role, tests)
diff --git a/qa/tasks/blktrace.py b/qa/tasks/blktrace.py
index 96aaf505ada5..10b1da0c0bb0 100644
--- a/qa/tasks/blktrace.py
+++ b/qa/tasks/blktrace.py
@@ -20,7 +20,7 @@ def setup(ctx, config):
     osds = ctx.cluster.only(teuthology.is_type('osd', config['cluster']))
     log_dir = '{tdir}/archive/performance/blktrace'.format(tdir=teuthology.get_testdir(ctx))

-    for remote, roles_for_host in osds.remotes.iteritems():
+    for remote, roles_for_host in osds.remotes.items():
         log.info('Creating %s on %s' % (log_dir, remote.name))
         remote.run(
             args=['mkdir', '-p', '-m0755', '--', log_dir],
@@ -38,7 +38,7 @@ def execute(ctx, config):
     log_dir = '{tdir}/archive/performance/blktrace'.format(tdir=testdir)
     osds = ctx.cluster.only(teuthology.is_type('osd'))

-    for remote, roles_for_host in osds.remotes.iteritems():
+    for remote, roles_for_host in osds.remotes.items():
         roles_to_devs = ctx.disk_config.remote_to_roles_to_dev[remote]
         for role in teuthology.cluster_roles_of_type(roles_for_host, 'osd',
                                                      config['cluster']):
diff --git a/qa/tasks/ceph_client.py b/qa/tasks/ceph_client.py
index 3ca90b7d1988..74e818f932be 100644
--- a/qa/tasks/ceph_client.py
+++ b/qa/tasks/ceph_client.py
@@ -16,7 +16,7 @@ def create_keyring(ctx, cluster_name):
     clients = ctx.cluster.only(teuthology.is_type('client', cluster_name))
     testdir = teuthology.get_testdir(ctx)
     coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
-    for remote, roles_for_host in clients.remotes.iteritems():
+    for remote, roles_for_host in clients.remotes.items():
         for role in teuthology.cluster_roles_of_type(roles_for_host, 'client',
                                                      cluster_name):
             name = teuthology.ceph_role(role)
diff --git a/qa/tasks/ceph_deploy.py b/qa/tasks/ceph_deploy.py
index 42f868d8b889..de45fff99513 100644
--- a/qa/tasks/ceph_deploy.py
+++ b/qa/tasks/ceph_deploy.py
@@ -144,7 +144,7 @@ def get_nodes_using_role(ctx, target_role):
     # Prepare a modified version of cluster.remotes with ceph-deploy-ized names
     modified_remotes = {}
     ceph_deploy_mapped = dict()
-    for _remote, roles_for_host in ctx.cluster.remotes.iteritems():
+    for _remote, roles_for_host in ctx.cluster.remotes.items():
         modified_remotes[_remote] = []
         for svc_id in roles_for_host:
             if svc_id.startswith("{0}.".format(target_role)):
@@ -176,7 +176,7 @@ def get_nodes_using_role(ctx, target_role):
 def get_dev_for_osd(ctx, config):
     """Get a list of all osd device names."""
     osd_devs = []
-    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
+    for remote, roles_for_host in ctx.cluster.remotes.items():
         host = remote.name.split('@')[-1]
         shortname = host.split('.')[0]
         devs = teuthology.get_scratch_devices(remote)
@@ -204,7 +204,7 @@ def get_dev_for_osd(ctx, config):
 def get_all_nodes(ctx, config):
     """Return a string of node names separated by blanks"""
     nodelist = []
-    for t, k in ctx.config['targets'].iteritems():
+    for t, k in ctx.config['targets'].items():
         host = t.split('@')[-1]
         simple_host = host.split('.')[0]
         nodelist.append(simple_host)
@@ -309,7 +309,7 @@ def build_ceph_cluster(ctx, config):
     ceph_branch = None
     if config.get('branch') is not None:
         cbranch = config.get('branch')
-        for var, val in cbranch.iteritems():
+        for var, val in cbranch.items():
             ceph_branch = '--{var}={val}'.format(var=var, val=val)
     all_nodes = get_all_nodes(ctx, config)
     mds_nodes = get_nodes_using_role(ctx, 'mds')
@@ -344,11 +344,11 @@ def build_ceph_cluster(ctx, config):

         if config.get('conf') is not None:
             confp = config.get('conf')
-            for section, keys in confp.iteritems():
+            for section, keys in confp.items():
                 lines = '[{section}]\n'.format(section=section)
                 teuthology.append_lines_to_file(ceph_admin, conf_path, lines,
                                                 sudo=True)
-                for key, value in keys.iteritems():
+                for key, value in keys.items():
                     log.info("[%s] %s = %s" % (section, key, value))
                     lines = '{key} = {value}\n'.format(key=key, value=value)
                     teuthology.append_lines_to_file(
@@ -432,7 +432,7 @@ def build_ceph_cluster(ctx, config):
             )

             clients = ctx.cluster.only(teuthology.is_type('client'))
-            for remot, roles_for_host in clients.remotes.iteritems():
+            for remot, roles_for_host in clients.remotes.items():
                 for id_ in teuthology.roles_of_type(roles_for_host, 'client'):
                     client_keyring = \
                         '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
@@ -526,7 +526,7 @@ def build_ceph_cluster(ctx, config):
         path = os.path.join(ctx.archive, 'data')
         os.makedirs(path)
         mons = ctx.cluster.only(teuthology.is_type('mon'))
-        for remote, roles in mons.remotes.iteritems():
+        for remote, roles in mons.remotes.items():
             for role in roles:
                 if role.startswith('mon.'):
                     teuthology.pull_directory_tarball(
@@ -614,7 +614,7 @@ def cli_test(ctx, config):
         branch = ctx.config.get('branch')
         test_branch = ' --dev={branch} '.format(branch=branch)
     mons = ctx.cluster.only(teuthology.is_type('mon'))
-    for node, role in mons.remotes.iteritems():
+    for node, role in mons.remotes.items():
         admin = node
         admin.run(args=['mkdir', conf_dir], check_status=False)
         nodename = admin.shortname
@@ -624,7 +624,7 @@ def cli_test(ctx, config):
     log.info('system type is %s', system_type)
     osds = ctx.cluster.only(teuthology.is_type('osd'))

-    for remote, roles in osds.remotes.iteritems():
+    for remote, roles in osds.remotes.items():
         devs = teuthology.get_scratch_devices(remote)
         log.info("roles %s", roles)
         if (len(devs) < 3):
@@ -638,11 +638,11 @@ def cli_test(ctx, config):
     execute_cdeploy(admin, new_cmd, path)
     if config.get('conf') is not None:
         confp = config.get('conf')
-        for section, keys in confp.iteritems():
+        for section, keys in confp.items():
             lines = '[{section}]\n'.format(section=section)
             teuthology.append_lines_to_file(admin, conf_path, lines,
                                             sudo=True)
-            for key, value in keys.iteritems():
+            for key, value in keys.items():
                 log.info("[%s] %s = %s" % (section, key, value))
                 lines = '{key} = {value}\n'.format(key=key, value=value)
                 teuthology.append_lines_to_file(admin, conf_path, lines,
@@ -789,7 +789,7 @@ def upgrade(ctx, config):
         if mapped_role.get(role):
             role = mapped_role.get(role)
         remotes_and_roles = ctx.cluster.only(role).remotes
-        for remote, roles in remotes_and_roles.iteritems():
+        for remote, roles in remotes_and_roles.items():
             nodename = remote.shortname
             cmd = cmd + ' ' + nodename
             log.info("Upgrading ceph on %s", nodename)
@@ -813,7 +813,7 @@ def upgrade(ctx, config):
     # write the correct mgr key to disk
     if config.get('setup-mgr-node', None):
         mons = ctx.cluster.only(teuthology.is_type('mon'))
-        for remote, roles in mons.remotes.iteritems():
+        for remote, roles in mons.remotes.items():
             remote.run(
                 args=[
                     run.Raw('sudo ceph auth get client.bootstrap-mgr'),
diff --git a/qa/tasks/ceph_manager.py b/qa/tasks/ceph_manager.py
index a7267acfe691..a2479c6f997b 100644
--- a/qa/tasks/ceph_manager.py
+++ b/qa/tasks/ceph_manager.py
@@ -1223,7 +1223,7 @@ class CephManager:
             return
         if no_wait is None:
             no_wait = []
-        for osd, need in seq.iteritems():
+        for osd, need in seq.items():
             if osd in no_wait:
                 continue
             got = 0
@@ -1529,7 +1529,7 @@ class CephManager:
         :param osdnum: osd number
         :param argdict: dictionary containing values to set.
         """
-        for k, v in argdict.iteritems():
+        for k, v in argdict.items():
             self.wait_run_admin_socket(
                 'osd', osdnum, ['config', 'set', str(k), str(v)])
diff --git a/qa/tasks/ceph_objectstore_tool.py b/qa/tasks/ceph_objectstore_tool.py
index e6c95ed1c436..9598e9073dd1 100644
--- a/qa/tasks/ceph_objectstore_tool.py
+++ b/qa/tasks/ceph_objectstore_tool.py
@@ -331,7 +331,7 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False):
             if osdid not in pgs:
                 continue

-            for pg, JSON in db[basename]["pg2json"].iteritems():
+            for pg, JSON in db[basename]["pg2json"].items():
                 if pg in pgs[osdid]:
                     cmd = ((prefix + "--pgid {pg}").
                            format(id=osdid, pg=pg).split())
@@ -421,7 +421,7 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False):
             if osdid not in pgs:
                 continue

-            for pg, JSON in db[basename]["pg2json"].iteritems():
+            for pg, JSON in db[basename]["pg2json"].items():
                 if pg in pgs[osdid]:
                     cmd = ((prefix + "--pgid {pg}").
                            format(id=osdid, pg=pg).split())
diff --git a/qa/tasks/ceph_test_case.py b/qa/tasks/ceph_test_case.py
index 41a087abd849..e2506436b745 100644
--- a/qa/tasks/ceph_test_case.py
+++ b/qa/tasks/ceph_test_case.py
@@ -105,7 +105,7 @@ class CephTestCase(unittest.TestCase):
         def seen_health_warning():
             health = self.ceph_cluster.mon_manager.get_mon_health()
             codes = [s for s in health['checks']]
-            summary_strings = [s[1]['summary']['message'] for s in health['checks'].iteritems()]
+            summary_strings = [s[1]['summary']['message'] for s in health['checks'].items()]
             if len(summary_strings) == 0:
                 log.debug("Not expected number of summary strings ({0})".format(summary_strings))
                 return False
diff --git a/qa/tasks/cephfs/test_forward_scrub.py b/qa/tasks/cephfs/test_forward_scrub.py
index 20a079d72d7d..6eb31b244df4 100644
--- a/qa/tasks/cephfs/test_forward_scrub.py
+++ b/qa/tasks/cephfs/test_forward_scrub.py
@@ -248,7 +248,7 @@ class TestForwardScrub(CephFSTestCase):
                            "--inode={0}".format(inos["./file3_sixmegs"]), "summary"], 0)

         # Revert to old inotable.
-        for key, value in inotable_copy.iteritems():
+        for key, value in inotable_copy.items():
            self.fs.put_metadata_object_raw(key, value)

         self.mds_cluster.mds_restart()
diff --git a/qa/tasks/cephfs/test_sessionmap.py b/qa/tasks/cephfs/test_sessionmap.py
index c16851719c4d..2016c58bc850 100644
--- a/qa/tasks/cephfs/test_sessionmap.py
+++ b/qa/tasks/cephfs/test_sessionmap.py
@@ -31,7 +31,7 @@ class TestSessionMap(CephFSTestCase):
     def _get_connection_count(self, status=None):
         perf = self.fs.rank_asok(["perf", "dump"], status=status)
         conn = 0
-        for module, dump in perf.iteritems():
+        for module, dump in perf.items():
             if "AsyncMessenger::Worker" in module:
                 conn += dump['msgr_active_connections']
         return conn
diff --git a/qa/tasks/cephfs/test_snapshots.py b/qa/tasks/cephfs/test_snapshots.py
index f627c4932a75..88a23a0ae7e4 100644
--- a/qa/tasks/cephfs/test_snapshots.py
+++ b/qa/tasks/cephfs/test_snapshots.py
@@ -242,7 +242,7 @@ class TestSnapshots(CephFSTestCase):
         def _check_snapclient_cache(snaps_dump, cache_dump=None, rank=0):
             if cache_dump is None:
                 cache_dump = self._get_snapclient_dump(rank=rank)
-            for key, value in cache_dump.iteritems():
+            for key, value in cache_dump.items():
                 if value != snaps_dump[key]:
                     return False
             return True;
diff --git a/qa/tasks/cram.py b/qa/tasks/cram.py
index 85fbb9e66b2c..d06f09440a6a 100644
--- a/qa/tasks/cram.py
+++ b/qa/tasks/cram.py
@@ -62,7 +62,7 @@ def task(ctx, config):
     log.info('Pulling tests from %s ref %s', git_url, refspec)

     try:
-        for client, tests in clients.iteritems():
+        for client, tests in clients.items():
             (remote,) = ctx.cluster.only(client).remotes.keys()
             client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
             remote.run(
@@ -90,7 +90,7 @@ def task(ctx, config):
             for role in clients.keys():
                 p.spawn(_run_tests, ctx, role)
     finally:
-        for client, tests in clients.iteritems():
+        for client, tests in clients.items():
             (remote,) = ctx.cluster.only(client).remotes.keys()
             client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
             test_files = set([test.rsplit('/', 1)[1] for test in tests])
diff --git a/qa/tasks/dnsmasq.py b/qa/tasks/dnsmasq.py
index 7a100c5a3886..717c9f01930a 100644
--- a/qa/tasks/dnsmasq.py
+++ b/qa/tasks/dnsmasq.py
@@ -67,7 +67,7 @@ def setup_dnsmasq(remote, testdir, cnames):
     # add address entries for each cname
     dnsmasq = "server=8.8.8.8\nserver=8.8.4.4\n"
     address_template = "address=/{cname}/{ip_address}\n"
-    for cname, ip_address in cnames.iteritems():
+    for cname, ip_address in cnames.items():
         dnsmasq += address_template.format(cname=cname, ip_address=ip_address)

     # write to temporary dnsmasq file
@@ -129,7 +129,7 @@ def task(ctx, config):

     # multiple roles may map to the same remote, so collect names by remote
     remote_names = {}
-    for role, cnames in config.iteritems():
+    for role, cnames in config.items():
         remote = get_remote_for_role(ctx, role)
         if remote is None:
             raise ConfigError('no remote for role %s' % role)
@@ -144,7 +144,7 @@ def task(ctx, config):
                 names[cname] = remote.ip_address
         elif isinstance(cnames, dict):
             # when given a dict, look up the remote ip for each
-            for cname, client in cnames.iteritems():
+            for cname, client in cnames.items():
                 r = get_remote_for_role(ctx, client)
                 if r is None:
                     raise ConfigError('no remote for role %s' % client)
@@ -160,7 +160,7 @@ def task(ctx, config):

     # run subtasks for each unique remote
     subtasks = []
-    for remote, cnames in remote_names.iteritems():
+    for remote, cnames in remote_names.items():
         subtasks.extend([ lambda r=remote: install_dnsmasq(r) ])
         subtasks.extend([ lambda r=remote: backup_resolv(r, resolv_bak) ])
         subtasks.extend([ lambda r=remote: replace_resolv(r, resolv_tmp) ])
diff --git a/qa/tasks/exec_on_cleanup.py b/qa/tasks/exec_on_cleanup.py
index 6431197e1066..a7c7ee5dae92 100644
--- a/qa/tasks/exec_on_cleanup.py
+++ b/qa/tasks/exec_on_cleanup.py
@@ -45,7 +45,7 @@ def task(ctx, config):
         roles = teuthology.all_roles(ctx.cluster)
         config = dict((id_, a) for id_ in roles)

-    for role, ls in config.iteritems():
+    for role, ls in config.items():
         (remote,) = ctx.cluster.only(role).remotes.keys()
         log.info('Running commands on role %s host %s', role, remote.name)
         for c in ls:
diff --git a/qa/tasks/keystone.py b/qa/tasks/keystone.py
index 250c297298d7..d5859768dabc 100644
--- a/qa/tasks/keystone.py
+++ b/qa/tasks/keystone.py
@@ -327,7 +327,7 @@ def assign_ports(ctx, config, initial_port):
     """
     port = initial_port
     role_endpoints = {}
-    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
+    for remote, roles_for_host in ctx.cluster.remotes.items():
         for role in roles_for_host:
             if role in config:
                 role_endpoints[role] = (remote.name.split('@')[1], port)
diff --git a/qa/tasks/qemu.py b/qa/tasks/qemu.py
index bad0d8f4dc48..e455fedf63de 100644
--- a/qa/tasks/qemu.py
+++ b/qa/tasks/qemu.py
@@ -24,7 +24,7 @@ DEFAULT_CPUS = 1
 DEFAULT_MEM = 4096 # in megabytes

 def create_images(ctx, config, managers):
-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         disks = client_config.get('disks', DEFAULT_NUM_DISKS)
         if not isinstance(disks, list):
             disks = [{} for n in range(int(disks))]
@@ -46,7 +46,7 @@ def create_images(ctx, config, managers):
     )

 def create_clones(ctx, config, managers):
-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         clone = client_config.get('clone', False)
         if clone:
             num_disks = client_config.get('disks', DEFAULT_NUM_DISKS)
@@ -72,7 +72,7 @@ def create_dirs(ctx, config):
     Handle directory creation and cleanup
     """
     testdir = teuthology.get_testdir(ctx)
-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         assert 'test' in client_config, 'You must specify a test to run'
         (remote,) = ctx.cluster.only(client).remotes.keys()
         remote.run(
@@ -85,7 +85,7 @@ def create_dirs(ctx, config):
     try:
         yield
     finally:
-        for client, client_config in config.iteritems():
+        for client, client_config in config.items():
             assert 'test' in client_config, 'You must specify a test to run'
             (remote,) = ctx.cluster.only(client).remotes.keys()
             remote.run(
@@ -107,7 +107,7 @@ def generate_iso(ctx, config):
     git_url = teuth_config.get_ceph_qa_suite_git_url()
     log.info('Pulling tests from %s ref %s', git_url, refspec)

-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         assert 'test' in client_config, 'You must specify a test to run'

         test = client_config['test']
@@ -219,7 +219,7 @@ def download_image(ctx, config):
     """Downland base image, remove image file when done"""
     log.info('downloading base image')
     testdir = teuthology.get_testdir(ctx)
-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         (remote,) = ctx.cluster.only(client).remotes.keys()
         base_file = '{tdir}/qemu/base.{client}.qcow2'.format(tdir=testdir, client=client)
         image_url = client_config.get('image_url', DEFAULT_IMAGE_URL)
@@ -342,7 +342,7 @@ def run_qemu(ctx, config):
     """Setup kvm environment and start qemu"""
     procs = []
     testdir = teuthology.get_testdir(ctx)
-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         (remote,) = ctx.cluster.only(client).remotes.keys()
         log_dir = '{tdir}/archive/qemu/{client}'.format(tdir=testdir, client=client)
         remote.run(
diff --git a/qa/tasks/rados.py b/qa/tasks/rados.py
index 121d06ea288b..595c057fd0a5 100644
--- a/qa/tasks/rados.py
+++ b/qa/tasks/rados.py
@@ -202,7 +202,7 @@ def task(ctx, config):
             weights['append'] = weights['append'] / 2
             weights['append_excl'] = weights['append']

-        for op, weight in weights.iteritems():
+        for op, weight in weights.items():
             args.extend([
                 '--op', op, str(weight)
             ])
diff --git a/qa/tasks/ragweed.py b/qa/tasks/ragweed.py
index 5806ed0c94e0..50ead75a8c66 100644
--- a/qa/tasks/ragweed.py
+++ b/qa/tasks/ragweed.py
@@ -112,7 +112,7 @@ def create_users(ctx, config, run_stages):
     """
     assert isinstance(config, dict)

-    for client, properties in config['config'].iteritems():
+    for client, properties in config['config'].items():
         run_stages[client] = string.split(properties.get('stages', 'prepare,check'), ',')

     log.info('Creating rgw users...')
@@ -126,7 +126,7 @@ def create_users(ctx, config, run_stages):
         ragweed_conf = config['ragweed_conf'][client]
         ragweed_conf.setdefault('fixtures', {})
         ragweed_conf['rgw'].setdefault('bucket_prefix', 'test-' + client)
-        for section, user in users.iteritems():
+        for section, user in users.items():
             _config_user(ragweed_conf, section, '{user}.{client}'.format(user=user, client=client))
             log.debug('Creating user {user} on {host}'.format(user=ragweed_conf[section]['user_id'], host=client))
             if user == 'sysuser':
@@ -181,7 +181,7 @@ def configure(ctx, config, run_stages):
     assert isinstance(config, dict)
     log.info('Configuring ragweed...')
     testdir = teuthology.get_testdir(ctx)
-    for client, properties in config['clients'].iteritems():
+    for client, properties in config['clients'].items():
         (remote,) = ctx.cluster.only(client).remotes.keys()
         remote.run(
             args=[
@@ -240,7 +240,7 @@ def configure(ctx, config, run_stages):

     finally:
         log.info('Cleaning up boto...')
-        for client, properties in config['clients'].iteritems():
+        for client, properties in config['clients'].items():
             (remote,) = ctx.cluster.only(client).remotes.keys()
             remote.run(
                 args=[
@@ -260,7 +260,7 @@ def run_tests(ctx, config, run_stages):
     assert isinstance(config, dict)
     testdir = teuthology.get_testdir(ctx)
     attrs = ["!fails_on_rgw"]
-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         stages = string.join(run_stages[client], ',')
         args = [
             'RAGWEED_CONF={tdir}/archive/ragweed.{client}.conf'.format(tdir=testdir, client=client),
diff --git a/qa/tasks/rbd.py b/qa/tasks/rbd.py
index d43672b97144..8e635e1cbb31 100644
--- a/qa/tasks/rbd.py
+++ b/qa/tasks/rbd.py
@@ -607,7 +607,7 @@ def task(ctx, config):
     norm_config = teuthology.replace_all_with_clients(ctx.cluster, config)
     if isinstance(norm_config, dict):
         role_images = {}
-        for role, properties in norm_config.iteritems():
+        for role, properties in norm_config.items():
             if properties is None:
                 properties = {}
             role_images[role] = properties.get('image_name')
diff --git a/qa/tasks/rbd_fio.py b/qa/tasks/rbd_fio.py
index 82340b4d6fd2..20f3579bec07 100644
--- a/qa/tasks/rbd_fio.py
+++ b/qa/tasks/rbd_fio.py
@@ -51,7 +51,7 @@ or
         client_config = config['all']
     clients = ctx.cluster.only(teuthology.is_type('client'))
     rbd_test_dir = teuthology.get_testdir(ctx) + "/rbd_fio_test"
-    for remote,role in clients.remotes.iteritems():
+    for remote,role in clients.remotes.items():
         if 'client_config' in locals():
             with parallel() as p:
                 p.spawn(run_fio, remote, client_config, rbd_test_dir)
diff --git a/qa/tasks/rebuild_mondb.py b/qa/tasks/rebuild_mondb.py
index d1b946b51ddc..008e312e2d3d 100644
--- a/qa/tasks/rebuild_mondb.py
+++ b/qa/tasks/rebuild_mondb.py
@@ -49,7 +49,7 @@ def _push_directory(path, remote, remote_dir):
 def _nuke_mons(manager, mons, mon_id):
     assert mons
     is_mon = teuthology.is_type('mon')
-    for remote, roles in mons.remotes.iteritems():
+    for remote, roles in mons.remotes.items():
         for role in roles:
             if not is_mon(role):
                 continue
@@ -77,7 +77,7 @@ def _rebuild_db(ctx, manager, cluster_name, mon, mon_id, keyring_path):
     is_osd = teuthology.is_type('osd')
     osds = ctx.cluster.only(is_osd)
     assert osds
-    for osd, roles in osds.remotes.iteritems():
+    for osd, roles in osds.remotes.items():
         for role in roles:
             if not is_osd(role):
                 continue
@@ -142,7 +142,7 @@ def _revive_mons(manager, mons, recovered, keyring_path):
     # the initial monmap is in the ceph.conf, so we are good.
     n_mons = 0
     is_mon = teuthology.is_type('mon')
-    for remote, roles in mons.remotes.iteritems():
+    for remote, roles in mons.remotes.items():
         for role in roles:
             if not is_mon(role):
                 continue
@@ -169,7 +169,7 @@ def _revive_mons(manager, mons, recovered, keyring_path):
 def _revive_mgrs(ctx, manager):
     is_mgr = teuthology.is_type('mgr')
     mgrs = ctx.cluster.only(is_mgr)
-    for _, roles in mgrs.remotes.iteritems():
+    for _, roles in mgrs.remotes.items():
         for role in roles:
             if not is_mgr(role):
                 continue
@@ -181,7 +181,7 @@ def _revive_mgrs(ctx, manager):
 def _revive_osds(ctx, manager):
     is_osd = teuthology.is_type('osd')
     osds = ctx.cluster.only(is_osd)
-    for _, roles in osds.remotes.iteritems():
+    for _, roles in osds.remotes.items():
         for role in roles:
             if not is_osd(role):
                 continue
diff --git a/qa/tasks/restart.py b/qa/tasks/restart.py
index fc38af1d7ab3..52b685c9e360 100644
--- a/qa/tasks/restart.py
+++ b/qa/tasks/restart.py
@@ -96,7 +96,7 @@ def task(ctx, config):
     try:
         assert 'exec' in config, "config requires exec key with <role>: <tasks> entries"

-        for role, task in config['exec'].iteritems():
+        for role, task in config['exec'].items():
             log.info('restart for role {r}'.format(r=role))
             (remote,) = ctx.cluster.only(role).remotes.keys()
             srcdir, restarts = get_tests(ctx, config, role, remote, testdir)
@@ -113,7 +113,7 @@ def task(ctx, config):
                 ]
                 env = config.get('env')
                 if env is not None:
-                    for var, val in env.iteritems():
+                    for var, val in env.items():
                         quoted_val = pipes.quote(val)
                         env_arg = '{var}={val}'.format(var=var, val=quoted_val)
                         args.append(run.Raw(env_arg))
diff --git a/qa/tasks/rgw.py b/qa/tasks/rgw.py
index bda1b0a8cfef..6056d2200665 100644
--- a/qa/tasks/rgw.py
+++ b/qa/tasks/rgw.py
@@ -165,7 +165,7 @@ def start_rgw(ctx, config, clients):

 def assign_endpoints(ctx, config, default_cert):
     role_endpoints = {}
-    for role, client_config in config.iteritems():
+    for role, client_config in config.items():
         client_config = client_config or {}
         remote = get_remote_for_role(ctx, role)
diff --git a/qa/tasks/rgw_logsocket.py b/qa/tasks/rgw_logsocket.py
index 0d9cc180bb23..d76e59d7f66a 100644
--- a/qa/tasks/rgw_logsocket.py
+++ b/qa/tasks/rgw_logsocket.py
@@ -47,7 +47,7 @@ def run_tests(ctx, config):
     """
     assert isinstance(config, dict)
     testdir = teuthology.get_testdir(ctx)
-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         client_config['extra_args'] = [
             's3tests.functional.test_s3:test_bucket_list_return_data',
         ]
@@ -70,7 +70,7 @@ def run_tests(ctx, config):

     netcat_out = BytesIO()

-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         ctx.cluster.only(client).run(
             args = [
                 'netcat',
@@ -126,7 +126,7 @@ def task(ctx, config):
     overrides = ctx.config.get('overrides', {})
     # merge each client section, not the top level.
-    for (client, cconf) in config.iteritems():
+    for (client, cconf) in config.items():
         teuthology.deep_merge(cconf, overrides.get('rgw-logsocket', {}))

     log.debug('config is %s', config)
diff --git a/qa/tasks/rgw_multisite.py b/qa/tasks/rgw_multisite.py
index c68f0f76bc46..266d0fb694fd 100644
--- a/qa/tasks/rgw_multisite.py
+++ b/qa/tasks/rgw_multisite.py
@@ -240,7 +240,7 @@ def extract_clusters_and_gateways(ctx, role_endpoints):
     """ create cluster and gateway instances for all of the radosgw roles """
     clusters = {}
     gateways = {}
-    for role, endpoint in role_endpoints.iteritems():
+    for role, endpoint in role_endpoints.items():
         cluster_name, daemon_type, client_id = misc.split_role(role)
         # find or create the cluster by name
         cluster = clusters.get(cluster_name)
diff --git a/qa/tasks/s3readwrite.py b/qa/tasks/s3readwrite.py
index 1b063f5b1ffa..b70eb83b7a3f 100644
--- a/qa/tasks/s3readwrite.py
+++ b/qa/tasks/s3readwrite.py
@@ -103,7 +103,7 @@ def create_users(ctx, config):
         rwconf['files'].setdefault('num', 10)
         rwconf['files'].setdefault('size', 2000)
         rwconf['files'].setdefault('stddev', 500)
-        for section, user in users.iteritems():
+        for section, user in users.items():
             _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
             log.debug('creating user {user} on {client}'.format(user=s3tests_conf[section]['user_id'],
                                                                 client=client))
@@ -139,7 +139,7 @@ def create_users(ctx, config):
         yield
     finally:
         for client in config['clients']:
-            for section, user in users.iteritems():
+            for section, user in users.items():
                 #uid = '{user}.{client}'.format(user=user, client=client)
                 real_uid, delete_this_user = cached_client_user_names[client][section+user]
                 if delete_this_user:
@@ -166,7 +166,7 @@ def configure(ctx, config):
     """
    assert isinstance(config, dict)
     log.info('Configuring s3-readwrite-tests...')
-    for client, properties in config['clients'].iteritems():
+    for client, properties in config['clients'].items():
         s3tests_conf = config['s3tests_conf'][client]
         if properties is not None and 'rgw_server' in properties:
             host = None
@@ -215,7 +215,7 @@ def run_tests(ctx, config):
     """
     assert isinstance(config, dict)
     testdir = teuthology.get_testdir(ctx)
-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         (remote,) = ctx.cluster.only(client).remotes.keys()
         conf = teuthology.get_file(remote, '{tdir}/archive/s3readwrite.{client}.config.yaml'.format(tdir=testdir, client=client))
         args = [
diff --git a/qa/tasks/s3roundtrip.py b/qa/tasks/s3roundtrip.py
index 8bf770c86530..9b27b4ae86d6 100644
--- a/qa/tasks/s3roundtrip.py
+++ b/qa/tasks/s3roundtrip.py
@@ -146,7 +146,7 @@ def configure(ctx, config):
     assert isinstance(config, dict)
     log.info('Configuring s3-roundtrip-tests...')
     testdir = teuthology.get_testdir(ctx)
-    for client, properties in config['clients'].iteritems():
+    for client, properties in config['clients'].items():
         s3tests_conf = config['s3tests_conf'][client]
         if properties is not None and 'rgw_server' in properties:
             host = None
@@ -194,7 +194,7 @@ def run_tests(ctx, config):
     """
     assert isinstance(config, dict)
     testdir = teuthology.get_testdir(ctx)
-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         (remote,) = ctx.cluster.only(client).remotes.keys()
         conf = teuthology.get_file(remote, '{tdir}/archive/s3roundtrip.{client}.config.yaml'.format(tdir=testdir, client=client))
         args = [
diff --git a/qa/tasks/s3tests.py b/qa/tasks/s3tests.py
index 37ef852bbcd2..1b88ec74567e 100644
--- a/qa/tasks/s3tests.py
+++ b/qa/tasks/s3tests.py
@@ -103,7 +103,7 @@ def create_users(ctx, config):
         s3tests_conf = config['s3tests_conf'][client]
         s3tests_conf.setdefault('fixtures', {})
         s3tests_conf['fixtures'].setdefault('bucket prefix', 'test-' + client + '-{random}-')
-        for section, user in users.iteritems():
+        for section, user in users.items():
             _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
             log.debug('Creating user {user} on {host}'.format(user=s3tests_conf[section]['user_id'], host=client))
             cluster_name, daemon_type, client_id = teuthology.split_role(client)
@@ -173,7 +173,7 @@ def configure(ctx, config):
     assert isinstance(config, dict)
     log.info('Configuring s3-tests...')
     testdir = teuthology.get_testdir(ctx)
-    for client, properties in config['clients'].iteritems():
+    for client, properties in config['clients'].items():
         s3tests_conf = config['s3tests_conf'][client]
         if properties is not None and 'rgw_server' in properties:
             host = None
@@ -226,7 +226,7 @@ def configure(ctx, config):

     finally:
         log.info('Cleaning up boto...')
-        for client, properties in config['clients'].iteritems():
+        for client, properties in config['clients'].items():
             (remote,) = ctx.cluster.only(client).remotes.keys()
             remote.run(
                 args=[
@@ -247,7 +247,7 @@ def run_tests(ctx, config):
     testdir = teuthology.get_testdir(ctx)
     # civetweb > 1.8 && beast parsers are strict on rfc2616
     attrs = ["!fails_on_rgw", "!lifecycle_expiration", "!fails_strict_rfc2616"]
-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         (remote,) = ctx.cluster.only(client).remotes.keys()
         args = [
             'S3TEST_CONF={tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client),
@@ -295,7 +295,7 @@ def scan_for_leaked_encryption_keys(ctx, config):
     log.debug('Scanning radosgw logs for leaked encryption keys...')
     procs = list()

-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         if not client_config.get('scan_for_encryption_keys', True):
             continue
         cluster_name, daemon_type, client_id = teuthology.split_role(client)
diff --git a/qa/tasks/swift.py b/qa/tasks/swift.py
index cc04e3495f47..83a9d4c509b9 100644
--- a/qa/tasks/swift.py
+++ b/qa/tasks/swift.py
@@ -71,9 +71,9 @@ def create_users(ctx, config):
     log.info('Creating rgw users...')
     testdir = teuthology.get_testdir(ctx)
     users = {'': 'foo', '2': 'bar'}
-    for client, testswift_conf in config.iteritems():
+    for client, testswift_conf in config.items():
         cluster_name, daemon_type, client_id = teuthology.split_role(client)
-        for suffix, user in users.iteritems():
+        for suffix, user in users.items():
             _config_user(testswift_conf, '{user}.{client}'.format(user=user, client=client), user, suffix)
             ctx.cluster.only(client).run(
                 args=[
@@ -148,7 +148,7 @@ def run_tests(ctx, config):
     """
     assert isinstance(config, dict)
     testdir = teuthology.get_testdir(ctx)
-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         args = [
             'SWIFT_TEST_CONFIG_FILE={tdir}/archive/testswift.{client}.conf'.format(tdir=testdir, client=client),
             '{tdir}/swift/virtualenv/bin/nosetests'.format(tdir=testdir),
@@ -217,7 +217,7 @@ def task(ctx, config):
     testswift_conf = {}
     clients = []
-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         # http://tracker.ceph.com/issues/40304 can't bootstrap on rhel 7.6+
         (remote,) = ctx.cluster.only(client).remotes.keys()
         if remote.os.name == 'rhel' and LooseVersion(remote.os.version) >= \
                 LooseVersion('7.6'):
diff --git a/qa/tasks/systemd.py b/qa/tasks/systemd.py
index 04af133a8f58..745f503c903f 100644
--- a/qa/tasks/systemd.py
+++ b/qa/tasks/systemd.py
@@ -26,7 +26,7 @@ def task(ctx, config):
     Test ceph systemd services can start, stop and restart and
     check for any failed services and report back errors
     """
-    for remote, roles in ctx.cluster.remotes.iteritems():
+    for remote, roles in ctx.cluster.remotes.items():
         remote.run(args=['sudo', 'ps', '-eaf', run.Raw('|'),
                          'grep', 'ceph'])
         units = remote.sh('sudo systemctl list-units | grep ceph',
diff --git a/qa/tasks/tgt.py b/qa/tasks/tgt.py
index c2b322e08297..a0758f472de5 100644
--- a/qa/tasks/tgt.py
+++ b/qa/tasks/tgt.py
@@ -21,7 +21,7 @@ def start_tgt_remotes(ctx, start_tgtd):
     """
     remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
     tgtd_list = []
-    for rem, roles in remotes.iteritems():
+    for rem, roles in remotes.items():
         for _id in roles:
             if _id in start_tgtd:
                 if not rem in tgtd_list:
diff --git a/qa/tasks/util/rados.py b/qa/tasks/util/rados.py
index a83f9e19082d..a0c54ce4eadc 100644
--- a/qa/tasks/util/rados.py
+++ b/qa/tasks/util/rados.py
@@ -84,4 +84,4 @@ def cmd_erasure_code_profile(profile_name, profile):
     return [
         'osd', 'erasure-code-profile', 'set',
         profile_name
-        ] + [ str(key) + '=' + str(value) for key, value in profile.iteritems() ]
+        ] + [ str(key) + '=' + str(value) for key, value in profile.items() ]
diff --git a/qa/tasks/util/workunit.py b/qa/tasks/util/workunit.py
index 91b0e0f8e19f..1f5623af8a1c 100644
--- a/qa/tasks/util/workunit.py
+++ b/qa/tasks/util/workunit.py
@@ -68,7 +68,7 @@ def get_refspec_after_overrides(config, overrides):
         overrides.pop(i, None)
     misc.deep_merge(config, overrides)

-    for spec, cls in refspecs.iteritems():
+    for spec, cls in refspecs.items():
         refspec = config.get(spec)
         if refspec:
             refspec = cls(refspec)
diff --git a/qa/tasks/workunit.py b/qa/tasks/workunit.py
index 15e8caeb4a00..5a7670388581 100644
--- a/qa/tasks/workunit.py
+++ b/qa/tasks/workunit.py
@@ -117,7 +117,7 @@ def task(ctx, config):
     log.info("timeout={}".format(timeout))
     log.info("cleanup={}".format(cleanup))
     with parallel() as p:
-        for role, tests in clients.iteritems():
+        for role, tests in clients.items():
             if role != "all":
                 p.spawn(_run_tests, ctx, refspec, role, tests,
                         config.get('env'),
@@ -389,7 +389,7 @@ def _run_tests(ctx, refspec, role, tests, env, basedir,
             run.Raw('CEPH_ROOT={dir}'.format(dir=clonedir)),
         ]
     if env is not None:
-        for var, val in env.iteritems():
+        for var, val in env.items():
             quoted_val = pipes.quote(val)
             env_arg = '{var}={val}'.format(var=var, val=quoted_val)
             args.append(run.Raw(env_arg))
diff --git a/qa/workunits/mon/caps.py b/qa/workunits/mon/caps.py
index 1eb0cb658d67..2634f77690e4 100644
--- a/qa/workunits/mon/caps.py
+++ b/qa/workunits/mon/caps.py
@@ -271,7 +271,7 @@ def test_all():
       ]
   }

-  for (module,cmd_lst) in cmds.iteritems():
+  for (module,cmd_lst) in cmds.items():
     k = keyring_base + '.' + module

     for cmd in cmd_lst:
@@ -283,8 +283,8 @@ def test_all():
       print('generating keyring for {m}/{c}'.format(m=module,c=cmd_cmd))

       # gen keyring
-      for (good_or_bad,kind_map) in perms.iteritems():
-        for (kind,lst) in kind_map.iteritems():
+      for (good_or_bad,kind_map) in perms.items():
+        for (kind,lst) in kind_map.items():
           for (perm, cap) in lst:
             cap_formatted = cap.format(
                 s=module,
@@ -308,7 +308,7 @@ def test_all():

   # test
   for good_bad in perms.keys():
-    for (kind,lst) in perms[good_bad].iteritems():
+    for (kind,lst) in perms[good_bad].items():
       for (perm,_) in lst:
         cname = 'client.{gb}-{k}-{p}'.format(gb=good_bad,k=kind,p=perm)
diff --git a/src/test/rgw/rgw_multi/zone_cloud.py b/src/test/rgw/rgw_multi/zone_cloud.py
index 4e5eefb8c11b..b9ff43ca9648 100644
--- a/src/test/rgw/rgw_multi/zone_cloud.py
+++ b/src/test/rgw/rgw_multi/zone_cloud.py
@@ -116,7 +116,7 @@ class CloudKey:
             self.etag = '"' + self.etag + '"'

         new_meta = {}
-        for meta_key, meta_val in k.metadata.iteritems():
+        for meta_key, meta_val in k.metadata.items():
             if not meta_key.startswith('rgwx-'):
                 new_meta[meta_key] = meta_val
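
One caveat for anyone applying the same mechanical swap elsewhere: on Python 3,
dict.items() returns a live view, so the dictionary must not gain or lose keys
while such a loop runs (every hunk in this patch only reads during iteration,
so the plain s/iteritems()/items()/ substitution is safe here). A minimal
py2/py3-safe sketch for the mutating case, with a made-up example dict:

    d = {'osd.0': 1, 'osd.1': 0, 'osd.2': 2}  # hypothetical example
    # list() snapshots the items, so deleting keys inside the loop is safe:
    for key, value in list(d.items()):
        if value == 0:
            del d[key]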