From 50b1823faca0e73fe81a3c2aca12dae3e250abb6 Mon Sep 17 00:00:00 2001
From: Kyr Shatskyy
Date: Wed, 9 Oct 2019 14:36:58 +0200
Subject: [PATCH] qa: get rid of iteritems for python3 compatibility

Fixes: https://tracker.ceph.com/issues/42267
Signed-off-by: Kyr Shatskyy
---
 qa/tasks/admin_socket.py              |  4 ++--
 qa/tasks/autotest.py                  |  2 +-
 qa/tasks/barbican.py                  |  2 +-
 qa/tasks/blktrace.py                  |  4 ++--
 qa/tasks/ceph_client.py               |  2 +-
 qa/tasks/ceph_deploy.py               | 28 +++++++++++++--------------
 qa/tasks/ceph_manager.py              |  4 ++--
 qa/tasks/ceph_objectstore_tool.py     |  4 ++--
 qa/tasks/ceph_test_case.py            |  2 +-
 qa/tasks/cephfs/test_forward_scrub.py |  2 +-
 qa/tasks/cephfs/test_sessionmap.py    |  2 +-
 qa/tasks/cephfs/test_snapshots.py     |  2 +-
 qa/tasks/cram.py                      |  4 ++--
 qa/tasks/dnsmasq.py                   |  8 ++++----
 qa/tasks/exec_on_cleanup.py           |  2 +-
 qa/tasks/keystone.py                  |  2 +-
 qa/tasks/qemu.py                      | 14 +++++++-------
 qa/tasks/rados.py                     |  2 +-
 qa/tasks/ragweed.py                   | 12 ++++++------
 qa/tasks/rbd.py                       |  2 +-
 qa/tasks/rbd_fio.py                   |  2 +-
 qa/tasks/rebuild_mondb.py             | 10 +++++-----
 qa/tasks/restart.py                   |  4 ++--
 qa/tasks/rgw.py                       |  2 +-
 qa/tasks/rgw_logsocket.py             |  6 +++---
 qa/tasks/rgw_multisite.py             |  2 +-
 qa/tasks/s3readwrite.py               |  8 ++++----
 qa/tasks/s3roundtrip.py               |  6 +++---
 qa/tasks/s3tests.py                   | 12 ++++++------
 qa/tasks/s3tests_java.py              |  2 +-
 qa/tasks/swift.py                     | 10 +++++-----
 qa/tasks/systemd.py                   |  2 +-
 qa/tasks/tgt.py                       |  2 +-
 qa/tasks/util/rados.py                |  2 +-
 qa/tasks/util/workunit.py             |  2 +-
 qa/tasks/vault.py                     |  2 +-
 qa/tasks/workunit.py                  |  4 ++--
 qa/workunits/mon/caps.py              |  8 ++++----
 src/test/rgw/rgw_multi/zone_cloud.py  |  2 +-
 39 files changed, 96 insertions(+), 96 deletions(-)

diff --git a/qa/tasks/admin_socket.py b/qa/tasks/admin_socket.py
index ed134e7d2f6..54fe1a1ba5d 100644
--- a/qa/tasks/admin_socket.py
+++ b/qa/tasks/admin_socket.py
@@ -68,7 +68,7 @@ def task(ctx, config):
     teuthology.replace_all_with_clients(ctx.cluster, config)
 
     with parallel() as ptask:
-        for client, tests in config.iteritems():
+        for client, tests in config.items():
             ptask.spawn(_run_tests, ctx, client, tests)
 
 
@@ -145,7 +145,7 @@ def _run_tests(ctx, client, tests):
         ],
     )
 
-    for command, config in tests.iteritems():
+    for command, config in tests.items():
         if config is None:
             config = {}
         teuthology.deep_merge(config, overrides)
diff --git a/qa/tasks/autotest.py b/qa/tasks/autotest.py
index 1735f677380..743b6fac119 100644
--- a/qa/tasks/autotest.py
+++ b/qa/tasks/autotest.py
@@ -68,7 +68,7 @@ def task(ctx, config):
     )
 
     with parallel() as p:
-        for role, tests in config.iteritems():
+        for role, tests in config.items():
            (remote,) = ctx.cluster.only(role).remotes.keys()
            p.spawn(_run_tests, testdir, remote, role, tests)
diff --git a/qa/tasks/barbican.py b/qa/tasks/barbican.py
index 0b54cbddc70..d33b4302b69 100644
--- a/qa/tasks/barbican.py
+++ b/qa/tasks/barbican.py
@@ -105,7 +105,7 @@ def assign_ports(ctx, config, initial_port):
     """
     port = initial_port
     role_endpoints = {}
-    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
+    for remote, roles_for_host in ctx.cluster.remotes.items():
         for role in roles_for_host:
             if role in config:
                 role_endpoints[role] = (remote.name.split('@')[1], port)
diff --git a/qa/tasks/blktrace.py b/qa/tasks/blktrace.py
index 96aaf505ada..10b1da0c0bb 100644
--- a/qa/tasks/blktrace.py
+++ b/qa/tasks/blktrace.py
@@ -20,7 +20,7 @@ def setup(ctx, config):
     osds = ctx.cluster.only(teuthology.is_type('osd', config['cluster']))
     log_dir = '{tdir}/archive/performance/blktrace'.format(tdir=teuthology.get_testdir(ctx))
 
-    for remote, roles_for_host in osds.remotes.iteritems():
+    for remote, roles_for_host in osds.remotes.items():
         log.info('Creating %s on %s' % (log_dir, remote.name))
         remote.run(
             args=['mkdir', '-p', '-m0755', '--', log_dir],
@@ -38,7 +38,7 @@ def execute(ctx, config):
     log_dir = '{tdir}/archive/performance/blktrace'.format(tdir=testdir)
     osds = ctx.cluster.only(teuthology.is_type('osd'))
 
-    for remote, roles_for_host in osds.remotes.iteritems():
+    for remote, roles_for_host in osds.remotes.items():
         roles_to_devs = ctx.disk_config.remote_to_roles_to_dev[remote]
         for role in teuthology.cluster_roles_of_type(roles_for_host, 'osd',
                                                      config['cluster']):
diff --git a/qa/tasks/ceph_client.py b/qa/tasks/ceph_client.py
index 3ca90b7d198..74e818f932b 100644
--- a/qa/tasks/ceph_client.py
+++ b/qa/tasks/ceph_client.py
@@ -16,7 +16,7 @@ def create_keyring(ctx, cluster_name):
     clients = ctx.cluster.only(teuthology.is_type('client', cluster_name))
     testdir = teuthology.get_testdir(ctx)
     coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
-    for remote, roles_for_host in clients.remotes.iteritems():
+    for remote, roles_for_host in clients.remotes.items():
         for role in teuthology.cluster_roles_of_type(roles_for_host, 'client',
                                                      cluster_name):
             name = teuthology.ceph_role(role)
diff --git a/qa/tasks/ceph_deploy.py b/qa/tasks/ceph_deploy.py
index 09b54902f7a..7fd4989e9d6 100644
--- a/qa/tasks/ceph_deploy.py
+++ b/qa/tasks/ceph_deploy.py
@@ -147,7 +147,7 @@ def get_nodes_using_role(ctx, target_role):
     # Prepare a modified version of cluster.remotes with ceph-deploy-ized names
     modified_remotes = {}
     ceph_deploy_mapped = dict()
-    for _remote, roles_for_host in ctx.cluster.remotes.iteritems():
+    for _remote, roles_for_host in ctx.cluster.remotes.items():
         modified_remotes[_remote] = []
         for svc_id in roles_for_host:
             if svc_id.startswith("{0}.".format(target_role)):
@@ -179,7 +179,7 @@ def get_nodes_using_role(ctx, target_role):
 def get_dev_for_osd(ctx, config):
     """Get a list of all osd device names."""
     osd_devs = []
-    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
+    for remote, roles_for_host in ctx.cluster.remotes.items():
         host = remote.name.split('@')[-1]
         shortname = host.split('.')[0]
         devs = teuthology.get_scratch_devices(remote)
@@ -207,7 +207,7 @@ def get_dev_for_osd(ctx, config):
 def get_all_nodes(ctx, config):
     """Return a string of node names separated by blanks"""
     nodelist = []
-    for t, k in ctx.config['targets'].iteritems():
+    for t, k in ctx.config['targets'].items():
         host = t.split('@')[-1]
         simple_host = host.split('.')[0]
         nodelist.append(simple_host)
@@ -312,7 +312,7 @@ def build_ceph_cluster(ctx, config):
     ceph_branch = None
     if config.get('branch') is not None:
         cbranch = config.get('branch')
-        for var, val in cbranch.iteritems():
+        for var, val in cbranch.items():
             ceph_branch = '--{var}={val}'.format(var=var, val=val)
     all_nodes = get_all_nodes(ctx, config)
     mds_nodes = get_nodes_using_role(ctx, 'mds')
@@ -347,11 +347,11 @@ def build_ceph_cluster(ctx, config):
 
         if config.get('conf') is not None:
             confp = config.get('conf')
-            for section, keys in confp.iteritems():
+            for section, keys in confp.items():
                 lines = '[{section}]\n'.format(section=section)
                 teuthology.append_lines_to_file(ceph_admin, conf_path, lines,
                                                 sudo=True)
-                for key, value in keys.iteritems():
+                for key, value in keys.items():
                     log.info("[%s] %s = %s" % (section, key, value))
                     lines = '{key} = {value}\n'.format(key=key, value=value)
                     teuthology.append_lines_to_file(
@@ -435,7 +435,7 @@ def build_ceph_cluster(ctx, config):
             )
 
         clients = ctx.cluster.only(teuthology.is_type('client'))
-        for remot, roles_for_host in clients.remotes.iteritems():
+        for remot, roles_for_host in clients.remotes.items():
             for id_ in teuthology.roles_of_type(roles_for_host, 'client'):
                 client_keyring = \
                     '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
@@ -529,7 +529,7 @@ def build_ceph_cluster(ctx, config):
                 path = os.path.join(ctx.archive, 'data')
                 os.makedirs(path)
                 mons = ctx.cluster.only(teuthology.is_type('mon'))
-                for remote, roles in mons.remotes.iteritems():
+                for remote, roles in mons.remotes.items():
                     for role in roles:
                         if role.startswith('mon.'):
                             teuthology.pull_directory_tarball(
@@ -617,7 +617,7 @@ def cli_test(ctx, config):
         branch = ctx.config.get('branch')
         test_branch = ' --dev={branch} '.format(branch=branch)
     mons = ctx.cluster.only(teuthology.is_type('mon'))
-    for node, role in mons.remotes.iteritems():
+    for node, role in mons.remotes.items():
         admin = node
         admin.run(args=['mkdir', conf_dir], check_status=False)
         nodename = admin.shortname
@@ -627,7 +627,7 @@ def cli_test(ctx, config):
     log.info('system type is %s', system_type)
     osds = ctx.cluster.only(teuthology.is_type('osd'))
 
-    for remote, roles in osds.remotes.iteritems():
+    for remote, roles in osds.remotes.items():
         devs = teuthology.get_scratch_devices(remote)
         log.info("roles %s", roles)
         if (len(devs) < 3):
@@ -641,11 +641,11 @@ def cli_test(ctx, config):
     execute_cdeploy(admin, new_cmd, path)
     if config.get('conf') is not None:
         confp = config.get('conf')
-        for section, keys in confp.iteritems():
+        for section, keys in confp.items():
             lines = '[{section}]\n'.format(section=section)
             teuthology.append_lines_to_file(admin, conf_path, lines,
                                             sudo=True)
-            for key, value in keys.iteritems():
+            for key, value in keys.items():
                 log.info("[%s] %s = %s" % (section, key, value))
                 lines = '{key} = {value}\n'.format(key=key, value=value)
                 teuthology.append_lines_to_file(admin, conf_path, lines,
@@ -794,7 +794,7 @@ def upgrade(ctx, config):
         if mapped_role.get(role):
             role = mapped_role.get(role)
         remotes_and_roles = ctx.cluster.only(role).remotes
-        for remote, roles in remotes_and_roles.iteritems():
+        for remote, roles in remotes_and_roles.items():
             nodename = remote.shortname
             cmd = cmd + ' ' + nodename
             log.info("Upgrading ceph on %s", nodename)
@@ -818,7 +818,7 @@ def upgrade(ctx, config):
     # write the correct mgr key to disk
     if config.get('setup-mgr-node', None):
         mons = ctx.cluster.only(teuthology.is_type('mon'))
-        for remote, roles in mons.remotes.iteritems():
+        for remote, roles in mons.remotes.items():
             remote.run(
                 args=[
                     run.Raw('sudo ceph auth get client.bootstrap-mgr'),
diff --git a/qa/tasks/ceph_manager.py b/qa/tasks/ceph_manager.py
index a635af3de50..16decd0088d 100644
--- a/qa/tasks/ceph_manager.py
+++ b/qa/tasks/ceph_manager.py
@@ -1312,7 +1312,7 @@ class CephManager:
             return
         if no_wait is None:
             no_wait = []
-        for osd, need in seq.iteritems():
+        for osd, need in seq.items():
             if osd in no_wait:
                 continue
             got = 0
@@ -1618,7 +1618,7 @@ class CephManager:
         :param osdnum: osd number
         :param argdict: dictionary containing values to set.
""" - for k, v in argdict.iteritems(): + for k, v in argdict.items(): self.wait_run_admin_socket( 'osd', osdnum, ['config', 'set', str(k), str(v)]) diff --git a/qa/tasks/ceph_objectstore_tool.py b/qa/tasks/ceph_objectstore_tool.py index 72e367f65d1..4eab3de4f6d 100644 --- a/qa/tasks/ceph_objectstore_tool.py +++ b/qa/tasks/ceph_objectstore_tool.py @@ -327,7 +327,7 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False): if osdid not in pgs: continue - for pg, JSON in db[basename]["pg2json"].iteritems(): + for pg, JSON in db[basename]["pg2json"].items(): if pg in pgs[osdid]: cmd = ((prefix + "--pgid {pg}"). format(id=osdid, pg=pg).split()) @@ -419,7 +419,7 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False): if osdid not in pgs: continue - for pg, JSON in db[basename]["pg2json"].iteritems(): + for pg, JSON in db[basename]["pg2json"].items(): if pg in pgs[osdid]: cmd = ((prefix + "--pgid {pg}"). format(id=osdid, pg=pg).split()) diff --git a/qa/tasks/ceph_test_case.py b/qa/tasks/ceph_test_case.py index dd8f515a2ff..6b5621f10d3 100644 --- a/qa/tasks/ceph_test_case.py +++ b/qa/tasks/ceph_test_case.py @@ -104,7 +104,7 @@ class CephTestCase(unittest.TestCase): def seen_health_warning(): health = self.ceph_cluster.mon_manager.get_mon_health() codes = [s for s in health['checks']] - summary_strings = [s[1]['summary']['message'] for s in health['checks'].iteritems()] + summary_strings = [s[1]['summary']['message'] for s in health['checks'].items()] if len(summary_strings) == 0: log.debug("Not expected number of summary strings ({0})".format(summary_strings)) return False diff --git a/qa/tasks/cephfs/test_forward_scrub.py b/qa/tasks/cephfs/test_forward_scrub.py index b0f85e3213f..b4f7f1385d9 100644 --- a/qa/tasks/cephfs/test_forward_scrub.py +++ b/qa/tasks/cephfs/test_forward_scrub.py @@ -248,7 +248,7 @@ class TestForwardScrub(CephFSTestCase): "--inode={0}".format(inos["./file3_sixmegs"]), "summary"], 0) # Revert to old inotable. 
-        for key, value in inotable_copy.iteritems():
+        for key, value in inotable_copy.items():
            self.fs.put_metadata_object_raw(key, value)
 
         self.mds_cluster.mds_restart()
diff --git a/qa/tasks/cephfs/test_sessionmap.py b/qa/tasks/cephfs/test_sessionmap.py
index a8d40dc6f70..db3d5dfe6cd 100644
--- a/qa/tasks/cephfs/test_sessionmap.py
+++ b/qa/tasks/cephfs/test_sessionmap.py
@@ -32,7 +32,7 @@ class TestSessionMap(CephFSTestCase):
     def _get_connection_count(self, status=None):
         perf = self.fs.rank_asok(["perf", "dump"], status=status)
         conn = 0
-        for module, dump in perf.iteritems():
+        for module, dump in perf.items():
             if "AsyncMessenger::Worker" in module:
                 conn += dump['msgr_active_connections']
         return conn
diff --git a/qa/tasks/cephfs/test_snapshots.py b/qa/tasks/cephfs/test_snapshots.py
index 5f18d32cbc5..cf6ef7206ed 100644
--- a/qa/tasks/cephfs/test_snapshots.py
+++ b/qa/tasks/cephfs/test_snapshots.py
@@ -240,7 +240,7 @@ class TestSnapshots(CephFSTestCase):
         def _check_snapclient_cache(snaps_dump, cache_dump=None, rank=0):
             if cache_dump is None:
                 cache_dump = self._get_snapclient_dump(rank=rank)
-            for key, value in cache_dump.iteritems():
+            for key, value in cache_dump.items():
                 if value != snaps_dump[key]:
                     return False
             return True;
diff --git a/qa/tasks/cram.py b/qa/tasks/cram.py
index 8f2f28cc64d..8d3886159ee 100644
--- a/qa/tasks/cram.py
+++ b/qa/tasks/cram.py
@@ -60,7 +60,7 @@ def task(ctx, config):
     log.info('Pulling tests from %s ref %s', git_url, refspec)
 
     try:
-        for client, tests in clients.iteritems():
+        for client, tests in clients.items():
             (remote,) = ctx.cluster.only(client).remotes.keys()
             client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
             remote.run(
@@ -88,7 +88,7 @@ def task(ctx, config):
             for role in clients.keys():
                 p.spawn(_run_tests, ctx, role)
     finally:
-        for client, tests in clients.iteritems():
+        for client, tests in clients.items():
             (remote,) = ctx.cluster.only(client).remotes.keys()
             client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
             test_files = set([test.rsplit('/', 1)[1] for test in tests])
diff --git a/qa/tasks/dnsmasq.py b/qa/tasks/dnsmasq.py
index 764d742fa71..2bf3feaf083 100644
--- a/qa/tasks/dnsmasq.py
+++ b/qa/tasks/dnsmasq.py
@@ -67,7 +67,7 @@ def setup_dnsmasq(remote, testdir, cnames):
     # add address entries for each cname
     dnsmasq = "server=8.8.8.8\nserver=8.8.4.4\n"
     address_template = "address=/{cname}/{ip_address}\n"
-    for cname, ip_address in cnames.iteritems():
+    for cname, ip_address in cnames.items():
         dnsmasq += address_template.format(cname=cname, ip_address=ip_address)
 
     # write to temporary dnsmasq file
@@ -129,7 +129,7 @@ def task(ctx, config):
 
     # multiple roles may map to the same remote, so collect names by remote
     remote_names = {}
-    for role, cnames in config.iteritems():
+    for role, cnames in config.items():
         remote = get_remote_for_role(ctx, role)
         if remote is None:
             raise ConfigError('no remote for role %s' % role)
@@ -144,7 +144,7 @@ def task(ctx, config):
                 names[cname] = remote.ip_address
         elif isinstance(cnames, dict):
             # when given a dict, look up the remote ip for each
-            for cname, client in cnames.iteritems():
+            for cname, client in cnames.items():
                 r = get_remote_for_role(ctx, client)
                 if r is None:
                     raise ConfigError('no remote for role %s' % client)
@@ -160,7 +160,7 @@ def task(ctx, config):
 
     # run subtasks for each unique remote
     subtasks = []
-    for remote, cnames in remote_names.iteritems():
+    for remote, cnames in remote_names.items():
         subtasks.extend([ lambda r=remote: install_dnsmasq(r) ])
         subtasks.extend([ lambda r=remote: backup_resolv(r, resolv_bak) ])
         subtasks.extend([ lambda r=remote: replace_resolv(r, resolv_tmp) ])
diff --git a/qa/tasks/exec_on_cleanup.py b/qa/tasks/exec_on_cleanup.py
index 6e40e4044b0..0aecf78e5b3 100644
--- a/qa/tasks/exec_on_cleanup.py
+++ b/qa/tasks/exec_on_cleanup.py
@@ -46,7 +46,7 @@ def task(ctx, config):
         roles = teuthology.all_roles(ctx.cluster)
         config = dict((id_, a) for id_ in roles)
 
-    for role, ls in config.iteritems():
+    for role, ls in config.items():
         (remote,) = ctx.cluster.only(role).remotes.keys()
         log.info('Running commands on role %s host %s', role, remote.name)
         for c in ls:
diff --git a/qa/tasks/keystone.py b/qa/tasks/keystone.py
index 9dbf49dc755..5961165eb0d 100644
--- a/qa/tasks/keystone.py
+++ b/qa/tasks/keystone.py
@@ -354,7 +354,7 @@ def assign_ports(ctx, config, initial_port):
     """
     port = initial_port
     role_endpoints = {}
-    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
+    for remote, roles_for_host in ctx.cluster.remotes.items():
         for role in roles_for_host:
             if role in config:
                 role_endpoints[role] = (remote.name.split('@')[1], port)
diff --git a/qa/tasks/qemu.py b/qa/tasks/qemu.py
index 050eef49daa..4a06fde44ec 100644
--- a/qa/tasks/qemu.py
+++ b/qa/tasks/qemu.py
@@ -26,7 +26,7 @@ DEFAULT_CPUS = 1
 DEFAULT_MEM = 4096 # in megabytes
 
 def create_images(ctx, config, managers):
-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         disks = client_config.get('disks', DEFAULT_NUM_DISKS)
         if not isinstance(disks, list):
             disks = [{} for n in range(int(disks))]
@@ -48,7 +48,7 @@ def create_images(ctx, config, managers):
         )
 
 def create_clones(ctx, config, managers):
-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         clone = client_config.get('clone', False)
         if clone:
             num_disks = client_config.get('disks', DEFAULT_NUM_DISKS)
@@ -74,7 +74,7 @@ def create_dirs(ctx, config):
     Handle directory creation and cleanup
     """
     testdir = teuthology.get_testdir(ctx)
-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         assert 'test' in client_config, 'You must specify a test to run'
         (remote,) = ctx.cluster.only(client).remotes.keys()
         remote.run(
@@ -87,7 +87,7 @@ def create_dirs(ctx, config):
     try:
         yield
     finally:
-        for client, client_config in config.iteritems():
+        for client, client_config in config.items():
             assert 'test' in client_config, 'You must specify a test to run'
             (remote,) = ctx.cluster.only(client).remotes.keys()
             remote.run(
@@ -109,7 +109,7 @@ def generate_iso(ctx, config):
     git_url = teuth_config.get_ceph_qa_suite_git_url()
     log.info('Pulling tests from %s ref %s', git_url, refspec)
 
-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         assert 'test' in client_config, 'You must specify a test to run'
         test = client_config['test']
 
@@ -221,7 +221,7 @@ def download_image(ctx, config):
     """Downland base image, remove image file when done"""
     log.info('downloading base image')
    testdir = teuthology.get_testdir(ctx)
-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         (remote,) = ctx.cluster.only(client).remotes.keys()
         base_file = '{tdir}/qemu/base.{client}.qcow2'.format(tdir=testdir, client=client)
         image_url = client_config.get('image_url', DEFAULT_IMAGE_URL)
@@ -344,7 +344,7 @@ def run_qemu(ctx, config):
     """Setup kvm environment and start qemu"""
     procs = []
     testdir = teuthology.get_testdir(ctx)
-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         (remote,) = ctx.cluster.only(client).remotes.keys()
         log_dir = '{tdir}/archive/qemu/{client}'.format(tdir=testdir, client=client)
         remote.run(
diff --git a/qa/tasks/rados.py b/qa/tasks/rados.py
index 5432ddfa02f..e6b56d22beb 100644
--- a/qa/tasks/rados.py
+++ b/qa/tasks/rados.py
@@ -202,7 +202,7 @@ def task(ctx, config):
             weights['append'] = weights['append'] / 2
             weights['append_excl'] = weights['append']
 
-        for op, weight in weights.iteritems():
+        for op, weight in weights.items():
             args.extend([
                 '--op', op, str(weight)
             ])
diff --git a/qa/tasks/ragweed.py b/qa/tasks/ragweed.py
index 1ed1de8af4c..67b16851b7b 100644
--- a/qa/tasks/ragweed.py
+++ b/qa/tasks/ragweed.py
@@ -114,7 +114,7 @@ def create_users(ctx, config, run_stages):
     """
     assert isinstance(config, dict)
 
-    for client, properties in config['config'].iteritems():
+    for client, properties in config['config'].items():
         run_stages[client] = string.split(properties.get('stages', 'prepare,check'), ',')
 
     log.info('Creating rgw users...')
@@ -128,7 +128,7 @@ def create_users(ctx, config, run_stages):
         ragweed_conf = config['ragweed_conf'][client]
         ragweed_conf.setdefault('fixtures', {})
         ragweed_conf['rgw'].setdefault('bucket_prefix', 'test-' + client)
-        for section, user in users.iteritems():
+        for section, user in users.items():
             _config_user(ragweed_conf, section, '{user}.{client}'.format(user=user, client=client))
             log.debug('Creating user {user} on {host}'.format(user=ragweed_conf[section]['user_id'], host=client))
             if user == 'sysuser':
@@ -183,7 +183,7 @@ def configure(ctx, config, run_stages):
     assert isinstance(config, dict)
     log.info('Configuring ragweed...')
     testdir = teuthology.get_testdir(ctx)
-    for client, properties in config['clients'].iteritems():
+    for client, properties in config['clients'].items():
         (remote,) = ctx.cluster.only(client).remotes.keys()
         remote.run(
             args=[
@@ -213,7 +213,7 @@ def configure(ctx, config, run_stages):
 
     log.info('Configuring boto...')
     boto_src = os.path.join(os.path.dirname(__file__), 'boto.cfg.template')
-    for client, properties in config['clients'].iteritems():
+    for client, properties in config['clients'].items():
         with file(boto_src, 'rb') as f:
             (remote,) = ctx.cluster.only(client).remotes.keys()
             conf = f.read().format(
@@ -230,7 +230,7 @@ def configure(ctx, config, run_stages):
 
     finally:
         log.info('Cleaning up boto...')
-        for client, properties in config['clients'].iteritems():
+        for client, properties in config['clients'].items():
            (remote,) = ctx.cluster.only(client).remotes.keys()
            remote.run(
                args=[
@@ -250,7 +250,7 @@ def run_tests(ctx, config, run_stages):
     assert isinstance(config, dict)
     testdir = teuthology.get_testdir(ctx)
     attrs = ["!fails_on_rgw"]
-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         stages = string.join(run_stages[client], ',')
         args = [
             'RAGWEED_CONF={tdir}/archive/ragweed.{client}.conf'.format(tdir=testdir, client=client),
diff --git a/qa/tasks/rbd.py b/qa/tasks/rbd.py
index 193ab1c3961..ce0ea17145d 100644
--- a/qa/tasks/rbd.py
+++ b/qa/tasks/rbd.py
@@ -605,7 +605,7 @@ def task(ctx, config):
     norm_config = teuthology.replace_all_with_clients(ctx.cluster, config)
     if isinstance(norm_config, dict):
         role_images = {}
-        for role, properties in norm_config.iteritems():
+        for role, properties in norm_config.items():
             if properties is None:
                 properties = {}
             role_images[role] = properties.get('image_name')
diff --git a/qa/tasks/rbd_fio.py b/qa/tasks/rbd_fio.py
index 2b520f53fc6..72f20196eac 100644
--- a/qa/tasks/rbd_fio.py
+++ b/qa/tasks/rbd_fio.py
@@ -52,7 +52,7 @@ or
         client_config = config['all']
     clients = ctx.cluster.only(teuthology.is_type('client'))
     rbd_test_dir = teuthology.get_testdir(ctx) + "/rbd_fio_test"
-    for remote,role in clients.remotes.iteritems():
+    for remote,role in clients.remotes.items():
         if 'client_config' in locals():
             with parallel() as p:
                 p.spawn(run_fio, remote, client_config, rbd_test_dir)
diff --git a/qa/tasks/rebuild_mondb.py b/qa/tasks/rebuild_mondb.py
index e68e5c83e9e..7877f22ab3e 100644
--- a/qa/tasks/rebuild_mondb.py
+++ b/qa/tasks/rebuild_mondb.py
@@ -49,7 +49,7 @@ def _push_directory(path, remote, remote_dir):
 def _nuke_mons(manager, mons, mon_id):
     assert mons
     is_mon = teuthology.is_type('mon')
-    for remote, roles in mons.remotes.iteritems():
+    for remote, roles in mons.remotes.items():
         for role in roles:
             if not is_mon(role):
                 continue
@@ -77,7 +77,7 @@ def _rebuild_db(ctx, manager, cluster_name, mon, mon_id, keyring_path):
     is_osd = teuthology.is_type('osd')
     osds = ctx.cluster.only(is_osd)
     assert osds
-    for osd, roles in osds.remotes.iteritems():
+    for osd, roles in osds.remotes.items():
         for role in roles:
             if not is_osd(role):
                 continue
@@ -142,7 +142,7 @@ def _revive_mons(manager, mons, recovered, keyring_path):
     # the initial monmap is in the ceph.conf, so we are good.
     n_mons = 0
     is_mon = teuthology.is_type('mon')
-    for remote, roles in mons.remotes.iteritems():
+    for remote, roles in mons.remotes.items():
         for role in roles:
             if not is_mon(role):
                 continue
@@ -169,7 +169,7 @@ def _revive_mons(manager, mons, recovered, keyring_path):
 def _revive_mgrs(ctx, manager):
     is_mgr = teuthology.is_type('mgr')
     mgrs = ctx.cluster.only(is_mgr)
-    for _, roles in mgrs.remotes.iteritems():
+    for _, roles in mgrs.remotes.items():
         for role in roles:
             if not is_mgr(role):
                 continue
@@ -181,7 +181,7 @@ def _revive_mgrs(ctx, manager):
 def _revive_osds(ctx, manager):
     is_osd = teuthology.is_type('osd')
     osds = ctx.cluster.only(is_osd)
-    for _, roles in osds.remotes.iteritems():
+    for _, roles in osds.remotes.items():
         for role in roles:
             if not is_osd(role):
                 continue
diff --git a/qa/tasks/restart.py b/qa/tasks/restart.py
index fc38af1d7ab..52b685c9e36 100644
--- a/qa/tasks/restart.py
+++ b/qa/tasks/restart.py
@@ -96,7 +96,7 @@ def task(ctx, config):
 
     try:
         assert 'exec' in config, "config requires exec key with <role>: <tests> entries"
-        for role, task in config['exec'].iteritems():
+        for role, task in config['exec'].items():
             log.info('restart for role {r}'.format(r=role))
             (remote,) = ctx.cluster.only(role).remotes.keys()
             srcdir, restarts = get_tests(ctx, config, role, remote, testdir)
@@ -113,7 +113,7 @@ def task(ctx, config):
                 ]
                 env = config.get('env')
                 if env is not None:
-                    for var, val in env.iteritems():
+                    for var, val in env.items():
                         quoted_val = pipes.quote(val)
                         env_arg = '{var}={val}'.format(var=var, val=quoted_val)
                         args.append(run.Raw(env_arg))
diff --git a/qa/tasks/rgw.py b/qa/tasks/rgw.py
index b3a1c3164cc..64655cd3f76 100644
--- a/qa/tasks/rgw.py
+++ b/qa/tasks/rgw.py
@@ -219,7 +219,7 @@ def start_rgw(ctx, config, clients):
 
 def assign_endpoints(ctx, config, default_cert):
     role_endpoints = {}
-    for role, client_config in config.iteritems():
+    for role, client_config in config.items():
         client_config = client_config or {}
         remote = get_remote_for_role(ctx, role)
 
diff --git a/qa/tasks/rgw_logsocket.py b/qa/tasks/rgw_logsocket.py
index 2fb4a0c62fb..db6bbcb2ee0 100644
--- a/qa/tasks/rgw_logsocket.py
+++ b/qa/tasks/rgw_logsocket.py
@@ -47,7 +47,7 @@ def run_tests(ctx, config):
     """
     assert isinstance(config, dict)
     testdir = teuthology.get_testdir(ctx)
-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         client_config['extra_args'] = [
             's3tests.functional.test_s3:test_bucket_list_return_data',
         ]
@@ -70,7 +70,7 @@ def run_tests(ctx, config):
 
     netcat_out = StringIO()
 
-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         ctx.cluster.only(client).run(
             args = [
                 'netcat',
@@ -126,7 +126,7 @@ def task(ctx, config):
 
     overrides = ctx.config.get('overrides', {})
     # merge each client section, not the top level.
-    for (client, cconf) in config.iteritems():
+    for (client, cconf) in config.items():
         teuthology.deep_merge(cconf, overrides.get('rgw-logsocket', {}))
 
     log.debug('config is %s', config)
diff --git a/qa/tasks/rgw_multisite.py b/qa/tasks/rgw_multisite.py
index 9dea39312de..3c4c4da142e 100644
--- a/qa/tasks/rgw_multisite.py
+++ b/qa/tasks/rgw_multisite.py
@@ -241,7 +241,7 @@ def extract_clusters_and_gateways(ctx, role_endpoints):
     """ create cluster and gateway instances for all of the radosgw roles """
     clusters = {}
     gateways = {}
-    for role, endpoint in role_endpoints.iteritems():
+    for role, endpoint in role_endpoints.items():
         cluster_name, daemon_type, client_id = misc.split_role(role)
         # find or create the cluster by name
         cluster = clusters.get(cluster_name)
diff --git a/qa/tasks/s3readwrite.py b/qa/tasks/s3readwrite.py
index d03882b4282..5e285131218 100644
--- a/qa/tasks/s3readwrite.py
+++ b/qa/tasks/s3readwrite.py
@@ -101,7 +101,7 @@ def create_users(ctx, config):
         rwconf['files'].setdefault('num', 10)
         rwconf['files'].setdefault('size', 2000)
         rwconf['files'].setdefault('stddev', 500)
-        for section, user in users.iteritems():
+        for section, user in users.items():
             _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
             log.debug('creating user {user} on {client}'.format(user=s3tests_conf[section]['user_id'],
                                                                 client=client))
@@ -137,7 +137,7 @@ def create_users(ctx, config):
         yield
     finally:
         for client in config['clients']:
-            for section, user in users.iteritems():
+            for section, user in users.items():
                 #uid = '{user}.{client}'.format(user=user, client=client)
                 real_uid, delete_this_user = cached_client_user_names[client][section+user]
                 if delete_this_user:
@@ -164,7 +164,7 @@ def configure(ctx, config):
     """
     assert isinstance(config, dict)
     log.info('Configuring s3-readwrite-tests...')
-    for client, properties in config['clients'].iteritems():
+    for client, properties in config['clients'].items():
         s3tests_conf = config['s3tests_conf'][client]
         if properties is not None and 'rgw_server' in properties:
             host = None
@@ -215,7 +215,7 @@ def run_tests(ctx, config):
     """
     assert isinstance(config, dict)
     testdir = teuthology.get_testdir(ctx)
-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         (remote,) = ctx.cluster.only(client).remotes.keys()
         conf = teuthology.get_file(remote, '{tdir}/archive/s3readwrite.{client}.config.yaml'.format(tdir=testdir, client=client))
         args = [
diff --git a/qa/tasks/s3roundtrip.py b/qa/tasks/s3roundtrip.py
index d21981335aa..01b56b1102f 100644
--- a/qa/tasks/s3roundtrip.py
+++ b/qa/tasks/s3roundtrip.py
@@ -31,7 +31,7 @@ def download(ctx, config):
     assert isinstance(config, dict)
     log.info('Downloading s3-tests...')
     testdir = teuthology.get_testdir(ctx)
-    for (client, cconf) in config.iteritems():
+    for (client, cconf) in config.items():
         branch = cconf.get('force-branch', None)
         if not branch:
             branch = cconf.get('branch', 'master')
@@ -133,7 +133,7 @@ def configure(ctx, config):
     assert isinstance(config, dict)
     log.info('Configuring s3-roundtrip-tests...')
     testdir = teuthology.get_testdir(ctx)
-    for client, properties in config['clients'].iteritems():
+    for client, properties in config['clients'].items():
         s3tests_conf = config['s3tests_conf'][client]
         if properties is not None and 'rgw_server' in properties:
             host = None
@@ -184,7 +184,7 @@ def run_tests(ctx, config):
     """
     assert isinstance(config, dict)
     testdir = teuthology.get_testdir(ctx)
-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         (remote,) = ctx.cluster.only(client).remotes.keys()
         conf = teuthology.get_file(remote, '{tdir}/archive/s3roundtrip.{client}.config.yaml'.format(tdir=testdir, client=client))
         args = [
diff --git a/qa/tasks/s3tests.py b/qa/tasks/s3tests.py
index 61ddbafa781..eb6dc5a6eeb 100644
--- a/qa/tasks/s3tests.py
+++ b/qa/tasks/s3tests.py
@@ -107,7 +107,7 @@ def create_users(ctx, config):
         s3tests_conf = config['s3tests_conf'][client]
         s3tests_conf.setdefault('fixtures', {})
         s3tests_conf['fixtures'].setdefault('bucket prefix', 'test-' + client + '-{random}-')
-        for section, user in users.iteritems():
+        for section, user in users.items():
             _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
             log.debug('Creating user {user} on {host}'.format(user=s3tests_conf[section]['user_id'], host=client))
             cluster_name, daemon_type, client_id = teuthology.split_role(client)
@@ -177,7 +177,7 @@ def configure(ctx, config):
     assert isinstance(config, dict)
     log.info('Configuring s3-tests...')
     testdir = teuthology.get_testdir(ctx)
-    for client, properties in config['clients'].iteritems():
+    for client, properties in config['clients'].items():
         properties = properties or {}
         s3tests_conf = config['s3tests_conf'][client]
         s3tests_conf['DEFAULT']['calling_format'] = properties.get('calling-format', 'ordinary')
@@ -248,7 +248,7 @@ def configure(ctx, config):
 
     log.info('Configuring boto...')
     boto_src = os.path.join(os.path.dirname(__file__), 'boto.cfg.template')
-    for client, properties in config['clients'].iteritems():
+    for client, properties in config['clients'].items():
         with file(boto_src, 'rb') as f:
             (remote,) = ctx.cluster.only(client).remotes.keys()
             conf = f.read().format(
@@ -265,7 +265,7 @@ def configure(ctx, config):
 
     finally:
         log.info('Cleaning up boto...')
-        for client, properties in config['clients'].iteritems():
+        for client, properties in config['clients'].items():
             (remote,) = ctx.cluster.only(client).remotes.keys()
             remote.run(
                 args=[
@@ -284,7 +284,7 @@ def run_tests(ctx, config):
     """
     assert isinstance(config, dict)
     testdir = teuthology.get_testdir(ctx)
-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         client_config = client_config or {}
         (remote,) = ctx.cluster.only(client).remotes.keys()
         args = [
@@ -337,7 +337,7 @@ def scan_for_leaked_encryption_keys(ctx, config):
 
     log.debug('Scanning radosgw logs for leaked encryption keys...')
     procs = list()
-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         if not client_config.get('scan_for_encryption_keys', True):
             continue
         cluster_name, daemon_type, client_id = teuthology.split_role(client)
diff --git a/qa/tasks/s3tests_java.py b/qa/tasks/s3tests_java.py
index 88530cc06f6..47f9de1d2fc 100644
--- a/qa/tasks/s3tests_java.py
+++ b/qa/tasks/s3tests_java.py
@@ -89,7 +89,7 @@ class S3tests_java(Task):
     def begin(self):
         super(S3tests_java, self).begin()
         log.debug('S3 Tests Java: BEGIN')
-        for (host, roles) in self.ctx.cluster.remotes.iteritems():
+        for (host, roles) in self.ctx.cluster.remotes.items():
             log.debug(
                 'S3 Tests Java: Cluster config is: {cfg}'.format(cfg=roles))
             log.debug('S3 Tests Java: Host is: {host}'.format(host=host))
diff --git a/qa/tasks/swift.py b/qa/tasks/swift.py
index dac11dd619c..e89fcf03e48 100644
--- a/qa/tasks/swift.py
+++ b/qa/tasks/swift.py
@@ -72,9 +72,9 @@ def create_users(ctx, config):
     log.info('Creating rgw users...')
     testdir = teuthology.get_testdir(ctx)
     users = {'': 'foo', '2': 'bar'}
-    for client, testswift_conf in config.iteritems():
+    for client, testswift_conf in config.items():
         cluster_name, daemon_type, client_id = teuthology.split_role(client)
-        for suffix, user in users.iteritems():
+        for suffix, user in users.items():
             _config_user(testswift_conf, '{user}.{client}'.format(user=user, client=client), user, suffix)
             ctx.cluster.only(client).run(
                 args=[
@@ -122,7 +122,7 @@ def configure(ctx, config):
     assert isinstance(config, dict)
     log.info('Configuring testswift...')
     testdir = teuthology.get_testdir(ctx)
-    for client, testswift_conf in config.iteritems():
+    for client, testswift_conf in config.items():
         (remote,) = ctx.cluster.only(client).remotes.keys()
         remote.run(
             args=[
@@ -149,7 +149,7 @@ def run_tests(ctx, config):
     """
     assert isinstance(config, dict)
     testdir = teuthology.get_testdir(ctx)
-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         args = [
             'SWIFT_TEST_CONFIG_FILE={tdir}/archive/testswift.{client}.conf'.format(tdir=testdir, client=client),
             '{tdir}/swift/virtualenv/bin/nosetests'.format(tdir=testdir),
@@ -218,7 +218,7 @@ def task(ctx, config):
 
     testswift_conf = {}
     clients = []
-    for client, client_config in config.iteritems():
+    for client, client_config in config.items():
         # http://tracker.ceph.com/issues/40304 can't bootstrap on rhel 7.6+
         (remote,) = ctx.cluster.only(client).remotes.keys()
         if remote.os.name == 'rhel' and LooseVersion(remote.os.version) >= LooseVersion('7.6'):
diff --git a/qa/tasks/systemd.py b/qa/tasks/systemd.py
index b661c11df1c..f9a4558ae6a 100644
--- a/qa/tasks/systemd.py
+++ b/qa/tasks/systemd.py
@@ -23,7 +23,7 @@ def task(ctx, config):
     Test ceph systemd services can start, stop and restart and
     check for any failed services and report back errors
     """
-    for remote, roles in ctx.cluster.remotes.iteritems():
+    for remote, roles in ctx.cluster.remotes.items():
         remote.run(args=['sudo', 'ps', '-eaf', run.Raw('|'),
                          'grep', 'ceph'])
         r = remote.run(args=['sudo', 'systemctl', 'list-units', run.Raw('|'),
diff --git a/qa/tasks/tgt.py b/qa/tasks/tgt.py
index c2b322e0829..a0758f472de 100644
--- a/qa/tasks/tgt.py
+++ b/qa/tasks/tgt.py
@@ -21,7 +21,7 @@ def start_tgt_remotes(ctx, start_tgtd):
     """
     remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
     tgtd_list = []
-    for rem, roles in remotes.iteritems():
+    for rem, roles in remotes.items():
         for _id in roles:
             if _id in start_tgtd:
                 if not rem in tgtd_list:
diff --git a/qa/tasks/util/rados.py b/qa/tasks/util/rados.py
index a83f9e19082..a0c54ce4ead 100644
--- a/qa/tasks/util/rados.py
+++ b/qa/tasks/util/rados.py
@@ -84,4 +84,4 @@ def cmd_erasure_code_profile(profile_name, profile):
     return [
         'osd', 'erasure-code-profile', 'set',
         profile_name
-        ] + [ str(key) + '=' + str(value) for key, value in profile.iteritems() ]
+        ] + [ str(key) + '=' + str(value) for key, value in profile.items() ]
diff --git a/qa/tasks/util/workunit.py b/qa/tasks/util/workunit.py
index 91b0e0f8e19..1f5623af8a1 100644
--- a/qa/tasks/util/workunit.py
+++ b/qa/tasks/util/workunit.py
@@ -68,7 +68,7 @@ def get_refspec_after_overrides(config, overrides):
         overrides.pop(i, None)
     misc.deep_merge(config, overrides)
 
-    for spec, cls in refspecs.iteritems():
+    for spec, cls in refspecs.items():
         refspec = config.get(spec)
         if refspec:
             refspec = cls(refspec)
diff --git a/qa/tasks/vault.py b/qa/tasks/vault.py
index 8da2254c848..748164b9cb2 100644
--- a/qa/tasks/vault.py
+++ b/qa/tasks/vault.py
@@ -25,7 +25,7 @@ def assign_ports(ctx, config, initial_port):
     """
     port = initial_port
     role_endpoints = {}
-    for remote, roles_for_host in ctx.cluster.remotes.iteritems():
+    for remote, roles_for_host in ctx.cluster.remotes.items():
         for role in roles_for_host:
             if role in config:
                 role_endpoints[role] = (remote.name.split('@')[1], port)
diff --git a/qa/tasks/workunit.py b/qa/tasks/workunit.py
index cf0e33ab821..81ad2ee0ce3 100644
--- a/qa/tasks/workunit.py
+++ b/qa/tasks/workunit.py
@@ -115,7 +115,7 @@ def task(ctx, config):
     log.info("timeout={}".format(timeout))
     log.info("cleanup={}".format(cleanup))
     with parallel() as p:
-        for role, tests in clients.iteritems():
+        for role, tests in clients.items():
             if role != "all":
                 p.spawn(_run_tests, ctx, refspec, role, tests,
                         config.get('env'),
@@ -387,7 +387,7 @@ def _run_tests(ctx, refspec, role, tests, env, basedir,
         run.Raw('CEPH_ROOT={dir}'.format(dir=clonedir)),
     ]
     if env is not None:
-        for var, val in env.iteritems():
+        for var, val in env.items():
             quoted_val = pipes.quote(val)
             env_arg = '{var}={val}'.format(var=var, val=quoted_val)
             args.append(run.Raw(env_arg))
diff --git a/qa/workunits/mon/caps.py b/qa/workunits/mon/caps.py
index cca170ac4ed..270cf791b32 100644
--- a/qa/workunits/mon/caps.py
+++ b/qa/workunits/mon/caps.py
@@ -271,7 +271,7 @@ def test_all():
       ]
   }
 
-  for (module,cmd_lst) in cmds.iteritems():
+  for (module,cmd_lst) in cmds.items():
     k = keyring_base + '.' + module
     for cmd in cmd_lst:
 
@@ -283,8 +283,8 @@ def test_all():
       print 'generating keyring for {m}/{c}'.format(m=module,c=cmd_cmd)
 
      # gen keyring
-      for (good_or_bad,kind_map) in perms.iteritems():
-        for (kind,lst) in kind_map.iteritems():
+      for (good_or_bad,kind_map) in perms.items():
+        for (kind,lst) in kind_map.items():
           for (perm, cap) in lst:
             cap_formatted = cap.format(
                 s=module,
@@ -308,7 +308,7 @@ def test_all():
 
       # test
      for good_bad in perms.keys():
-        for (kind,lst) in perms[good_bad].iteritems():
+        for (kind,lst) in perms[good_bad].items():
           for (perm,_) in lst:
 
            cname = 'client.{gb}-{k}-{p}'.format(gb=good_bad,k=kind,p=perm)
diff --git a/src/test/rgw/rgw_multi/zone_cloud.py b/src/test/rgw/rgw_multi/zone_cloud.py
index 4e5eefb8c11..b9ff43ca964 100644
--- a/src/test/rgw/rgw_multi/zone_cloud.py
+++ b/src/test/rgw/rgw_multi/zone_cloud.py
@@ -116,7 +116,7 @@ class CloudKey:
             self.etag = '"' + self.etag + '"'
 
         new_meta = {}
-        for meta_key, meta_val in k.metadata.iteritems():
+        for meta_key, meta_val in k.metadata.items():
             if not meta_key.startswith('rgwx-'):
                 new_meta[meta_key] = meta_val
 
-- 
2.39.5
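
Background for reviewers: dict.iteritems() no longer exists in Python 3,
while dict.items() is available on both major versions -- it returns a list
copy on Python 2 and a lightweight view on Python 3. A minimal sketch of the
pattern this patch applies throughout (the dictionary below is made up for
illustration; it is not taken from the patched code):

    # Illustrative only -- not part of the patch above.
    roles = {'mon.a': 'smithi001', 'osd.0': 'smithi002'}

    # Python 2 only; raises AttributeError on Python 3:
    #   for role, host in roles.iteritems():
    #       ...

    # Works unchanged on Python 2 and Python 3:
    for role, host in roles.items():
        print('%s runs on %s' % (role, host))

On Python 2 this trades the lazy iterator for a list copy, but for the small
role/config dictionaries iterated in qa/, that cost is negligible, which is
why a plain s/iteritems/items/ is sufficient here.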