From a789fdf0020a0ae0bd30a83974dcf57aceb24b06 Mon Sep 17 00:00:00 2001
From: Kyr Shatskyy
Date: Fri, 11 Oct 2019 20:20:29 +0200
Subject: [PATCH] python3: get rid of iterkeys for compatibility

Signed-off-by: Kyr Shatskyy
---
 teuthology/lock/cli.py                    |  4 ++--
 teuthology/misc.py                        |  2 +-
 teuthology/nuke/actions.py                | 10 +++++-----
 teuthology/orchestra/cluster.py           |  4 ++--
 teuthology/task/background_exec.py        |  2 +-
 teuthology/task/ceph_ansible.py           |  6 +++---
 teuthology/task/clock.py                  |  8 ++++----
 teuthology/task/exec.py                   |  2 +-
 teuthology/task/install/__init__.py       | 10 +++++-----
 teuthology/task/install/redhat.py         |  4 ++--
 teuthology/task/install/util.py           |  4 ++--
 teuthology/task/internal/__init__.py      |  8 ++++----
 teuthology/task/internal/check_lock.py    |  2 +-
 teuthology/task/internal/lock_machines.py |  4 ++--
 teuthology/task/internal/redhat.py        |  6 +++---
 teuthology/task/internal/syslog.py        |  4 ++--
 teuthology/task/internal/vm_setup.py      |  2 +-
 teuthology/task/kernel.py                 |  4 ++--
 teuthology/task/lockfile.py               |  8 ++++----
 teuthology/task/mpi.py                    | 12 ++++++------
 teuthology/task/parallel_example.py       |  2 +-
 teuthology/task/pexec.py                  |  8 ++++----
 teuthology/task/selinux.py                | 12 ++++++------
 teuthology/task/ssh_keys.py               |  2 +-
 teuthology/task/tests/test_locking.py     |  6 +++---
 25 files changed, 68 insertions(+), 68 deletions(-)

diff --git a/teuthology/lock/cli.py b/teuthology/lock/cli.py
index b9b3eb289d..16e2806798 100644
--- a/teuthology/lock/cli.py
+++ b/teuthology/lock/cli.py
@@ -41,7 +41,7 @@ def main(ctx):
                 g = yaml.safe_load_all(f)
                 for new in g:
                     if 'targets' in new:
-                        for t in new['targets'].iterkeys():
+                        for t in new['targets'].keys():
                             machines.append(t)
         except IOError as e:
             raise argparse.ArgumentTypeError(str(e))
@@ -284,6 +284,6 @@ def updatekeys(args):
         with open(targets) as f:
             docs = yaml.safe_load_all(f)
             for doc in docs:
-                machines = [n for n in doc.get('targets', dict()).iterkeys()]
+                machines = [n for n in doc.get('targets', dict()).keys()]
 
     return keys.do_update_keys(machines, all_)[0]
diff --git a/teuthology/misc.py b/teuthology/misc.py
index 037755e5eb..1f5d88c7ff 100644
--- a/teuthology/misc.py
+++ b/teuthology/misc.py
@@ -1002,7 +1002,7 @@ def get_clients(ctx, roles):
         assert isinstance(role, basestring)
         assert 'client.' in role
         _, _, id_ = split_role(role)
-        (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+        (remote,) = ctx.cluster.only(role).remotes.keys()
         yield (id_, remote)
 
 
diff --git a/teuthology/nuke/actions.py b/teuthology/nuke/actions.py
index 8c088aa2fc..e1b166141e 100644
--- a/teuthology/nuke/actions.py
+++ b/teuthology/nuke/actions.py
@@ -176,7 +176,7 @@ def reboot(ctx, remotes):
 def reset_syslog_dir(ctx):
     log.info('Resetting syslog output locations...')
     nodes = {}
-    for remote in ctx.cluster.remotes.iterkeys():
+    for remote in ctx.cluster.remotes.keys():
         proc = remote.run(
             args=[
                 'if', 'test', '-e', '/etc/rsyslog.d/80-cephtest.conf',
@@ -199,7 +199,7 @@ def reset_syslog_dir(ctx):
 
 
 def dpkg_configure(ctx):
-    for remote in ctx.cluster.remotes.iterkeys():
+    for remote in ctx.cluster.remotes.keys():
         if remote.os.package_type != 'deb':
             continue
         log.info(
@@ -221,7 +221,7 @@ def dpkg_configure(ctx):
 def remove_yum_timedhosts(ctx):
     # Workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1233329
     log.info("Removing yum timedhosts files...")
-    for remote in ctx.cluster.remotes.iterkeys():
+    for remote in ctx.cluster.remotes.keys():
         if remote.os.package_type != 'rpm':
             continue
         remote.run(
@@ -247,7 +247,7 @@ def remove_ceph_packages(ctx):
         'ceph-deploy', 'libapache2-mod-fastcgi'
     ]
     pkgs = str.join(' ', ceph_packages_to_remove)
-    for remote in ctx.cluster.remotes.iterkeys():
+    for remote in ctx.cluster.remotes.keys():
         if remote.os.package_type == 'rpm':
             log.info("Remove any broken repos")
             remote.run(
@@ -381,7 +381,7 @@ def undo_multipath(ctx):
     come back unless specifically requested by the test.
     """
     log.info('Removing any multipath config/pkgs...')
-    for remote in ctx.cluster.remotes.iterkeys():
+    for remote in ctx.cluster.remotes.keys():
         remote.run(
             args=[
                 'sudo', 'multipath', '-F',
diff --git a/teuthology/orchestra/cluster.py b/teuthology/orchestra/cluster.py
index ebae8de216..3f8e0c2e8f 100644
--- a/teuthology/orchestra/cluster.py
+++ b/teuthology/orchestra/cluster.py
@@ -60,7 +60,7 @@ class Cluster(object):
 
         Returns a list of `RemoteProcess`.
         """
-        remotes = sorted(self.remotes.iterkeys(), key=lambda rem: rem.name)
+        remotes = sorted(self.remotes.keys(), key=lambda rem: rem.name)
         return [remote.run(**kwargs) for remote in remotes]
 
     def write_file(self, file_name, content, sudo=False, perms=None, owner=None):
@@ -72,7 +72,7 @@ class Cluster(object):
         :param sudo: use sudo
         :param perms: file permissions (passed to chmod) ONLY if sudo is True
         """
-        remotes = sorted(self.remotes.iterkeys(), key=lambda rem: rem.name)
+        remotes = sorted(self.remotes.keys(), key=lambda rem: rem.name)
         for remote in remotes:
             if sudo:
                 teuthology.misc.sudo_write_file(remote, file_name, content, perms=perms, owner=owner)
diff --git a/teuthology/task/background_exec.py b/teuthology/task/background_exec.py
index 6691b50d41..cf187e9f5d 100644
--- a/teuthology/task/background_exec.py
+++ b/teuthology/task/background_exec.py
@@ -47,7 +47,7 @@ def task(ctx, config):
 
     tasks = {}
     for role, cmd in config.items():
-        (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+        (remote,) = ctx.cluster.only(role).remotes.keys()
         log.info('Running background command on role %s host %s',
                  role, remote.name)
         if isinstance(cmd, list):
diff --git a/teuthology/task/ceph_ansible.py b/teuthology/task/ceph_ansible.py
index f8b3b07760..4446791f64 100644
--- a/teuthology/task/ceph_ansible.py
+++ b/teuthology/task/ceph_ansible.py
@@ -122,9 +122,9 @@ class CephAnsible(Task):
         ansible_loc = self.ctx.cluster.only('installer.0')
         (ceph_first_mon,) = self.ctx.cluster.only(
             misc.get_first_mon(self.ctx,
-                               self.config)).remotes.iterkeys()
+                               self.config)).remotes.keys()
         if ansible_loc.remotes:
-            (ceph_installer,) = ansible_loc.remotes.iterkeys()
+            (ceph_installer,) = ansible_loc.remotes.keys()
         else:
             ceph_installer = ceph_first_mon
         self.ceph_first_mon = ceph_first_mon
@@ -502,7 +502,7 @@ class CephAnsible(Task):
 
     def fix_keyring_permission(self):
         clients_only = lambda role: role.startswith('client')
-        for client in self.cluster.only(clients_only).remotes.iterkeys():
+        for client in self.cluster.only(clients_only).remotes.keys():
             client.run(args=[
                 'sudo',
                 'chmod',
diff --git a/teuthology/task/clock.py b/teuthology/task/clock.py
index 69474e9734..63f03e6808 100644
--- a/teuthology/task/clock.py
+++ b/teuthology/task/clock.py
@@ -30,7 +30,7 @@ def task(ctx, config):
     """
     log.info('Syncing clocks and checking initial clock skew...')
 
-    for rem in ctx.cluster.remotes.iterkeys():
+    for rem in ctx.cluster.remotes.keys():
         rem.run(
             args = [
                 'sudo', 'systemctl', 'stop', 'ntp.service', run.Raw('||'),
@@ -56,7 +56,7 @@ def task(ctx, config):
 
     finally:
         log.info('Checking final clock skew...')
-        for rem in ctx.cluster.remotes.iterkeys():
+        for rem in ctx.cluster.remotes.keys():
             rem.run(
                 args=[
                     'PATH=/usr/bin:/usr/sbin', 'ntpq', '-p', run.Raw('||'),
@@ -76,7 +76,7 @@ def check(ctx, config):
     :param config: Configuration
     """
     log.info('Checking initial clock skew...')
-    for rem in ctx.cluster.remotes.iterkeys():
+    for rem in ctx.cluster.remotes.keys():
         rem.run(
             args=[
                 'PATH=/usr/bin:/usr/sbin', 'ntpq', '-p', run.Raw('||'),
@@ -91,7 +91,7 @@ def check(ctx, config):
 
     finally:
         log.info('Checking final clock skew...')
-        for rem in ctx.cluster.remotes.iterkeys():
+        for rem in ctx.cluster.remotes.keys():
             rem.run(
                 args=[
                     'PATH=/usr/bin:/usr/sbin', 'ntpq', '-p', run.Raw('||'),
diff --git a/teuthology/task/exec.py b/teuthology/task/exec.py
index 0f8308ab91..2d38fa3b8c 100644
--- a/teuthology/task/exec.py
+++ b/teuthology/task/exec.py
@@ -41,7 +41,7 @@ def task(ctx, config):
         config = dict((id_, a) for id_ in roles)
 
    for role, ls in config.items():
-        (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+        (remote,) = ctx.cluster.only(role).remotes.keys()
         log.info('Running commands on role %s host %s', role, remote.name)
         for c in ls:
             c.replace('$TESTDIR', testdir)
diff --git a/teuthology/task/install/__init__.py b/teuthology/task/install/__init__.py
index cc83252d71..1e6fd1bc64 100644
--- a/teuthology/task/install/__init__.py
+++ b/teuthology/task/install/__init__.py
@@ -76,13 +76,13 @@ def install_packages(ctx, pkgs, config):
         "rpm": rpm._update_package_list_and_install,
     }
     with parallel() as p:
-        for remote in ctx.cluster.remotes.iterkeys():
+        for remote in ctx.cluster.remotes.keys():
             system_type = teuthology.get_system_type(remote)
             p.spawn(
                 install_pkgs[system_type],
                 ctx, remote, pkgs[system_type], config)
 
-    for remote in ctx.cluster.remotes.iterkeys():
+    for remote in ctx.cluster.remotes.keys():
         # verifies that the install worked as expected
         verify_package_version(ctx, config, remote)
 
@@ -100,7 +100,7 @@ def remove_packages(ctx, config, pkgs):
         "rpm": rpm._remove,
     }
     with parallel() as p:
-        for remote in ctx.cluster.remotes.iterkeys():
+        for remote in ctx.cluster.remotes.keys():
             system_type = teuthology.get_system_type(remote)
             p.spawn(remove_pkgs[
                     system_type], ctx, config, remote, pkgs[system_type])
@@ -121,7 +121,7 @@ def remove_sources(ctx, config):
         project = config.get('project', 'ceph')
         log.info("Removing {proj} sources lists".format(
             proj=project))
-        for remote in ctx.cluster.remotes.iterkeys():
+        for remote in ctx.cluster.remotes.keys():
             remove_fn = remove_sources_pkgs[remote.os.package_type]
             p.spawn(remove_fn, ctx, config, remote)
 
@@ -267,7 +267,7 @@ def upgrade_remote_to_config(ctx, config):
     # build a normalized remote -> config dict
     remotes = {}
     if 'all' in config:
-        for remote in ctx.cluster.remotes.iterkeys():
+        for remote in ctx.cluster.remotes.keys():
             remotes[remote] = config.get('all')
     else:
         for role in config.keys():
diff --git a/teuthology/task/install/redhat.py b/teuthology/task/install/redhat.py
index 5ff061c1d0..4da3ad324b 100644
--- a/teuthology/task/install/redhat.py
+++ b/teuthology/task/install/redhat.py
@@ -63,7 +63,7 @@ def install(ctx, config):
     else:
         raise RuntimeError("Unsupported RH Ceph version %s", version)
     with parallel() as p:
-        for remote in ctx.cluster.remotes.iterkeys():
+        for remote in ctx.cluster.remotes.keys():
             if remote.os.name == 'rhel':
                 log.info("Installing on RHEL node: %s", remote.shortname)
                 p.spawn(install_pkgs, ctx, remote, version, downstream_config)
@@ -79,7 +79,7 @@ def install(ctx, config):
             log.info("Skipping uninstall of Ceph")
         else:
             with parallel() as p:
-                for remote in ctx.cluster.remotes.iterkeys():
+                for remote in ctx.cluster.remotes.keys():
                     p.spawn(uninstall_pkgs, ctx, remote, downstream_config)
 
diff --git a/teuthology/task/install/util.py b/teuthology/task/install/util.py
index 65d481e65f..64268221fd 100644
--- a/teuthology/task/install/util.py
+++ b/teuthology/task/install/util.py
@@ -75,7 +75,7 @@ def ship_utilities(ctx, config):
     ) as f:
         fn = os.path.join(testdir, 'valgrind.supp')
         filenames.append(fn)
-        for rem in ctx.cluster.remotes.iterkeys():
+        for rem in ctx.cluster.remotes.keys():
             teuthology.sudo_write_file(
                 remote=rem,
                 path=fn,
@@ -93,7 +93,7 @@ def ship_utilities(ctx, config):
         dst = os.path.join(destdir, filename)
         filenames.append(dst)
         with open(src, 'rb') as f:
-            for rem in ctx.cluster.remotes.iterkeys():
+            for rem in ctx.cluster.remotes.keys():
                 teuthology.sudo_write_file(
                     remote=rem,
                     path=dst,
diff --git a/teuthology/task/internal/__init__.py b/teuthology/task/internal/__init__.py
index cf255a199d..cb095dee28 100644
--- a/teuthology/task/internal/__init__.py
+++ b/teuthology/task/internal/__init__.py
@@ -140,7 +140,7 @@ def add_remotes(ctx, config):
         return
     remotes = []
     machs = []
-    for name in ctx.config['targets'].iterkeys():
+    for name in ctx.config['targets'].keys():
         machs.append(name)
     for t, key in ctx.config['targets'].items():
         t = misc.canonicalize_hostname(t)
@@ -167,7 +167,7 @@ def connect(ctx, config):
     Connect to all remotes in ctx.cluster
     """
     log.info('Opening connections...')
-    for rem in ctx.cluster.remotes.iterkeys():
+    for rem in ctx.cluster.remotes.keys():
         log.debug('connecting to %s', rem.name)
         rem.connect()
 
@@ -357,7 +357,7 @@ def archive(ctx, config):
         logdir = os.path.join(ctx.archive, 'remote')
         if (not os.path.exists(logdir)):
             os.mkdir(logdir)
-        for rem in ctx.cluster.remotes.iterkeys():
+        for rem in ctx.cluster.remotes.keys():
             path = os.path.join(logdir, rem.shortname)
             misc.pull_directory(rem, archive_dir, path)
         # Check for coredumps and pull binaries
@@ -442,7 +442,7 @@ def coredump(ctx, config):
 
         # set status = 'fail' if the dir is still there = coredumps were
        # seen
-        for rem in ctx.cluster.remotes.iterkeys():
+        for rem in ctx.cluster.remotes.keys():
             r = rem.run(
                 args=[
                     'if', 'test', '!', '-e', '{adir}/coredump'.format(adir=archive_dir), run.Raw(';'), 'then',
diff --git a/teuthology/task/internal/check_lock.py b/teuthology/task/internal/check_lock.py
index ec01deaa82..152e41c2d9 100644
--- a/teuthology/task/internal/check_lock.py
+++ b/teuthology/task/internal/check_lock.py
@@ -16,7 +16,7 @@ def check_lock(ctx, config, check_up=True):
         log.info('Lock checking disabled.')
         return
     log.info('Checking locks...')
-    for machine in ctx.config['targets'].iterkeys():
+    for machine in ctx.config['targets'].keys():
         status = teuthology.lock.query.get_status(machine)
         log.debug('machine status is %s', repr(status))
         assert status is not None, \
diff --git a/teuthology/task/internal/lock_machines.py b/teuthology/task/internal/lock_machines.py
index 96b5eec632..1dc2e0a3b2 100644
--- a/teuthology/task/internal/lock_machines.py
+++ b/teuthology/task/internal/lock_machines.py
@@ -118,7 +118,7 @@ def lock_machines(ctx, config):
             if teuthology.lock.keys.do_update_keys(keys_dict)[0]:
                 log.info("Error in virtual machine keys")
             newscandict = {}
-            for dkey in all_locked.iterkeys():
+            for dkey in all_locked.keys():
                 stats = teuthology.lock.query.get_status(dkey)
                 newscandict[dkey] = stats['ssh_pub_key']
             ctx.config['targets'] = newscandict
@@ -156,5 +156,5 @@ def lock_machines(ctx, config):
     )
     if get_status(ctx.summary) == 'pass' or unlock_on_failure:
         log.info('Unlocking machines...')
-        for machine in ctx.config['targets'].iterkeys():
+        for machine in ctx.config['targets'].keys():
            teuthology.lock.ops.unlock_one(ctx, machine, ctx.owner, ctx.archive)
diff --git a/teuthology/task/internal/redhat.py b/teuthology/task/internal/redhat.py
index f79b9fbce8..430fff1e7f 100644
--- a/teuthology/task/internal/redhat.py
+++ b/teuthology/task/internal/redhat.py
@@ -37,7 +37,7 @@ def setup_additional_repo(ctx, config):
     """
     if ctx.config.get('redhat').get('set-add-repo', None):
         add_repo = ctx.config.get('redhat').get('set-add-repo')
-        for remote in ctx.cluster.remotes.iterkeys():
+        for remote in ctx.cluster.remotes.keys():
             if remote.os.package_type == 'rpm':
                 remote.run(args=['sudo', 'wget', '-O',
                                  '/etc/yum.repos.d/rh_add.repo', add_repo])
@@ -69,7 +69,7 @@ def setup_base_repo(ctx, config):
         yield
     finally:
         log.info("Cleaning up repo's")
-        for remote in ctx.cluster.remotes.iterkeys():
+        for remote in ctx.cluster.remotes.keys():
             if remote.os.package_type == 'rpm':
                 remote.run(args=['sudo', 'rm',
                                  run.Raw('/etc/yum.repos.d/rh*.repo'),
@@ -81,7 +81,7 @@ def _setup_latest_repo(ctx, config):
     Setup repo based on redhat nodes
     """
     with parallel():
-        for remote in ctx.cluster.remotes.iterkeys():
+        for remote in ctx.cluster.remotes.keys():
             if remote.os.package_type == 'rpm':
                 remote.run(args=['sudo', 'subscription-manager', 'repos',
                                  run.Raw('--disable=*ceph*')])
diff --git a/teuthology/task/internal/syslog.py b/teuthology/task/internal/syslog.py
index bf722b8193..8908b4c184 100644
--- a/teuthology/task/internal/syslog.py
+++ b/teuthology/task/internal/syslog.py
@@ -43,7 +43,7 @@ def syslog(ctx, config):
     ]
     conf_fp = StringIO('\n'.join(conf_lines))
     try:
-        for rem in ctx.cluster.remotes.iterkeys():
+        for rem in ctx.cluster.remotes.keys():
             log_context = 'system_u:object_r:var_log_t:s0'
             for log_path in (kern_log, misc_log):
                 rem.run(args=['install', '-m', '666', '/dev/null', log_path])
@@ -93,7 +93,7 @@ def syslog(ctx, config):
         # flush the file fully. oh well.
 
         log.info('Checking logs for errors...')
-        for rem in ctx.cluster.remotes.iterkeys():
+        for rem in ctx.cluster.remotes.keys():
             log.debug('Checking %s', rem.name)
             r = rem.run(
                 args=[
diff --git a/teuthology/task/internal/vm_setup.py b/teuthology/task/internal/vm_setup.py
index c3e913bf84..27ecd8b7e6 100644
--- a/teuthology/task/internal/vm_setup.py
+++ b/teuthology/task/internal/vm_setup.py
@@ -21,7 +21,7 @@ def vm_setup(ctx, config):
     ansible_hosts = set()
     with parallel():
         editinfo = os.path.join(os.path.dirname(__file__), 'edit_sudoers.sh')
-        for rem in ctx.cluster.remotes.iterkeys():
+        for rem in ctx.cluster.remotes.keys():
             if rem.is_vm:
                 ansible_hosts.add(rem.shortname)
                 r = rem.run(args=['test', '-e', '/ceph-qa-ready'],
diff --git a/teuthology/task/kernel.py b/teuthology/task/kernel.py
index ffb236fdb7..1ff1a3b010 100644
--- a/teuthology/task/kernel.py
+++ b/teuthology/task/kernel.py
@@ -219,7 +219,7 @@ def install_firmware(ctx, config):
     uri = teuth_config.linux_firmware_git_url or linux_firmware_git_upstream
     fw_dir = '/lib/firmware/updates'
 
-    for role in config.iterkeys():
+    for role in config.keys():
         if isinstance(config[role], str) and config[role].find('distro') >= 0:
             log.info('Skipping firmware on distro kernel');
             return
@@ -452,7 +452,7 @@ def install_latest_rh_kernel(ctx, config):
     if config.get('skip'):
         return
     with parallel() as p:
-        for remote in ctx.cluster.remotes.iterkeys():
+        for remote in ctx.cluster.remotes.keys():
            p.spawn(update_rh_kernel, remote)
 
diff --git a/teuthology/task/lockfile.py b/teuthology/task/lockfile.py
index 0c74e83581..a52b0ffb3e 100644
--- a/teuthology/task/lockfile.py
+++ b/teuthology/task/lockfile.py
@@ -80,7 +80,7 @@ def task(ctx, config):
     files = set(files)
     lock_procs = list()
     for client in clients:
-        (client_remote,) = ctx.cluster.only(client).remotes.iterkeys()
+        (client_remote,) = ctx.cluster.only(client).remotes.keys()
         log.info("got a client remote")
         (_, _, client_id) = client.partition('.')
         filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), op["lockfile"])
@@ -113,7 +113,7 @@ def task(ctx, config):
     # create the files to run these locks on
     client = clients.pop()
     clients.add(client)
-    (client_remote,) = ctx.cluster.only(client).remotes.iterkeys()
+    (client_remote,) = ctx.cluster.only(client).remotes.keys()
     (_, _, client_id) = client.partition('.')
     file_procs = list()
     for lockfile in files:
@@ -170,7 +170,7 @@ def task(ctx, config):
             greenlet.kill(block=True)
 
     for client in clients:
-        (client_remote,) = ctx.cluster.only(client).remotes.iterkeys()
+        (client_remote,) = ctx.cluster.only(client).remotes.keys()
         (_, _, client_id) = client.partition('.')
         filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), op["lockfile"])
         proc = client_remote.run(
@@ -192,7 +192,7 @@ def lock_one(op, ctx):
     timeout = None
     proc = None
     result = None
-    (client_remote,) = ctx.cluster.only(op['client']).remotes.iterkeys()
+    (client_remote,) = ctx.cluster.only(op['client']).remotes.keys()
     (_, _, client_id) = op['client'].partition('.')
     testdir = teuthology.get_testdir(ctx)
     filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), op["lockfile"])
diff --git a/teuthology/task/mpi.py b/teuthology/task/mpi.py
index e92d939033..4a752b1213 100644
--- a/teuthology/task/mpi.py
+++ b/teuthology/task/mpi.py
@@ -94,23 +94,23 @@ def task(ctx, config):
     if 'nodes' in config:
         if isinstance(config['nodes'], basestring) and config['nodes'] == 'all':
             for role in teuthology.all_roles(ctx.cluster):
-                (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+                (remote,) = ctx.cluster.only(role).remotes.keys()
                 ip,port = remote.ssh.get_transport().getpeername()
                 hosts.append(ip)
                 remotes.append(remote)
-            (master_remote,) = ctx.cluster.only(config['nodes'][0]).remotes.iterkeys()
+            (master_remote,) = ctx.cluster.only(config['nodes'][0]).remotes.keys()
         elif isinstance(config['nodes'], list):
             for role in config['nodes']:
-                (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+                (remote,) = ctx.cluster.only(role).remotes.keys()
                 ip,port = remote.ssh.get_transport().getpeername()
                 hosts.append(ip)
                 remotes.append(remote)
-            (master_remote,) = ctx.cluster.only(config['nodes'][0]).remotes.iterkeys()
+            (master_remote,) = ctx.cluster.only(config['nodes'][0]).remotes.keys()
     else:
         roles = ['client.{id}'.format(id=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
-        (master_remote,) = ctx.cluster.only(roles[0]).remotes.iterkeys()
+        (master_remote,) = ctx.cluster.only(roles[0]).remotes.keys()
         for role in roles:
-            (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+            (remote,) = ctx.cluster.only(role).remotes.keys()
             ip,port = remote.ssh.get_transport().getpeername()
             hosts.append(ip)
             remotes.append(remote)
diff --git a/teuthology/task/parallel_example.py b/teuthology/task/parallel_example.py
index c1915201e8..eb9659a81d 100644
--- a/teuthology/task/parallel_example.py
+++ b/teuthology/task/parallel_example.py
@@ -27,7 +27,7 @@ def parallel_test(ctx, config):
     log.info('Executing command on all hosts concurrently with role "%s"' % role)
     cluster = ctx.cluster.only(role)
     nodes = {}
-    for remote in cluster.remotes.iterkeys():
+    for remote in cluster.remotes.keys():
         """Call run for each remote host, but use 'wait=False' to have it return immediately."""
         proc = remote.run(args=['sleep', '5', run.Raw(';'), 'date', run.Raw(';'), 'hostname'], wait=False,)
         nodes[remote.name] = proc
diff --git a/teuthology/task/pexec.py b/teuthology/task/pexec.py
index 573866c926..4d18d27193 100644
--- a/teuthology/task/pexec.py
+++ b/teuthology/task/pexec.py
@@ -63,20 +63,20 @@ def _generate_remotes(ctx, config):
     """Return remote roles and the type of role specified in config"""
     if 'all' in config and len(config) == 1:
         ls = config['all']
-        for remote in ctx.cluster.remotes.iterkeys():
+        for remote in ctx.cluster.remotes.keys():
             yield (remote, ls)
     elif 'clients' in config:
         ls = config['clients']
         for role in teuthology.all_roles_of_type(ctx.cluster, 'client'):
-            (remote,) = ctx.cluster.only('client.{r}'.format(r=role)).remotes.iterkeys()
+            (remote,) = ctx.cluster.only('client.{r}'.format(r=role)).remotes.keys()
             yield (remote, ls)
         del config['clients']
         for role, ls in config.items():
-            (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+            (remote,) = ctx.cluster.only(role).remotes.keys()
             yield (remote, ls)
     else:
         for role, ls in config.items():
-            (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+            (remote,) = ctx.cluster.only(role).remotes.keys()
             yield (remote, ls)
 
 def task(ctx, config):
diff --git a/teuthology/task/selinux.py b/teuthology/task/selinux.py
index 7fb7992746..36d5d06385 100644
--- a/teuthology/task/selinux.py
+++ b/teuthology/task/selinux.py
@@ -79,7 +79,7 @@ class SELinux(Task):
 
         log.debug("Getting current SELinux state")
         modes = dict()
-        for remote in self.cluster.remotes.iterkeys():
+        for remote in self.cluster.remotes.keys():
             result = remote.run(
                 args=['/usr/sbin/getenforce'],
                 stdout=StringIO(),
@@ -93,7 +93,7 @@ class SELinux(Task):
         Set the requested SELinux mode
         """
         log.info("Putting SELinux into %s mode", self.mode)
-        for remote in self.cluster.remotes.iterkeys():
+        for remote in self.cluster.remotes.keys():
             mode = self.old_modes[remote.name]
             if mode == "Disabled" or mode == "disabled":
                 continue
@@ -127,7 +127,7 @@ class SELinux(Task):
         if se_whitelist:
             known_denials.extend(se_whitelist)
         ignore_known_denials = '\'\(' + str.join('\|', known_denials) + '\)\''
-        for remote in self.cluster.remotes.iterkeys():
+        for remote in self.cluster.remotes.keys():
             proc = remote.run(
                 args=['sudo', 'grep', 'avc: .*denied',
                       '/var/log/audit/audit.log', run.Raw('|'), 'grep', '-v',
@@ -157,7 +157,7 @@ class SELinux(Task):
         if not set(self.old_modes.values()).difference(set([self.mode])):
             return
         log.info("Restoring old SELinux modes")
-        for remote in self.cluster.remotes.iterkeys():
+        for remote in self.cluster.remotes.keys():
             mode = self.old_modes[remote.name]
             if mode == "Disabled" or mode == "disabled":
                 continue
@@ -186,7 +186,7 @@ class SELinux(Task):
         """
         all_denials = self.get_denials()
         new_denials = dict()
-        for remote in self.cluster.remotes.iterkeys():
+        for remote in self.cluster.remotes.keys():
             old_host_denials = self.old_denials[remote.name]
             all_host_denials = all_denials[remote.name]
             new_host_denials = set(all_host_denials).difference(
@@ -194,7 +194,7 @@ class SELinux(Task):
             )
             new_denials[remote.name] = list(new_host_denials)
 
-        for remote in self.cluster.remotes.iterkeys():
+        for remote in self.cluster.remotes.keys():
             if len(new_denials[remote.name]):
                 raise SELinuxError(node=remote,
                                    denials=new_denials[remote.name])
diff --git a/teuthology/task/ssh_keys.py b/teuthology/task/ssh_keys.py
index 173113c955..ee49074d5f 100644
--- a/teuthology/task/ssh_keys.py
+++ b/teuthology/task/ssh_keys.py
@@ -134,7 +134,7 @@ def push_keys_to_host(ctx, config, public_key, private_key):
 
     # add an entry for all hosts in ctx to auth_keys_data
     auth_keys_data = ''
-    for inner_host in ctx.cluster.remotes.iterkeys():
+    for inner_host in ctx.cluster.remotes.keys():
         inner_username, inner_hostname = str(inner_host).split('@')
         # create a 'user@hostname' string using our fake hostname
         fake_hostname = '{user}@{host}'.format(user=ssh_keys_user, host=str(inner_hostname))
diff --git a/teuthology/task/tests/test_locking.py b/teuthology/task/tests/test_locking.py
index 898d60f39c..05b0f45ad3 100644
--- a/teuthology/task/tests/test_locking.py
+++ b/teuthology/task/tests/test_locking.py
@@ -7,7 +7,7 @@ class TestLocking(object):
         os_type = ctx.config.get("os_type")
         if os_type is None:
             pytest.skip('os_type was not defined')
-        for remote in ctx.cluster.remotes.iterkeys():
+        for remote in ctx.cluster.remotes.keys():
            assert remote.os.name == os_type
 
     def test_correct_os_version(self, ctx, config):
@@ -16,10 +16,10 @@ class TestLocking(object):
             pytest.skip('os_version was not defined')
         if ctx.config.get("os_type") == "debian":
             pytest.skip('known issue with debian versions; see: issue #10878')
-        for remote in ctx.cluster.remotes.iterkeys():
+        for remote in ctx.cluster.remotes.keys():
             assert remote.inventory_info['os_version'] == os_version
 
     def test_correct_machine_type(self, ctx, config):
         machine_type = ctx.machine_type
-        for remote in ctx.cluster.remotes.iterkeys():
+        for remote in ctx.cluster.remotes.keys():
             assert remote.machine_type in machine_type
-- 
2.39.5
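
Background on the pattern applied throughout this patch: dict.iterkeys() exists only in Python 2, while dict.keys() exists on both interpreters; it returns a list on Python 2 and a lazy view object on Python 3, and either form supports the iteration and single-element unpacking used in teuthology. The following is a minimal standalone sketch of that pattern, not part of the patch; the host names and keys are made up for illustration.

    # Illustration of the iterkeys -> keys replacement (example data only).
    targets = {
        'node1.example.com': 'ssh-rsa AAAA...',  # hypothetical host and key
        'node2.example.com': 'ssh-rsa BBBB...',  # hypothetical host and key
    }

    # Python 2 only:
    #     for name in targets.iterkeys(): ...
    # Python 2 and 3 (keys() is a list on py2, a view on py3; both iterate fine):
    for name in targets.keys():
        print(name)

    # Single-element unpacking over keys() also works on a Python 3 view object,
    # matching the "(remote,) = cluster.remotes.keys()" pattern in the patch:
    single = {'node1.example.com': 'ssh-rsa AAAA...'}
    (only_host,) = single.keys()
    print(only_host)

Wrapping the call as list(d.keys()) would only be needed if a call site mutated the dict while iterating over it.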