g = yaml.safe_load_all(f)
for new in g:
if 'targets' in new:
- for t in new['targets'].iterkeys():
+ for t in new['targets'].keys():
machines.append(t)
except IOError as e:
raise argparse.ArgumentTypeError(str(e))
with open(targets) as f:
docs = yaml.safe_load_all(f)
for doc in docs:
- machines = [n for n in doc.get('targets', dict()).iterkeys()]
+ machines = [n for n in doc.get('targets', dict()).keys()]
return keys.do_update_keys(machines, all_)[0]
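
The change repeated throughout this patch swaps dict.iterkeys(), which no longer exists in Python 3, for keys(), which returns an iterable view on both Python 2 and 3. A minimal sketch of the idiom, using a hypothetical targets mapping rather than anything from the patch:

    # dict.iterkeys() was removed in Python 3; keys() iterates on both versions.
    targets = {'ubuntu@host1': 'ssh-rsa AAAA...', 'ubuntu@host2': 'ssh-rsa BBBB...'}

    machines = []
    for name in targets.keys():   # valid on Python 2 and Python 3
        machines.append(name)

    # Iterating the dict directly also yields keys, so this is equivalent:
    machines = list(targets)
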
assert isinstance(role, basestring)
assert 'client.' in role
_, _, id_ = split_role(role)
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(role).remotes.keys()
yield (id_, remote)
def reset_syslog_dir(ctx):
log.info('Resetting syslog output locations...')
nodes = {}
- for remote in ctx.cluster.remotes.iterkeys():
+ for remote in ctx.cluster.remotes.keys():
proc = remote.run(
args=[
'if', 'test', '-e', '/etc/rsyslog.d/80-cephtest.conf',
def dpkg_configure(ctx):
- for remote in ctx.cluster.remotes.iterkeys():
+ for remote in ctx.cluster.remotes.keys():
if remote.os.package_type != 'deb':
continue
log.info(
def remove_yum_timedhosts(ctx):
# Workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1233329
log.info("Removing yum timedhosts files...")
- for remote in ctx.cluster.remotes.iterkeys():
+ for remote in ctx.cluster.remotes.keys():
if remote.os.package_type != 'rpm':
continue
remote.run(
'ceph-deploy', 'libapache2-mod-fastcgi'
]
pkgs = str.join(' ', ceph_packages_to_remove)
- for remote in ctx.cluster.remotes.iterkeys():
+ for remote in ctx.cluster.remotes.keys():
if remote.os.package_type == 'rpm':
log.info("Remove any broken repos")
remote.run(
come back unless specifically requested by the test.
"""
log.info('Removing any multipath config/pkgs...')
- for remote in ctx.cluster.remotes.iterkeys():
+ for remote in ctx.cluster.remotes.keys():
remote.run(
args=[
'sudo', 'multipath', '-F',
Returns a list of `RemoteProcess`.
"""
- remotes = sorted(self.remotes.iterkeys(), key=lambda rem: rem.name)
+ remotes = sorted(self.remotes.keys(), key=lambda rem: rem.name)
return [remote.run(**kwargs) for remote in remotes]
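
Note that sorted() accepts any iterable, so handing it the keys() view here gives the same ordered-by-name result the old iterkeys() iterator produced. A small sketch with stand-in objects (FakeRemote and remotes_dict below are made up for illustration):

    class FakeRemote:
        def __init__(self, name):
            self.name = name

    remotes_dict = {FakeRemote('ubuntu@b'): [], FakeRemote('ubuntu@a'): []}
    ordered = sorted(remotes_dict.keys(), key=lambda rem: rem.name)
    print([r.name for r in ordered])   # ['ubuntu@a', 'ubuntu@b']
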
def write_file(self, file_name, content, sudo=False, perms=None, owner=None):
:param sudo: use sudo
:param perms: file permissions (passed to chmod) ONLY if sudo is True
"""
- remotes = sorted(self.remotes.iterkeys(), key=lambda rem: rem.name)
+ remotes = sorted(self.remotes.keys(), key=lambda rem: rem.name)
for remote in remotes:
if sudo:
teuthology.misc.sudo_write_file(remote, file_name, content, perms=perms, owner=owner)
tasks = {}
for role, cmd in config.items():
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(role).remotes.keys()
log.info('Running background command on role %s host %s', role,
remote.name)
if isinstance(cmd, list):
ansible_loc = self.ctx.cluster.only('installer.0')
(ceph_first_mon,) = self.ctx.cluster.only(
misc.get_first_mon(self.ctx,
- self.config)).remotes.iterkeys()
+ self.config)).remotes.keys()
if ansible_loc.remotes:
- (ceph_installer,) = ansible_loc.remotes.iterkeys()
+ (ceph_installer,) = ansible_loc.remotes.keys()
else:
ceph_installer = ceph_first_mon
self.ceph_first_mon = ceph_first_mon
def fix_keyring_permission(self):
clients_only = lambda role: role.startswith('client')
- for client in self.cluster.only(clients_only).remotes.iterkeys():
+ for client in self.cluster.only(clients_only).remotes.keys():
client.run(args=[
'sudo',
'chmod',
"""
log.info('Syncing clocks and checking initial clock skew...')
- for rem in ctx.cluster.remotes.iterkeys():
+ for rem in ctx.cluster.remotes.keys():
rem.run(
args = [
'sudo', 'systemctl', 'stop', 'ntp.service', run.Raw('||'),
finally:
log.info('Checking final clock skew...')
- for rem in ctx.cluster.remotes.iterkeys():
+ for rem in ctx.cluster.remotes.keys():
rem.run(
args=[
'PATH=/usr/bin:/usr/sbin', 'ntpq', '-p', run.Raw('||'),
:param config: Configuration
"""
log.info('Checking initial clock skew...')
- for rem in ctx.cluster.remotes.iterkeys():
+ for rem in ctx.cluster.remotes.keys():
rem.run(
args=[
'PATH=/usr/bin:/usr/sbin', 'ntpq', '-p', run.Raw('||'),
finally:
log.info('Checking final clock skew...')
- for rem in ctx.cluster.remotes.iterkeys():
+ for rem in ctx.cluster.remotes.keys():
rem.run(
args=[
'PATH=/usr/bin:/usr/sbin', 'ntpq', '-p', run.Raw('||'),
config = dict((id_, a) for id_ in roles)
for role, ls in config.items():
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(role).remotes.keys()
log.info('Running commands on role %s host %s', role, remote.name)
for c in ls:
c.replace('$TESTDIR', testdir)
"rpm": rpm._update_package_list_and_install,
}
with parallel() as p:
- for remote in ctx.cluster.remotes.iterkeys():
+ for remote in ctx.cluster.remotes.keys():
system_type = teuthology.get_system_type(remote)
p.spawn(
install_pkgs[system_type],
ctx, remote, pkgs[system_type], config)
- for remote in ctx.cluster.remotes.iterkeys():
+ for remote in ctx.cluster.remotes.keys():
# verifies that the install worked as expected
verify_package_version(ctx, config, remote)
"rpm": rpm._remove,
}
with parallel() as p:
- for remote in ctx.cluster.remotes.iterkeys():
+ for remote in ctx.cluster.remotes.keys():
system_type = teuthology.get_system_type(remote)
p.spawn(remove_pkgs[
system_type], ctx, config, remote, pkgs[system_type])
project = config.get('project', 'ceph')
log.info("Removing {proj} sources lists".format(
proj=project))
- for remote in ctx.cluster.remotes.iterkeys():
+ for remote in ctx.cluster.remotes.keys():
remove_fn = remove_sources_pkgs[remote.os.package_type]
p.spawn(remove_fn, ctx, config, remote)
# build a normalized remote -> config dict
remotes = {}
if 'all' in config:
- for remote in ctx.cluster.remotes.iterkeys():
+ for remote in ctx.cluster.remotes.keys():
remotes[remote] = config.get('all')
else:
for role in config.keys():
else:
raise RuntimeError("Unsupported RH Ceph version %s", version)
with parallel() as p:
- for remote in ctx.cluster.remotes.iterkeys():
+ for remote in ctx.cluster.remotes.keys():
if remote.os.name == 'rhel':
log.info("Installing on RHEL node: %s", remote.shortname)
p.spawn(install_pkgs, ctx, remote, version, downstream_config)
log.info("Skipping uninstall of Ceph")
else:
with parallel() as p:
- for remote in ctx.cluster.remotes.iterkeys():
+ for remote in ctx.cluster.remotes.keys():
p.spawn(uninstall_pkgs, ctx, remote, downstream_config)
) as f:
fn = os.path.join(testdir, 'valgrind.supp')
filenames.append(fn)
- for rem in ctx.cluster.remotes.iterkeys():
+ for rem in ctx.cluster.remotes.keys():
teuthology.sudo_write_file(
remote=rem,
path=fn,
dst = os.path.join(destdir, filename)
filenames.append(dst)
with open(src, 'rb') as f:
- for rem in ctx.cluster.remotes.iterkeys():
+ for rem in ctx.cluster.remotes.keys():
teuthology.sudo_write_file(
remote=rem,
path=dst,
return
remotes = []
machs = []
- for name in ctx.config['targets'].iterkeys():
+ for name in ctx.config['targets'].keys():
machs.append(name)
for t, key in ctx.config['targets'].items():
t = misc.canonicalize_hostname(t)
Connect to all remotes in ctx.cluster
"""
log.info('Opening connections...')
- for rem in ctx.cluster.remotes.iterkeys():
+ for rem in ctx.cluster.remotes.keys():
log.debug('connecting to %s', rem.name)
rem.connect()
logdir = os.path.join(ctx.archive, 'remote')
if (not os.path.exists(logdir)):
os.mkdir(logdir)
- for rem in ctx.cluster.remotes.iterkeys():
+ for rem in ctx.cluster.remotes.keys():
path = os.path.join(logdir, rem.shortname)
misc.pull_directory(rem, archive_dir, path)
# Check for coredumps and pull binaries
# set status = 'fail' if the dir is still there = coredumps were
# seen
- for rem in ctx.cluster.remotes.iterkeys():
+ for rem in ctx.cluster.remotes.keys():
r = rem.run(
args=[
'if', 'test', '!', '-e', '{adir}/coredump'.format(adir=archive_dir), run.Raw(';'), 'then',
log.info('Lock checking disabled.')
return
log.info('Checking locks...')
- for machine in ctx.config['targets'].iterkeys():
+ for machine in ctx.config['targets'].keys():
status = teuthology.lock.query.get_status(machine)
log.debug('machine status is %s', repr(status))
assert status is not None, \
if teuthology.lock.keys.do_update_keys(keys_dict)[0]:
log.info("Error in virtual machine keys")
newscandict = {}
- for dkey in all_locked.iterkeys():
+ for dkey in all_locked.keys():
stats = teuthology.lock.query.get_status(dkey)
newscandict[dkey] = stats['ssh_pub_key']
ctx.config['targets'] = newscandict
)
if get_status(ctx.summary) == 'pass' or unlock_on_failure:
log.info('Unlocking machines...')
- for machine in ctx.config['targets'].iterkeys():
+ for machine in ctx.config['targets'].keys():
teuthology.lock.ops.unlock_one(ctx, machine, ctx.owner, ctx.archive)
"""
if ctx.config.get('redhat').get('set-add-repo', None):
add_repo = ctx.config.get('redhat').get('set-add-repo')
- for remote in ctx.cluster.remotes.iterkeys():
+ for remote in ctx.cluster.remotes.keys():
if remote.os.package_type == 'rpm':
remote.run(args=['sudo', 'wget', '-O', '/etc/yum.repos.d/rh_add.repo',
add_repo])
yield
finally:
log.info("Cleaning up repo's")
- for remote in ctx.cluster.remotes.iterkeys():
+ for remote in ctx.cluster.remotes.keys():
if remote.os.package_type == 'rpm':
remote.run(args=['sudo', 'rm',
run.Raw('/etc/yum.repos.d/rh*.repo'),
Setup repo based on redhat nodes
"""
with parallel():
- for remote in ctx.cluster.remotes.iterkeys():
+ for remote in ctx.cluster.remotes.keys():
if remote.os.package_type == 'rpm':
remote.run(args=['sudo', 'subscription-manager', 'repos',
run.Raw('--disable=*ceph*')])
]
conf_fp = StringIO('\n'.join(conf_lines))
try:
- for rem in ctx.cluster.remotes.iterkeys():
+ for rem in ctx.cluster.remotes.keys():
log_context = 'system_u:object_r:var_log_t:s0'
for log_path in (kern_log, misc_log):
rem.run(args=['install', '-m', '666', '/dev/null', log_path])
# flush the file fully. oh well.
log.info('Checking logs for errors...')
- for rem in ctx.cluster.remotes.iterkeys():
+ for rem in ctx.cluster.remotes.keys():
log.debug('Checking %s', rem.name)
r = rem.run(
args=[
ansible_hosts = set()
with parallel():
editinfo = os.path.join(os.path.dirname(__file__), 'edit_sudoers.sh')
- for rem in ctx.cluster.remotes.iterkeys():
+ for rem in ctx.cluster.remotes.keys():
if rem.is_vm:
ansible_hosts.add(rem.shortname)
r = rem.run(args=['test', '-e', '/ceph-qa-ready'],
uri = teuth_config.linux_firmware_git_url or linux_firmware_git_upstream
fw_dir = '/lib/firmware/updates'
- for role in config.iterkeys():
+ for role in config.keys():
if isinstance(config[role], str) and config[role].find('distro') >= 0:
log.info('Skipping firmware on distro kernel');
return
if config.get('skip'):
return
with parallel() as p:
- for remote in ctx.cluster.remotes.iterkeys():
+ for remote in ctx.cluster.remotes.keys():
p.spawn(update_rh_kernel, remote)
files = set(files)
lock_procs = list()
for client in clients:
- (client_remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ (client_remote,) = ctx.cluster.only(client).remotes.keys()
log.info("got a client remote")
(_, _, client_id) = client.partition('.')
filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), op["lockfile"])
# create the files to run these locks on
client = clients.pop()
clients.add(client)
- (client_remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ (client_remote,) = ctx.cluster.only(client).remotes.keys()
(_, _, client_id) = client.partition('.')
file_procs = list()
for lockfile in files:
greenlet.kill(block=True)
for client in clients:
- (client_remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ (client_remote,) = ctx.cluster.only(client).remotes.keys()
(_, _, client_id) = client.partition('.')
filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), op["lockfile"])
proc = client_remote.run(
timeout = None
proc = None
result = None
- (client_remote,) = ctx.cluster.only(op['client']).remotes.iterkeys()
+ (client_remote,) = ctx.cluster.only(op['client']).remotes.keys()
(_, _, client_id) = op['client'].partition('.')
testdir = teuthology.get_testdir(ctx)
filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), op["lockfile"])
if 'nodes' in config:
if isinstance(config['nodes'], basestring) and config['nodes'] == 'all':
for role in teuthology.all_roles(ctx.cluster):
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(role).remotes.keys()
ip,port = remote.ssh.get_transport().getpeername()
hosts.append(ip)
remotes.append(remote)
- (master_remote,) = ctx.cluster.only(config['nodes'][0]).remotes.iterkeys()
+ (master_remote,) = ctx.cluster.only(config['nodes'][0]).remotes.keys()
elif isinstance(config['nodes'], list):
for role in config['nodes']:
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(role).remotes.keys()
ip,port = remote.ssh.get_transport().getpeername()
hosts.append(ip)
remotes.append(remote)
- (master_remote,) = ctx.cluster.only(config['nodes'][0]).remotes.iterkeys()
+ (master_remote,) = ctx.cluster.only(config['nodes'][0]).remotes.keys()
else:
roles = ['client.{id}'.format(id=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
- (master_remote,) = ctx.cluster.only(roles[0]).remotes.iterkeys()
+ (master_remote,) = ctx.cluster.only(roles[0]).remotes.keys()
for role in roles:
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(role).remotes.keys()
ip,port = remote.ssh.get_transport().getpeername()
hosts.append(ip)
remotes.append(remote)
log.info('Executing command on all hosts concurrently with role "%s"' % role)
cluster = ctx.cluster.only(role)
nodes = {}
- for remote in cluster.remotes.iterkeys():
+ for remote in cluster.remotes.keys():
"""Call run for each remote host, but use 'wait=False' to have it return immediately."""
proc = remote.run(args=['sleep', '5', run.Raw(';'), 'date', run.Raw(';'), 'hostname'], wait=False,)
nodes[remote.name] = proc
"""Return remote roles and the type of role specified in config"""
if 'all' in config and len(config) == 1:
ls = config['all']
- for remote in ctx.cluster.remotes.iterkeys():
+ for remote in ctx.cluster.remotes.keys():
yield (remote, ls)
elif 'clients' in config:
ls = config['clients']
for role in teuthology.all_roles_of_type(ctx.cluster, 'client'):
- (remote,) = ctx.cluster.only('client.{r}'.format(r=role)).remotes.iterkeys()
+ (remote,) = ctx.cluster.only('client.{r}'.format(r=role)).remotes.keys()
yield (remote, ls)
del config['clients']
for role, ls in config.items():
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(role).remotes.keys()
yield (remote, ls)
else:
for role, ls in config.items():
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(role).remotes.keys()
yield (remote, ls)
def task(ctx, config):
log.debug("Getting current SELinux state")
modes = dict()
- for remote in self.cluster.remotes.iterkeys():
+ for remote in self.cluster.remotes.keys():
result = remote.run(
args=['/usr/sbin/getenforce'],
stdout=StringIO(),
Set the requested SELinux mode
"""
log.info("Putting SELinux into %s mode", self.mode)
- for remote in self.cluster.remotes.iterkeys():
+ for remote in self.cluster.remotes.keys():
mode = self.old_modes[remote.name]
if mode == "Disabled" or mode == "disabled":
continue
if se_whitelist:
known_denials.extend(se_whitelist)
ignore_known_denials = '\'\(' + str.join('\|', known_denials) + '\)\''
- for remote in self.cluster.remotes.iterkeys():
+ for remote in self.cluster.remotes.keys():
proc = remote.run(
args=['sudo', 'grep', 'avc: .*denied',
'/var/log/audit/audit.log', run.Raw('|'), 'grep', '-v',
if not set(self.old_modes.values()).difference(set([self.mode])):
return
log.info("Restoring old SELinux modes")
- for remote in self.cluster.remotes.iterkeys():
+ for remote in self.cluster.remotes.keys():
mode = self.old_modes[remote.name]
if mode == "Disabled" or mode == "disabled":
continue
"""
all_denials = self.get_denials()
new_denials = dict()
- for remote in self.cluster.remotes.iterkeys():
+ for remote in self.cluster.remotes.keys():
old_host_denials = self.old_denials[remote.name]
all_host_denials = all_denials[remote.name]
new_host_denials = set(all_host_denials).difference(
)
new_denials[remote.name] = list(new_host_denials)
- for remote in self.cluster.remotes.iterkeys():
+ for remote in self.cluster.remotes.keys():
if len(new_denials[remote.name]):
raise SELinuxError(node=remote,
denials=new_denials[remote.name])
# add an entry for all hosts in ctx to auth_keys_data
auth_keys_data = ''
- for inner_host in ctx.cluster.remotes.iterkeys():
+ for inner_host in ctx.cluster.remotes.keys():
inner_username, inner_hostname = str(inner_host).split('@')
# create a 'user@hostname' string using our fake hostname
fake_hostname = '{user}@{host}'.format(user=ssh_keys_user, host=str(inner_hostname))
os_type = ctx.config.get("os_type")
if os_type is None:
pytest.skip('os_type was not defined')
- for remote in ctx.cluster.remotes.iterkeys():
+ for remote in ctx.cluster.remotes.keys():
assert remote.os.name == os_type
def test_correct_os_version(self, ctx, config):
pytest.skip('os_version was not defined')
if ctx.config.get("os_type") == "debian":
pytest.skip('known issue with debian versions; see: issue #10878')
- for remote in ctx.cluster.remotes.iterkeys():
+ for remote in ctx.cluster.remotes.keys():
assert remote.inventory_info['os_version'] == os_version
def test_correct_machine_type(self, ctx, config):
machine_type = ctx.machine_type
- for remote in ctx.cluster.remotes.iterkeys():
+ for remote in ctx.cluster.remotes.keys():
assert remote.machine_type in machine_type
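
The other pattern this patch touches repeatedly, (remote,) = ctx.cluster.only(role).remotes.keys(), keeps working on Python 3 because tuple unpacking accepts any iterable, including a dict_keys view, and it still fails loudly unless the role filter matched exactly one remote. A sketch with a hypothetical remotes mapping:

    remotes = {'ubuntu@smithi001': ['client.0']}

    (remote,) = remotes.keys()    # remote == 'ubuntu@smithi001'
    # (remote,) = {}.keys()       # ValueError: not enough values to unpack (expected 1, got 0)
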