correctly.
"""
result = dict()
- for key, value in config_dict.iteritems():
+ for key, value in config_dict.items():
new_key = key
if new_key.startswith("--"):
new_key = new_key[2:]
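Note: in Python 3, dict.items() returns a lazy view (much like Python 2's iteritems()) rather than the list Python 2's items() built, so it is a drop-in replacement in read-only loops like the one above. A minimal sketch of the view semantics, using a hypothetical dict:

    d = {'--foo': 1}
    view = d.items()          # a view: it reflects later changes to d
    d['--bar'] = 2
    assert ('--bar', 2) in view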
def store_coverage(test_coverage, rev, suite):
with closing(connect_to_db()) as db:
rows = []
- for test, coverage in test_coverage.iteritems():
+ for test, coverage in test_coverage.items():
flattened_cov = [item for sublist in coverage for item in sublist]
rows.append([rev, test, suite] + flattened_cov)
log.debug('inserting rows into db: %s', str(rows))
)
test_coverage = {}
- for test, summary in test_summaries.iteritems():
+ for test, summary in test_summaries.items():
lcov_file = '{name}.lcov'.format(name=test)
log.info('analyzing coverage for %s', test)
job_num = 0
jobs = serializer.jobs_for_run(run_name)
job_total = len(jobs)
- for (job_id, job_dir) in jobs.iteritems():
+ for (job_id, job_dir) in jobs.items():
if not os.path.isdir(job_dir):
continue
job_num += 1
lockd[who][1] += 1 if l['up'] else 0
lockd[who][2] = l['machine_type']
- locks = sorted([p for p in lockd.iteritems()
+ locks = sorted([p for p in lockd.items()
], key=lambda sort: (sort[1][2], sort[1][0]))
total_count, total_up = 0, 0
print "TYPE COUNT UP OWNER"
def push_new_keys(keys_dict, reference):
ret = 0
- for hostname, pubkey in keys_dict.iteritems():
+ for hostname, pubkey in keys_dict.items():
log.info('Checking %s', hostname)
if reference[hostname]['ssh_pub_key'] != pubkey:
log.info('New key found. Updating...')
def list_locks(keyed_by_name=False, **kwargs):
uri = os.path.join(config.lock_server, 'nodes', '')
- for key, value in kwargs.iteritems():
+ for key, value in kwargs.items():
if kwargs[key] is False:
kwargs[key] = '0'
if kwargs[key] is True:
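Note: the loop above only reassigns values of existing keys, which is safe while iterating a Python 3 items() view; inserting or deleting keys mid-iteration raises RuntimeError, in which case iterate over a snapshot instead. A minimal sketch with a hypothetical dict:

    opts = {'verbose': True, 'dry_run': False}
    for key, value in list(opts.items()):   # snapshot: key changes are safe
        if value is False:
            del opts[key]
    assert opts == {'verbose': True}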
return_statuses = list()
for status in statuses:
- for k, v in query.iteritems():
+ for k, v in query.items():
if not misc.is_in_dict(k, v, status):
break
else:
mons = get_mons(roles=roles, ips=ips,
mon_bind_msgr2=mon_bind_msgr2,
mon_bind_addrvec=mon_bind_addrvec)
- for role, addr in mons.iteritems():
+ for role, addr in mons.items():
mon_cluster, _, _ = split_role(role)
if mon_cluster != cluster:
continue
:param cluster: Cluster extracted from the ctx.
"""
- for _, roles_for_host in cluster.remotes.iteritems():
+ for _, roles_for_host in cluster.remotes.items():
for name in roles_for_host:
yield name
:param cluster: Cluster extracted from the ctx.
:param type_: role type
"""
- for _, roles_for_host in cluster.remotes.iteritems():
+ for _, roles_for_host in cluster.remotes.items():
for id_ in roles_of_type(roles_for_host, type_):
yield id_
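Note: both generators above walk cluster.remotes, a mapping from each remote to its list of roles, which is why they needed the iteritems() conversion; a sketch of the shape being iterated, with hypothetical hosts and roles:

    remotes = {'smithi001': ['mon.a', 'osd.0'], 'smithi002': ['client.0']}
    all_names = [name for roles in remotes.values() for name in roles]
    assert 'client.0' in all_names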
Each invocation returns the next monitor address
"""
- for section, data in conf.iteritems():
+ for section, data in conf.items():
PREFIX = 'mon.'
if not section.startswith(PREFIX):
continue
return a
if isinstance(a, dict):
assert isinstance(b, dict)
- for (k, v) in b.iteritems():
+ for (k, v) in b.items():
if k in a:
a[k] = deep_merge(a[k], v)
else:
"""
val = d.get(searchkey, None)
if isinstance(val, dict) and isinstance(searchval, dict):
- for foundkey, foundval in searchval.iteritems():
+ for foundkey, foundval in searchval.items():
if not is_in_dict(foundkey, foundval, val):
return False
return True
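Note: is_in_dict(searchkey, searchval, d) recurses when both the query value and the found value are dicts, so a nested query matches any superset; assuming the non-dict branch (truncated in this excerpt) compares values for equality, usage looks like this, with hypothetical data:

    status = {'os_type': {'name': 'ubuntu', 'version': '20.04'}}
    assert is_in_dict('os_type', {'name': 'ubuntu'}, status)
    assert not is_in_dict('os_type', {'name': 'centos'}, status)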
def stale_openstack_instances(ctx, instances, locked_nodes):
- for (instance_id, instance) in instances.iteritems():
+ for (instance_id, instance) in instances.items():
i = OpenStackInstance(instance_id)
if not i.exists():
log.debug("stale-openstack: {instance} disappeared, ignored"
def stale_openstack_nodes(ctx, instances, locked_nodes):
names = set([ i['Name'] for i in instances.values() ])
- for (name, node) in locked_nodes.iteritems():
+ for (name, node) in locked_nodes.items():
name = decanonicalize_hostname(name)
if node['machine_type'] != 'openstack':
continue
"Not nuking %s because description doesn't match",
lock['name'])
with parallel() as p:
- for target, hostkey in ctx.config['targets'].iteritems():
+ for target, hostkey in ctx.config['targets'].items():
p.spawn(
nuke_one,
ctx,
)
nodes[remote.name] = proc
- for name, proc in nodes.iteritems():
+ for name, proc in nodes.items():
log.info('Waiting for %s to restart syslog...', name)
proc.wait()
if info is None:
self.set_info()
else:
- self.info = dict(map(lambda (k,v): (k.lower(), v), info.iteritems()))
+ self.info = {k.lower(): v for k, v in info.items()}
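Note: the original line also used a tuple-unpacking lambda, lambda (k, v): ..., which PEP 3113 removed in Python 3, so swapping iteritems() for items() alone would still leave a SyntaxError; the dict comprehension above lower-cases keys equivalently:

    info = {'Status': 'ok', 'Power': 'on'}   # hypothetical info payload
    assert {k.lower(): v for k, v in info.items()} == \
           {'status': 'ok', 'power': 'on'}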
def set_info(self):
try:
if resource in hint:
new = hint[resource]
current = result[resource]
- for key, value in hint[resource].iteritems():
+ for key, value in hint[resource].items():
current[key] = max(current[key], new[key])
return result
'../..', self.user_data)
template = open(user_data).read()
openrc = ''
- for (var, value) in os.environ.iteritems():
+ for (var, value) in os.environ.items():
if var in ('OS_TOKEN_VALUE', 'OS_TOKEN_EXPIRES'):
continue
if var.startswith('OS_'):
}
os.environ['OS_REGION_NAME'] = 'REGION'
os.environ['OS_TENANT_ID'] = 'TENANT'
- for (type, cmds) in type2cmd.iteritems():
+ for (type, cmds) in type2cmd.items():
for cmd in cmds:
assert ("//" + type) in o.get_os_url(cmd + " ")
for type in type2cmd.keys():
want = frozenset(r for r in roles if not callable(r))
matchers = [r for r in roles if callable(r)]
- for remote, has_roles in self.remotes.iteritems():
+ for remote, has_roles in self.remotes.items():
# strings given as roles must all match
if frozenset(has_roles) & want != want:
# not a match
"""
matches = self.only(*roles)
c = self.__class__()
- for remote, has_roles in self.remotes.iteritems():
+ for remote, has_roles in self.remotes.items():
if remote not in matches.remotes:
c.add(remote, has_roles)
return c
"""
Run all the patch_* functions in this module.
"""
- monkeys = [(k, v) for (k, v) in globals().iteritems() if k.startswith('patch_') and k != 'patch_all']
+ monkeys = [(k, v) for (k, v) in globals().items() if k.startswith('patch_') and k != 'patch_all']
monkeys.sort()
for k, v in monkeys:
log.debug('Patching %s', k)
@staticmethod
def _version_to_codename(name, version):
- for (_version, codename) in DISTRO_CODENAME_MAP[name].iteritems():
+ for (_version, codename) in DISTRO_CODENAME_MAP[name].items():
if str(version) == _version or str(version).split('.')[0] == _version:
return codename
@staticmethod
def _codename_to_version(name, codename):
- for (version, _codename) in DISTRO_CODENAME_MAP[name].iteritems():
+ for (version, _codename) in DISTRO_CODENAME_MAP[name].items():
if codename == _codename:
return version
raise RuntimeError("No version found for %s %s !" % (
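Note: _version_to_codename matches either the full version string or just its major component against each map key, so a map keyed by major version resolves point releases too; a sketch of the lookup with a hypothetical map entry:

    codename_map = {'centos': {'7': 'core'}}
    version = '7.8'
    matches = [c for v, c in codename_map['centos'].items()
               if str(version) == v or str(version).split('.')[0] == v]
    assert matches == ['core']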
if (should_preserve(item) or not os.path.isdir(item) or not
is_old_enough(item, days)):
continue
- for (subdir, description) in subdirs.iteritems():
+ for (subdir, description) in subdirs.items():
_maybe_remove_subdir(item, subdir, days, description, dry_run)
and returns it as a string.
"""
cmd = ["teuthology"]
- for key, value in args.iteritems():
+ for key, value in args.items():
if value:
# an option, not an argument
if not key.startswith("<"):
try:
for taskdict in tasks:
try:
- ((taskname, config),) = taskdict.iteritems()
+ ((taskname, config),) = taskdict.items()
except (ValueError, AttributeError):
raise RuntimeError('Invalid task definition: %s' % taskdict)
log.info('Running task %s...', taskname)
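Note: unpacking a single (taskname, config) pair straight from an items() view is valid in Python 3, because tuple unpacking only requires an iterable; the ValueError/AttributeError handler above still catches task dicts with zero or multiple entries:

    ((taskname, config),) = {'install': None}.items()
    assert taskname == 'install' and config is None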
'<config_yaml>': 'base_yaml_paths',
'filter': 'filter_in',
}
- for (key, value) in args.iteritems():
+ for (key, value) in args.items():
# Translate --foo-bar to foo_bar
key = key.lstrip('--').replace('-', '_')
# Rename the key if necessary
@staticmethod
def verify_facets(tree, description_list, subset, mat, first, matlimit):
def flatten(tree):
- for k,v in tree.iteritems():
+ for k,v in tree.items():
if v is None and '.yaml' in k:
yield k
elif v is not None and '.disable' not in k:
def pptree(tree, tabs=0):
ret = ""
- for k, v in tree.iteritems():
+ for k, v in tree.items():
if v is None:
ret += ('\t'*tabs) + k.ljust(10) + "\n"
else:
for host_spec in host_specs:
role_matches = self.ctx.cluster.only(host_spec)
if len(role_matches.remotes) > 0:
- for (remote, roles) in role_matches.remotes.iteritems():
+ for (remote, roles) in role_matches.remotes.items():
cluster.add(remote, roles)
- elif isinstance(host_spec, basestring):
+ elif isinstance(host_spec, str):
- for (remote, roles) in self.ctx.cluster.remotes.iteritems():
+ for (remote, roles) in self.ctx.cluster.remotes.items():
if remote.name.split('@')[-1] == host_spec or \
remote.shortname == host_spec:
cluster.add(remote, roles)
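Note: basestring no longer exists in Python 3 (str and bytes are unrelated types there), so the isinstance check above is narrowed to str; code that must straddle both versions often aliases it instead, a sketch:

    try:
        string_types = basestring   # Python 2
    except NameError:
        string_types = str          # Python 3
    assert isinstance('smithi001', string_types)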
testdir = misc.get_testdir(ctx)
tasks = {}
- for role, cmd in config.iteritems():
+ for role, cmd in config.items():
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(role).remotes.keys()
log.info('Running background command on role %s host %s', role,
remote.name)
yield
finally:
- for name, task in tasks.iteritems():
+ for name, task in tasks.items():
log.info('Stopping background command on %s', name)
task.stdin.close()
- run.wait(tasks.itervalues())
+ run.wait(tasks.values())
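Note: iterkeys() and itervalues() disappear in Python 3 alongside iteritems(), so the two lines above are converted as well; keys() and values() return cheap views, and anything that just needs an iterable, like run.wait(), accepts them directly. A tiny illustration with a hypothetical mapping:

    procs = {'smithi001': 'proc-a', 'smithi002': 'proc-b'}
    assert set(procs.values()) == {'proc-a', 'proc-b'}   # replaces itervalues()
    assert sorted(procs.keys()) == ['smithi001', 'smithi002']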
for group in sorted(self.groups_to_roles.keys()):
role_prefix = self.groups_to_roles[group]
want = lambda role: role.startswith(role_prefix)
- for (remote, roles) in self.cluster.only(want).remotes.iteritems():
+ for (remote, roles) in self.cluster.only(want).remotes.items():
hostname = remote.hostname
host_vars = self.get_host_vars(remote)
if group not in hosts_dict:
if group not in hosts_dict:
hosts_dict[group] = dict(hosts=dict())
group_dict = hosts_dict[group]['hosts']
- for (remote, roles) in self.cluster.only(want).remotes.iteritems():
+ for (remote, roles) in self.cluster.only(want).remotes.items():
hostname = remote.hostname
group_dict[hostname] = dict(
ansible_user=remote.user,
if not hasattr(self.ctx, 'cluster'):
return
new_cluster = Cluster()
- for (remote, roles) in self.cluster.remotes.iteritems():
+ for (remote, roles) in self.cluster.remotes.items():
if not hasattr(remote.console, 'spawn_sol_log'):
log.debug("%s does not support IPMI; excluding",
remote.shortname)
roles = teuthology.all_roles(ctx.cluster)
config = dict((id_, a) for id_ in roles)
- for role, ls in config.iteritems():
+ for role, ls in config.items():
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(role).remotes.keys()
log.info('Running commands on role %s host %s', role, remote.name)
for c in ls:
for entry in config:
if not isinstance(entry, dict):
entry = ctx.config.get(entry, {})
- ((taskname, confg),) = entry.iteritems()
+ ((taskname, confg),) = entry.items()
log.info('In full_sequential, running task %s...' % taskname)
mgr = run_tasks.run_one_task(taskname, ctx=ctx, config=confg)
if hasattr(mgr, '__enter__'):
for entry in config:
if not isinstance(entry, dict):
entry = ctx.config.get(entry, {})
- ((taskname, confg),) = entry.iteritems()
+ ((taskname, confg),) = entry.items()
log.info('In full_sequential_finally, running task %s...' % taskname)
mgr = run_tasks.run_one_task(taskname, ctx=ctx, config=confg)
if hasattr(mgr, '__enter__'):
def dict_to_hadoop_conf(items):
out = "<configuration>\n"
- for key, value in items.iteritems():
+ for key, value in items.items():
out += " <property>\n"
out += " <name>" + key + "</name>\n"
out += " <value>" + value + "</value>\n"
remotes[remote] = config.get(role)
result = {}
- for remote, node in remotes.iteritems():
+ for remote, node in remotes.items():
if not node:
node = {}
extra_pkgs = config.get('extra_packages', [])
log.info('extra packages: {packages}'.format(packages=extra_pkgs))
- for remote, node in remotes.iteritems():
+ for remote, node in remotes.items():
system_type = teuthology.get_system_type(remote)
assert system_type in ('deb', 'rpm')
machs = []
- for name in ctx.config['targets'].iterkeys():
+ for name in ctx.config['targets'].keys():
machs.append(name)
- for t, key in ctx.config['targets'].iteritems():
+ for t, key in ctx.config['targets'].items():
t = misc.canonicalize_hostname(t)
try:
if ctx.config['sshkeys'] == 'ignore':
with open(os.path.join(ctx.archive, 'info.yaml'), 'r+') as info_file:
info_yaml = yaml.safe_load(info_file)
info_file.seek(0)
- info_yaml['cluster'] = dict([(rem.name, {'roles': roles}) for rem, roles in ctx.cluster.remotes.iteritems()])
+ info_yaml['cluster'] = dict([(rem.name, {'roles': roles}) for rem, roles in ctx.cluster.remotes.items()])
yaml.safe_dump(info_yaml, info_file, default_flow_style=False)
return new_config
new_config = {}
- for role, role_config in config.iteritems():
+ for role, role_config in config.items():
if role_config is None:
role_config = CONFIG_DEFAULT
if '.' in role:
# (e.g. 'branch: foo' is overridden with 'tag: bar'). To be able to
# use deep_merge(), drop all version keys from the original config if
# the corresponding override has a version key.
- for role, role_config in config.iteritems():
+ for role, role_config in config.items():
if (role in overrides and
any(k in overrides[role] for k in VERSION_KEYS)):
for k in VERSION_KEYS:
:param ctx: Context
:param config: Configuration
"""
- for _, roles_for_host in ctx.cluster.remotes.iteritems():
+ for _, roles_for_host in ctx.cluster.remotes.items():
kernel = None
for role in roles_for_host:
role_kernel = config.get(role, kernel)
:param config: Configuration
"""
procs = {}
- for role, src in config.iteritems():
+ for role, src in config.items():
needs_download = False
if src == 'distro':
wait=False)
procs[role_remote.name] = proc
- for name, proc in procs.iteritems():
+ for name, proc in procs.items():
log.debug('Waiting for download/copy to %s to complete...', name)
proc.wait()
"""
procs = {}
kernel_title = ''
- for role, src in config.iteritems():
+ for role, src in config.items():
(role_remote,) = ctx.cluster.only(role).remotes.keys()
if isinstance(src, str) and src.find('distro') >= 0:
log.info('Installing distro kernel on {role}...'.format(role=role))
)
procs[role_remote.name] = proc
- for name, proc in procs.iteritems():
+ for name, proc in procs.items():
log.debug('Waiting for install on %s to complete...', name)
proc.wait()
:param ctx: Context
:param config: Configuration
"""
- for role, enable in config.iteritems():
+ for role, enable in config.items():
(role_remote,) = ctx.cluster.only(role).remotes.keys()
if "mira" in role_remote.name:
serialdev = "ttyS2"
remove_old_kernels(ctx)
- for role, role_config in config.iteritems():
+ for role, role_config in config.items():
# gather information about this remote
(role_remote,) = ctx.cluster.only(role).remotes.keys()
system_type = role_remote.os.name
for entry in config.get('body', []):
if not isinstance(entry, dict):
entry = ctx.config.get(entry, {})
- ((taskname, confg),) = entry.iteritems()
+ ((taskname, confg),) = entry.items()
log.info('In sequential, running task %s...' % taskname)
mgr = run_tasks.run_one_task(taskname, ctx=ctx, config=confg)
if hasattr(mgr, '__enter__'):
# support the usual list syntax for tasks
if isinstance(entry, list):
entry = dict(sequential=entry)
- ((taskname, confg),) = entry.iteritems()
+ ((taskname, confg),) = entry.items()
p.spawn(_run_spawned, ctx, confg, taskname)
"""Call run for each remote host, but use 'wait=False' to have it return immediately."""
proc = remote.run(args=['sleep', '5', run.Raw(';'), 'date', run.Raw(';'), 'hostname'], wait=False,)
nodes[remote.name] = proc
- for name, proc in nodes.iteritems():
+ for name, proc in nodes.items():
"""Wait for each process to finish before yielding and allowing other contextmanagers to run."""
proc.wait()
yield
- (remote,) = ctx.cluster.only('client.{r}'.format(r=role)).remotes.iterkeys()
+ (remote,) = ctx.cluster.only('client.{r}'.format(r=role)).remotes.keys()
yield (remote, ls)
del config['clients']
- for role, ls in config.iteritems():
+ for role, ls in config.items():
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(role).remotes.keys()
yield (remote, ls)
else:
- for role, ls in config.iteritems():
+ for role, ls in config.items():
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(role).remotes.keys()
yield (remote, ls)
"""
super(SELinux, self).filter_hosts()
new_cluster = Cluster()
- for (remote, roles) in self.cluster.remotes.iteritems():
+ for (remote, roles) in self.cluster.remotes.items():
if remote.is_vm:
msg = "Excluding {host}: VMs are not yet supported"
log.info(msg.format(host=remote.shortname))
for entry in config:
if not isinstance(entry, dict):
entry = ctx.config.get(entry, {})
- ((taskname, confg),) = entry.iteritems()
+ ((taskname, confg),) = entry.items()
log.info('In sequential, running task %s...' % taskname)
mgr = run_tasks.run_one_task(taskname, ctx=ctx, config=confg)
if hasattr(mgr, '__enter__'):
'foo.bar baz': 'foo.bar_baz',
'foo.*.bar baz': 'foo._all_.bar_baz',
}
- for in_, out in sanitized_metrics.iteritems():
+ for in_, out in sanitized_metrics.items():
assert self.klass._sanitize_metric_name(in_) == out
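Note: the expected pairs above imply _sanitize_metric_name() swaps spaces for underscores and '*' path components for '_all_'; a minimal reimplementation consistent with that table (an assumption about the helper, not the project's actual code):

    def sanitize_metric_name(name):
        parts = ('_all_' if p == '*' else p for p in name.split('.'))
        return '.'.join(parts).replace(' ', '_')

    assert sanitize_metric_name('foo.bar baz') == 'foo.bar_baz'
    assert sanitize_metric_name('foo.*.bar baz') == 'foo._all_.bar_baz'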
def test_get_target_globs(self):