teuthology.replace_all_with_clients(ctx.cluster, config)
with parallel() as ptask:
- for client, tests in config.iteritems():
+ for client, tests in config.items():
ptask.spawn(_run_tests, ctx, client, tests)
],
)
- for command, config in tests.iteritems():
+ for command, config in tests.items():
if config is None:
config = {}
teuthology.deep_merge(config, overrides)
)
with parallel() as p:
- for role, tests in config.iteritems():
+ for role, tests in config.items():
(remote,) = ctx.cluster.only(role).remotes.keys()
p.spawn(_run_tests, testdir, remote, role, tests)
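# Illustrative sketch only (not part of the diff): the pattern these hunks
# convert. In Python 3, dict.iteritems() is gone; dict.items() returns a
# lightweight view that iterates the same way, so the loop bodies are unchanged.
# 'config' here is hypothetical sample data, not a real teuthology config.
config = {'client.0': ['kernel_untar_build.sh'], 'client.1': ['fsstress.sh']}

for client, tests in config.items():    # py3 view; no intermediate list built
    print(client, tests)

# Only if keys are added or removed inside the loop does the view need to be
# materialized first, e.g.:
for client in list(config):
    if not config[client]:
        del config[client]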
osds = ctx.cluster.only(teuthology.is_type('osd', config['cluster']))
log_dir = '{tdir}/archive/performance/blktrace'.format(tdir=teuthology.get_testdir(ctx))
- for remote, roles_for_host in osds.remotes.iteritems():
+ for remote, roles_for_host in osds.remotes.items():
log.info('Creating %s on %s' % (log_dir, remote.name))
remote.run(
args=['mkdir', '-p', '-m0755', '--', log_dir],
log_dir = '{tdir}/archive/performance/blktrace'.format(tdir=testdir)
osds = ctx.cluster.only(teuthology.is_type('osd'))
- for remote, roles_for_host in osds.remotes.iteritems():
+ for remote, roles_for_host in osds.remotes.items():
roles_to_devs = ctx.disk_config.remote_to_roles_to_dev[remote]
for role in teuthology.cluster_roles_of_type(roles_for_host, 'osd',
config['cluster']):
clients = ctx.cluster.only(teuthology.is_type('client', cluster_name))
testdir = teuthology.get_testdir(ctx)
coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
- for remote, roles_for_host in clients.remotes.iteritems():
+ for remote, roles_for_host in clients.remotes.items():
for role in teuthology.cluster_roles_of_type(roles_for_host, 'client',
cluster_name):
name = teuthology.ceph_role(role)
# Prepare a modified version of cluster.remotes with ceph-deploy-ized names
modified_remotes = {}
ceph_deploy_mapped = dict()
- for _remote, roles_for_host in ctx.cluster.remotes.iteritems():
+ for _remote, roles_for_host in ctx.cluster.remotes.items():
modified_remotes[_remote] = []
for svc_id in roles_for_host:
if svc_id.startswith("{0}.".format(target_role)):
def get_dev_for_osd(ctx, config):
"""Get a list of all osd device names."""
osd_devs = []
- for remote, roles_for_host in ctx.cluster.remotes.iteritems():
+ for remote, roles_for_host in ctx.cluster.remotes.items():
host = remote.name.split('@')[-1]
shortname = host.split('.')[0]
devs = teuthology.get_scratch_devices(remote)
def get_all_nodes(ctx, config):
"""Return a string of node names separated by blanks"""
nodelist = []
- for t, k in ctx.config['targets'].iteritems():
+ for t, k in ctx.config['targets'].items():
host = t.split('@')[-1]
simple_host = host.split('.')[0]
nodelist.append(simple_host)
ceph_branch = None
if config.get('branch') is not None:
cbranch = config.get('branch')
- for var, val in cbranch.iteritems():
+ for var, val in cbranch.items():
ceph_branch = '--{var}={val}'.format(var=var, val=val)
all_nodes = get_all_nodes(ctx, config)
mds_nodes = get_nodes_using_role(ctx, 'mds')
if config.get('conf') is not None:
confp = config.get('conf')
- for section, keys in confp.iteritems():
+ for section, keys in confp.items():
lines = '[{section}]\n'.format(section=section)
teuthology.append_lines_to_file(ceph_admin, conf_path, lines,
sudo=True)
- for key, value in keys.iteritems():
+ for key, value in keys.items():
log.info("[%s] %s = %s" % (section, key, value))
lines = '{key} = {value}\n'.format(key=key, value=value)
teuthology.append_lines_to_file(
)
clients = ctx.cluster.only(teuthology.is_type('client'))
- for remot, roles_for_host in clients.remotes.iteritems():
+ for remot, roles_for_host in clients.remotes.items():
for id_ in teuthology.roles_of_type(roles_for_host, 'client'):
client_keyring = \
'/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
path = os.path.join(ctx.archive, 'data')
os.makedirs(path)
mons = ctx.cluster.only(teuthology.is_type('mon'))
- for remote, roles in mons.remotes.iteritems():
+ for remote, roles in mons.remotes.items():
for role in roles:
if role.startswith('mon.'):
teuthology.pull_directory_tarball(
branch = ctx.config.get('branch')
test_branch = ' --dev={branch} '.format(branch=branch)
mons = ctx.cluster.only(teuthology.is_type('mon'))
- for node, role in mons.remotes.iteritems():
+ for node, role in mons.remotes.items():
admin = node
admin.run(args=['mkdir', conf_dir], check_status=False)
nodename = admin.shortname
log.info('system type is %s', system_type)
osds = ctx.cluster.only(teuthology.is_type('osd'))
- for remote, roles in osds.remotes.iteritems():
+ for remote, roles in osds.remotes.items():
devs = teuthology.get_scratch_devices(remote)
log.info("roles %s", roles)
if (len(devs) < 3):
execute_cdeploy(admin, new_cmd, path)
if config.get('conf') is not None:
confp = config.get('conf')
- for section, keys in confp.iteritems():
+ for section, keys in confp.items():
lines = '[{section}]\n'.format(section=section)
teuthology.append_lines_to_file(admin, conf_path, lines,
sudo=True)
- for key, value in keys.iteritems():
+ for key, value in keys.items():
log.info("[%s] %s = %s" % (section, key, value))
lines = '{key} = {value}\n'.format(key=key, value=value)
teuthology.append_lines_to_file(admin, conf_path, lines,
if mapped_role.get(role):
role = mapped_role.get(role)
remotes_and_roles = ctx.cluster.only(role).remotes
- for remote, roles in remotes_and_roles.iteritems():
+ for remote, roles in remotes_and_roles.items():
nodename = remote.shortname
cmd = cmd + ' ' + nodename
log.info("Upgrading ceph on %s", nodename)
# write the correct mgr key to disk
if config.get('setup-mgr-node', None):
mons = ctx.cluster.only(teuthology.is_type('mon'))
- for remote, roles in mons.remotes.iteritems():
+ for remote, roles in mons.remotes.items():
remote.run(
args=[
run.Raw('sudo ceph auth get client.bootstrap-mgr'),
return
if no_wait is None:
no_wait = []
- for osd, need in seq.iteritems():
+ for osd, need in seq.items():
if osd in no_wait:
continue
got = 0
:param osdnum: osd number
:param argdict: dictionary containing values to set.
"""
- for k, v in argdict.iteritems():
+ for k, v in argdict.items():
self.wait_run_admin_socket(
'osd', osdnum,
['config', 'set', str(k), str(v)])
if osdid not in pgs:
continue
- for pg, JSON in db[basename]["pg2json"].iteritems():
+ for pg, JSON in db[basename]["pg2json"].items():
if pg in pgs[osdid]:
cmd = ((prefix + "--pgid {pg}").
format(id=osdid, pg=pg).split())
if osdid not in pgs:
continue
- for pg, JSON in db[basename]["pg2json"].iteritems():
+ for pg, JSON in db[basename]["pg2json"].items():
if pg in pgs[osdid]:
cmd = ((prefix + "--pgid {pg}").
format(id=osdid, pg=pg).split())
def seen_health_warning():
health = self.ceph_cluster.mon_manager.get_mon_health()
codes = [s for s in health['checks']]
- summary_strings = [s[1]['summary']['message'] for s in health['checks'].iteritems()]
+ summary_strings = [s[1]['summary']['message'] for s in health['checks'].items()]
if len(summary_strings) == 0:
log.debug("Not expected number of summary strings ({0})".format(summary_strings))
return False
"--inode={0}".format(inos["./file3_sixmegs"]), "summary"], 0)
# Revert to old inotable.
- for key, value in inotable_copy.iteritems():
+ for key, value in inotable_copy.items():
self.fs.put_metadata_object_raw(key, value)
self.mds_cluster.mds_restart()
def _get_connection_count(self, status=None):
perf = self.fs.rank_asok(["perf", "dump"], status=status)
conn = 0
- for module, dump in perf.iteritems():
+ for module, dump in perf.items():
if "AsyncMessenger::Worker" in module:
conn += dump['msgr_active_connections']
return conn
def _check_snapclient_cache(snaps_dump, cache_dump=None, rank=0):
if cache_dump is None:
cache_dump = self._get_snapclient_dump(rank=rank)
- for key, value in cache_dump.iteritems():
+ for key, value in cache_dump.items():
if value != snaps_dump[key]:
return False
return True;
log.info('Pulling tests from %s ref %s', git_url, refspec)
try:
- for client, tests in clients.iteritems():
+ for client, tests in clients.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
remote.run(
for role in clients.keys():
p.spawn(_run_tests, ctx, role)
finally:
- for client, tests in clients.iteritems():
+ for client, tests in clients.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
test_files = set([test.rsplit('/', 1)[1] for test in tests])
# add address entries for each cname
dnsmasq = "server=8.8.8.8\nserver=8.8.4.4\n"
address_template = "address=/{cname}/{ip_address}\n"
- for cname, ip_address in cnames.iteritems():
+ for cname, ip_address in cnames.items():
dnsmasq += address_template.format(cname=cname, ip_address=ip_address)
# write to temporary dnsmasq file
# multiple roles may map to the same remote, so collect names by remote
remote_names = {}
- for role, cnames in config.iteritems():
+ for role, cnames in config.items():
remote = get_remote_for_role(ctx, role)
if remote is None:
raise ConfigError('no remote for role %s' % role)
names[cname] = remote.ip_address
elif isinstance(cnames, dict):
# when given a dict, look up the remote ip for each
- for cname, client in cnames.iteritems():
+ for cname, client in cnames.items():
r = get_remote_for_role(ctx, client)
if r is None:
raise ConfigError('no remote for role %s' % client)
# run subtasks for each unique remote
subtasks = []
- for remote, cnames in remote_names.iteritems():
+ for remote, cnames in remote_names.items():
subtasks.extend([ lambda r=remote: install_dnsmasq(r) ])
subtasks.extend([ lambda r=remote: backup_resolv(r, resolv_bak) ])
subtasks.extend([ lambda r=remote: replace_resolv(r, resolv_tmp) ])
roles = teuthology.all_roles(ctx.cluster)
config = dict((id_, a) for id_ in roles)
- for role, ls in config.iteritems():
+ for role, ls in config.items():
(remote,) = ctx.cluster.only(role).remotes.keys()
log.info('Running commands on role %s host %s', role, remote.name)
for c in ls:
"""
port = initial_port
role_endpoints = {}
- for remote, roles_for_host in ctx.cluster.remotes.iteritems():
+ for remote, roles_for_host in ctx.cluster.remotes.items():
for role in roles_for_host:
if role in config:
role_endpoints[role] = (remote.name.split('@')[1], port)
DEFAULT_MEM = 4096 # in megabytes
def create_images(ctx, config, managers):
- for client, client_config in config.iteritems():
+ for client, client_config in config.items():
disks = client_config.get('disks', DEFAULT_NUM_DISKS)
if not isinstance(disks, list):
disks = [{} for n in range(int(disks))]
)
def create_clones(ctx, config, managers):
- for client, client_config in config.iteritems():
+ for client, client_config in config.items():
clone = client_config.get('clone', False)
if clone:
num_disks = client_config.get('disks', DEFAULT_NUM_DISKS)
Handle directory creation and cleanup
"""
testdir = teuthology.get_testdir(ctx)
- for client, client_config in config.iteritems():
+ for client, client_config in config.items():
assert 'test' in client_config, 'You must specify a test to run'
(remote,) = ctx.cluster.only(client).remotes.keys()
remote.run(
try:
yield
finally:
- for client, client_config in config.iteritems():
+ for client, client_config in config.items():
assert 'test' in client_config, 'You must specify a test to run'
(remote,) = ctx.cluster.only(client).remotes.keys()
remote.run(
git_url = teuth_config.get_ceph_qa_suite_git_url()
log.info('Pulling tests from %s ref %s', git_url, refspec)
- for client, client_config in config.iteritems():
+ for client, client_config in config.items():
assert 'test' in client_config, 'You must specify a test to run'
test = client_config['test']
"""Downland base image, remove image file when done"""
log.info('downloading base image')
testdir = teuthology.get_testdir(ctx)
- for client, client_config in config.iteritems():
+ for client, client_config in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
base_file = '{tdir}/qemu/base.{client}.qcow2'.format(tdir=testdir, client=client)
image_url = client_config.get('image_url', DEFAULT_IMAGE_URL)
"""Setup kvm environment and start qemu"""
procs = []
testdir = teuthology.get_testdir(ctx)
- for client, client_config in config.iteritems():
+ for client, client_config in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
log_dir = '{tdir}/archive/qemu/{client}'.format(tdir=testdir, client=client)
remote.run(
weights['append'] = weights['append'] / 2
weights['append_excl'] = weights['append']
- for op, weight in weights.iteritems():
+ for op, weight in weights.items():
args.extend([
'--op', op, str(weight)
])
"""
assert isinstance(config, dict)
- for client, properties in config['config'].iteritems():
+ for client, properties in config['config'].items():
- run_stages[client] = string.split(properties.get('stages', 'prepare,check'), ',')
+ run_stages[client] = properties.get('stages', 'prepare,check').split(',')
log.info('Creating rgw users...')
ragweed_conf = config['ragweed_conf'][client]
ragweed_conf.setdefault('fixtures', {})
ragweed_conf['rgw'].setdefault('bucket_prefix', 'test-' + client)
- for section, user in users.iteritems():
+ for section, user in users.items():
_config_user(ragweed_conf, section, '{user}.{client}'.format(user=user, client=client))
log.debug('Creating user {user} on {host}'.format(user=ragweed_conf[section]['user_id'], host=client))
if user == 'sysuser':
assert isinstance(config, dict)
log.info('Configuring ragweed...')
testdir = teuthology.get_testdir(ctx)
- for client, properties in config['clients'].iteritems():
+ for client, properties in config['clients'].items():
(remote,) = ctx.cluster.only(client).remotes.keys()
remote.run(
args=[
finally:
log.info('Cleaning up boto...')
- for client, properties in config['clients'].iteritems():
+ for client, properties in config['clients'].items():
(remote,) = ctx.cluster.only(client).remotes.keys()
remote.run(
args=[
assert isinstance(config, dict)
testdir = teuthology.get_testdir(ctx)
attrs = ["!fails_on_rgw"]
- for client, client_config in config.iteritems():
+ for client, client_config in config.items():
- stages = string.join(run_stages[client], ',')
+ stages = ','.join(run_stages[client])
args = [
'RAGWEED_CONF={tdir}/archive/ragweed.{client}.conf'.format(tdir=testdir, client=client),
norm_config = teuthology.replace_all_with_clients(ctx.cluster, config)
if isinstance(norm_config, dict):
role_images = {}
- for role, properties in norm_config.iteritems():
+ for role, properties in norm_config.items():
if properties is None:
properties = {}
role_images[role] = properties.get('image_name')
client_config = config['all']
clients = ctx.cluster.only(teuthology.is_type('client'))
rbd_test_dir = teuthology.get_testdir(ctx) + "/rbd_fio_test"
- for remote,role in clients.remotes.iteritems():
+ for remote,role in clients.remotes.items():
if 'client_config' in locals():
with parallel() as p:
p.spawn(run_fio, remote, client_config, rbd_test_dir)
def _nuke_mons(manager, mons, mon_id):
assert mons
is_mon = teuthology.is_type('mon')
- for remote, roles in mons.remotes.iteritems():
+ for remote, roles in mons.remotes.items():
for role in roles:
if not is_mon(role):
continue
is_osd = teuthology.is_type('osd')
osds = ctx.cluster.only(is_osd)
assert osds
- for osd, roles in osds.remotes.iteritems():
+ for osd, roles in osds.remotes.items():
for role in roles:
if not is_osd(role):
continue
# the initial monmap is in the ceph.conf, so we are good.
n_mons = 0
is_mon = teuthology.is_type('mon')
- for remote, roles in mons.remotes.iteritems():
+ for remote, roles in mons.remotes.items():
for role in roles:
if not is_mon(role):
continue
def _revive_mgrs(ctx, manager):
is_mgr = teuthology.is_type('mgr')
mgrs = ctx.cluster.only(is_mgr)
- for _, roles in mgrs.remotes.iteritems():
+ for _, roles in mgrs.remotes.items():
for role in roles:
if not is_mgr(role):
continue
def _revive_osds(ctx, manager):
is_osd = teuthology.is_type('osd')
osds = ctx.cluster.only(is_osd)
- for _, roles in osds.remotes.iteritems():
+ for _, roles in osds.remotes.items():
for role in roles:
if not is_osd(role):
continue
try:
assert 'exec' in config, "config requires exec key with <role>: <command> entries"
- for role, task in config['exec'].iteritems():
+ for role, task in config['exec'].items():
log.info('restart for role {r}'.format(r=role))
(remote,) = ctx.cluster.only(role).remotes.keys()
srcdir, restarts = get_tests(ctx, config, role, remote, testdir)
]
env = config.get('env')
if env is not None:
- for var, val in env.iteritems():
+ for var, val in env.items():
quoted_val = pipes.quote(val)
env_arg = '{var}={val}'.format(var=var, val=quoted_val)
args.append(run.Raw(env_arg))
def assign_endpoints(ctx, config, default_cert):
role_endpoints = {}
- for role, client_config in config.iteritems():
+ for role, client_config in config.items():
client_config = client_config or {}
remote = get_remote_for_role(ctx, role)
"""
assert isinstance(config, dict)
testdir = teuthology.get_testdir(ctx)
- for client, client_config in config.iteritems():
+ for client, client_config in config.items():
client_config['extra_args'] = [
's3tests.functional.test_s3:test_bucket_list_return_data',
]
netcat_out = BytesIO()
- for client, client_config in config.iteritems():
+ for client, client_config in config.items():
ctx.cluster.only(client).run(
args = [
'netcat',
overrides = ctx.config.get('overrides', {})
# merge each client section, not the top level.
- for (client, cconf) in config.iteritems():
+ for (client, cconf) in config.items():
teuthology.deep_merge(cconf, overrides.get('rgw-logsocket', {}))
log.debug('config is %s', config)
""" create cluster and gateway instances for all of the radosgw roles """
clusters = {}
gateways = {}
- for role, endpoint in role_endpoints.iteritems():
+ for role, endpoint in role_endpoints.items():
cluster_name, daemon_type, client_id = misc.split_role(role)
# find or create the cluster by name
cluster = clusters.get(cluster_name)
rwconf['files'].setdefault('num', 10)
rwconf['files'].setdefault('size', 2000)
rwconf['files'].setdefault('stddev', 500)
- for section, user in users.iteritems():
+ for section, user in users.items():
_config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
log.debug('creating user {user} on {client}'.format(user=s3tests_conf[section]['user_id'],
client=client))
yield
finally:
for client in config['clients']:
- for section, user in users.iteritems():
+ for section, user in users.items():
#uid = '{user}.{client}'.format(user=user, client=client)
real_uid, delete_this_user = cached_client_user_names[client][section+user]
if delete_this_user:
"""
assert isinstance(config, dict)
log.info('Configuring s3-readwrite-tests...')
- for client, properties in config['clients'].iteritems():
+ for client, properties in config['clients'].items():
s3tests_conf = config['s3tests_conf'][client]
if properties is not None and 'rgw_server' in properties:
host = None
"""
assert isinstance(config, dict)
testdir = teuthology.get_testdir(ctx)
- for client, client_config in config.iteritems():
+ for client, client_config in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
conf = teuthology.get_file(remote, '{tdir}/archive/s3readwrite.{client}.config.yaml'.format(tdir=testdir, client=client))
args = [
assert isinstance(config, dict)
log.info('Configuring s3-roundtrip-tests...')
testdir = teuthology.get_testdir(ctx)
- for client, properties in config['clients'].iteritems():
+ for client, properties in config['clients'].items():
s3tests_conf = config['s3tests_conf'][client]
if properties is not None and 'rgw_server' in properties:
host = None
"""
assert isinstance(config, dict)
testdir = teuthology.get_testdir(ctx)
- for client, client_config in config.iteritems():
+ for client, client_config in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
conf = teuthology.get_file(remote, '{tdir}/archive/s3roundtrip.{client}.config.yaml'.format(tdir=testdir, client=client))
args = [
s3tests_conf = config['s3tests_conf'][client]
s3tests_conf.setdefault('fixtures', {})
s3tests_conf['fixtures'].setdefault('bucket prefix', 'test-' + client + '-{random}-')
- for section, user in users.iteritems():
+ for section, user in users.items():
_config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
log.debug('Creating user {user} on {host}'.format(user=s3tests_conf[section]['user_id'], host=client))
cluster_name, daemon_type, client_id = teuthology.split_role(client)
assert isinstance(config, dict)
log.info('Configuring s3-tests...')
testdir = teuthology.get_testdir(ctx)
- for client, properties in config['clients'].iteritems():
+ for client, properties in config['clients'].items():
s3tests_conf = config['s3tests_conf'][client]
if properties is not None and 'rgw_server' in properties:
host = None
finally:
log.info('Cleaning up boto...')
- for client, properties in config['clients'].iteritems():
+ for client, properties in config['clients'].items():
(remote,) = ctx.cluster.only(client).remotes.keys()
remote.run(
args=[
testdir = teuthology.get_testdir(ctx)
# civetweb > 1.8 && beast parsers are strict on rfc2616
attrs = ["!fails_on_rgw", "!lifecycle_expiration", "!fails_strict_rfc2616"]
- for client, client_config in config.iteritems():
+ for client, client_config in config.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
args = [
'S3TEST_CONF={tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client),
log.debug('Scanning radosgw logs for leaked encryption keys...')
procs = list()
- for client, client_config in config.iteritems():
+ for client, client_config in config.items():
if not client_config.get('scan_for_encryption_keys', True):
continue
cluster_name, daemon_type, client_id = teuthology.split_role(client)
log.info('Creating rgw users...')
testdir = teuthology.get_testdir(ctx)
users = {'': 'foo', '2': 'bar'}
- for client, testswift_conf in config.iteritems():
+ for client, testswift_conf in config.items():
cluster_name, daemon_type, client_id = teuthology.split_role(client)
- for suffix, user in users.iteritems():
+ for suffix, user in users.items():
_config_user(testswift_conf, '{user}.{client}'.format(user=user, client=client), user, suffix)
ctx.cluster.only(client).run(
args=[
"""
assert isinstance(config, dict)
testdir = teuthology.get_testdir(ctx)
- for client, client_config in config.iteritems():
+ for client, client_config in config.items():
args = [
'SWIFT_TEST_CONFIG_FILE={tdir}/archive/testswift.{client}.conf'.format(tdir=testdir, client=client),
'{tdir}/swift/virtualenv/bin/nosetests'.format(tdir=testdir),
testswift_conf = {}
clients = []
- for client, client_config in config.iteritems():
+ for client, client_config in config.items():
# http://tracker.ceph.com/issues/40304 can't bootstrap on rhel 7.6+
(remote,) = ctx.cluster.only(client).remotes.keys()
if remote.os.name == 'rhel' and LooseVersion(remote.os.version) >= LooseVersion('7.6'):
Test ceph systemd services can start, stop and restart and
check for any failed services and report back errors
"""
- for remote, roles in ctx.cluster.remotes.iteritems():
+ for remote, roles in ctx.cluster.remotes.items():
remote.run(args=['sudo', 'ps', '-eaf', run.Raw('|'),
'grep', 'ceph'])
units = remote.sh('sudo systemctl list-units | grep ceph',
"""
remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
tgtd_list = []
- for rem, roles in remotes.iteritems():
+ for rem, roles in remotes.items():
for _id in roles:
if _id in start_tgtd:
if not rem in tgtd_list:
return [
'osd', 'erasure-code-profile', 'set',
profile_name
- ] + [ str(key) + '=' + str(value) for key, value in profile.iteritems() ]
+ ] + [ str(key) + '=' + str(value) for key, value in profile.items() ]
overrides.pop(i, None)
misc.deep_merge(config, overrides)
- for spec, cls in refspecs.iteritems():
+ for spec, cls in refspecs.items():
refspec = config.get(spec)
if refspec:
refspec = cls(refspec)
log.info("timeout={}".format(timeout))
log.info("cleanup={}".format(cleanup))
with parallel() as p:
- for role, tests in clients.iteritems():
+ for role, tests in clients.items():
if role != "all":
p.spawn(_run_tests, ctx, refspec, role, tests,
config.get('env'),
run.Raw('CEPH_ROOT={dir}'.format(dir=clonedir)),
]
if env is not None:
- for var, val in env.iteritems():
+ for var, val in env.items():
quoted_val = pipes.quote(val)
env_arg = '{var}={val}'.format(var=var, val=quoted_val)
args.append(run.Raw(env_arg))
]
}
- for (module,cmd_lst) in cmds.iteritems():
+ for (module,cmd_lst) in cmds.items():
k = keyring_base + '.' + module
for cmd in cmd_lst:
print('generating keyring for {m}/{c}'.format(m=module,c=cmd_cmd))
# gen keyring
- for (good_or_bad,kind_map) in perms.iteritems():
- for (kind,lst) in kind_map.iteritems():
+ for (good_or_bad,kind_map) in perms.items():
+ for (kind,lst) in kind_map.items():
for (perm, cap) in lst:
cap_formatted = cap.format(
s=module,
# test
for good_bad in perms.keys():
- for (kind,lst) in perms[good_bad].iteritems():
+ for (kind,lst) in perms[good_bad].items():
for (perm,_) in lst:
cname = 'client.{gb}-{k}-{p}'.format(gb=good_bad,k=kind,p=perm)
self.etag = '"' + self.etag + '"'
new_meta = {}
- for meta_key, meta_val in k.metadata.iteritems():
+ for meta_key, meta_val in k.metadata.items():
if not meta_key.startswith('rgwx-'):
new_meta[meta_key] = meta_val
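# Illustrative sketch only: the same .iteritems() -> .items() substitution is
# valid inside comprehensions and one-liners, as in the erasure-code-profile
# and rgwx-metadata hunks above. 'profile' and 'metadata' are made-up stand-ins,
# not real teuthology objects.
profile = {'k': '2', 'm': '1', 'plugin': 'jerasure'}
args = ['osd', 'erasure-code-profile', 'set', 'myprofile'] + \
       [str(key) + '=' + str(value) for key, value in profile.items()]

metadata = {'rgwx-copy-if-newer': 'true', 'content-type': 'text/plain'}
new_meta = {k: v for k, v in metadata.items() if not k.startswith('rgwx-')}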