rotate_conf_path = os.path.join(os.path.dirname(__file__), 'logrotate.conf')
- with file(rotate_conf_path, 'rb') as f:
+ with open(rotate_conf_path) as f:
conf = ""
- for daemon, size in daemons.iteritems():
+ for daemon, size in daemons.items():
log.info('writing logrotate stanza for {daemon}'.format(daemon=daemon))
conf += f.read().format(daemon_type=daemon, max_size=size)
f.seek(0, 0)
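Note: the logrotate template is read and formatted once per daemon; the f.seek(0, 0) call rewinds the template file so the next iteration can read it again.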
skconf = t.read().format(testdir=teuthology.get_testdir(ctx))
conf = configobj.ConfigObj(StringIO(skconf), file_error=True)
mon_hosts = []
- for role, addr in mons.iteritems():
+ for role, addr in mons.items():
mon_cluster, _, _ = teuthology.split_role(role)
if mon_cluster != cluster:
continue
:return the FSID (as a string) of the newly created monmap
"""
- addresses = list(mons.iteritems())
+ addresses = list(mons.items())
assert addresses, "There are no monitors in config!"
log.debug('Ceph mon addresses: %s', addresses)
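Note: on Python 3, mons.items() returns a view rather than a list, so the explicit list(...) wrapper above is what keeps the monitor addresses indexable later on; the plain for-loops over .items() elsewhere in this change need no such wrapper.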
devs_to_clean = {}
remote_to_roles_to_devs = {}
osds = ctx.cluster.only(teuthology.is_type('osd', cluster_name))
- for remote, roles_for_host in osds.remotes.iteritems():
+ for remote, roles_for_host in osds.remotes.items():
devs = teuthology.get_scratch_devices(remote)
roles_to_devs = assign_devs(
teuthology.cluster_roles_of_type(roles_for_host, 'osd', cluster_name), devs
conf = skeleton_config(
ctx, roles=roles, ips=ips, mons=mons, cluster=cluster_name,
)
- for section, keys in config['conf'].iteritems():
- for key, value in keys.iteritems():
+ for section, keys in config['conf'].items():
+ for key, value in keys.items():
log.info("[%s] %s = %s" % (section, key, value))
if section not in conf:
conf[section] = {}
if not config.get('skip_mgr_daemons', False):
log.info('Setting up mgr nodes...')
mgrs = ctx.cluster.only(teuthology.is_type('mgr', cluster_name))
- for remote, roles_for_host in mgrs.remotes.iteritems():
+ for remote, roles_for_host in mgrs.remotes.items():
for role in teuthology.cluster_roles_of_type(roles_for_host, 'mgr',
cluster_name):
_, _, id_ = teuthology.split_role(role)
log.info('Setting up mds nodes...')
mdss = ctx.cluster.only(teuthology.is_type('mds', cluster_name))
- for remote, roles_for_host in mdss.remotes.iteritems():
+ for remote, roles_for_host in mdss.remotes.items():
for role in teuthology.cluster_roles_of_type(roles_for_host, 'mds',
cluster_name):
_, _, id_ = teuthology.split_role(role)
teuthology.deep_merge(ctx.disk_config.remote_to_roles_to_dev, remote_to_roles_to_devs)
log.info("ctx.disk_config.remote_to_roles_to_dev: {r}".format(r=str(ctx.disk_config.remote_to_roles_to_dev)))
- for remote, roles_for_host in osds.remotes.iteritems():
+ for remote, roles_for_host in osds.remotes.items():
roles_to_devs = remote_to_roles_to_devs[remote]
for role in teuthology.cluster_roles_of_type(roles_for_host, 'osd', cluster_name):
log.info('Reading keys from all nodes...')
keys_fp = StringIO()
keys = []
- for remote, roles_for_host in ctx.cluster.remotes.iteritems():
+ for remote, roles_for_host in ctx.cluster.remotes.items():
for type_ in ['mgr', 'mds', 'osd']:
if type_ == 'mgr' and config.get('skip_mgr_daemons', False):
continue
)
keys.append((type_, id_, data))
keys_fp.write(data)
- for remote, roles_for_host in ctx.cluster.remotes.iteritems():
+ for remote, roles_for_host in ctx.cluster.remotes.items():
for role in teuthology.cluster_roles_of_type(roles_for_host, 'client', cluster_name):
_, _, id_ = teuthology.split_role(role)
data = teuthology.get_file(
)
log.info('Running mkfs on mon nodes...')
- for remote, roles_for_host in mons.remotes.iteritems():
+ for remote, roles_for_host in mons.remotes.items():
for role in teuthology.cluster_roles_of_type(roles_for_host, 'mon', cluster_name):
_, _, id_ = teuthology.split_role(role)
mnt_point = DATA_PATH.format(
)
break
- for remote, dirs in devs_to_clean.iteritems():
+ for remote, dirs in devs_to_clean.items():
for dir_ in dirs:
log.info('Unmounting %s on %s' % (dir_, remote))
try:
pass
else:
raise
- for remote, roles in mons.remotes.iteritems():
+ for remote, roles in mons.remotes.items():
for role in roles:
is_mon = teuthology.is_type('mon', cluster_name)
if is_mon(role):
# create osds in order. (this only matters for pre-luminous, which might
# be hammer, which doesn't take an id_ argument to legacy 'osd create').
osd_uuids = {}
- for remote, roles_for_host in daemons.remotes.iteritems():
+ for remote, roles_for_host in daemons.remotes.items():
is_type_ = teuthology.is_type(type_, cluster_name)
for role in roles_for_host:
if not is_type_(role):
]
)
- for remote, roles_for_host in daemons.remotes.iteritems():
+ for remote, roles_for_host in daemons.remotes.items():
is_type_ = teuthology.is_type(type_, cluster_name)
for role in roles_for_host:
if not is_type_(role):
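The whole change follows one pattern: Python 2's dict.iteritems() no longer exists in Python 3, while dict.items() works under both, returning a list on Python 2 and a lazy view on Python 3. A minimal sketch of the pattern, using a made-up dict rather than anything from the task:

    # Illustrative only; this dict is not part of the teuthology task.
    daemons = {'mon': '100M', 'osd': '500M'}

    # Plain iteration works the same under Python 2 and Python 3.
    for daemon, size in daemons.items():
        print(daemon, size)

    # Materialize the view only where the result must be indexed or reused.
    addresses = list(daemons.items())
    first_daemon, first_size = addresses[0]

The list(...) wrapper is only needed where the result is indexed or kept around, as with the monitor addresses above; the straight iteration loops need no wrapper.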