"""
testdir = teuthology.get_testdir(ctx)
log.debug('Running admin socket tests on %s', client)
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(client).remotes.keys()
socket_path = '/var/run/ceph/ceph-{name}.asok'.format(name=client)
overrides = ctx.config.get('overrides', {}).get('admin_socket', {})
log.info('Setting up autotest...')
testdir = teuthology.get_testdir(ctx)
with parallel() as p:
- for role in config.iterkeys():
+ for role in config.keys():
(remote,) = ctx.cluster.only(role).remotes.keys()
p.spawn(_download, testdir, remote)
log.info('Making a separate scratch dir for every client...')
- for role in config.iterkeys():
+ for role in config.keys():
assert isinstance(role, basestring)
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(role).remotes.keys()
mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
scratch = os.path.join(mnt, 'client.{id}'.format(id=id_))
remote.run(
conf += f.read().format(daemon_type=daemon, max_size=size)
f.seek(0, 0)
- for remote in ctx.cluster.remotes.iterkeys():
+ for remote in ctx.cluster.remotes.keys():
teuthology.write_file(remote=remote,
path='{tdir}/logrotate.ceph-test.conf'.format(tdir=testdir),
data=StringIO(conf)
log.info('Archiving logs...')
path = os.path.join(ctx.archive, 'remote')
os.makedirs(path)
- for remote in ctx.cluster.remotes.iterkeys():
+ for remote in ctx.cluster.remotes.keys():
sub = os.path.join(path, remote.shortname)
os.makedirs(sub)
teuthology.pull_directory(remote, '/var/log/ceph',
finally:
lookup_procs = list()
log.info('Checking for errors in any valgrind logs...')
- for remote in ctx.cluster.remotes.iterkeys():
+ for remote in ctx.cluster.remotes.keys():
# look at valgrind logs for each node
proc = remote.run(
args=[
def crush_setup(ctx, config):
cluster_name = config['cluster']
first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
- (mon_remote,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys()
profile = config.get('crush_tunables', 'default')
log.info('Setting crush tunables to %s', profile)
def create_rbd_pool(ctx, config):
cluster_name = config['cluster']
first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
- (mon_remote,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys()
log.info('Waiting for OSDs to come up')
teuthology.wait_until_osds_up(
ctx,
coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
- (mon_remote,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys()
mdss = ctx.cluster.only(teuthology.is_type('mds', cluster_name))
# If there are any MDSs, then create a filesystem for them to use
# Do this last because requires mon cluster to be up and running
path=monmap_path,
)
- for rem in ctx.cluster.remotes.iterkeys():
+ for rem in ctx.cluster.remotes.keys():
# copy mon key and initial monmap
log.info('Sending monmap to node {remote}'.format(remote=rem))
teuthology.sudo_write_file(
with contextutil.nested(*subtasks):
first_mon = teuthology.get_first_mon(ctx, config, config['cluster'])
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ (mon,) = ctx.cluster.only(first_mon).remotes.keys()
if not hasattr(ctx, 'managers'):
ctx.managers = {}
ctx.managers[config['cluster']] = CephManager(
obtained from `python_version`, if specified.
"""
# use mon.a for ceph_admin
- (ceph_admin,) = ctx.cluster.only('mon.a').remotes.iterkeys()
+ (ceph_admin,) = ctx.cluster.only('mon.a').remotes.keys()
try:
py_ver = str(config['python_version'])
# puts it. Remember this here, because subsequently IDs will change from those in
# the test config to those that ceph-deploy invents.
- (ceph_admin,) = ctx.cluster.only('mon.a').remotes.iterkeys()
+ (ceph_admin,) = ctx.cluster.only('mon.a').remotes.keys()
def execute_ceph_deploy(cmd):
"""Remotely execute a ceph_deploy command"""
def ceph_volume_osd_create(ctx, config):
osds = ctx.cluster.only(teuthology.is_type('osd'))
no_of_osds = 0
- for remote in osds.remotes.iterkeys():
+ for remote in osds.remotes.keys():
# all devs should be lvm
osd_create_cmd = './ceph-deploy osd create --debug ' + remote.shortname + ' '
# default is bluestore so we just need config item for filestore
# create-keys is explicit now
# http://tracker.ceph.com/issues/16036
mons = ctx.cluster.only(teuthology.is_type('mon'))
- for remote in mons.remotes.iterkeys():
+ for remote in mons.remotes.keys():
remote.run(args=['sudo', 'ceph-create-keys', '--cluster', 'ceph',
'--id', remote.shortname])
estatus_gather = execute_ceph_deploy(gather_keys)
if estatus_gather != 0:
log.info('Archiving logs...')
path = os.path.join(ctx.archive, 'remote')
os.makedirs(path)
- for remote in ctx.cluster.remotes.iterkeys():
+ for remote in ctx.cluster.remotes.keys():
sub = os.path.join(path, remote.shortname)
os.makedirs(sub)
teuthology.pull_directory(remote, '/var/log/ceph',
ceph_branch = '--dev={branch}'.format(branch=dev_branch)
# get the node used for initial deployment which is mon.a
mon_a = mapped_role.get('mon.a')
- (ceph_admin,) = ctx.cluster.only(mon_a).remotes.iterkeys()
+ (ceph_admin,) = ctx.cluster.only(mon_a).remotes.keys()
testdir = teuthology.get_testdir(ctx)
cmd = './ceph-deploy install ' + ceph_branch
for role in roles:
prefix = ("sudo ceph-objectstore-tool "
"--data-path {fpath} "
"--journal-path {jpath} ").format(fpath=FSPATH, jpath=JPATH)
- for remote in osds.remotes.iterkeys():
+ for remote in osds.remotes.keys():
log.debug(remote)
log.debug(osds.remotes[remote])
for role in osds.remotes[remote]:
GETNAME = os.path.join(DATADIR, "get")
SETNAME = os.path.join(DATADIR, "set")
- for remote in osds.remotes.iterkeys():
+ for remote in osds.remotes.keys():
for role in osds.remotes[remote]:
if string.find(role, "osd.") != 0:
continue
GETNAME = os.path.join(DATADIR, "get")
SETNAME = os.path.join(DATADIR, "set")
- for remote in osds.remotes.iterkeys():
+ for remote in osds.remotes.keys():
for role in osds.remotes[remote]:
if string.find(role, "osd.") != 0:
continue
log.error(values)
log.info("Test pg info")
- for remote in osds.remotes.iterkeys():
+ for remote in osds.remotes.keys():
for role in osds.remotes[remote]:
if string.find(role, "osd.") != 0:
continue
ERRORS += 1
log.info("Test pg logging")
- for remote in osds.remotes.iterkeys():
+ for remote in osds.remotes.keys():
for role in osds.remotes[remote]:
if string.find(role, "osd.") != 0:
continue
log.info("Test pg export")
EXP_ERRORS = 0
- for remote in osds.remotes.iterkeys():
+ for remote in osds.remotes.keys():
for role in osds.remotes[remote]:
if string.find(role, "osd.") != 0:
continue
log.info("Test pg removal")
RM_ERRORS = 0
- for remote in osds.remotes.iterkeys():
+ for remote in osds.remotes.keys():
for role in osds.remotes[remote]:
if string.find(role, "osd.") != 0:
continue
if EXP_ERRORS == 0 and RM_ERRORS == 0:
log.info("Test pg import")
- for remote in osds.remotes.iterkeys():
+ for remote in osds.remotes.keys():
for role in osds.remotes[remote]:
if string.find(role, "osd.") != 0:
continue
@property
def admin_remote(self):
first_mon = misc.get_first_mon(self._ctx, None)
- (result,) = self._ctx.cluster.only(first_mon).remotes.iterkeys()
+ (result,) = self._ctx.cluster.only(first_mon).remotes.keys()
return result
def __init__(self, ctx):
try:
for client, tests in clients.iteritems():
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(client).remotes.keys()
client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
remote.run(
args=[
)
with parallel() as p:
- for role in clients.iterkeys():
+ for role in clients.keys():
p.spawn(_run_tests, ctx, role)
finally:
for client, tests in clients.iteritems():
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(client).remotes.keys()
client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
test_files = set([test.rsplit('/', 1)[1] for test in tests])
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(role).remotes.keys()
ceph_ref = ctx.summary.get('ceph-sha1', 'master')
testdir = teuthology.get_testdir(ctx)
config = {}
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ (mon,) = ctx.cluster.only(first_mon).remotes.keys()
num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
log.info('num_osds is %s' % num_osds)
while True:
for i in range(num_osds):
- (osd_remote,) = ctx.cluster.only('osd.%d' % i).remotes.iterkeys()
+ (osd_remote,) = ctx.cluster.only('osd.%d' % i).remotes.keys()
p = osd_remote.run(
args = [ 'test', '-e', '{tdir}/err'.format(tdir=testdir) ],
wait=True,
log.info('writing initial objects')
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ (mon,) = ctx.cluster.only(first_mon).remotes.keys()
# write 100 objects
for i in range(100):
rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile])
log.info('writing initial objects')
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ (mon,) = ctx.cluster.only(first_mon).remotes.keys()
# write 100 objects
for i in range(100):
rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile])
# Export a pg
(exp_remote,) = ctx.\
- cluster.only('osd.{o}'.format(o=divergent)).remotes.iterkeys()
+ cluster.only('osd.{o}'.format(o=divergent)).remotes.keys()
FSPATH = manager.get_filepath()
JPATH = os.path.join(FSPATH, "journal")
prefix = ("sudo adjust-ulimits ceph-objectstore-tool "
timeout = 60
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ (mon,) = ctx.cluster.only(first_mon).remotes.keys()
manager = ceph_manager.CephManager(
mon,
assert isinstance(config, dict), \
'lost_unfound task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ (mon,) = ctx.cluster.only(first_mon).remotes.keys()
manager = ceph_manager.CephManager(
mon,
config = dict((id_, a) for id_ in roles)
for role, ls in config.iteritems():
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(role).remotes.keys()
log.info('Running commands on role %s host %s', role, remote.name)
for c in ls:
c.replace('$TESTDIR', testdir)
# just use the first client...
client = clients[0];
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(client).remotes.keys()
testdir = teuthology.get_testdir(ctx)
'rpm': [ 'libffi-devel', 'openssl-devel' ],
}
for (client, _) in config.items():
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(client).remotes.keys()
for dep in deps[remote.os.package_type]:
install_package(dep, remote)
try:
log.info('Removing packaged dependencies of Keystone...')
for (client, _) in config.items():
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(client).remotes.keys()
for dep in deps[remote.os.package_type]:
remove_package(dep, remote)
log.info('Configuring keystone...')
for (client, _) in config.items():
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(client).remotes.keys()
cluster_name, _, client_id = teuthology.split_role(client)
# start the public endpoint
assert isinstance(config, dict), \
'lost_unfound task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ (mon,) = ctx.cluster.only(first_mon).remotes.keys()
manager = ceph_manager.CephManager(
mon,
log.info('got client_roles={client_roles_}'.format(client_roles_=client_roles))
for role in client_roles:
log.info('role={role_}'.format(role_=role))
- (creator_remote, ) = ctx.cluster.only('client.{id}'.format(id=role)).remotes.iterkeys()
+ (creator_remote, ) = ctx.cluster.only('client.{id}'.format(id=role)).remotes.keys()
creator_remotes.append((creator_remote, 'client.{id}'.format(id=role)))
remaining_pools = poolnum
raise RuntimeError("This task requires exactly one MDS")
mds_id = mdslist[0]
- (mds_remote,) = ctx.cluster.only('mds.{_id}'.format(_id=mds_id)).remotes.iterkeys()
+ (mds_remote,) = ctx.cluster.only('mds.{_id}'.format(_id=mds_id)).remotes.keys()
manager = ceph_manager.CephManager(
mds_remote, ctx=ctx, logger=log.getChild('ceph_manager'),
)
def kill_mds(self, mds):
if self.config.get('powercycle'):
(remote,) = (self.ctx.cluster.only('mds.{m}'.format(m=mds)).
- remotes.iterkeys())
+ remotes.keys())
self.log('kill_mds on mds.{m} doing powercycle of {s}'.
format(m=mds, s=remote.name))
self._assert_ipmi(remote)
"""
if self.config.get('powercycle'):
(remote,) = (self.ctx.cluster.only('mds.{m}'.format(m=mds)).
- remotes.iterkeys())
+ remotes.keys())
self.log('revive_mds on mds.{m} doing powercycle of {s}'.
format(m=mds, s=remote.name))
self._assert_ipmi(remote)
log.info('mds thrasher using random seed: {seed}'.format(seed=seed))
random.seed(seed)
- (first,) = ctx.cluster.only('mds.{_id}'.format(_id=mdslist[0])).remotes.iterkeys()
+ (first,) = ctx.cluster.only('mds.{_id}'.format(_id=mdslist[0])).remotes.keys()
manager = ceph_manager.CephManager(
first, ctx=ctx, logger=log.getChild('ceph_manager'),
)
log.info('Beginning mon_clock_skew_check...')
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ (mon,) = ctx.cluster.only(first_mon).remotes.keys()
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
assert isinstance(config, dict), \
'task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ (mon,) = ctx.cluster.only(first_mon).remotes.keys()
manager = ceph_manager.CephManager(
mon,
replacer the id of the new mon (use "${victim}_prime" if not specified)
"""
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ (mon,) = ctx.cluster.only(first_mon).remotes.keys()
manager = CephManager(mon, ctx=ctx, logger=log.getChild('ceph_manager'))
if config is None:
'mon_thrash task requires at least 3 monitors'
log.info('Beginning mon_thrash...')
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ (mon,) = ctx.cluster.only(first_mon).remotes.keys()
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
assert isinstance(config, dict), \
'lost_unfound task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ (mon,) = ctx.cluster.only(first_mon).remotes.keys()
manager = ceph_manager.CephManager(
mon,
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(role).remotes.keys()
proc = remote.run(
args=[
"/bin/sh", "-c",
assert isinstance(config, dict), \
'thrashosds task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ (mon,) = ctx.cluster.only(first_mon).remotes.keys()
num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
log.info('num_osds is %s' % num_osds)
log.info('1. Verify warning messages when exceeding nearfull_ratio')
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ (mon,) = ctx.cluster.only(first_mon).remotes.keys()
proc = mon.run(
args=[
'task only accepts a dict for configuration'
testdir = teuthology.get_testdir(ctx)
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ (mon,) = ctx.cluster.only(first_mon).remotes.keys()
num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
log.info('num_osds is %s' % num_osds)
assert isinstance(config, dict), \
'task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ (mon,) = ctx.cluster.only(first_mon).remotes.keys()
num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
log.info('num_osds is %s' % num_osds)
assert isinstance(config, dict), \
'peer task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ (mon,) = ctx.cluster.only(first_mon).remotes.keys()
manager = ceph_manager.CephManager(
mon,
write_threads = config.get("write_threads", 10)
write_total_per_snap = config.get("write_total_per_snap", 1024*1024*30)
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(client).remotes.keys()
for poolid in range(num_pools):
poolname = "%s-%s" % (pool_prefix, str(poolid))
try:
yield
finally:
- for client in config.iterkeys():
+ for client in config.keys():
(remote,) = ctx.cluster.only(client).remotes.keys()
remote.run(
args=[
yield
finally:
log.debug('cleaning up base image files')
- for client in config.iterkeys():
+ for client in config.keys():
base_file = '{tdir}/qemu/base.{client}.qcow2'.format(
tdir=testdir,
client=client,
time.sleep(time_wait)
log.debug('checking that qemu tests succeeded...')
- for client in config.iterkeys():
+ for client in config.keys():
(remote,) = ctx.cluster.only(client).remotes.keys()
# ensure we have permissions to all the logs
manager.raw_cluster_cmd(
'osd', 'pool', 'set', pool, 'min_size', str(min_size))
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(role).remotes.keys()
proc = remote.run(
args=["CEPH_CLIENT_ID={id_}".format(id_=id_)] + args +
["--pool", pool],
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(role).remotes.keys()
if config.get('ec_pool', False):
profile = config.get('erasure_code_profile', {})
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(role).remotes.keys()
proc = remote.run(
args=[
def wait_until_healthy(ctx, config):
first_mon = teuthology.get_first_mon(ctx, config)
- (mon_remote,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys()
teuthology.wait_until_healthy(ctx, mon_remote)
'--format', 'json',
]
pre.extend(cmd)
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(client).remotes.keys()
proc = remote.run(
args=pre,
check_status=False,
logging.error(err)
assert not err
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(client).remotes.keys()
remote_host = remote.name.split('@')[1]
admin_conn = boto.s3.connection.S3Connection(
aws_access_key_id=admin_access_key,
# TESTCASE 'rm-user3','user','info','deleted user','fails'
(ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
assert ret == 404
-
ragweed_conf = config['ragweed_conf'][client]
if properties is not None and 'rgw_server' in properties:
host = None
- for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']):
+ for target, roles in zip(ctx.config['targets'].keys(), ctx.config['roles']):
log.info('roles: ' + str(roles))
log.info('target: ' + str(target))
if properties['rgw_server'] in roles:
overrides = ctx.config.get('overrides', {})
# merge each client section, not the top level.
- for client in config.iterkeys():
+ for client in config.keys():
if not config[client]:
config[client] = {}
teuthology.deep_merge(config[client], overrides.get('ragweed', {}))
krbd = config.get('krbd', False)
nbd = config.get('nbd', False)
testdir = teuthology.get_testdir(ctx)
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(role).remotes.keys()
args = []
if krbd or nbd:
'task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ (mon,) = ctx.cluster.only(first_mon).remotes.keys()
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
log.info('writing initial objects')
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ (mon,) = ctx.cluster.only(first_mon).remotes.keys()
# write 100 objects
for i in range(100):
rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile])
# Export a pg
(exp_remote,) = ctx.\
- cluster.only('osd.{o}'.format(o=divergent)).remotes.iterkeys()
+ cluster.only('osd.{o}'.format(o=divergent)).remotes.keys()
FSPATH = manager.get_filepath()
JPATH = os.path.join(FSPATH, "journal")
prefix = ("sudo adjust-ulimits ceph-objectstore-tool "
assert exit_status is 0
(remote,) = ctx.\
- cluster.only('osd.{o}'.format(o=divergent)).remotes.iterkeys()
+ cluster.only('osd.{o}'.format(o=divergent)).remotes.keys()
cmd = 'rm {file}'.format(file=expfile)
remote.run(args=cmd, wait=True)
log.info("success")
assert isinstance(config, dict), \
'lost_unfound task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ (mon,) = ctx.cluster.only(first_mon).remotes.keys()
manager = ceph_manager.CephManager(
mon,
log.info("starting repair test type 2")
victim_osd = chooser(manager, pool, 0)
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ (mon,) = ctx.cluster.only(first_mon).remotes.keys()
# create object
log.info("doing put and setomapval")
log.info('writing initial objects')
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ (mon,) = ctx.cluster.only(first_mon).remotes.keys()
#create few objects
for i in range(100):
rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile])
assert 'exec' in config, "config requires exec key with <role>: <command> entries"
for role, task in config['exec'].iteritems():
log.info('restart for role {r}'.format(r=role))
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(role).remotes.keys()
srcdir, restarts = get_tests(ctx, config, role, remote, testdir)
log.info('Running command on role %s host %s', role, remote.name)
spec = '{spec}'.format(spec=task[0])
log.info('Starting rgw...')
testdir = teuthology.get_testdir(ctx)
for client in clients:
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(client).remotes.keys()
cluster_name, daemon_type, client_id = teuthology.split_role(client)
client_with_id = daemon_type + '.' + client_id
client_with_cluster = cluster_name + '.' + client_with_id
endpoint = ctx.rgw.role_endpoints[client]
url = endpoint.url()
log.info('Polling {client} until it starts accepting connections on {url}'.format(client=client, url=url))
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(client).remotes.keys()
wait_for_radosgw(url, remote)
try:
log.info('Creating data pools')
for client in clients:
log.debug("Obtaining remote for client {}".format(client))
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(client).remotes.keys()
data_pool = 'default.rgw.buckets.data'
cluster_name, daemon_type, client_id = teuthology.split_role(client)
s3tests_conf = config['s3tests_conf'][client]
if properties is not None and 'rgw_server' in properties:
host = None
- for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']):
+ for target, roles in zip(ctx.config['targets'].keys(), ctx.config['roles']):
log.info('roles: ' + str(roles))
log.info('target: ' + str(target))
if properties['rgw_server'] in roles:
overrides = ctx.config.get('overrides', {})
# merge each client section, not the top level.
- for client in config.iterkeys():
+ for client in config.keys():
if not config[client]:
config[client] = {}
teuthology.deep_merge(config[client], overrides.get('s3readwrite', {}))
s3tests_conf = config['s3tests_conf'][client]
if properties is not None and 'rgw_server' in properties:
host = None
- for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']):
+ for target, roles in zip(ctx.config['targets'].keys(), ctx.config['roles']):
log.info('roles: ' + str(roles))
log.info('target: ' + str(target))
if properties['rgw_server'] in roles:
s3tests_conf = config['s3tests_conf'][client]
if properties is not None and 'rgw_server' in properties:
host = None
- for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']):
+ for target, roles in zip(ctx.config['targets'].keys(), ctx.config['roles']):
log.info('roles: ' + str(roles))
log.info('target: ' + str(target))
if properties['rgw_server'] in roles:
overrides = ctx.config.get('overrides', {})
# merge each client section, not the top level.
- for client in config.iterkeys():
+ for client in config.keys():
if not config[client]:
config[client] = {}
teuthology.deep_merge(config[client], overrides.get('s3tests', {}))
PREFIX = 'samba.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(role).remotes.keys()
yield (id_, remote)
log.info('Beginning scrub...')
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ (mon,) = ctx.cluster.only(first_mon).remotes.keys()
manager = ceph_manager.CephManager(
mon,
def find_victim_object(ctx, pg, osd):
"""Return a file to be fuzzed"""
- (osd_remote,) = ctx.cluster.only('osd.%d' % osd).remotes.iterkeys()
+ (osd_remote,) = ctx.cluster.only('osd.%d' % osd).remotes.keys()
data_path = os.path.join(
'/var/lib/ceph/osd',
'ceph-{id}'.format(id=osd),
assert isinstance(config, dict), \
'scrub_test task only accepts a dict for configuration'
first_mon = teuthology.get_first_mon(ctx, config)
- (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ (mon,) = ctx.cluster.only(first_mon).remotes.keys()
num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
log.info('num_osds is %s' % num_osds)
try:
yield
finally:
for client in config['clients']:
for user in users.itervalues():
uid = '{user}.{client}'.format(user=user, client=client)
cluster_name, daemon_type, client_id = teuthology.split_role(client)
testswift_conf = config['testswift_conf'][client]
if properties is not None and 'rgw_server' in properties:
host = None
- for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']):
+ for target, roles in zip(ctx.config['targets'].keys(), ctx.config['roles']):
log.info('roles: ' + str(roles))
log.info('target: ' + str(target))
if properties['rgw_server'] in roles:
'grep', 'ceph'])
# wait for HEALTH_OK
mon = get_first_mon(ctx, config)
- (mon_remote,) = ctx.cluster.only(mon).remotes.iterkeys()
+ (mon_remote,) = ctx.cluster.only(mon).remotes.keys()
wait_until_healthy(ctx, mon_remote, use_sudo=True)
yield
overrides = ctx.config.get('overrides', {})
# merge each client section, not the top level.
- for client in config.iterkeys():
+ for client in config.keys():
if not config[client]:
config[client] = {}
teuthology.deep_merge(config[client], overrides.get('keystone', {}))
]
pre.extend(cmd)
log.log(log_level, 'rgwadmin: cmd=%s' % pre)
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(client).remotes.keys()
proc = remote.run(
args=pre,
check_status=check_status,
overrides = copy.deepcopy(overrides.get('workunit', {}))
refspecs = {'suite_sha1': Refspec, 'suite_branch': Branch,
'sha1': Refspec, 'tag': Refspec, 'branch': Branch}
- if any(map(lambda i: i in config, refspecs.iterkeys())):
- for i in refspecs.iterkeys():
+ if any(map(lambda i: i in config, refspecs.keys())):
+ for i in refspecs.keys():
overrides.pop(i, None)
misc.deep_merge(config, overrides)
assert isinstance(role, basestring)
PREFIX = 'client.'
assert role.startswith(PREFIX)
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(role).remotes.keys()
manager = ctx.managers['ceph']
manager.raw_cluster_cmd('osd', 'set', 'noout')
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
- (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+ (remote,) = ctx.cluster.only(role).remotes.keys()
remotes.append(remote)
args =['CEPH_CLIENT_ID={id_}'.format(id_=id_),
# Create scratch dirs for any non-all workunits
log.info('Making a separate scratch dir for every client...')
- for role in clients.iterkeys():
+ for role in clients.keys():
assert isinstance(role, basestring)
if role == "all":
continue
print 'testing {m}/{c}'.format(m=module,c=cmd_cmd)
# test
- for good_bad in perms.iterkeys():
+ for good_bad in perms.keys():
for (kind,lst) in perms[good_bad].iteritems():
for (perm,_) in lst:
cname = 'client.{gb}-{k}-{p}'.format(gb=good_bad,k=kind,p=perm)