Configure barbican paste-api and barbican-api.
"""
assert isinstance(config, dict)
- (cclient, cconfig) = config.items()[0]
+ (cclient, cconfig) = next(iter(config.items()))
keystone_role = cconfig.get('use-keystone-role', None)
if keystone_role is None:
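This pattern recurs throughout the change: on Python 3, dict.items(), dict.keys(), and dict.values() return view objects, which are iterable but not subscriptable, so config.items()[0] raises TypeError. next(iter(...)) takes the first element without building a list and behaves the same on Python 2. A minimal sketch, with a hypothetical config dict:

    # Python 3: dict views cannot be indexed.
    config = {'client.0': {'use-keystone-role': 'admin'}}  # hypothetical
    # config.items()[0]  # TypeError: 'dict_items' object is not subscriptable
    (cclient, cconfig) = next(iter(config.items()))  # first item, no list copy
    assert cclient == 'client.0'

Since iterating a dict yields its keys, next(iter(d)) is equivalent to next(iter(d.keys())); the longer spelling is kept in these hunks to stay close to the original code.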
Create a main and an alternate s3 user.
"""
assert isinstance(config, dict)
- (cclient, cconfig) = config.items()[0]
+ (cclient, cconfig) = next(iter(config.items()))
rgw_user = cconfig['rgw_user']
)
benchmark_config = self.config.get('benchmarks')
- benchmark_type = benchmark_config.keys()[0]
+ benchmark_type = next(iter(benchmark_config.keys()))
if benchmark_type in ['librbdfio', 'fio']:
testdir = misc.get_testdir(self.ctx)
benchmark_config[benchmark_type]['cmd_path'] = os.path.join(testdir, 'fio/fio')
cbt_depends = ['python3-yaml', 'python3-lxml', 'librbd-dev', 'collectl']
self.first_mon.run(args=install_cmd + cbt_depends)
- benchmark_type = self.cbt_config.get('benchmarks').keys()[0]
+ benchmark_type = next(iter(self.cbt_config.get('benchmarks').keys()))
self.log.info('benchmark: %s', benchmark_type)
if benchmark_type in ['librbdfio', 'fio']:
def setup(self):
super(CBT, self).setup()
- self.first_mon = self.ctx.cluster.only(misc.get_first_mon(self.ctx, self.config)).remotes.keys()[0]
+ self.first_mon = next(iter(self.ctx.cluster.only(misc.get_first_mon(self.ctx, self.config)).remotes.keys()))
self.cbt_config = self.generate_cbt_config()
self.log.info('cbt configuration is %s', self.cbt_config)
self.cbt_dir = os.path.join(misc.get_archive_dir(self.ctx), 'cbt')
'{tdir}/cbt'.format(tdir=testdir),
]
)
- benchmark_type = self.cbt_config.get('benchmarks').keys()[0]
+ benchmark_type = next(iter(self.cbt_config.get('benchmarks').keys()))
if benchmark_type in ['librbdfio', 'fio']:
self.first_mon.run(
args=[
self.pgid = self.manager.get_object_pg_with_shard(self.pool,
self.object_name,
self.osd)
- self.remote = self.manager.ctx.\
- cluster.only('osd.{o}'.format(o=self.osd)).remotes.keys()[0]
+ self.remote = next(iter(self.manager.ctx.
+ cluster.only('osd.{o}'.format(o=self.osd)).remotes.keys()))
path = self.manager.get_filepath().format(id=self.osd)
self.paths = ("--data-path {path} --journal-path {path}/journal".
format(path=path))
REP_NAME, DATALINECOUNT)
allremote = []
allremote.append(cli_remote)
- allremote += osds.remotes.keys()
+ allremote += list(osds.remotes.keys())
allremote = list(set(allremote))
for remote in allremote:
cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR,
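Here the keys feed a list that is later deduplicated, so the view is materialized with list(...) rather than picked from with next(iter(...)). A sketch with a hypothetical remotes mapping:

    remotes = {'osd.0': object(), 'osd.1': object()}  # hypothetical
    allremote = []
    allremote += list(remotes.keys())  # explicit list; += would also accept the view
    allremote = list(set(allremote))   # dedupe, as in the hunk above

Strictly, list += accepts any iterable, so the list() wrap is not required for correctness; it makes the intent explicit.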
if refresh or self.data_pools is None:
self.get_pool_names(refresh = True)
assert(len(self.data_pools) == 1)
- return self.data_pools.values()[0]
+ return next(iter(self.data_pools.values()))
def get_data_pool_id(self, refresh = False):
"""
if refresh or self.data_pools is None:
self.get_pool_names(refresh = True)
assert(len(self.data_pools) == 1)
- return self.data_pools.keys()[0]
+ return next(iter(self.data_pools.keys()))
def get_data_pool_names(self, refresh = False):
if refresh or self.data_pools is None:
self.get_pool_names(refresh = True)
- return self.data_pools.values()
+ return list(self.data_pools.values())
def get_metadata_pool_name(self):
return self.metadata_pool_name
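get_data_pool_names returns the values to the caller, and a Python 3 view is live: it reflects later changes to the underlying dict. Wrapping it in list(...) hands back a stable snapshot instead. A sketch, with a hypothetical pools dict:

    pools = {1: 'cephfs_data'}        # hypothetical pool-id -> name map
    view = pools.values()
    snapshot = list(pools.values())
    pools[2] = 'cephfs_data_b'
    assert len(view) == 2       # the view tracks the dict
    assert len(snapshot) == 1   # the list does not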
targets = self.config.get('counters', {})
if cluster_name is None:
- cluster_name = self.ctx.managers.keys()[0]
+ cluster_name = next(iter(self.ctx.managers.keys()))
for daemon_type, counters in targets.items():
# List of 'a', 'b', 'c'...
if not isinstance(config, dict):
raise TypeError("config must be a dict")
- devstack_node = ctx.cluster.only(is_devstack_node).remotes.keys()[0]
- an_osd_node = ctx.cluster.only(is_osd_node).remotes.keys()[0]
+ devstack_node = next(iter(ctx.cluster.only(is_devstack_node).remotes.keys()))
+ an_osd_node = next(iter(ctx.cluster.only(is_osd_node).remotes.keys()))
devstack_branch = config.get("branch", "master")
install_devstack(devstack_node, devstack_branch)
if not isinstance(config, dict):
raise TypeError("config must be a dict")
- devstack_node = ctx.cluster.only(is_devstack_node).remotes.keys()[0]
+ devstack_node = next(iter(ctx.cluster.only(is_devstack_node).remotes.keys()))
# TODO: save the log *and* preserve failures
#devstack_archive_dir = create_devstack_archive(ctx, devstack_node)
def smoke(ctx, config):
log.info("Running a basic smoketest...")
- devstack_node = ctx.cluster.only(is_devstack_node).remotes.keys()[0]
- an_osd_node = ctx.cluster.only(is_osd_node).remotes.keys()[0]
+ devstack_node = next(iter(ctx.cluster.only(is_devstack_node).remotes.keys()))
+ an_osd_node = next(iter(ctx.cluster.only(is_osd_node).remotes.keys()))
try:
create_volume(devstack_node, an_osd_node, 'smoke0', 1)
# restart dnsmasq
remote.run(args=['sudo', 'systemctl', 'restart', 'dnsmasq'])
# verify dns name is set
- remote.run(args=['ping', '-c', '4', cnames.keys()[0]])
+ remote.run(args=['ping', '-c', '4', next(iter(cnames.keys()))])
try:
yield
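Note that "first key" is well defined here: Python 3.7+ guarantees that dicts preserve insertion order, so next(iter(cnames.keys())) pings the first name that was added (on 3.6 and earlier this ordering was an implementation detail). A sketch with hypothetical entries:

    cnames = {'foo.example.com': '10.0.0.1',
              'bar.example.com': '10.0.0.2'}  # hypothetical
    assert next(iter(cnames.keys())) == 'foo.example.com'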
clients = config.keys()
# just use the first client...
- client = clients[0];
+ client = next(iter(clients))
(remote,) = ctx.cluster.only(client).remotes.keys()
testdir = teuthology.get_testdir(ctx)
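The untouched line above shows the other idiom already in the tree: single-element tuple unpacking works directly on a view and additionally asserts there is exactly one remote, so it needs no porting. next(iter(...)) is the right choice only when more than one entry may exist. A sketch with a hypothetical remotes mapping:

    remotes = {'mon.a': object()}   # hypothetical
    (remote,) = remotes.keys()      # fine on Python 3; ValueError unless exactly one
    first = next(iter(remotes))     # fine even with several entries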
self.assertIn(crash['crash_id'], retstr)
def test_rm(self):
- crashid = self.crashes.keys()[0]
+ crashid = next(iter(self.crashes.keys()))
self.assertEqual(
0,
self.mgr_cluster.mon_manager.raw_cluster_cmd_result(
clients_from_config = config.keys()
# choose first client as default
- client = clients_from_config[0]
+ client = next(iter(clients_from_config))
# once the client is chosen, pull the host name and assigned port out of
# the role_endpoints that were assigned by the rgw task
clients = config.keys()
# just use the first client...
- client = clients[0]
+ client = next(iter(clients))
##
admin_user = 'ada'
"""
Mount Transit or KV version 2 secrets engine
"""
- (cclient, cconfig) = config.items()[0]
+ (cclient, cconfig) = next(iter(config.items()))
engine = cconfig.get('engine')
if engine == 'kv':
@contextlib.contextmanager
def create_secrets(ctx, config):
- (cclient, cconfig) = config.items()[0]
+ (cclient, cconfig) = next(iter(config.items()))
engine = cconfig.get('engine')
prefix = cconfig.get('prefix')
secrets = cconfig.get('secrets')