in python2, dict.values() and dict.keys() return lists, but in
python3 they return views, which cannot be indexed directly with an
integer. there are four use cases when we access these views in
python3:
1. get the first element
2. get all the elements, which we *might* later access by index
3. get the sole element, assuming the view contains exactly one
   element
4. iterate through the view
in the 1st case, we cannot assume the number of elements, so to be
python3 compatible, we should use `next(iter(a_dict))` instead of
`a_dict.keys()[0]`.
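a minimal sketch of this pattern (`a_dict` is just an illustrative
mapping, not code from this change):
```py
a_dict = {'a': 1, 'b': 2}
first_key = next(iter(a_dict))             # works on py2 lists and py3 views
first_value = next(iter(a_dict.values()))  # same pattern for values
```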
in the 2nd case, this change materializes the view using
`list(a_dict)`.
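for example:
```py
keys = list(a_dict.keys())  # materialize the view into a real list
second_key = keys[1]        # integer indexing now works in python2 and python3
```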
in the 3rd case, we can just continue using the shorthand of
```py
(first_element,) = a_dict.keys()
```
to unpack the view. this works in both python2 and python3.
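note that the unpacking also asserts the assumption: it raises
ValueError when the view does not hold exactly one element, e.g.
```py
(first_element,) = {'a': 1}.keys()          # first_element == 'a'
(first_element,) = {'a': 1, 'b': 2}.keys()  # ValueError: too many values to unpack
```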
in the 4th case, the existing code works in both python2 and python3,
as both lists and views can be iterated with `iter()`, and `len()`
works on both as well.
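a quick sketch of the patterns that need no change:
```py
for key in a_dict:               # iteration works the same on lists and views
    print(key, a_dict[key])
assert len(a_dict.keys()) == 2   # len() works on lists and views alike
```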
Signed-off-by: Kefu Chai <kchai@redhat.com>
(cherry picked from commit d7258ea7fdcd81d4a88028a25b32ed5b278d0752)
Conflicts:
	qa/tasks/barbican.py
	qa/tasks/cbt.py
	qa/tasks/vault.py: trivial resolutions
)
benchmark_config = self.config.get('benchmarks')
- benchmark_type = benchmark_config.keys()[0]
+ benchmark_type = next(iter(benchmark_config.keys()))
if benchmark_type == 'librbdfio':
testdir = misc.get_testdir(self.ctx)
benchmark_config['librbdfio']['cmd_path'] = os.path.join(testdir, 'fio/fio')
cbt_depends = ['python3-yaml', 'python3-lxml', 'librbd-dev', 'collectl']
self.first_mon.run(args=install_cmd + cbt_depends)
- benchmark_type = self.cbt_config.get('benchmarks').keys()[0]
+ benchmark_type = next(iter(self.cbt_config.get('benchmarks').keys()))
self.log.info('benchmark: %s', benchmark_type)
if benchmark_type == 'librbdfio':
def setup(self):
super(CBT, self).setup()
- self.first_mon = self.ctx.cluster.only(misc.get_first_mon(self.ctx, self.config)).remotes.keys()[0]
+ self.first_mon = next(iter(self.ctx.cluster.only(misc.get_first_mon(self.ctx, self.config)).remotes.keys()))
self.cbt_config = self.generate_cbt_config()
self.log.info('cbt configuration is %s', self.cbt_config)
self.cbt_dir = os.path.join(misc.get_archive_dir(self.ctx), 'cbt')
'{tdir}/cbt'.format(tdir=testdir),
]
)
- benchmark_type = self.cbt_config.get('benchmarks').keys()[0]
+ benchmark_type = next(iter(self.cbt_config.get('benchmarks').keys()))
if benchmark_type == 'librbdfio':
self.first_mon.run(
args=[
self.pgid = self.manager.get_object_pg_with_shard(self.pool,
self.object_name,
self.osd)
- self.remote = self.manager.ctx.\
- cluster.only('osd.{o}'.format(o=self.osd)).remotes.keys()[0]
+ self.remote = next(iter(self.manager.ctx.\
+ cluster.only('osd.{o}'.format(o=self.osd)).remotes.keys()))
path = self.manager.get_filepath().format(id=self.osd)
self.paths = ("--data-path {path} --journal-path {path}/journal".
format(path=path))
REP_NAME, DATALINECOUNT)
allremote = []
allremote.append(cli_remote)
- allremote += osds.remotes.keys()
+ allremote += list(osds.remotes.keys())
allremote = list(set(allremote))
for remote in allremote:
cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR,
if refresh or self.data_pools is None:
self.get_pool_names(refresh = True)
assert(len(self.data_pools) == 1)
- return self.data_pools.values()[0]
+ return next(iter(self.data_pools.values()))
def get_data_pool_id(self, refresh = False):
"""
if refresh or self.data_pools is None:
self.get_pool_names(refresh = True)
assert(len(self.data_pools) == 1)
- return self.data_pools.keys()[0]
+ return next(iter(self.data_pools.keys()))
def get_data_pool_names(self, refresh = False):
if refresh or self.data_pools is None:
self.get_pool_names(refresh = True)
- return self.data_pools.values()
+ return list(self.data_pools.values())
def get_metadata_pool_name(self):
return self.metadata_pool_name
targets = self.config.get('counters', {})
if cluster_name is None:
- cluster_name = self.ctx.managers.keys()[0]
+ cluster_name = next(iter(self.ctx.managers.keys()))
for daemon_type, counters in targets.items():
# List of 'a', 'b', 'c'...
if not isinstance(config, dict):
raise TypeError("config must be a dict")
- devstack_node = ctx.cluster.only(is_devstack_node).remotes.keys()[0]
- an_osd_node = ctx.cluster.only(is_osd_node).remotes.keys()[0]
+ devstack_node = next(iter(ctx.cluster.only(is_devstack_node).remotes.keys()))
+ an_osd_node = next(iter(ctx.cluster.only(is_osd_node).remotes.keys()))
devstack_branch = config.get("branch", "master")
install_devstack(devstack_node, devstack_branch)
if not isinstance(config, dict):
raise TypeError("config must be a dict")
- devstack_node = ctx.cluster.only(is_devstack_node).remotes.keys()[0]
+ devstack_node = next(iter(ctx.cluster.only(is_devstack_node).remotes.keys()))
# TODO: save the log *and* preserve failures
#devstack_archive_dir = create_devstack_archive(ctx, devstack_node)
def smoke(ctx, config):
log.info("Running a basic smoketest...")
- devstack_node = ctx.cluster.only(is_devstack_node).remotes.keys()[0]
- an_osd_node = ctx.cluster.only(is_osd_node).remotes.keys()[0]
+ devstack_node = next(iter(ctx.cluster.only(is_devstack_node).remotes.keys()))
+ an_osd_node = next(iter(ctx.cluster.only(is_osd_node).remotes.keys()))
try:
create_volume(devstack_node, an_osd_node, 'smoke0', 1)
# restart dnsmasq
remote.run(args=['sudo', 'systemctl', 'restart', 'dnsmasq'])
# verify dns name is set
- remote.run(args=['ping', '-c', '4', cnames.keys()[0]])
+ remote.run(args=['ping', '-c', '4', next(iter(cnames.keys()))])
try:
yield
clients = config.keys()
# just use the first client...
- client = clients[0];
+ client = next(iter(clients))
(remote,) = ctx.cluster.only(client).remotes.keys()
testdir = teuthology.get_testdir(ctx)
self.assertIn(crash['crash_id'], retstr)
def test_rm(self):
- crashid = self.crashes.keys()[0]
+ crashid = next(iter(self.crashes.keys()))
self.assertEqual(
0,
self.mgr_cluster.mon_manager.raw_cluster_cmd_result(
clients_from_config = config.keys()
# choose first client as default
- client = clients_from_config[0]
+ client = next(iter(clients_from_config))
# once the client is chosen, pull the host name and assigned port out of
# the role_endpoints that were assigned by the rgw task
clients = config.keys()
# just use the first client...
- client = clients[0]
+ client = next(iter(clients))
##
admin_user = 'ada'