From 723f63edb1cf45eba653c2f6836ed0ceb4dfb6f4 Mon Sep 17 00:00:00 2001 From: Kefu Chai Date: Tue, 31 Mar 2020 10:16:40 +0800 Subject: [PATCH] qa/tasks: use next(iter(..)) for accessing first element in a view in python2, dict.values() and dict.keys() return lists. but in python3, they return views, which cannot be indexed directly using an integer index. there are four use cases when we access these views in python3: 1. get the first element 2. get all the elements and then *might* want to access them by index 3. get the first element assuming there is only a single element in the view 4. iterate thru the view in the 1st case, we cannot assume the number of elements, so to be python3 compatible, we should use `next(iter(a_dict))` instead. in the 2nd case, in this change, the view is materialized using `list(a_dict)`. in the 3rd case, we can just continue using the short hand of ```py (first_element,) = a_dict.keys() ``` to unpack the view. this works in both python2 and python3. in the 4th case, the existing code works in both python2 and python3, as both list and view can be iterated using `iter`, and `len` works as well. 
Signed-off-by: Kefu Chai (cherry picked from commit d7258ea7fdcd81d4a88028a25b32ed5b278d0752) Conflicts: qa/tasks/barbican.py qa/tasks/cbt.py qa/tasks/vault.py: trivial resolutions --- qa/tasks/cbt.py | 8 ++++---- qa/tasks/ceph_manager.py | 4 ++-- qa/tasks/ceph_objectstore_tool.py | 2 +- qa/tasks/cephfs/filesystem.py | 6 +++--- qa/tasks/check_counter.py | 2 +- qa/tasks/devstack.py | 10 +++++----- qa/tasks/dnsmasq.py | 2 +- qa/tasks/filestore_idempotent.py | 2 +- qa/tasks/mgr/test_crash.py | 2 +- qa/tasks/radosgw_admin.py | 2 +- qa/tasks/radosgw_admin_rest.py | 2 +- 11 files changed, 21 insertions(+), 21 deletions(-) diff --git a/qa/tasks/cbt.py b/qa/tasks/cbt.py index 941694802cf62..e234eff907b27 100644 --- a/qa/tasks/cbt.py +++ b/qa/tasks/cbt.py @@ -44,7 +44,7 @@ class CBT(Task): ) benchmark_config = self.config.get('benchmarks') - benchmark_type = benchmark_config.keys()[0] + benchmark_type = next(iter(benchmark_config.keys())) if benchmark_type == 'librbdfio': testdir = misc.get_testdir(self.ctx) benchmark_config['librbdfio']['cmd_path'] = os.path.join(testdir, 'fio/fio') @@ -78,7 +78,7 @@ class CBT(Task): cbt_depends = ['python3-yaml', 'python3-lxml', 'librbd-dev', 'collectl'] self.first_mon.run(args=install_cmd + cbt_depends) - benchmark_type = self.cbt_config.get('benchmarks').keys()[0] + benchmark_type = next(iter(self.cbt_config.get('benchmarks').keys())) self.log.info('benchmark: %s', benchmark_type) if benchmark_type == 'librbdfio': @@ -192,7 +192,7 @@ class CBT(Task): def setup(self): super(CBT, self).setup() - self.first_mon = self.ctx.cluster.only(misc.get_first_mon(self.ctx, self.config)).remotes.keys()[0] + self.first_mon = next(iter(self.ctx.cluster.only(misc.get_first_mon(self.ctx, self.config)).remotes.keys())) self.cbt_config = self.generate_cbt_config() self.log.info('cbt configuration is %s', self.cbt_config) self.cbt_dir = os.path.join(misc.get_archive_dir(self.ctx), 'cbt') @@ -224,7 +224,7 @@ class CBT(Task): 
'{tdir}/cbt'.format(tdir=testdir), ] ) - benchmark_type = self.cbt_config.get('benchmarks').keys()[0] + benchmark_type = next(iter(self.cbt_config.get('benchmarks').keys())) if benchmark_type == 'librbdfio': self.first_mon.run( args=[ diff --git a/qa/tasks/ceph_manager.py b/qa/tasks/ceph_manager.py index a2479c6f997be..d02f14592b7c6 100644 --- a/qa/tasks/ceph_manager.py +++ b/qa/tasks/ceph_manager.py @@ -1046,8 +1046,8 @@ class ObjectStoreTool: self.pgid = self.manager.get_object_pg_with_shard(self.pool, self.object_name, self.osd) - self.remote = self.manager.ctx.\ - cluster.only('osd.{o}'.format(o=self.osd)).remotes.keys()[0] + self.remote = next(iter(self.manager.ctx.\ + cluster.only('osd.{o}'.format(o=self.osd)).remotes.keys())) path = self.manager.get_filepath().format(id=self.osd) self.paths = ("--data-path {path} --journal-path {path}/journal". format(path=path)) diff --git a/qa/tasks/ceph_objectstore_tool.py b/qa/tasks/ceph_objectstore_tool.py index 9598e9073dd1b..94b9d19e913fc 100644 --- a/qa/tasks/ceph_objectstore_tool.py +++ b/qa/tasks/ceph_objectstore_tool.py @@ -242,7 +242,7 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False): REP_NAME, DATALINECOUNT) allremote = [] allremote.append(cli_remote) - allremote += osds.remotes.keys() + allremote += list(osds.remotes.keys()) allremote = list(set(allremote)) for remote in allremote: cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR, diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py index e02d822e26ff1..d149387a6984b 100644 --- a/qa/tasks/cephfs/filesystem.py +++ b/qa/tasks/cephfs/filesystem.py @@ -662,7 +662,7 @@ class Filesystem(MDSCluster): if refresh or self.data_pools is None: self.get_pool_names(refresh = True) assert(len(self.data_pools) == 1) - return self.data_pools.values()[0] + return next(iter(self.data_pools.values())) def get_data_pool_id(self, refresh = False): """ @@ -672,12 +672,12 @@ class Filesystem(MDSCluster): if refresh or 
self.data_pools is None: self.get_pool_names(refresh = True) assert(len(self.data_pools) == 1) - return self.data_pools.keys()[0] + return next(iter(self.data_pools.keys())) def get_data_pool_names(self, refresh = False): if refresh or self.data_pools is None: self.get_pool_names(refresh = True) - return self.data_pools.values() + return list(self.data_pools.values()) def get_metadata_pool_name(self): return self.metadata_pool_name diff --git a/qa/tasks/check_counter.py b/qa/tasks/check_counter.py index fc877f285b6c8..daa81973be735 100644 --- a/qa/tasks/check_counter.py +++ b/qa/tasks/check_counter.py @@ -45,7 +45,7 @@ class CheckCounter(Task): targets = self.config.get('counters', {}) if cluster_name is None: - cluster_name = self.ctx.managers.keys()[0] + cluster_name = next(iter(self.ctx.managers.keys())) for daemon_type, counters in targets.items(): # List of 'a', 'b', 'c'... diff --git a/qa/tasks/devstack.py b/qa/tasks/devstack.py index dc1c6a1f961be..35620f7ea82a6 100644 --- a/qa/tasks/devstack.py +++ b/qa/tasks/devstack.py @@ -59,8 +59,8 @@ def install(ctx, config): if not isinstance(config, dict): raise TypeError("config must be a dict") - devstack_node = ctx.cluster.only(is_devstack_node).remotes.keys()[0] - an_osd_node = ctx.cluster.only(is_osd_node).remotes.keys()[0] + devstack_node = next(iter(ctx.cluster.only(is_devstack_node).remotes.keys())) + an_osd_node = next(iter(ctx.cluster.only(is_osd_node).remotes.keys())) devstack_branch = config.get("branch", "master") install_devstack(devstack_node, devstack_branch) @@ -306,7 +306,7 @@ def exercise(ctx, config): if not isinstance(config, dict): raise TypeError("config must be a dict") - devstack_node = ctx.cluster.only(is_devstack_node).remotes.keys()[0] + devstack_node = next(iter(ctx.cluster.only(is_devstack_node).remotes.keys())) # TODO: save the log *and* preserve failures #devstack_archive_dir = create_devstack_archive(ctx, devstack_node) @@ -333,8 +333,8 @@ def create_devstack_archive(ctx, 
devstack_node): def smoke(ctx, config): log.info("Running a basic smoketest...") - devstack_node = ctx.cluster.only(is_devstack_node).remotes.keys()[0] - an_osd_node = ctx.cluster.only(is_osd_node).remotes.keys()[0] + devstack_node = next(iter(ctx.cluster.only(is_devstack_node).remotes.keys())) + an_osd_node = next(iter(ctx.cluster.only(is_osd_node).remotes.keys())) try: create_volume(devstack_node, an_osd_node, 'smoke0', 1) diff --git a/qa/tasks/dnsmasq.py b/qa/tasks/dnsmasq.py index 717c9f01930ae..352ed246b6eb4 100644 --- a/qa/tasks/dnsmasq.py +++ b/qa/tasks/dnsmasq.py @@ -83,7 +83,7 @@ def setup_dnsmasq(remote, testdir, cnames): # restart dnsmasq remote.run(args=['sudo', 'systemctl', 'restart', 'dnsmasq']) # verify dns name is set - remote.run(args=['ping', '-c', '4', cnames.keys()[0]]) + remote.run(args=['ping', '-c', '4', next(iter(cnames.keys()))]) try: yield diff --git a/qa/tasks/filestore_idempotent.py b/qa/tasks/filestore_idempotent.py index 01b562905b212..319bef7686c71 100644 --- a/qa/tasks/filestore_idempotent.py +++ b/qa/tasks/filestore_idempotent.py @@ -31,7 +31,7 @@ def task(ctx, config): clients = config.keys() # just use the first client... 
- client = clients[0]; + client = next(iter(clients)) (remote,) = ctx.cluster.only(client).remotes.keys() testdir = teuthology.get_testdir(ctx) diff --git a/qa/tasks/mgr/test_crash.py b/qa/tasks/mgr/test_crash.py index c45c24a48dbbe..6607bec7d736d 100644 --- a/qa/tasks/mgr/test_crash.py +++ b/qa/tasks/mgr/test_crash.py @@ -73,7 +73,7 @@ class TestCrash(MgrTestCase): self.assertIn(crash['crash_id'], retstr) def test_rm(self): - crashid = self.crashes.keys()[0] + crashid = next(iter(self.crashes.keys())) self.assertEqual( 0, self.mgr_cluster.mon_manager.raw_cluster_cmd_result( diff --git a/qa/tasks/radosgw_admin.py b/qa/tasks/radosgw_admin.py index 63cb1375f35d5..b8e6008ce3ef1 100644 --- a/qa/tasks/radosgw_admin.py +++ b/qa/tasks/radosgw_admin.py @@ -272,7 +272,7 @@ def task(ctx, config): clients_from_config = config.keys() # choose first client as default - client = clients_from_config[0] + client = next(iter(clients_from_config)) # once the client is chosen, pull the host name and assigned port out of # the role_endpoints that were assigned by the rgw task diff --git a/qa/tasks/radosgw_admin_rest.py b/qa/tasks/radosgw_admin_rest.py index a2724e3ffb3b1..8902ca15c6e20 100644 --- a/qa/tasks/radosgw_admin_rest.py +++ b/qa/tasks/radosgw_admin_rest.py @@ -130,7 +130,7 @@ def task(ctx, config): clients = config.keys() # just use the first client... - client = clients[0] + client = next(iter(clients)) ## admin_user = 'ada' -- 2.39.5