qa: Run flake8 on python2 and python3
author Thomas Bechtold <tbechtold@suse.com>
Mon, 9 Dec 2019 16:27:46 +0000 (17:27 +0100)
committer Kefu Chai <kchai@redhat.com>
Tue, 2 Jun 2020 02:32:23 +0000 (10:32 +0800)
To be able to catch problems with python2 *and* python3, run flake8
with both versions. From the flake8 homepage:

It is very important to install Flake8 on the correct version of
Python for your needs. If you want Flake8 to properly parse new
language features in Python 3.5 (for example), you need it to be
installed on 3.5 for Flake8 to understand those features. In many
ways, Flake8 is tied to the version of Python on which it runs.

Also fix the problems found with python3 along the way.
Note: this now requires the six module for teuthology, but six is
already an install_require in teuthology itself.
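
For illustration, the recurring python3 fix in this diff swaps the
python2-only builtin basestring for six.string_types; a minimal sketch
of the pattern (the helper name is made up, not from any one file):

    import six

    def split_client_role(role):
        # basestring exists only on python2; six.string_types is
        # (basestring,) on python2 and (str,) on python3, so this
        # assert works on both interpreters.
        assert isinstance(role, six.string_types)
        prefix = 'client.'
        assert role.startswith(prefix)
        return role[len(prefix):]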

Signed-off-by: Thomas Bechtold <tbechtold@suse.com>
(cherry picked from commit bdcc94a1d1f659b1524fdc6bb8bd1da6d38a30d2)

Conflicts:
qa/CMakeLists.txt
qa/tasks/ceph_manager.py
qa/tasks/cephfs/xfstests_dev.py
qa/tasks/cram.py
qa/tasks/manypools.py
qa/tasks/ragweed.py
qa/tasks/s3tests.py
qa/tasks/vstart_runner.py
qa/tasks/workunit.py
qa/tox.ini: trivial resolutions

33 files changed:
qa/tasks/autotest.py
qa/tasks/ceph.py
qa/tasks/ceph_manager.py
qa/tasks/cephfs/filesystem.py
qa/tasks/cephfs/test_client_recovery.py
qa/tasks/cephfs/test_exports.py
qa/tasks/cephfs/test_scrub_checks.py
qa/tasks/cephfs_test_runner.py
qa/tasks/cram.py
qa/tasks/keystone.py
qa/tasks/manypools.py
qa/tasks/omapbench.py
qa/tasks/qemu.py
qa/tasks/rados.py
qa/tasks/radosbench.py
qa/tasks/radosbenchsweep.py
qa/tasks/radosgw_admin.py
qa/tasks/ragweed.py
qa/tasks/rbd.py
qa/tasks/s3readwrite.py
qa/tasks/s3roundtrip.py
qa/tasks/s3tests.py
qa/tasks/samba.py
qa/tasks/tempest.py
qa/tasks/vstart_runner.py
qa/tasks/watch_notify_same_primary.py
qa/tasks/watch_notify_stress.py
qa/tasks/workunit.py
qa/tox.ini
qa/workunits/mon/caps.py
qa/workunits/mon/ping.py
qa/workunits/rados/test_large_omap_detection.py
qa/workunits/restart/test-backtraces.py

diff --git a/qa/tasks/autotest.py b/qa/tasks/autotest.py
index 1735f677380dbc4be63b0402c9c69ab973c2afad..2f96373d0a9f4a6e80f0a9c8dc22dd219dc77436 100644 (file)
@@ -1,10 +1,12 @@
-""" 
+"""
 Run an autotest test on the ceph cluster.
 """
 import json
 import logging
 import os
 
+import six
+
 from teuthology import misc as teuthology
 from teuthology.parallel import parallel
 from teuthology.orchestra import run
@@ -48,7 +50,7 @@ def task(ctx, config):
 
     log.info('Making a separate scratch dir for every client...')
     for role in config.keys():
-        assert isinstance(role, basestring)
+        assert isinstance(role, six.string_types)
         PREFIX = 'client.'
         assert role.startswith(PREFIX)
         id_ = role[len(PREFIX):]
@@ -103,7 +105,7 @@ def _run_tests(testdir, remote, role, tests):
     """
     Spawned to run test on remote site
     """
-    assert isinstance(role, basestring)
+    assert isinstance(role, six.string_types)
     PREFIX = 'client.'
     assert role.startswith(PREFIX)
     id_ = role[len(PREFIX):]
diff --git a/qa/tasks/ceph.py b/qa/tasks/ceph.py
index c44e2cf26bfb4cfdd4a6064e175c924344b9834e..e38f7c62edf007223ed6f215d9e3cde6be6fd16d 100644 (file)
@@ -166,13 +166,13 @@ def ceph_log(ctx, config):
                     # case we will see connection errors that we should ignore.
                     log.debug("Missed logrotate, node '{0}' is offline".format(
                         e.node))
-                except EOFError as e:
+                except EOFError:
                     # Paramiko sometimes raises this when it fails to
                     # connect to a node during open_session.  As with
                     # ConnectionLostError, we ignore this because nodes
                     # are allowed to get power cycled during tests.
                     log.debug("Missed logrotate, EOFError")
-                except SSHException as e:
+                except SSHException:
                     log.debug("Missed logrotate, SSHException")
                 except socket.error as e:
                     if e.errno in (errno.EHOSTUNREACH, errno.ECONNRESET):
@@ -191,7 +191,7 @@ def ceph_log(ctx, config):
         testdir = teuthology.get_testdir(ctx)
         remote_logrotate_conf = '%s/logrotate.ceph-test.conf' % testdir
         rotate_conf_path = os.path.join(os.path.dirname(__file__), 'logrotate.conf')
-        with file(rotate_conf_path, 'rb') as f:
+        with open(rotate_conf_path, 'rb') as f:
             conf = ""
             for daemon, size in daemons.items():
                 log.info('writing logrotate stanza for {}'.format(daemon))
diff --git a/qa/tasks/ceph_manager.py b/qa/tasks/ceph_manager.py
index e20583763384a34e0a4cde402e1c54f736cf12eb..a7267acfe69139db0487e22d26be1c3e5e04c875 100644 (file)
@@ -23,6 +23,7 @@ from teuthology.contextutil import safe_while
 from teuthology.orchestra.remote import Remote
 from teuthology.orchestra import run
 from teuthology.exceptions import CommandFailedError
+import six
 
 try:
     from subprocess import DEVNULL # py3k
@@ -1631,7 +1632,7 @@ class CephManager:
         :param erasure_code_use_overwrites: if true, allow overwrites
         """
         with self.lock:
-            assert isinstance(pool_name, basestring)
+            assert isinstance(pool_name, six.string_types)
             assert isinstance(pg_num, int)
             assert pool_name not in self.pools
             self.log("creating pool_name %s" % (pool_name,))
@@ -1683,7 +1684,7 @@ class CephManager:
         :param pool_name: Pool to be removed
         """
         with self.lock:
-            assert isinstance(pool_name, basestring)
+            assert isinstance(pool_name, six.string_types)
             assert pool_name in self.pools
             self.log("removing pool_name %s" % (pool_name,))
             del self.pools[pool_name]
@@ -1702,7 +1703,7 @@ class CephManager:
         Return the number of pgs in the pool specified.
         """
         with self.lock:
-            assert isinstance(pool_name, basestring)
+            assert isinstance(pool_name, six.string_types)
             if pool_name in self.pools:
                 return self.pools[pool_name]
             return 0
@@ -1714,8 +1715,8 @@ class CephManager:
         :returns: property as an int value.
         """
         with self.lock:
-            assert isinstance(pool_name, basestring)
-            assert isinstance(prop, basestring)
+            assert isinstance(pool_name, six.string_types)
+            assert isinstance(prop, six.string_types)
             output = self.raw_cluster_cmd(
                 'osd',
                 'pool',
@@ -1733,8 +1734,8 @@ class CephManager:
         This routine retries if set operation fails.
         """
         with self.lock:
-            assert isinstance(pool_name, basestring)
-            assert isinstance(prop, basestring)
+            assert isinstance(pool_name, six.string_types)
+            assert isinstance(prop, six.string_types)
             assert isinstance(val, int)
             tries = 0
             while True:
@@ -1761,7 +1762,7 @@ class CephManager:
         Increase the number of pgs in a pool
         """
         with self.lock:
-            assert isinstance(pool_name, basestring)
+            assert isinstance(pool_name, six.string_types)
             assert isinstance(by, int)
             assert pool_name in self.pools
             if self.get_num_creating() > 0:
@@ -1781,7 +1782,7 @@ class CephManager:
         with self.lock:
             self.log('contract_pool %s by %s min %s' % (
                      pool_name, str(by), str(min_pgs)))
-            assert isinstance(pool_name, basestring)
+            assert isinstance(pool_name, six.string_types)
             assert isinstance(by, int)
             assert pool_name in self.pools
             if self.get_num_creating() > 0:
@@ -1820,7 +1821,7 @@ class CephManager:
         Set pgpnum property of pool_name pool.
         """
         with self.lock:
-            assert isinstance(pool_name, basestring)
+            assert isinstance(pool_name, six.string_types)
             assert pool_name in self.pools
             if not force and self.get_num_creating() > 0:
                 return False
diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py
index 3f1f9bf8036784a61c610026713ed48b14abd5cf..e02d822e26ff1271b258f78d4df34b56d900ecc6 100644 (file)
@@ -781,7 +781,8 @@ class Filesystem(MDSCluster):
         """
         mdsmap = self.get_mds_map(status)
         result = []
-        for mds_status in sorted(mdsmap['info'].values(), lambda a, b: cmp(a['rank'], b['rank'])):
+        for mds_status in sorted(mdsmap['info'].values(),
+                                 key=lambda _: _['rank']):
             if mds_status['state'] == state or state is None:
                 result.append(mds_status['name'])
 
@@ -799,7 +800,8 @@ class Filesystem(MDSCluster):
     def get_all_mds_rank(self, status=None):
         mdsmap = self.get_mds_map(status)
         result = []
-        for mds_status in sorted(mdsmap['info'].values(), lambda a, b: cmp(a['rank'], b['rank'])):
+        for mds_status in sorted(mdsmap['info'].values(),
+                                 key=lambda _: _['rank']):
             if mds_status['rank'] != -1 and mds_status['state'] != 'up:standby-replay':
                 result.append(mds_status['rank'])
 
@@ -849,7 +851,8 @@ class Filesystem(MDSCluster):
         """
         mdsmap = self.get_mds_map(status)
         result = []
-        for mds_status in sorted(mdsmap['info'].values(), lambda a, b: cmp(a['rank'], b['rank'])):
+        for mds_status in sorted(mdsmap['info'].values(),
+                                 key=lambda _: _['rank']):
             if mds_status['rank'] != -1 and mds_status['state'] != 'up:standby-replay':
                 result.append(mds_status['name'])
 
diff --git a/qa/tasks/cephfs/test_client_recovery.py b/qa/tasks/cephfs/test_client_recovery.py
index bdd8e1388d2adec1ba8306165acdd89d659d00d6..ab86ae7a89c7863e36d108c88fdde748a1e5d84c 100644 (file)
@@ -449,10 +449,10 @@ class TestClientRecovery(CephFSTestCase):
         self.mount_a.wait_until_mounted()
 
     def test_dir_fsync(self):
-       self._test_fsync(True);
+        self._test_fsync(True);
 
     def test_create_fsync(self):
-       self._test_fsync(False);
+        self._test_fsync(False);
 
     def _test_fsync(self, dirfsync):
         """
diff --git a/qa/tasks/cephfs/test_exports.py b/qa/tasks/cephfs/test_exports.py
index 70e55060684161a8055fc3682e97a2366bbbfa55..bd895e381b5b545786da71d04aee0d85fe6b8f82 100644 (file)
@@ -131,8 +131,8 @@ class TestExports(CephFSTestCase):
             p = self.mount_a.client_remote.run(args=['uname', '-r'], stdout=StringIO(), wait=True)
             dir_pin = self.mount_a.getfattr("1", "ceph.dir.pin")
             log.debug("mount.getfattr('1','ceph.dir.pin'): %s " % dir_pin)
-           if str(p.stdout.getvalue()) < "5" and not(dir_pin):
-               self.skipTest("Kernel does not support getting the extended attribute ceph.dir.pin")
+            if str(p.stdout.getvalue()) < "5" and not(dir_pin):
+                self.skipTest("Kernel does not support getting the extended attribute ceph.dir.pin")
         self.assertTrue(self.mount_a.getfattr("1", "ceph.dir.pin") == "1")
         self.assertTrue(self.mount_a.getfattr("1/2", "ceph.dir.pin") == "0")
         if (len(self.fs.get_active_names()) > 2):
diff --git a/qa/tasks/cephfs/test_scrub_checks.py b/qa/tasks/cephfs/test_scrub_checks.py
index 80c69e3c29c216e445f7ca62759ec78d33c58727..54ed16ffa1f8732d1c4ca77beb7bc84d8912e7d2 100644 (file)
@@ -310,11 +310,11 @@ class TestScrubChecks(CephFSTestCase):
         self.tell_command(mds_rank, "scrub start /{0} repair".format(test_dir),
                           lambda j, r: self.json_validator(j, r, "return_code", 0))
 
-       # wait a few second for background repair
-       time.sleep(10)
+        # wait a few second for background repair
+        time.sleep(10)
 
-       # fragstat should be fixed
-       self.mount_a.run_shell(["sudo", "rmdir", test_dir])
+        # fragstat should be fixed
+        self.mount_a.run_shell(["sudo", "rmdir", test_dir])
 
     @staticmethod
     def json_validator(json_out, rc, element, expected_value):
diff --git a/qa/tasks/cephfs_test_runner.py b/qa/tasks/cephfs_test_runner.py
index d57e85d306f61d117a59b2650177701df633caba..4455c086f31314b4acddb3da792ad34d59e4ba26 100644 (file)
@@ -133,7 +133,7 @@ def task(ctx, config):
 
     # Mount objects, sorted by ID
     if hasattr(ctx, 'mounts'):
-        mounts = [v for k, v in sorted(ctx.mounts.items(), lambda a, b: cmp(a[0], b[0]))]
+        mounts = [v for k, v in sorted(ctx.mounts.items(), key=lambda mount: mount[0])]
     else:
         # The test configuration has a filesystem but no fuse/kclient mounts
         mounts = []
diff --git a/qa/tasks/cram.py b/qa/tasks/cram.py
index 3e7659364830ca41a4aa130d04c46688c9ff7ab3..85fbb9e66b2ca7c4dafb98eb7177e8f50292dbbe 100644 (file)
@@ -4,6 +4,8 @@ Cram tests
 import logging
 import os
 
+import six
+
 from tasks.util.workunit import get_refspec_after_overrides
 
 from teuthology import misc as teuthology
@@ -124,7 +126,7 @@ def _run_tests(ctx, role):
     :param ctx: Context
     :param role: Roles
     """
-    assert isinstance(role, basestring)
+    assert isinstance(role, six.string_types)
     PREFIX = 'client.'
     assert role.startswith(PREFIX)
     id_ = role[len(PREFIX):]
diff --git a/qa/tasks/keystone.py b/qa/tasks/keystone.py
index baf99324d8ea6623cb799fd1e0730965832a0255..250c297298d79fa858147729e20d8eb57967b751 100644 (file)
@@ -134,7 +134,7 @@ def setup_venv(ctx, config):
     for (client, _) in config.items():
         run_in_keystone_dir(ctx, client,
             [   'source',
-               '{tvdir}/bin/activate'.format(tvdir=get_toxvenv_dir(ctx)),
+                '{tvdir}/bin/activate'.format(tvdir=get_toxvenv_dir(ctx)),
                 run.Raw('&&'),
                 'tox', '-e', 'venv', '--notest'
             ])
diff --git a/qa/tasks/manypools.py b/qa/tasks/manypools.py
index 7aec5df17382a1b334c34d1921672969bc842917..7fe7e43e1b8335bbc12b84e221d0896ecaeae201 100644 (file)
@@ -46,25 +46,25 @@ def task(ctx, config):
     poolprocs=dict()
     while (remaining_pools > 0):
         log.info('{n} pools remaining to create'.format(n=remaining_pools))
-       for remote, role_ in creator_remotes:
+        for remote, role_ in creator_remotes:
             poolnum = remaining_pools
             remaining_pools -= 1
             if remaining_pools < 0:
                 continue
             log.info('creating pool{num} on {role}'.format(num=poolnum, role=role_))
-           proc = remote.run(
-               args=[
-                   'ceph',
-                   '--name', role_,
-                   'osd', 'pool', 'create', 'pool{num}'.format(num=poolnum), '8',
-                   run.Raw('&&'),
-                   'rados',
-                   '--name', role_,
-                   '--pool', 'pool{num}'.format(num=poolnum),
-                   'bench', '0', 'write', '-t', '16', '--block-size', '1'
-                   ],
-               wait = False
-           )
+            proc = remote.run(
+                args=[
+                    'ceph',
+                    '--name', role_,
+                    'osd', 'pool', 'create', 'pool{num}'.format(num=poolnum), '8',
+                    run.Raw('&&'),
+                    'rados',
+                    '--name', role_,
+                    '--pool', 'pool{num}'.format(num=poolnum),
+                    'bench', '0', 'write', '-t', '16', '--block-size', '1'
+                ],
+                wait = False
+            )
             log.info('waiting for pool and object creates')
             poolprocs[remote] = proc
 
diff --git a/qa/tasks/omapbench.py b/qa/tasks/omapbench.py
index 77f5dd0676e16e2cceea1d02be46201b9bbaddae..af0793d956447e1fabfcb5d871e62bfa60035e61 100644 (file)
@@ -4,6 +4,8 @@ Run omapbench executable within teuthology
 import contextlib
 import logging
 
+import six
+
 from teuthology.orchestra import run
 from teuthology import misc as teuthology
 
@@ -48,7 +50,7 @@ def task(ctx, config):
     testdir = teuthology.get_testdir(ctx)
     print(str(config.get('increment',-1)))
     for role in config.get('clients', ['client.0']):
-        assert isinstance(role, basestring)
+        assert isinstance(role, six.string_types)
         PREFIX = 'client.'
         assert role.startswith(PREFIX)
         id_ = role[len(PREFIX):]
diff --git a/qa/tasks/qemu.py b/qa/tasks/qemu.py
index aa490214ed9a8e8efbbafd75baf9ce6d71b6e60a..48ed5b024d64c8a965c65cc2281a0289e9b226d3 100644 (file)
@@ -53,7 +53,7 @@ def create_clones(ctx, config, managers):
             num_disks = client_config.get('disks', DEFAULT_NUM_DISKS)
             if isinstance(num_disks, list):
                 num_disks = len(num_disks)
-            for i in xrange(num_disks):
+            for i in range(num_disks):
                 create_config = {
                     client: {
                         'image_name':
@@ -121,7 +121,7 @@ def generate_iso(ctx, config):
         userdata_path = os.path.join(testdir, 'qemu', 'userdata.' + client)
         metadata_path = os.path.join(testdir, 'qemu', 'metadata.' + client)
 
-        with file(os.path.join(src_dir, 'userdata_setup.yaml'), 'rb') as f:
+        with open(os.path.join(src_dir, 'userdata_setup.yaml'), 'rb') as f:
             test_setup = ''.join(f.readlines())
             # configuring the commands to setup the nfs mount
             mnt_dir = "/export/{client}".format(client=client)
@@ -129,7 +129,7 @@ def generate_iso(ctx, config):
                 mnt_dir=mnt_dir
             )
 
-        with file(os.path.join(src_dir, 'userdata_teardown.yaml'), 'rb') as f:
+        with open(os.path.join(src_dir, 'userdata_teardown.yaml'), 'rb') as f:
             test_teardown = ''.join(f.readlines())
 
         user_data = test_setup
@@ -137,7 +137,7 @@ def generate_iso(ctx, config):
             num_disks = client_config.get('disks', DEFAULT_NUM_DISKS)
             if isinstance(num_disks, list):
                 num_disks = len(num_disks)
-            for i in xrange(1, num_disks):
+            for i in range(1, num_disks):
                 dev_letter = chr(ord('a') + i)
                 user_data += """
 - |
@@ -173,7 +173,7 @@ def generate_iso(ctx, config):
             ceph_sha1=ctx.config.get('sha1'))
         teuthology.write_file(remote, userdata_path, StringIO(user_data))
 
-        with file(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f:
+        with open(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f:
             teuthology.write_file(remote, metadata_path, f)
 
         test_file = '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client)
@@ -394,7 +394,7 @@ def run_qemu(ctx, config):
         num_disks = client_config.get('disks', DEFAULT_NUM_DISKS)
         if isinstance(num_disks, list):
             num_disks = len(num_disks)
-        for i in xrange(num_disks):
+        for i in range(num_disks):
             suffix = '-clone' if clone else ''
             args.extend([
                 '-drive',
diff --git a/qa/tasks/rados.py b/qa/tasks/rados.py
index d4872fb11570a7c27f1293c8537dbde381d7d45e..121d06ea288bdd1a8d65ce09aa0729a902e56ed3 100644 (file)
@@ -6,6 +6,8 @@ import logging
 import gevent
 from teuthology import misc as teuthology
 
+import six
+
 from teuthology.orchestra import run
 
 log = logging.getLogger(__name__)
@@ -223,7 +225,7 @@ def task(ctx, config):
             existing_pools = config.get('pools', [])
             created_pools = []
             for role in config.get('clients', clients):
-                assert isinstance(role, basestring)
+                assert isinstance(role, six.string_types)
                 PREFIX = 'client.'
                 assert role.startswith(PREFIX)
                 id_ = role[len(PREFIX):]
diff --git a/qa/tasks/radosbench.py b/qa/tasks/radosbench.py
index 770e3aff7c5166b292572c8048453323a4bea5e0..7b6b98adad0cb7a99e7a36d3b967cf6030af210f 100644 (file)
@@ -7,6 +7,8 @@ import logging
 from teuthology.orchestra import run
 from teuthology import misc as teuthology
 
+import six
+
 log = logging.getLogger(__name__)
 
 @contextlib.contextmanager
@@ -53,7 +55,7 @@ def task(ctx, config):
 
     create_pool = config.get('create_pool', True)
     for role in config.get('clients', ['client.0']):
-        assert isinstance(role, basestring)
+        assert isinstance(role, six.string_types)
         PREFIX = 'client.'
         assert role.startswith(PREFIX)
         id_ = role[len(PREFIX):]
diff --git a/qa/tasks/radosbenchsweep.py b/qa/tasks/radosbenchsweep.py
index 5d18f7a3badd6890aab2686ee0bd5d8b14800f73..0aeb7218681c3858e2b9b47d4833dbf5692bb6b7 100644 (file)
@@ -11,6 +11,8 @@ from itertools import product
 from teuthology.orchestra import run
 from teuthology import misc as teuthology
 
+import six
+
 log = logging.getLogger(__name__)
 
 
@@ -167,7 +169,7 @@ def run_radosbench(ctx, config, f, num_osds, size, replica, rep):
     log.info('  repetition =' + str(rep))
 
     for role in config.get('clients', ['client.0']):
-        assert isinstance(role, basestring)
+        assert isinstance(role, six.string_types)
         PREFIX = 'client.'
         assert role.startswith(PREFIX)
         id_ = role[len(PREFIX):]
diff --git a/qa/tasks/radosgw_admin.py b/qa/tasks/radosgw_admin.py
index 8c6e952080ca8517d1b197fc7dd5f5a79fd786b4..63cb1375f35d58a4a32433e409fd14b2f9042217 100644 (file)
@@ -187,7 +187,7 @@ class usage_acc:
                 x2 = s2['total']
             except Exception as ex:
                 r.append("malformed summary looking for totals for user "
-                   + e['user'] + " " + str(ex))
+                         + e['user'] + " " + str(ex))
                 break
             usage_acc_validate_fields(r, x, x2, "summary: totals for user" + e['user'])
         return r
@@ -909,8 +909,8 @@ def task(ctx, config):
     assert len(out['placement_pools']) == orig_placement_pools + 1
 
     zonecmd = ['zone', 'placement', 'rm',
-       '--rgw-zone', 'default',
-       '--placement-id', 'new-placement']
+               '--rgw-zone', 'default',
+               '--placement-id', 'new-placement']
 
     (err, out) = rgwadmin(ctx, client, zonecmd, check_status=True)
 
diff --git a/qa/tasks/ragweed.py b/qa/tasks/ragweed.py
index bb72267e5e4ca37e4fc79c6fcaf96d34a5a9f783..fab0853ce986453522aee573994f8736cc6a41d0 100644 (file)
@@ -41,7 +41,7 @@ def download(ctx, config):
             ragweed_repo = ctx.config.get('ragweed_repo', teuth_config.ceph_git_base_url + 'ragweed.git')
             if suite_branch in s3_branches:
                 branch = cconf.get('branch', 'ceph-' + suite_branch)
-           else:
+            else:
                 branch = cconf.get('branch', suite_branch)
         if not branch:
             raise ValueError(
@@ -101,7 +101,7 @@ def _config_user(ragweed_conf, section, user):
     ragweed_conf[section].setdefault('user_id', user)
     ragweed_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user))
     ragweed_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user))
-    ragweed_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20)))
+    ragweed_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in range(20)))
     ragweed_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40)))
 
 
@@ -211,7 +211,7 @@ def configure(ctx, config, run_stages):
             ragweed_conf['rgw']['host'] = 'localhost'
 
         if properties is not None and 'slow_backend' in properties:
-           ragweed_conf['fixtures']['slow backend'] = properties['slow_backend']
+            ragweed_conf['fixtures']['slow backend'] = properties['slow_backend']
 
         conf_fp = StringIO()
         ragweed_conf.write(conf_fp)
@@ -223,8 +223,8 @@ def configure(ctx, config, run_stages):
 
     log.info('Configuring boto...')
     boto_src = os.path.join(os.path.dirname(__file__), 'boto.cfg.template')
-    for client, properties in config['clients'].iteritems():
-        with file(boto_src, 'rb') as f:
+    for client, properties in config['clients'].items():
+        with open(boto_src, 'rb') as f:
             (remote,) = ctx.cluster.only(client).remotes.keys()
             conf = f.read().format(
                 idle_timeout=config.get('idle_timeout', 30)
diff --git a/qa/tasks/rbd.py b/qa/tasks/rbd.py
index 193ab1c3961a0dfaa2b30076dc074806e7bc6173..ee30e02c4359125f17183c01a63d38af51f0a94a 100644 (file)
@@ -16,6 +16,8 @@ from teuthology.task.common_fs_utils import generic_mkfs
 from teuthology.task.common_fs_utils import generic_mount
 from teuthology.task.common_fs_utils import default_image_name
 
+import six
+
 #V1 image unsupported but required for testing purposes
 os.environ["RBD_FORCE_ALLOW_V1"] = "1"
 
@@ -355,7 +357,7 @@ def run_xfstests(ctx, config):
             except:
                 exc_info = sys.exc_info()
         if exc_info:
-            raise exc_info[0], exc_info[1], exc_info[2]
+            six.reraise(exc_info[0], exc_info[1], exc_info[2])
     yield
 
 def run_xfstests_one_client(ctx, role, properties):
diff --git a/qa/tasks/s3readwrite.py b/qa/tasks/s3readwrite.py
index 76c4ebbab1476730ae3380f33a447c2f25f70e2d..2dae973e9b21231c201f7be07049031f46033e57 100644 (file)
@@ -78,7 +78,7 @@ def _config_user(s3tests_conf, section, user):
     s3tests_conf[section].setdefault('user_id', user)
     s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user))
     s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user))
-    s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20)))
+    s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in range(20)))
     s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40)))
 
 @contextlib.contextmanager
diff --git a/qa/tasks/s3roundtrip.py b/qa/tasks/s3roundtrip.py
index ebaeb4bc6e6278f0f6719440151fb8320bf6e3ec..8bf770c86530a2a112e7f5dd30ae5eec6cfca468 100644 (file)
@@ -77,7 +77,7 @@ def _config_user(s3tests_conf, section, user):
     s3tests_conf[section].setdefault('user_id', user)
     s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user))
     s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user))
-    s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20)))
+    s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in range(20)))
     s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40)))
 
 @contextlib.contextmanager
diff --git a/qa/tasks/s3tests.py b/qa/tasks/s3tests.py
index 7064ce01a85108505be973c3fc6ee6000ef63014..f6fce64ddfad126e108ddab09d0282747b0810fe 100644 (file)
@@ -78,9 +78,9 @@ def _config_user(s3tests_conf, section, user):
     s3tests_conf[section].setdefault('user_id', user)
     s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user))
     s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user))
-    s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20)))
+    s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in range(20)))
     s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40)))
-    s3tests_conf[section].setdefault('totp_serial', ''.join(random.choice(string.digits) for i in xrange(10)))
+    s3tests_conf[section].setdefault('totp_serial', ''.join(random.choice(string.digits) for i in range(10)))
     s3tests_conf[section].setdefault('totp_seed', base64.b32encode(os.urandom(40)))
     s3tests_conf[section].setdefault('totp_seconds', '5')
 
@@ -183,7 +183,7 @@ def configure(ctx, config):
             s3tests_conf['DEFAULT']['host'] = 'localhost'
 
         if properties is not None and 'slow_backend' in properties:
-           s3tests_conf['fixtures']['slow backend'] = properties['slow_backend']
+            s3tests_conf['fixtures']['slow backend'] = properties['slow_backend']
 
         (remote,) = ctx.cluster.only(client).remotes.keys()
         remote.run(
@@ -204,8 +204,8 @@ def configure(ctx, config):
 
     log.info('Configuring boto...')
     boto_src = os.path.join(os.path.dirname(__file__), 'boto.cfg.template')
-    for client, properties in config['clients'].iteritems():
-        with file(boto_src, 'rb') as f:
+    for client, properties in config['clients'].items():
+        with open(boto_src, 'rb') as f:
             (remote,) = ctx.cluster.only(client).remotes.keys()
             conf = f.read().format(
                 idle_timeout=config.get('idle_timeout', 30)
diff --git a/qa/tasks/samba.py b/qa/tasks/samba.py
index 319c6d5e5bf2eeb524c31f5936628062199d3435..1dd62d8624cf36ecd3d0ffde53a00a328acf977f 100644 (file)
@@ -6,6 +6,8 @@ import logging
 import sys
 import time
 
+import six
+
 from teuthology import misc as teuthology
 from teuthology.orchestra import run
 from teuthology.orchestra.daemon import DaemonGroup
@@ -22,7 +24,7 @@ def get_sambas(ctx, roles):
     :param roles: roles for this test (extracted from yaml files)
     """
     for role in roles:
-        assert isinstance(role, basestring)
+        assert isinstance(role, six.string_types)
         PREFIX = 'samba.'
         assert role.startswith(PREFIX)
         id_ = role[len(PREFIX):]
@@ -196,7 +198,7 @@ def task(ctx, config):
                 exc_info = sys.exc_info()
                 log.exception('Saw exception from %s.%s', d.role, d.id_)
         if exc_info != (None, None, None):
-            raise exc_info[0], exc_info[1], exc_info[2]
+            six.reraise(exc_info[0], exc_info[1], exc_info[2])
 
         for id_, remote in samba_servers:
             remote.run(
diff --git a/qa/tasks/tempest.py b/qa/tasks/tempest.py
index a8f1d82c32ad1b9246dc171e4e864bc094dfc5dc..10a682da24af3433546b57c6cbe82aaee2ec01a1 100644 (file)
@@ -133,7 +133,7 @@ def configure_instance(ctx, config):
         to_config(cconfig, params, 'identity', cpar)
         to_config(cconfig, params, 'object-storage', cpar)
         to_config(cconfig, params, 'object-storage-feature-enabled', cpar)
-        cpar.write(file(local_conf, 'w+'))
+        cpar.write(open(local_conf, 'w+'))
 
         remote.put_file(local_conf, tetcdir + '/tempest.conf')
     yield
diff --git a/qa/tasks/vstart_runner.py b/qa/tasks/vstart_runner.py
index bf1564460275898ccc34f5a6e7e53c9f47db720b..79548737146f80187b073a8bb3dd731951642a7d 100644 (file)
@@ -50,7 +50,7 @@ from teuthology import misc
 from teuthology.orchestra.run import Raw, quote
 from teuthology.orchestra.daemon import DaemonGroup
 from teuthology.config import config as teuth_config
-
+import six
 import logging
 
 log = logging.getLogger(__name__)
@@ -271,7 +271,7 @@ class LocalRemote(object):
         else:
             # Sanity check that we've got a list of strings
             for arg in args:
-                if not isinstance(arg, basestring):
+                if not isinstance(arg, six.string_types):
                     raise RuntimeError("Oops, can't handle arg {0} type {1}".format(
                         arg, arg.__class__
                     ))
@@ -284,7 +284,7 @@ class LocalRemote(object):
                                        env=env)
 
         if stdin:
-            if not isinstance(stdin, basestring):
+            if not isinstance(stdin, six.string_types):
                 raise RuntimeError("Can't handle non-string stdins on a vstart cluster")
 
             # Hack: writing to stdin is not deadlock-safe, but it "always" works
diff --git a/qa/tasks/watch_notify_same_primary.py b/qa/tasks/watch_notify_same_primary.py
index 3cb7eeb704abf167504e258b84d74dc6a3ac624e..7160979119b38ff0c640d5caee33ed04b0cf4f32 100644 (file)
@@ -6,6 +6,8 @@ from io import BytesIO
 import contextlib
 import logging
 
+import six
+
 from teuthology.orchestra import run
 from teuthology.contextutil import safe_while
 
@@ -41,7 +43,7 @@ def task(ctx, config):
     clients = config.get('clients', ['client.0'])
     assert len(clients) == 1
     role = clients[0]
-    assert isinstance(role, basestring)
+    assert isinstance(role, six.string_types)
     PREFIX = 'client.'
     assert role.startswith(PREFIX)
     (remote,) = ctx.cluster.only(role).remotes.keys()
diff --git a/qa/tasks/watch_notify_stress.py b/qa/tasks/watch_notify_stress.py
index 1e10e99b62afab93e5d5f30aaf1c756d5e2940a1..e5e380492e0e8038d6edbcfd63183a88c6698ee8 100644 (file)
@@ -4,6 +4,7 @@ test_stress_watch task
 import contextlib
 import logging
 
+import six
 from teuthology.orchestra import run
 from teuthology.task import proc_thrasher
 
@@ -36,7 +37,7 @@ def task(ctx, config):
     remotes = []
 
     for role in config.get('clients', ['client.0']):
-        assert isinstance(role, basestring)
+        assert isinstance(role, six.string_types)
         PREFIX = 'client.'
         assert role.startswith(PREFIX)
         id_ = role[len(PREFIX):]
diff --git a/qa/tasks/workunit.py b/qa/tasks/workunit.py
index 4372c2549f0776579b028b4901682e107c6fce0c..6ef08a430152d5dea4b9df0ca4626434180ad2f0 100644 (file)
@@ -6,8 +6,10 @@ import pipes
 import os
 import re
 
-from tasks.util import get_remote_for_role
-from tasks.util.workunit import get_refspec_after_overrides
+import six
+
+from util import get_remote_for_role
+from util.workunit import get_refspec_after_overrides
 
 from teuthology import misc
 from teuthology.config import config as teuth_config
@@ -103,7 +105,7 @@ def task(ctx, config):
     # Create scratch dirs for any non-all workunits
     log.info('Making a separate scratch dir for every client...')
     for role in clients.keys():
-        assert isinstance(role, basestring)
+        assert isinstance(role, six.string_types)
         if role == "all":
             continue
 
@@ -311,7 +313,7 @@ def _run_tests(ctx, refspec, role, tests, env, basedir,
                     to False is passed, the 'timeout' command is not used.
     """
     testdir = misc.get_testdir(ctx)
-    assert isinstance(role, basestring)
+    assert isinstance(role, six.string_types)
     cluster, type_, id_ = misc.split_role(role)
     assert type_ == 'client'
     remote = get_remote_for_role(ctx, role)
diff --git a/qa/tox.ini b/qa/tox.ini
index c5826ecb6ecd86fe4347f2019705b6fdac0e0887..5088e1203155ae1fdbc219ffe2c50a5d81cb1bcf 100644 (file)
@@ -1,8 +1,15 @@
 [tox]
-envlist = flake8
+envlist = flake8-py2, flake8-py3
 skipsdist = True
 
-[testenv:flake8]
+[testenv:flake8-py2]
+basepython = python2
+deps=
+  flake8
+commands=flake8 --select=F,E9 --exclude=venv,.tox
+
+[testenv:flake8-py3]
+basepython = python3
 deps=
   flake8
 commands=flake8 --select=F,E9 --exclude=venv,.tox
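
With this change, running tox in the qa directory executes both
environments; assuming tox plus python2 and python3 interpreters are
available locally, something like:

    cd qa
    tox                  # runs both flake8-py2 and flake8-py3
    tox -e flake8-py3    # or run a single environment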
diff --git a/qa/workunits/mon/caps.py b/qa/workunits/mon/caps.py
index 454bea37da6fe9b18763ceb2d4c1e91bd993bfee..1eb0cb658d67d7815e97cc8621125d5ef574192a 100644 (file)
@@ -10,6 +10,7 @@ import os
 import io
 import re
 
+import six
 
 from ceph_argparse import * # noqa
 
@@ -20,7 +21,7 @@ class UnexpectedReturn(Exception):
     if isinstance(cmd, list):
       self.cmd = ' '.join(cmd)
     else:
-      assert isinstance(cmd, str) or isinstance(cmd, unicode), \
+      assert isinstance(cmd, str) or isinstance(cmd, six.text_type), \
           'cmd needs to be either a list or a str'
       self.cmd = cmd
     self.cmd = str(self.cmd)
@@ -35,7 +36,7 @@ class UnexpectedReturn(Exception):
 def call(cmd):
   if isinstance(cmd, list):
     args = cmd
-  elif isinstance(cmd, str) or isinstance(cmd, unicode):
+  elif isinstance(cmd, str) or isinstance(cmd, six.text_type):
     args = shlex.split(cmd)
   else:
     assert False, 'cmd is not a string/unicode nor a list!'
@@ -71,7 +72,7 @@ def expect_to_file(cmd, expected_ret, out_file, mode='a'):
       'expected result doesn\'t match and no exception was thrown!'
 
   with io.open(out_file, mode) as file:
-    file.write(unicode(p.stdout.read()))
+    file.write(six.text_type(p.stdout.read()))
 
   return p
 
@@ -85,7 +86,7 @@ class Command:
     self.args = []
     for s in j['sig']:
       if not isinstance(s, dict):
-        assert isinstance(s, str) or isinstance(s,unicode), \
+        assert isinstance(s, str) or isinstance(s,six.text_type), \
             'malformatted signature cid {0}: {1}\n{2}'.format(cid,s,j)
         if len(self.sig) > 0:
           self.sig += ' '
diff --git a/qa/workunits/mon/ping.py b/qa/workunits/mon/ping.py
index 1773c7369317d0d88fa57cfa4a77e5597aa7d712..f39da885f6ff4993c60be51cb286206af371cf6b 100755 (executable)
@@ -3,14 +3,8 @@
 import json
 import shlex
 import subprocess
-import sys
 
-if sys.version_info[0] == 2:
-    string = basestring
-    unicode = unicode
-elif sys.version_info[0] == 3:
-    string = str
-    unicode = str
+import six
 
 
 class UnexpectedReturn(Exception):
@@ -18,7 +12,7 @@ class UnexpectedReturn(Exception):
         if isinstance(cmd, list):
             self.cmd = ' '.join(cmd)
         else:
-            assert isinstance(cmd, string) or isinstance(cmd, unicode), \
+            assert isinstance(cmd, six.string_types) or isinstance(cmd, six.text_type), \
                 'cmd needs to be either a list or a str'
             self.cmd = cmd
         self.cmd = str(self.cmd)
@@ -34,7 +28,7 @@ class UnexpectedReturn(Exception):
 def call(cmd):
     if isinstance(cmd, list):
         args = cmd
-    elif isinstance(cmd, string) or isinstance(cmd, unicode):
+    elif isinstance(cmd, six.string_types) or isinstance(cmd, six.text_type):
         args = shlex.split(cmd)
     else:
         assert False, 'cmd is not a string/unicode nor a list!'
diff --git a/qa/workunits/rados/test_large_omap_detection.py b/qa/workunits/rados/test_large_omap_detection.py
index 7b09dfd9071878d1aff259d20824a53a9a411b43..c6cf195d9f3bbeaadc7e4d6329b98fb02e07c856 100755 (executable)
@@ -59,7 +59,7 @@ def init():
 
     keys = []
     values = []
-    for x in xrange(20000):
+    for x in range(20000):
         keys.append(str(x))
         values.append(buffer)
 
diff --git a/qa/workunits/restart/test-backtraces.py b/qa/workunits/restart/test-backtraces.py
index 07fe8845f4e63fba2788f48c6d200bc70e96d3b3..86048e6d883acd0a40593328903afbee21c6cef7 100755 (executable)
@@ -9,7 +9,7 @@ import time
 import sys
 
 if sys.version_info[0] == 2:
-    range = xrange
+    range = xrange # noqa
 
 elif sys.version_info[0] == 3:
     range = range