git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
qa: Run flake8 on python2 and python3
authorThomas Bechtold <tbechtold@suse.com>
Mon, 9 Dec 2019 16:27:46 +0000 (17:27 +0100)
committerThomas Bechtold <tbechtold@suse.com>
Fri, 13 Dec 2019 08:24:20 +0000 (09:24 +0100)
To be able to catch problems with python2 *and* python3, run flake8
with both versions. From the flake8 homepage:

It is very important to install Flake8 on the correct version of
Python for your needs. If you want Flake8 to properly parse new
language features in Python 3.5 (for example), you need it to be
installed on 3.5 for Flake8 to understand those features. In many
ways, Flake8 is tied to the version of Python on which it runs.

Also fix the problems with python3 on the way.
Note: teuthology now requires the six module, but six is
already listed as an install_require in teuthology itself.

Signed-off-by: Thomas Bechtold <tbechtold@suse.com>
36 files changed:
qa/CMakeLists.txt
qa/tasks/autotest.py
qa/tasks/ceph.py
qa/tasks/ceph_manager.py
qa/tasks/cephfs/filesystem.py
qa/tasks/cephfs/test_client_recovery.py
qa/tasks/cephfs/test_exports.py
qa/tasks/cephfs/test_scrub_checks.py
qa/tasks/cephfs/xfstests_dev.py
qa/tasks/cephfs_test_runner.py
qa/tasks/cram.py
qa/tasks/keystone.py
qa/tasks/manypools.py
qa/tasks/omapbench.py
qa/tasks/peer.py
qa/tasks/qemu.py
qa/tasks/rados.py
qa/tasks/radosbench.py
qa/tasks/radosbenchsweep.py
qa/tasks/radosgw_admin.py
qa/tasks/ragweed.py
qa/tasks/rbd.py
qa/tasks/s3readwrite.py
qa/tasks/s3roundtrip.py
qa/tasks/s3tests.py
qa/tasks/samba.py
qa/tasks/tempest.py
qa/tasks/vstart_runner.py
qa/tasks/watch_notify_same_primary.py
qa/tasks/watch_notify_stress.py
qa/tasks/workunit.py
qa/tox.ini
qa/workunits/mon/caps.py
qa/workunits/mon/ping.py
qa/workunits/rados/test_large_omap_detection.py
qa/workunits/restart/test-backtraces.py

index 06de6620b953fcb9e4753687ce9f26c8649963a2..46f1f1def4527718196131e1fc43ab8fbc420989 100644 (file)
@@ -5,5 +5,5 @@ endif()
 
 if(WITH_TESTS)
   include(AddCephTest)
-  add_tox_test(qa flake8)
+  add_tox_test(qa flake8-py2 flake8-py3)
 endif()
index 743b6fac11925c4291afdb36da100e4f2365f233..a78987dca70e79843f570301bcdaafd38a6acc55 100644 (file)
@@ -1,10 +1,12 @@
-""" 
+"""
 Run an autotest test on the ceph cluster.
 """
 import json
 import logging
 import os
 
+import six
+
 from teuthology import misc as teuthology
 from teuthology.parallel import parallel
 from teuthology.orchestra import run
@@ -48,7 +50,7 @@ def task(ctx, config):
 
     log.info('Making a separate scratch dir for every client...')
     for role in config.keys():
-        assert isinstance(role, basestring)
+        assert isinstance(role, six.string_types)
         PREFIX = 'client.'
         assert role.startswith(PREFIX)
         id_ = role[len(PREFIX):]
@@ -103,7 +105,7 @@ def _run_tests(testdir, remote, role, tests):
     """
     Spawned to run test on remote site
     """
-    assert isinstance(role, basestring)
+    assert isinstance(role, six.string_types)
     PREFIX = 'client.'
     assert role.startswith(PREFIX)
     id_ = role[len(PREFIX):]
index 364c6aceac44f6c93e20f4061ba579f74462f6db..7e27f4cc4557a7d1073f0a5cf3d7f250147a6564 100644 (file)
@@ -166,13 +166,13 @@ def ceph_log(ctx, config):
                     # case we will see connection errors that we should ignore.
                     log.debug("Missed logrotate, node '{0}' is offline".format(
                         e.node))
-                except EOFError as e:
+                except EOFError:
                     # Paramiko sometimes raises this when it fails to
                     # connect to a node during open_session.  As with
                     # ConnectionLostError, we ignore this because nodes
                     # are allowed to get power cycled during tests.
                     log.debug("Missed logrotate, EOFError")
-                except SSHException as e:
+                except SSHException:
                     log.debug("Missed logrotate, SSHException")
                 except socket.error as e:
                     if e.errno in (errno.EHOSTUNREACH, errno.ECONNRESET):
@@ -190,7 +190,7 @@ def ceph_log(ctx, config):
     def write_rotate_conf(ctx, daemons):
         testdir = teuthology.get_testdir(ctx)
         rotate_conf_path = os.path.join(os.path.dirname(__file__), 'logrotate.conf')
-        with file(rotate_conf_path, 'rb') as f:
+        with open(rotate_conf_path, 'rb') as f:
             conf = ""
             for daemon, size in daemons.items():
                 log.info('writing logrotate stanza for {daemon}'.format(daemon=daemon))
index d2d7f43632fd40dfcd5690005db9e189bf455df4..bf67303d8a9d6f15ddf07a191cabee573ff3190a 100644 (file)
@@ -23,6 +23,7 @@ from teuthology.orchestra.remote import Remote
 from teuthology.orchestra import run
 from teuthology.exceptions import CommandFailedError
 from tasks.thrasher import Thrasher
+import six
 
 try:
     from subprocess import DEVNULL # py3k
@@ -1803,7 +1804,7 @@ class CephManager:
         :param erasure_code_use_overwrites: if true, allow overwrites
         """
         with self.lock:
-            assert isinstance(pool_name, basestring)
+            assert isinstance(pool_name, six.string_types)
             assert isinstance(pg_num, int)
             assert pool_name not in self.pools
             self.log("creating pool_name %s" % (pool_name,))
@@ -1855,7 +1856,7 @@ class CephManager:
         :param pool_name: Pool to be removed
         """
         with self.lock:
-            assert isinstance(pool_name, basestring)
+            assert isinstance(pool_name, six.string_types)
             assert pool_name in self.pools
             self.log("removing pool_name %s" % (pool_name,))
             del self.pools[pool_name]
@@ -1874,7 +1875,7 @@ class CephManager:
         Return the number of pgs in the pool specified.
         """
         with self.lock:
-            assert isinstance(pool_name, basestring)
+            assert isinstance(pool_name, six.string_types)
             if pool_name in self.pools:
                 return self.pools[pool_name]
             return 0
@@ -1886,8 +1887,8 @@ class CephManager:
         :returns: property as string
         """
         with self.lock:
-            assert isinstance(pool_name, basestring)
-            assert isinstance(prop, basestring)
+            assert isinstance(pool_name, six.string_types)
+            assert isinstance(prop, six.string_types)
             output = self.raw_cluster_cmd(
                 'osd',
                 'pool',
@@ -1908,8 +1909,8 @@ class CephManager:
         This routine retries if set operation fails.
         """
         with self.lock:
-            assert isinstance(pool_name, basestring)
-            assert isinstance(prop, basestring)
+            assert isinstance(pool_name, six.string_types)
+            assert isinstance(prop, six.string_types)
             assert isinstance(val, int)
             tries = 0
             while True:
@@ -1936,7 +1937,7 @@ class CephManager:
         Increase the number of pgs in a pool
         """
         with self.lock:
-            assert isinstance(pool_name, basestring)
+            assert isinstance(pool_name, six.string_types)
             assert isinstance(by, int)
             assert pool_name in self.pools
             if self.get_num_creating() > 0:
@@ -1956,7 +1957,7 @@ class CephManager:
         with self.lock:
             self.log('contract_pool %s by %s min %s' % (
                      pool_name, str(by), str(min_pgs)))
-            assert isinstance(pool_name, basestring)
+            assert isinstance(pool_name, six.string_types)
             assert isinstance(by, int)
             assert pool_name in self.pools
             if self.get_num_creating() > 0:
@@ -1996,7 +1997,7 @@ class CephManager:
         Set pgpnum property of pool_name pool.
         """
         with self.lock:
-            assert isinstance(pool_name, basestring)
+            assert isinstance(pool_name, six.string_types)
             assert pool_name in self.pools
             if not force and self.get_num_creating() > 0:
                 return False
@@ -2467,8 +2468,8 @@ class CephManager:
                 else:
                     self.log("no progress seen, keeping timeout for now")
                     if now - start >= timeout:
-                       if self.is_recovered():
-                           break
+                        if self.is_recovered():
+                            break
                         self.log('dumping pgs')
                         out = self.raw_cluster_cmd('pg', 'dump')
                         self.log(out)
index 5c778231f0b0ba28624995b10e9bced8d25ce312..d629a7c4a74614a6b03530502ddf5b1fe250a998 100644 (file)
@@ -778,7 +778,8 @@ class Filesystem(MDSCluster):
         """
         mdsmap = self.get_mds_map(status)
         result = []
-        for mds_status in sorted(mdsmap['info'].values(), lambda a, b: cmp(a['rank'], b['rank'])):
+        for mds_status in sorted(mdsmap['info'].values(),
+                                 key=lambda _: _['rank']):
             if mds_status['state'] == state or state is None:
                 result.append(mds_status['name'])
 
@@ -796,7 +797,8 @@ class Filesystem(MDSCluster):
     def get_all_mds_rank(self, status=None):
         mdsmap = self.get_mds_map(status)
         result = []
-        for mds_status in sorted(mdsmap['info'].values(), lambda a, b: cmp(a['rank'], b['rank'])):
+        for mds_status in sorted(mdsmap['info'].values(),
+                                 key=lambda _: _['rank']):
             if mds_status['rank'] != -1 and mds_status['state'] != 'up:standby-replay':
                 result.append(mds_status['rank'])
 
@@ -846,7 +848,8 @@ class Filesystem(MDSCluster):
         """
         mdsmap = self.get_mds_map(status)
         result = []
-        for mds_status in sorted(mdsmap['info'].values(), lambda a, b: cmp(a['rank'], b['rank'])):
+        for mds_status in sorted(mdsmap['info'].values(),
+                                 key=lambda _: _['rank']):
             if mds_status['rank'] != -1 and mds_status['state'] != 'up:standby-replay':
                 result.append(mds_status['name'])
 
index fdee8fc7cf2e621201676d5360ad360936268d30..7bc567aed83c87e0ffb5bd1a259d9141c03bbed9 100644 (file)
@@ -447,10 +447,10 @@ class TestClientRecovery(CephFSTestCase):
         self.mount_a.wait_until_mounted()
 
     def test_dir_fsync(self):
-       self._test_fsync(True);
+        self._test_fsync(True);
 
     def test_create_fsync(self):
-       self._test_fsync(False);
+        self._test_fsync(False);
 
     def _test_fsync(self, dirfsync):
         """
index 7d2a3425a894b74933f3595748d7e9ff34613ae1..060131add70f1bda4d4e52235ecc5ef8b40197b8 100644 (file)
@@ -132,8 +132,8 @@ class TestExports(CephFSTestCase):
             p = self.mount_a.client_remote.run(args=['uname', '-r'], stdout=StringIO(), wait=True)
             dir_pin = self.mount_a.getfattr("1", "ceph.dir.pin")
             log.debug("mount.getfattr('1','ceph.dir.pin'): %s " % dir_pin)
-           if str(p.stdout.getvalue()) < "5" and not(dir_pin):
-               self.skipTest("Kernel does not support getting the extended attribute ceph.dir.pin")
+            if str(p.stdout.getvalue()) < "5" and not(dir_pin):
+                self.skipTest("Kernel does not support getting the extended attribute ceph.dir.pin")
         self.assertTrue(self.mount_a.getfattr("1", "ceph.dir.pin") == "1")
         self.assertTrue(self.mount_a.getfattr("1/2", "ceph.dir.pin") == "0")
         if (len(self.fs.get_active_names()) > 2):
index 3b67c36b0224c2cb73ab60700a8f5bd2a615e636..523dc54b220a953705e2c2cb95052d3544b00d24 100644 (file)
@@ -280,11 +280,11 @@ class TestScrubChecks(CephFSTestCase):
         self.tell_command(mds_rank, "scrub start /{0} repair".format(test_dir),
                           lambda j, r: self.json_validator(j, r, "return_code", 0))
 
-       # wait a few second for background repair
-       time.sleep(10)
+        # wait a few second for background repair
+        time.sleep(10)
 
-       # fragstat should be fixed
-       self.mount_a.run_shell(["sudo", "rmdir", test_dir])
+        # fragstat should be fixed
+        self.mount_a.run_shell(["sudo", "rmdir", test_dir])
 
     @staticmethod
     def json_validator(json_out, rc, element, expected_value):
index d8520d3bf215b3ea853a1b4d473b3c7f01ef9941..ff8134b826b2564b51ff7861b432a5cdbf1be644 100644 (file)
@@ -1,3 +1,4 @@
+import six
 import logging
 from StringIO import StringIO
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
@@ -58,7 +59,7 @@ class XFSTestsDev(CephFSTestCase):
                 'auth', 'get-or-create', 'client.admin'))
         # TODO: remove this part when we stop supporting Python 2
         elif sys_version_info.major <= 2:
-            cp.read_string(unicode(self.fs.mon_manager.raw_cluster_cmd(
+            cp.read_string(six.text_type(self.fs.mon_manager.raw_cluster_cmd(
                 'auth', 'get-or-create', 'client.admin')))
 
         return cp['client.admin']['key']
index d57e85d306f61d117a59b2650177701df633caba..4455c086f31314b4acddb3da792ad34d59e4ba26 100644 (file)
@@ -133,7 +133,7 @@ def task(ctx, config):
 
     # Mount objects, sorted by ID
     if hasattr(ctx, 'mounts'):
-        mounts = [v for k, v in sorted(ctx.mounts.items(), lambda a, b: cmp(a[0], b[0]))]
+        mounts = [v for k, v in sorted(ctx.mounts.items(), key=lambda mount: mount[0])]
     else:
         # The test configuration has a filesystem but no fuse/kclient mounts
         mounts = []
index 8d3886159ee43226ddf14313d0e4a5cb6787dbe7..a397eb51ff23ed5cd7c53c700fcdad7ef3fac87c 100644 (file)
@@ -4,6 +4,8 @@ Cram tests
 import logging
 import os
 
+import six
+
 from util.workunit import get_refspec_after_overrides
 
 from teuthology import misc as teuthology
@@ -124,7 +126,7 @@ def _run_tests(ctx, role):
     :param ctx: Context
     :param role: Roles
     """
-    assert isinstance(role, basestring)
+    assert isinstance(role, six.string_types)
     PREFIX = 'client.'
     assert role.startswith(PREFIX)
     id_ = role[len(PREFIX):]
index 522f1673da2c87993c25cffe9630572b059a2f0d..4433ce20464989864021130bb43b5b971f9a1a46 100644 (file)
@@ -135,7 +135,7 @@ def setup_venv(ctx, config):
     for (client, _) in config.items():
         run_in_keystone_dir(ctx, client,
             [   'source',
-               '{tvdir}/bin/activate'.format(tvdir=get_toxvenv_dir(ctx)),
+                '{tvdir}/bin/activate'.format(tvdir=get_toxvenv_dir(ctx)),
                 run.Raw('&&'),
                 'tox', '-e', 'venv', '--notest'
             ])
@@ -147,7 +147,7 @@ def setup_venv(ctx, config):
     try:
         yield
     finally:
-       pass
+        pass
 
 @contextlib.contextmanager
 def configure_instance(ctx, config):
index 1f508a56fc202f60859fd779024bbeb2f1aad394..233c936bf2af98c998cc0ded868a6ec4c86b569c 100644 (file)
@@ -46,28 +46,28 @@ def task(ctx, config):
     poolprocs=dict()
     while (remaining_pools > 0):
         log.info('{n} pools remaining to create'.format(n=remaining_pools))
-       for remote, role_ in creator_remotes:
+        for remote, role_ in creator_remotes:
             poolnum = remaining_pools
             remaining_pools -= 1
             if remaining_pools < 0:
                 continue
             log.info('creating pool{num} on {role}'.format(num=poolnum, role=role_))
-           proc = remote.run(
-               args=[
-                   'ceph',
-                   '--name', role_,
-                   'osd', 'pool', 'create', 'pool{num}'.format(num=poolnum), '8',
-                   run.Raw('&&'),
-                   'rados',
-                   '--name', role_,
-                   '--pool', 'pool{num}'.format(num=poolnum),
-                   'bench', '0', 'write', '-t', '16', '--block-size', '1'
-                   ],
-               wait = False
-           )
+            proc = remote.run(
+                args=[
+                    'ceph',
+                    '--name', role_,
+                    'osd', 'pool', 'create', 'pool{num}'.format(num=poolnum), '8',
+                    run.Raw('&&'),
+                    'rados',
+                    '--name', role_,
+                    '--pool', 'pool{num}'.format(num=poolnum),
+                    'bench', '0', 'write', '-t', '16', '--block-size', '1'
+                ],
+                wait = False
+            )
             log.info('waiting for pool and object creates')
-           poolprocs[remote] = proc
-        
+            poolprocs[remote] = proc
+
         run.wait(poolprocs.itervalues())
-    
+
     log.info('created all {n} pools and wrote 16 objects to each'.format(n=poolnum))
index a6372a3bda9707278620e3aabc9ea477f6c9fed2..b6bd5b584db5908abe7cad5a9f74af200cae6171 100644 (file)
@@ -4,6 +4,8 @@ Run omapbench executable within teuthology
 import contextlib
 import logging
 
+import six
+
 from teuthology.orchestra import run
 from teuthology import misc as teuthology
 
@@ -48,7 +50,7 @@ def task(ctx, config):
     testdir = teuthology.get_testdir(ctx)
     print(str(config.get('increment',-1)))
     for role in config.get('clients', ['client.0']):
-        assert isinstance(role, basestring)
+        assert isinstance(role, six.string_types)
         PREFIX = 'client.'
         assert role.startswith(PREFIX)
         id_ = role[len(PREFIX):]
index 7ec82f8f6a373bca234293e3ef6176b693d65c07..e5344a1cea0325ee686c92ee2319ef5e2640e630 100644 (file)
@@ -65,7 +65,7 @@ def task(ctx, config):
     pgs = manager.get_pg_stats()
     for pg in pgs:
         out = manager.raw_cluster_cmd('pg', pg['pgid'], 'query')
-       log.debug("out string %s",out)
+        log.debug("out string %s",out)
         j = json.loads(out)
         log.info("pg is %s, query json is %s", pg, j)
 
index 4a06fde44ecce2b440454b9ce298bfffcfc0ea23..54bf09f2c4038cb05fe87dcb0c2cbf453e50e41a 100644 (file)
@@ -54,7 +54,7 @@ def create_clones(ctx, config, managers):
             num_disks = client_config.get('disks', DEFAULT_NUM_DISKS)
             if isinstance(num_disks, list):
                 num_disks = len(num_disks)
-            for i in xrange(num_disks):
+            for i in range(num_disks):
                 create_config = {
                     client: {
                         'image_name':
@@ -122,7 +122,7 @@ def generate_iso(ctx, config):
         userdata_path = os.path.join(testdir, 'qemu', 'userdata.' + client)
         metadata_path = os.path.join(testdir, 'qemu', 'metadata.' + client)
 
-        with file(os.path.join(src_dir, 'userdata_setup.yaml'), 'rb') as f:
+        with open(os.path.join(src_dir, 'userdata_setup.yaml'), 'rb') as f:
             test_setup = ''.join(f.readlines())
             # configuring the commands to setup the nfs mount
             mnt_dir = "/export/{client}".format(client=client)
@@ -130,7 +130,7 @@ def generate_iso(ctx, config):
                 mnt_dir=mnt_dir
             )
 
-        with file(os.path.join(src_dir, 'userdata_teardown.yaml'), 'rb') as f:
+        with open(os.path.join(src_dir, 'userdata_teardown.yaml'), 'rb') as f:
             test_teardown = ''.join(f.readlines())
 
         user_data = test_setup
@@ -138,7 +138,7 @@ def generate_iso(ctx, config):
             num_disks = client_config.get('disks', DEFAULT_NUM_DISKS)
             if isinstance(num_disks, list):
                 num_disks = len(num_disks)
-            for i in xrange(1, num_disks):
+            for i in range(1, num_disks):
                 dev_letter = chr(ord('a') + i)
                 user_data += """
 - |
@@ -174,7 +174,7 @@ def generate_iso(ctx, config):
             ceph_sha1=ctx.config.get('sha1'))
         teuthology.write_file(remote, userdata_path, StringIO(user_data))
 
-        with file(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f:
+        with open(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f:
             teuthology.write_file(remote, metadata_path, f)
 
         test_file = '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client)
@@ -395,7 +395,7 @@ def run_qemu(ctx, config):
         num_disks = client_config.get('disks', DEFAULT_NUM_DISKS)
         if isinstance(num_disks, list):
             num_disks = len(num_disks)
-        for i in xrange(num_disks):
+        for i in range(num_disks):
             suffix = '-clone' if clone else ''
             args.extend([
                 '-drive',
index e6b56d22beba09bc0881bb82395786251cd2e497..e108cd6585074269e1416d008f5c5216ea4929f9 100644 (file)
@@ -6,6 +6,8 @@ import logging
 import gevent
 from teuthology import misc as teuthology
 
+import six
+
 from teuthology.orchestra import run
 
 log = logging.getLogger(__name__)
@@ -225,7 +227,7 @@ def task(ctx, config):
             existing_pools = config.get('pools', [])
             created_pools = []
             for role in config.get('clients', clients):
-                assert isinstance(role, basestring)
+                assert isinstance(role, six.string_types)
                 PREFIX = 'client.'
                 assert role.startswith(PREFIX)
                 id_ = role[len(PREFIX):]
index d73b9476314e4f9c1fe062b6950588b67ebebece..dfe3e6153395e8eada4600cd58dc28d18ea1ffff 100644 (file)
@@ -7,6 +7,8 @@ import logging
 from teuthology.orchestra import run
 from teuthology import misc as teuthology
 
+import six
+
 log = logging.getLogger(__name__)
 
 @contextlib.contextmanager
@@ -52,7 +54,7 @@ def task(ctx, config):
 
     create_pool = config.get('create_pool', True)
     for role in config.get('clients', ['client.0']):
-        assert isinstance(role, basestring)
+        assert isinstance(role, six.string_types)
         PREFIX = 'client.'
         assert role.startswith(PREFIX)
         id_ = role[len(PREFIX):]
index f008dee60ab31daf14060e6a4fc04785ec96ad0b..f1fa7e04d0076a4c885392fc8aa0ada518bdb18c 100644 (file)
@@ -11,6 +11,8 @@ from itertools import product
 from teuthology.orchestra import run
 from teuthology import misc as teuthology
 
+import six
+
 log = logging.getLogger(__name__)
 
 
@@ -167,7 +169,7 @@ def run_radosbench(ctx, config, f, num_osds, size, replica, rep):
     log.info('  repetition =' + str(rep))
 
     for role in config.get('clients', ['client.0']):
-        assert isinstance(role, basestring)
+        assert isinstance(role, six.string_types)
         PREFIX = 'client.'
         assert role.startswith(PREFIX)
         id_ = role[len(PREFIX):]
index e9cfc36966836e0d0e0c0be00d646c35dfcbbf70..5919b97cd2e1d8629ca8607d5220c5a12ff727f3 100644 (file)
@@ -186,7 +186,7 @@ class usage_acc:
                 x2 = s2['total']
             except Exception as ex:
                 r.append("malformed summary looking for totals for user "
-                   + e['user'] + " " + str(ex))
+                         + e['user'] + " " + str(ex))
                 break
             usage_acc_validate_fields(r, x, x2, "summary: totals for user" + e['user'])
         return r
@@ -199,10 +199,10 @@ class requestlog_queue():
         self.adder = add
     def handle_request_data(self, request, response, error=False):
         now = datetime.datetime.now()
-       if error:
-           pass
-       elif response.status < 200 or response.status >= 400:
-           error = True
+        if error:
+            pass
+        elif response.status < 200 or response.status >= 400:
+            error = True
         self.q.put({'t': now, 'o': request, 'i': response, 'e': error})
     def clear(self):
         with self.q.mutex:
@@ -210,17 +210,17 @@ class requestlog_queue():
     def log_and_clear(self, cat, bucket, user, add_entry = None):
         while not self.q.empty():
             j = self.q.get()
-           bytes_out = 0
+            bytes_out = 0
             if 'Content-Length' in j['o'].headers:
-               bytes_out = int(j['o'].headers['Content-Length'])
+                bytes_out = int(j['o'].headers['Content-Length'])
             bytes_in = 0
             if 'content-length' in j['i'].msg.dict:
-               bytes_in = int(j['i'].msg.dict['content-length'])
+                bytes_in = int(j['i'].msg.dict['content-length'])
             log.info('RL: %s %s %s bytes_out=%d bytes_in=%d failed=%r'
-               % (cat, bucket, user, bytes_out, bytes_in, j['e']))
-           if add_entry == None:
-               add_entry = self.adder
-           add_entry(cat, bucket, user, bytes_out, bytes_in, j['e'])
+                     % (cat, bucket, user, bytes_out, bytes_in, j['e']))
+            if add_entry == None:
+                add_entry = self.adder
+            add_entry(cat, bucket, user, bytes_out, bytes_in, j['e'])
 
 def create_presigned_url(conn, method, bucket_name, key_name, expiration):
     return conn.generate_url(expires_in=expiration,
@@ -1040,8 +1040,8 @@ def task(ctx, config):
     assert len(out['placement_pools']) == orig_placement_pools + 1
 
     zonecmd = ['zone', 'placement', 'rm',
-       '--rgw-zone', 'default',
-       '--placement-id', 'new-placement']
+               '--rgw-zone', 'default',
+               '--placement-id', 'new-placement']
 
     (err, out) = rgwadmin(ctx, client, zonecmd, check_status=True)
 
@@ -1054,14 +1054,14 @@ import argparse;
 
 def main():
     if len(sys.argv) == 3:
-       user = sys.argv[1] + "@"
-       host = sys.argv[2]
+        user = sys.argv[1] + "@"
+        host = sys.argv[2]
     elif len(sys.argv) == 2:
         user = ""
-       host = sys.argv[1]
+        host = sys.argv[1]
     else:
         sys.stderr.write("usage: radosgw_admin.py [user] host\n")
-       exit(1)
+        exit(1)
     client0 = remote.Remote(user + host)
     ctx = config
     ctx.cluster=cluster.Cluster(remotes=[(client0,
index dffd10a0dc847c77aa716f8f2c6729bf3e9bacf7..baa4fd644df22e2344a95bff6594796b86077dea 100644 (file)
@@ -40,7 +40,7 @@ def download(ctx, config):
             ragweed_repo = ctx.config.get('ragweed_repo', teuth_config.ceph_git_base_url + 'ragweed.git')
             if suite_branch in s3_branches:
                 branch = cconf.get('branch', 'ceph-' + suite_branch)
-           else:
+            else:
                 branch = cconf.get('branch', suite_branch)
         if not branch:
             raise ValueError(
@@ -100,7 +100,7 @@ def _config_user(ragweed_conf, section, user):
     ragweed_conf[section].setdefault('user_id', user)
     ragweed_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user))
     ragweed_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user))
-    ragweed_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20)))
+    ragweed_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in range(20)))
     ragweed_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40)))
 
 
@@ -198,7 +198,7 @@ def configure(ctx, config, run_stages):
 
         ragweed_conf = config['ragweed_conf'][client]
         if properties is not None and 'slow_backend' in properties:
-           ragweed_conf['fixtures']['slow backend'] = properties['slow_backend']
+            ragweed_conf['fixtures']['slow backend'] = properties['slow_backend']
 
         conf_fp = StringIO()
         ragweed_conf.write(conf_fp)
@@ -211,7 +211,7 @@ def configure(ctx, config, run_stages):
     log.info('Configuring boto...')
     boto_src = os.path.join(os.path.dirname(__file__), 'boto.cfg.template')
     for client, properties in config['clients'].items():
-        with file(boto_src, 'rb') as f:
+        with open(boto_src, 'rb') as f:
             (remote,) = ctx.cluster.only(client).remotes.keys()
             conf = f.read().format(
                 idle_timeout=config.get('idle_timeout', 30)
index ce0ea17145da896ae1c92bcd79a4470046f5aea2..1962f583f2aebf0630cbc4c65c11bdecbf46438a 100644 (file)
@@ -16,6 +16,8 @@ from teuthology.task.common_fs_utils import generic_mkfs
 from teuthology.task.common_fs_utils import generic_mount
 from teuthology.task.common_fs_utils import default_image_name
 
+import six
+
 #V1 image unsupported but required for testing purposes
 os.environ["RBD_FORCE_ALLOW_V1"] = "1"
 
@@ -355,7 +357,7 @@ def run_xfstests(ctx, config):
             except:
                 exc_info = sys.exc_info()
         if exc_info:
-            raise exc_info[0], exc_info[1], exc_info[2]
+            six.reraise(exc_info[0], exc_info[1], exc_info[2])
     yield
 
 def run_xfstests_one_client(ctx, role, properties):
index 8ed7828c61cceca759fa4505052d2304bec02dee..c1e878a92e281d4f3987cc7417f3e5e8407d94d2 100644 (file)
@@ -78,7 +78,7 @@ def _config_user(s3tests_conf, section, user):
     s3tests_conf[section].setdefault('user_id', user)
     s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user))
     s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user))
-    s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20)))
+    s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in range(20)))
     s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40)))
 
 @contextlib.contextmanager
index 7823defd7c6e3bedd71446cff0501f1944dc67be..39ab51a57a6233e4a75ba7ce4a37a744daa2ab95 100644 (file)
@@ -78,7 +78,7 @@ def _config_user(s3tests_conf, section, user):
     s3tests_conf[section].setdefault('user_id', user)
     s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user))
     s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user))
-    s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20)))
+    s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in range(20)))
     s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40)))
 
 @contextlib.contextmanager
index 95ee8be5a8c547d5aaf6bd3430ddf87905370998..918dc408c01534d47bc081fa1b2efa75f0981dba 100644 (file)
@@ -78,9 +78,9 @@ def _config_user(s3tests_conf, section, user):
     s3tests_conf[section].setdefault('user_id', user)
     s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user))
     s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user))
-    s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20)))
+    s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in range(20)))
     s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40)))
-    s3tests_conf[section].setdefault('totp_serial', ''.join(random.choice(string.digits) for i in xrange(10)))
+    s3tests_conf[section].setdefault('totp_serial', ''.join(random.choice(string.digits) for i in range(10)))
     s3tests_conf[section].setdefault('totp_seed', base64.b32encode(os.urandom(40)))
     s3tests_conf[section].setdefault('totp_seconds', '5')
 
@@ -217,7 +217,7 @@ def configure(ctx, config):
 
         slow_backend = properties.get('slow_backend')
         if slow_backend:
-           s3tests_conf['fixtures']['slow backend'] = slow_backend
+            s3tests_conf['fixtures']['slow backend'] = slow_backend
 
         (remote,) = ctx.cluster.only(client).remotes.keys()
         remote.run(
@@ -239,7 +239,7 @@ def configure(ctx, config):
     log.info('Configuring boto...')
     boto_src = os.path.join(os.path.dirname(__file__), 'boto.cfg.template')
     for client, properties in config['clients'].items():
-        with file(boto_src, 'rb') as f:
+        with open(boto_src, 'rb') as f:
             (remote,) = ctx.cluster.only(client).remotes.keys()
             conf = f.read().format(
                 idle_timeout=config.get('idle_timeout', 30)
index 319c6d5e5bf2eeb524c31f5936628062199d3435..1dd62d8624cf36ecd3d0ffde53a00a328acf977f 100644 (file)
@@ -6,6 +6,8 @@ import logging
 import sys
 import time
 
+import six
+
 from teuthology import misc as teuthology
 from teuthology.orchestra import run
 from teuthology.orchestra.daemon import DaemonGroup
@@ -22,7 +24,7 @@ def get_sambas(ctx, roles):
     :param roles: roles for this test (extracted from yaml files)
     """
     for role in roles:
-        assert isinstance(role, basestring)
+        assert isinstance(role, six.string_types)
         PREFIX = 'samba.'
         assert role.startswith(PREFIX)
         id_ = role[len(PREFIX):]
@@ -196,7 +198,7 @@ def task(ctx, config):
                 exc_info = sys.exc_info()
                 log.exception('Saw exception from %s.%s', d.role, d.id_)
         if exc_info != (None, None, None):
-            raise exc_info[0], exc_info[1], exc_info[2]
+            six.reraise(exc_info[0], exc_info[1], exc_info[2])
 
         for id_, remote in samba_servers:
             remote.run(
index 71e80bc6471c3851ec3e55bd679e1cd5bf6855dc..8550f9b90f2c06e19a63ff09f06a22f2c3807c39 100644 (file)
@@ -142,7 +142,7 @@ def configure_instance(ctx, config):
         to_config(cconfig, params, 'identity', cpar)
         to_config(cconfig, params, 'object-storage', cpar)
         to_config(cconfig, params, 'object-storage-feature-enabled', cpar)
-        cpar.write(file(local_conf, 'w+'))
+        cpar.write(open(local_conf, 'w+'))
 
         remote.put_file(local_conf, tetcdir + '/tempest.conf')
     yield
index e5d1c4c6ea65d57005a4da144e5e76b4995023b3..39e0a02ce51ab50bffe55dbd94c6f7d225c29a9a 100644 (file)
@@ -49,7 +49,7 @@ import platform
 from teuthology.orchestra.run import Raw, quote
 from teuthology.orchestra.daemon import DaemonGroup
 from teuthology.config import config as teuth_config
-
+import six
 import logging
 
 def init_log():
@@ -283,7 +283,7 @@ class LocalRemote(object):
     def _perform_checks_and_return_list_of_args(self, args, omit_sudo):
         # Since Python's shell simulation can only work when commands are
         # provided as a list of argumensts...
-        if isinstance(args, str) or isinstance(args, unicode):
+        if isinstance(args, str) or isinstance(args, six.text_type):
             args = args.split()
 
         # We'll let sudo be a part of command even omit flag says otherwise in
@@ -360,7 +360,7 @@ class LocalRemote(object):
         else:
             # Sanity check that we've got a list of strings
             for arg in args:
-                if not isinstance(arg, basestring):
+                if not isinstance(arg, six.string_types):
                     raise RuntimeError("Oops, can't handle arg {0} type {1}".format(
                         arg, arg.__class__
                     ))
@@ -373,7 +373,7 @@ class LocalRemote(object):
                                        env=env)
 
         if stdin:
-            if not isinstance(stdin, basestring):
+            if not isinstance(stdin, six.string_types):
                 raise RuntimeError("Can't handle non-string stdins on a vstart cluster")
 
             # Hack: writing to stdin is not deadlock-safe, but it "always" works
index b462e5e4d76ed7e06f0571aa204e91fbd7475c1f..56bf98425f00344b15dc7d0bc43a2a13d0d8b139 100644 (file)
@@ -6,6 +6,8 @@ from cStringIO import StringIO
 import contextlib
 import logging
 
+import six
+
 from teuthology.orchestra import run
 from teuthology.contextutil import safe_while
 
@@ -41,7 +43,7 @@ def task(ctx, config):
     clients = config.get('clients', ['client.0'])
     assert len(clients) == 1
     role = clients[0]
-    assert isinstance(role, basestring)
+    assert isinstance(role, six.string_types)
     PREFIX = 'client.'
     assert role.startswith(PREFIX)
     (remote,) = ctx.cluster.only(role).remotes.keys()
index e54d4b6e07249da5ee5444e6a8b0c7c898750be5..8bce6282092adeeb3646404fd3ff9b3983656883 100644 (file)
@@ -5,6 +5,7 @@ import contextlib
 import logging
 import proc_thrasher
 
+import six
 from teuthology.orchestra import run
 
 log = logging.getLogger(__name__)
@@ -36,7 +37,7 @@ def task(ctx, config):
     remotes = []
 
     for role in config.get('clients', ['client.0']):
-        assert isinstance(role, basestring)
+        assert isinstance(role, six.string_types)
         PREFIX = 'client.'
         assert role.startswith(PREFIX)
         id_ = role[len(PREFIX):]
index 81ad2ee0ce33941f5f58fdcdc8df1b2528b387bc..99f511befb0419090d5bff119ddb952db704cf4c 100644 (file)
@@ -6,6 +6,8 @@ import pipes
 import os
 import re
 
+import six
+
 from util import get_remote_for_role
 from util.workunit import get_refspec_after_overrides
 
@@ -103,7 +105,7 @@ def task(ctx, config):
     # Create scratch dirs for any non-all workunits
     log.info('Making a separate scratch dir for every client...')
     for role in clients.keys():
-        assert isinstance(role, basestring)
+        assert isinstance(role, six.string_types)
         if role == "all":
             continue
 
@@ -311,7 +313,7 @@ def _run_tests(ctx, refspec, role, tests, env, basedir,
                     to False is passed, the 'timeout' command is not used.
     """
     testdir = misc.get_testdir(ctx)
-    assert isinstance(role, basestring)
+    assert isinstance(role, six.string_types)
     cluster, type_, id_ = misc.split_role(role)
     assert type_ == 'client'
     remote = get_remote_for_role(ctx, role)
index c5826ecb6ecd86fe4347f2019705b6fdac0e0887..16792011cff98aadba5d82da6757b95df8b7bf34 100644 (file)
@@ -1,8 +1,16 @@
 [tox]
-envlist = flake8
+envlist = flake8-py2, flake8-py3
 skipsdist = True
 
-[testenv:flake8]
+[testenv:flake8-py2]
+basepython = python2
 deps=
   flake8
 commands=flake8 --select=F,E9 --exclude=venv,.tox
+
+[testenv:flake8-py3]
+basepython = python3
+deps=
+  flake8
+commands=flake8 --select=F,E9 --exclude=venv,.tox
+
index 20bb9e912dca8909d7db8b3c8db3dd6251f2a10a..47ee925537f7c80ffd36208eaf2b19d6b92248d6 100644 (file)
@@ -10,6 +10,7 @@ import os
 import io
 import re
 
+import six
 
 from ceph_argparse import * # noqa
 
@@ -20,7 +21,7 @@ class UnexpectedReturn(Exception):
     if isinstance(cmd, list):
       self.cmd = ' '.join(cmd)
     else:
-      assert isinstance(cmd, str) or isinstance(cmd, unicode), \
+      assert isinstance(cmd, str) or isinstance(cmd, six.text_type), \
           'cmd needs to be either a list or a str'
       self.cmd = cmd
     self.cmd = str(self.cmd)
@@ -35,7 +36,7 @@ class UnexpectedReturn(Exception):
 def call(cmd):
   if isinstance(cmd, list):
     args = cmd
-  elif isinstance(cmd, str) or isinstance(cmd, unicode):
+  elif isinstance(cmd, str) or isinstance(cmd, six.text_type):
     args = shlex.split(cmd)
   else:
     assert False, 'cmd is not a string/unicode nor a list!'
@@ -71,7 +72,7 @@ def expect_to_file(cmd, expected_ret, out_file, mode='a'):
       'expected result doesn\'t match and no exception was thrown!'
 
   with io.open(out_file, mode) as file:
-    file.write(unicode(p.stdout.read()))
+    file.write(six.text_type(p.stdout.read()))
 
   return p
 
@@ -85,7 +86,7 @@ class Command:
     self.args = []
     for s in j['sig']:
       if not isinstance(s, dict):
-        assert isinstance(s, str) or isinstance(s,unicode), \
+        assert isinstance(s, str) or isinstance(s,six.text_type), \
             'malformatted signature cid {0}: {1}\n{2}'.format(cid,s,j)
         if len(self.sig) > 0:
           self.sig += ' '
index 1773c7369317d0d88fa57cfa4a77e5597aa7d712..f39da885f6ff4993c60be51cb286206af371cf6b 100755 (executable)
@@ -3,14 +3,8 @@
 import json
 import shlex
 import subprocess
-import sys
 
-if sys.version_info[0] == 2:
-    string = basestring
-    unicode = unicode
-elif sys.version_info[0] == 3:
-    string = str
-    unicode = str
+import six
 
 
 class UnexpectedReturn(Exception):
@@ -18,7 +12,7 @@ class UnexpectedReturn(Exception):
         if isinstance(cmd, list):
             self.cmd = ' '.join(cmd)
         else:
-            assert isinstance(cmd, string) or isinstance(cmd, unicode), \
+            assert isinstance(cmd, six.string_types) or isinstance(cmd, six.text_type), \
                 'cmd needs to be either a list or a str'
             self.cmd = cmd
         self.cmd = str(self.cmd)
@@ -34,7 +28,7 @@ class UnexpectedReturn(Exception):
 def call(cmd):
     if isinstance(cmd, list):
         args = cmd
-    elif isinstance(cmd, string) or isinstance(cmd, unicode):
+    elif isinstance(cmd, six.string_types) or isinstance(cmd, six.text_type):
         args = shlex.split(cmd)
     else:
         assert False, 'cmd is not a string/unicode nor a list!'
index 7b09dfd9071878d1aff259d20824a53a9a411b43..c6cf195d9f3bbeaadc7e4d6329b98fb02e07c856 100755 (executable)
@@ -59,7 +59,7 @@ def init():
 
     keys = []
     values = []
-    for x in xrange(20000):
+    for x in range(20000):
         keys.append(str(x))
         values.append(buffer)
 
index 07fe8845f4e63fba2788f48c6d200bc70e96d3b3..86048e6d883acd0a40593328903afbee21c6cef7 100755 (executable)
@@ -9,7 +9,7 @@ import time
 import sys
 
 if sys.version_info[0] == 2:
-    range = xrange
+    range = xrange # noqa
 
 elif sys.version_info[0] == 3:
     range = range