From: Thomas Bechtold
Date: Mon, 9 Dec 2019 16:27:46 +0000 (+0100)
Subject: qa: Run flake8 on python2 and python3
X-Git-Tag: v14.2.10~17^2~99
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=57b4b604d21ada06568353e6bf5c83d0a18b5db3;p=ceph.git

qa: Run flake8 on python2 and python3

To be able to catch problems with python2 *and* python3, run flake8
with both versions. From the flake8 homepage:

  It is very important to install Flake8 on the correct version of
  Python for your needs. If you want Flake8 to properly parse new
  language features in Python 3.5 (for example), you need it to be
  installed on 3.5 for Flake8 to understand those features. In many
  ways, Flake8 is tied to the version of Python on which it runs.

Also fix the problems with python3 along the way.

Note: this now requires the six module for teuthology, but six is
already an install_requires dependency of teuthology itself.

Signed-off-by: Thomas Bechtold
(cherry picked from commit bdcc94a1d1f659b1524fdc6bb8bd1da6d38a30d2)

Conflicts:
	qa/CMakeLists.txt
	qa/tasks/ceph_manager.py
	qa/tasks/cephfs/xfstests_dev.py
	qa/tasks/cram.py
	qa/tasks/manypools.py
	qa/tasks/ragweed.py
	qa/tasks/s3tests.py
	qa/tasks/vstart_runner.py
	qa/tasks/workunit.py
	qa/tox.ini: trivial resolutions
---

diff --git a/qa/tasks/autotest.py b/qa/tasks/autotest.py
index 1735f677380..2f96373d0a9 100644
--- a/qa/tasks/autotest.py
+++ b/qa/tasks/autotest.py
@@ -1,10 +1,12 @@
-""" 
+"""
 Run an autotest test on the ceph cluster.
 """
 import json
 import logging
 import os
 
+import six
+
 from teuthology import misc as teuthology
 from teuthology.parallel import parallel
 from teuthology.orchestra import run
@@ -48,7 +50,7 @@ def task(ctx, config):
 
     log.info('Making a separate scratch dir for every client...')
     for role in config.keys():
-        assert isinstance(role, basestring)
+        assert isinstance(role, six.string_types)
         PREFIX = 'client.'
         assert role.startswith(PREFIX)
         id_ = role[len(PREFIX):]
@@ -103,7 +105,7 @@ def _run_tests(testdir, remote, role, tests):
     """
     Spawned to run test on remote site
     """
-    assert isinstance(role, basestring)
+    assert isinstance(role, six.string_types)
     PREFIX = 'client.'
     assert role.startswith(PREFIX)
     id_ = role[len(PREFIX):]
diff --git a/qa/tasks/ceph.py b/qa/tasks/ceph.py
index c44e2cf26bf..e38f7c62edf 100644
--- a/qa/tasks/ceph.py
+++ b/qa/tasks/ceph.py
@@ -166,13 +166,13 @@ def ceph_log(ctx, config):
                     # case we will see connection errors that we should ignore.
                     log.debug("Missed logrotate, node '{0}' is offline".format(
                         e.node))
-                except EOFError as e:
+                except EOFError:
                     # Paramiko sometimes raises this when it fails to
                     # connect to a node during open_session. As with
                     # ConnectionLostError, we ignore this because nodes
                     # are allowed to get power cycled during tests.
log.debug("Missed logrotate, EOFError") - except SSHException as e: + except SSHException: log.debug("Missed logrotate, SSHException") except socket.error as e: if e.errno in (errno.EHOSTUNREACH, errno.ECONNRESET): @@ -191,7 +191,7 @@ def ceph_log(ctx, config): testdir = teuthology.get_testdir(ctx) remote_logrotate_conf = '%s/logrotate.ceph-test.conf' % testdir rotate_conf_path = os.path.join(os.path.dirname(__file__), 'logrotate.conf') - with file(rotate_conf_path, 'rb') as f: + with open(rotate_conf_path, 'rb') as f: conf = "" for daemon, size in daemons.items(): log.info('writing logrotate stanza for {}'.format(daemon)) diff --git a/qa/tasks/ceph_manager.py b/qa/tasks/ceph_manager.py index e2058376338..a7267acfe69 100644 --- a/qa/tasks/ceph_manager.py +++ b/qa/tasks/ceph_manager.py @@ -23,6 +23,7 @@ from teuthology.contextutil import safe_while from teuthology.orchestra.remote import Remote from teuthology.orchestra import run from teuthology.exceptions import CommandFailedError +import six try: from subprocess import DEVNULL # py3k @@ -1631,7 +1632,7 @@ class CephManager: :param erasure_code_use_overwrites: if true, allow overwrites """ with self.lock: - assert isinstance(pool_name, basestring) + assert isinstance(pool_name, six.string_types) assert isinstance(pg_num, int) assert pool_name not in self.pools self.log("creating pool_name %s" % (pool_name,)) @@ -1683,7 +1684,7 @@ class CephManager: :param pool_name: Pool to be removed """ with self.lock: - assert isinstance(pool_name, basestring) + assert isinstance(pool_name, six.string_types) assert pool_name in self.pools self.log("removing pool_name %s" % (pool_name,)) del self.pools[pool_name] @@ -1702,7 +1703,7 @@ class CephManager: Return the number of pgs in the pool specified. """ with self.lock: - assert isinstance(pool_name, basestring) + assert isinstance(pool_name, six.string_types) if pool_name in self.pools: return self.pools[pool_name] return 0 @@ -1714,8 +1715,8 @@ class CephManager: :returns: property as an int value. """ with self.lock: - assert isinstance(pool_name, basestring) - assert isinstance(prop, basestring) + assert isinstance(pool_name, six.string_types) + assert isinstance(prop, six.string_types) output = self.raw_cluster_cmd( 'osd', 'pool', @@ -1733,8 +1734,8 @@ class CephManager: This routine retries if set operation fails. """ with self.lock: - assert isinstance(pool_name, basestring) - assert isinstance(prop, basestring) + assert isinstance(pool_name, six.string_types) + assert isinstance(prop, six.string_types) assert isinstance(val, int) tries = 0 while True: @@ -1761,7 +1762,7 @@ class CephManager: Increase the number of pgs in a pool """ with self.lock: - assert isinstance(pool_name, basestring) + assert isinstance(pool_name, six.string_types) assert isinstance(by, int) assert pool_name in self.pools if self.get_num_creating() > 0: @@ -1781,7 +1782,7 @@ class CephManager: with self.lock: self.log('contract_pool %s by %s min %s' % ( pool_name, str(by), str(min_pgs))) - assert isinstance(pool_name, basestring) + assert isinstance(pool_name, six.string_types) assert isinstance(by, int) assert pool_name in self.pools if self.get_num_creating() > 0: @@ -1820,7 +1821,7 @@ class CephManager: Set pgpnum property of pool_name pool. 
""" with self.lock: - assert isinstance(pool_name, basestring) + assert isinstance(pool_name, six.string_types) assert pool_name in self.pools if not force and self.get_num_creating() > 0: return False diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py index 3f1f9bf8036..e02d822e26f 100644 --- a/qa/tasks/cephfs/filesystem.py +++ b/qa/tasks/cephfs/filesystem.py @@ -781,7 +781,8 @@ class Filesystem(MDSCluster): """ mdsmap = self.get_mds_map(status) result = [] - for mds_status in sorted(mdsmap['info'].values(), lambda a, b: cmp(a['rank'], b['rank'])): + for mds_status in sorted(mdsmap['info'].values(), + key=lambda _: _['rank']): if mds_status['state'] == state or state is None: result.append(mds_status['name']) @@ -799,7 +800,8 @@ class Filesystem(MDSCluster): def get_all_mds_rank(self, status=None): mdsmap = self.get_mds_map(status) result = [] - for mds_status in sorted(mdsmap['info'].values(), lambda a, b: cmp(a['rank'], b['rank'])): + for mds_status in sorted(mdsmap['info'].values(), + key=lambda _: _['rank']): if mds_status['rank'] != -1 and mds_status['state'] != 'up:standby-replay': result.append(mds_status['rank']) @@ -849,7 +851,8 @@ class Filesystem(MDSCluster): """ mdsmap = self.get_mds_map(status) result = [] - for mds_status in sorted(mdsmap['info'].values(), lambda a, b: cmp(a['rank'], b['rank'])): + for mds_status in sorted(mdsmap['info'].values(), + key=lambda _: _['rank']): if mds_status['rank'] != -1 and mds_status['state'] != 'up:standby-replay': result.append(mds_status['name']) diff --git a/qa/tasks/cephfs/test_client_recovery.py b/qa/tasks/cephfs/test_client_recovery.py index bdd8e1388d2..ab86ae7a89c 100644 --- a/qa/tasks/cephfs/test_client_recovery.py +++ b/qa/tasks/cephfs/test_client_recovery.py @@ -449,10 +449,10 @@ class TestClientRecovery(CephFSTestCase): self.mount_a.wait_until_mounted() def test_dir_fsync(self): - self._test_fsync(True); + self._test_fsync(True); def test_create_fsync(self): - self._test_fsync(False); + self._test_fsync(False); def _test_fsync(self, dirfsync): """ diff --git a/qa/tasks/cephfs/test_exports.py b/qa/tasks/cephfs/test_exports.py index 70e55060684..bd895e381b5 100644 --- a/qa/tasks/cephfs/test_exports.py +++ b/qa/tasks/cephfs/test_exports.py @@ -131,8 +131,8 @@ class TestExports(CephFSTestCase): p = self.mount_a.client_remote.sh('uname -r'), wait=True) dir_pin = self.mount_a.getfattr("1", "ceph.dir.pin") log.debug("mount.getfattr('1','ceph.dir.pin'): %s " % dir_pin) - if str(p.stdout.getvalue()) < "5" and not(dir_pin): - self.skipTest("Kernel does not support getting the extended attribute ceph.dir.pin") + if str(p.stdout.getvalue()) < "5" and not(dir_pin): + self.skipTest("Kernel does not support getting the extended attribute ceph.dir.pin") self.assertTrue(self.mount_a.getfattr("1", "ceph.dir.pin") == "1") self.assertTrue(self.mount_a.getfattr("1/2", "ceph.dir.pin") == "0") if (len(self.fs.get_active_names()) > 2): diff --git a/qa/tasks/cephfs/test_scrub_checks.py b/qa/tasks/cephfs/test_scrub_checks.py index 80c69e3c29c..54ed16ffa1f 100644 --- a/qa/tasks/cephfs/test_scrub_checks.py +++ b/qa/tasks/cephfs/test_scrub_checks.py @@ -310,11 +310,11 @@ class TestScrubChecks(CephFSTestCase): self.tell_command(mds_rank, "scrub start /{0} repair".format(test_dir), lambda j, r: self.json_validator(j, r, "return_code", 0)) - # wait a few second for background repair - time.sleep(10) + # wait a few second for background repair + time.sleep(10) - # fragstat should be fixed - self.mount_a.run_shell(["sudo", "rmdir", 
test_dir]) + # fragstat should be fixed + self.mount_a.run_shell(["sudo", "rmdir", test_dir]) @staticmethod def json_validator(json_out, rc, element, expected_value): diff --git a/qa/tasks/cephfs_test_runner.py b/qa/tasks/cephfs_test_runner.py index d57e85d306f..4455c086f31 100644 --- a/qa/tasks/cephfs_test_runner.py +++ b/qa/tasks/cephfs_test_runner.py @@ -133,7 +133,7 @@ def task(ctx, config): # Mount objects, sorted by ID if hasattr(ctx, 'mounts'): - mounts = [v for k, v in sorted(ctx.mounts.items(), lambda a, b: cmp(a[0], b[0]))] + mounts = [v for k, v in sorted(ctx.mounts.items(), key=lambda mount: mount[0])] else: # The test configuration has a filesystem but no fuse/kclient mounts mounts = [] diff --git a/qa/tasks/cram.py b/qa/tasks/cram.py index 3e765936483..85fbb9e66b2 100644 --- a/qa/tasks/cram.py +++ b/qa/tasks/cram.py @@ -4,6 +4,8 @@ Cram tests import logging import os +import six + from tasks.util.workunit import get_refspec_after_overrides from teuthology import misc as teuthology @@ -124,7 +126,7 @@ def _run_tests(ctx, role): :param ctx: Context :param role: Roles """ - assert isinstance(role, basestring) + assert isinstance(role, six.string_types) PREFIX = 'client.' assert role.startswith(PREFIX) id_ = role[len(PREFIX):] diff --git a/qa/tasks/keystone.py b/qa/tasks/keystone.py index baf99324d8e..250c297298d 100644 --- a/qa/tasks/keystone.py +++ b/qa/tasks/keystone.py @@ -134,7 +134,7 @@ def setup_venv(ctx, config): for (client, _) in config.items(): run_in_keystone_dir(ctx, client, [ 'source', - '{tvdir}/bin/activate'.format(tvdir=get_toxvenv_dir(ctx)), + '{tvdir}/bin/activate'.format(tvdir=get_toxvenv_dir(ctx)), run.Raw('&&'), 'tox', '-e', 'venv', '--notest' ]) diff --git a/qa/tasks/manypools.py b/qa/tasks/manypools.py index 7aec5df1738..7fe7e43e1b8 100644 --- a/qa/tasks/manypools.py +++ b/qa/tasks/manypools.py @@ -46,25 +46,25 @@ def task(ctx, config): poolprocs=dict() while (remaining_pools > 0): log.info('{n} pools remaining to create'.format(n=remaining_pools)) - for remote, role_ in creator_remotes: + for remote, role_ in creator_remotes: poolnum = remaining_pools remaining_pools -= 1 if remaining_pools < 0: continue log.info('creating pool{num} on {role}'.format(num=poolnum, role=role_)) - proc = remote.run( - args=[ - 'ceph', - '--name', role_, - 'osd', 'pool', 'create', 'pool{num}'.format(num=poolnum), '8', - run.Raw('&&'), - 'rados', - '--name', role_, - '--pool', 'pool{num}'.format(num=poolnum), - 'bench', '0', 'write', '-t', '16', '--block-size', '1' - ], - wait = False - ) + proc = remote.run( + args=[ + 'ceph', + '--name', role_, + 'osd', 'pool', 'create', 'pool{num}'.format(num=poolnum), '8', + run.Raw('&&'), + 'rados', + '--name', role_, + '--pool', 'pool{num}'.format(num=poolnum), + 'bench', '0', 'write', '-t', '16', '--block-size', '1' + ], + wait = False + ) log.info('waiting for pool and object creates') poolprocs[remote] = proc diff --git a/qa/tasks/omapbench.py b/qa/tasks/omapbench.py index 77f5dd0676e..af0793d9564 100644 --- a/qa/tasks/omapbench.py +++ b/qa/tasks/omapbench.py @@ -4,6 +4,8 @@ Run omapbench executable within teuthology import contextlib import logging +import six + from teuthology.orchestra import run from teuthology import misc as teuthology @@ -48,7 +50,7 @@ def task(ctx, config): testdir = teuthology.get_testdir(ctx) print(str(config.get('increment',-1))) for role in config.get('clients', ['client.0']): - assert isinstance(role, basestring) + assert isinstance(role, six.string_types) PREFIX = 'client.' 
assert role.startswith(PREFIX) id_ = role[len(PREFIX):] diff --git a/qa/tasks/qemu.py b/qa/tasks/qemu.py index aa490214ed9..48ed5b024d6 100644 --- a/qa/tasks/qemu.py +++ b/qa/tasks/qemu.py @@ -53,7 +53,7 @@ def create_clones(ctx, config, managers): num_disks = client_config.get('disks', DEFAULT_NUM_DISKS) if isinstance(num_disks, list): num_disks = len(num_disks) - for i in xrange(num_disks): + for i in range(num_disks): create_config = { client: { 'image_name': @@ -121,7 +121,7 @@ def generate_iso(ctx, config): userdata_path = os.path.join(testdir, 'qemu', 'userdata.' + client) metadata_path = os.path.join(testdir, 'qemu', 'metadata.' + client) - with file(os.path.join(src_dir, 'userdata_setup.yaml'), 'rb') as f: + with open(os.path.join(src_dir, 'userdata_setup.yaml'), 'rb') as f: test_setup = ''.join(f.readlines()) # configuring the commands to setup the nfs mount mnt_dir = "/export/{client}".format(client=client) @@ -129,7 +129,7 @@ def generate_iso(ctx, config): mnt_dir=mnt_dir ) - with file(os.path.join(src_dir, 'userdata_teardown.yaml'), 'rb') as f: + with open(os.path.join(src_dir, 'userdata_teardown.yaml'), 'rb') as f: test_teardown = ''.join(f.readlines()) user_data = test_setup @@ -137,7 +137,7 @@ def generate_iso(ctx, config): num_disks = client_config.get('disks', DEFAULT_NUM_DISKS) if isinstance(num_disks, list): num_disks = len(num_disks) - for i in xrange(1, num_disks): + for i in range(1, num_disks): dev_letter = chr(ord('a') + i) user_data += """ - | @@ -173,7 +173,7 @@ def generate_iso(ctx, config): ceph_sha1=ctx.config.get('sha1')) teuthology.write_file(remote, userdata_path, StringIO(user_data)) - with file(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f: + with open(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f: teuthology.write_file(remote, metadata_path, f) test_file = '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client) @@ -394,7 +394,7 @@ def run_qemu(ctx, config): num_disks = client_config.get('disks', DEFAULT_NUM_DISKS) if isinstance(num_disks, list): num_disks = len(num_disks) - for i in xrange(num_disks): + for i in range(num_disks): suffix = '-clone' if clone else '' args.extend([ '-drive', diff --git a/qa/tasks/rados.py b/qa/tasks/rados.py index d4872fb1157..121d06ea288 100644 --- a/qa/tasks/rados.py +++ b/qa/tasks/rados.py @@ -6,6 +6,8 @@ import logging import gevent from teuthology import misc as teuthology +import six + from teuthology.orchestra import run log = logging.getLogger(__name__) @@ -223,7 +225,7 @@ def task(ctx, config): existing_pools = config.get('pools', []) created_pools = [] for role in config.get('clients', clients): - assert isinstance(role, basestring) + assert isinstance(role, six.string_types) PREFIX = 'client.' assert role.startswith(PREFIX) id_ = role[len(PREFIX):] diff --git a/qa/tasks/radosbench.py b/qa/tasks/radosbench.py index 770e3aff7c5..7b6b98adad0 100644 --- a/qa/tasks/radosbench.py +++ b/qa/tasks/radosbench.py @@ -7,6 +7,8 @@ import logging from teuthology.orchestra import run from teuthology import misc as teuthology +import six + log = logging.getLogger(__name__) @contextlib.contextmanager @@ -53,7 +55,7 @@ def task(ctx, config): create_pool = config.get('create_pool', True) for role in config.get('clients', ['client.0']): - assert isinstance(role, basestring) + assert isinstance(role, six.string_types) PREFIX = 'client.' 
assert role.startswith(PREFIX) id_ = role[len(PREFIX):] diff --git a/qa/tasks/radosbenchsweep.py b/qa/tasks/radosbenchsweep.py index 5d18f7a3bad..0aeb7218681 100644 --- a/qa/tasks/radosbenchsweep.py +++ b/qa/tasks/radosbenchsweep.py @@ -11,6 +11,8 @@ from itertools import product from teuthology.orchestra import run from teuthology import misc as teuthology +import six + log = logging.getLogger(__name__) @@ -167,7 +169,7 @@ def run_radosbench(ctx, config, f, num_osds, size, replica, rep): log.info(' repetition =' + str(rep)) for role in config.get('clients', ['client.0']): - assert isinstance(role, basestring) + assert isinstance(role, six.string_types) PREFIX = 'client.' assert role.startswith(PREFIX) id_ = role[len(PREFIX):] diff --git a/qa/tasks/radosgw_admin.py b/qa/tasks/radosgw_admin.py index 8c6e952080c..63cb1375f35 100644 --- a/qa/tasks/radosgw_admin.py +++ b/qa/tasks/radosgw_admin.py @@ -187,7 +187,7 @@ class usage_acc: x2 = s2['total'] except Exception as ex: r.append("malformed summary looking for totals for user " - + e['user'] + " " + str(ex)) + + e['user'] + " " + str(ex)) break usage_acc_validate_fields(r, x, x2, "summary: totals for user" + e['user']) return r @@ -909,8 +909,8 @@ def task(ctx, config): assert len(out['placement_pools']) == orig_placement_pools + 1 zonecmd = ['zone', 'placement', 'rm', - '--rgw-zone', 'default', - '--placement-id', 'new-placement'] + '--rgw-zone', 'default', + '--placement-id', 'new-placement'] (err, out) = rgwadmin(ctx, client, zonecmd, check_status=True) diff --git a/qa/tasks/ragweed.py b/qa/tasks/ragweed.py index bb72267e5e4..fab0853ce98 100644 --- a/qa/tasks/ragweed.py +++ b/qa/tasks/ragweed.py @@ -41,7 +41,7 @@ def download(ctx, config): ragweed_repo = ctx.config.get('ragweed_repo', teuth_config.ceph_git_base_url + 'ragweed.git') if suite_branch in s3_branches: branch = cconf.get('branch', 'ceph-' + suite_branch) - else: + else: branch = cconf.get('branch', suite_branch) if not branch: raise ValueError( @@ -101,7 +101,7 @@ def _config_user(ragweed_conf, section, user): ragweed_conf[section].setdefault('user_id', user) ragweed_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user)) ragweed_conf[section].setdefault('display_name', 'Mr. 
{user}'.format(user=user)) - ragweed_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20))) + ragweed_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in range(20))) ragweed_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40))) @@ -211,7 +211,7 @@ def configure(ctx, config, run_stages): ragweed_conf['rgw']['host'] = 'localhost' if properties is not None and 'slow_backend' in properties: - ragweed_conf['fixtures']['slow backend'] = properties['slow_backend'] + ragweed_conf['fixtures']['slow backend'] = properties['slow_backend'] conf_fp = StringIO() ragweed_conf.write(conf_fp) @@ -223,8 +223,8 @@ def configure(ctx, config, run_stages): log.info('Configuring boto...') boto_src = os.path.join(os.path.dirname(__file__), 'boto.cfg.template') - for client, properties in config['clients'].iteritems(): - with file(boto_src, 'rb') as f: + for client, properties in config['clients'].items(): + with open(boto_src, 'rb') as f: (remote,) = ctx.cluster.only(client).remotes.keys() conf = f.read().format( idle_timeout=config.get('idle_timeout', 30) diff --git a/qa/tasks/rbd.py b/qa/tasks/rbd.py index 193ab1c3961..ee30e02c435 100644 --- a/qa/tasks/rbd.py +++ b/qa/tasks/rbd.py @@ -16,6 +16,8 @@ from teuthology.task.common_fs_utils import generic_mkfs from teuthology.task.common_fs_utils import generic_mount from teuthology.task.common_fs_utils import default_image_name +import six + #V1 image unsupported but required for testing purposes os.environ["RBD_FORCE_ALLOW_V1"] = "1" @@ -355,7 +357,7 @@ def run_xfstests(ctx, config): except: exc_info = sys.exc_info() if exc_info: - raise exc_info[0], exc_info[1], exc_info[2] + six.reraise(exc_info[0], exc_info[1], exc_info[2]) yield def run_xfstests_one_client(ctx, role, properties): diff --git a/qa/tasks/s3readwrite.py b/qa/tasks/s3readwrite.py index 76c4ebbab14..2dae973e9b2 100644 --- a/qa/tasks/s3readwrite.py +++ b/qa/tasks/s3readwrite.py @@ -78,7 +78,7 @@ def _config_user(s3tests_conf, section, user): s3tests_conf[section].setdefault('user_id', user) s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user)) s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user)) - s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20))) + s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in range(20))) s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40))) @contextlib.contextmanager diff --git a/qa/tasks/s3roundtrip.py b/qa/tasks/s3roundtrip.py index ebaeb4bc6e6..8bf770c8653 100644 --- a/qa/tasks/s3roundtrip.py +++ b/qa/tasks/s3roundtrip.py @@ -77,7 +77,7 @@ def _config_user(s3tests_conf, section, user): s3tests_conf[section].setdefault('user_id', user) s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user)) s3tests_conf[section].setdefault('display_name', 'Mr. 
{user}'.format(user=user)) - s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20))) + s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in range(20))) s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40))) @contextlib.contextmanager diff --git a/qa/tasks/s3tests.py b/qa/tasks/s3tests.py index 7064ce01a85..f6fce64ddfa 100644 --- a/qa/tasks/s3tests.py +++ b/qa/tasks/s3tests.py @@ -78,9 +78,9 @@ def _config_user(s3tests_conf, section, user): s3tests_conf[section].setdefault('user_id', user) s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user)) s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user)) - s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20))) + s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in range(20))) s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40))) - s3tests_conf[section].setdefault('totp_serial', ''.join(random.choice(string.digits) for i in xrange(10))) + s3tests_conf[section].setdefault('totp_serial', ''.join(random.choice(string.digits) for i in range(10))) s3tests_conf[section].setdefault('totp_seed', base64.b32encode(os.urandom(40))) s3tests_conf[section].setdefault('totp_seconds', '5') @@ -183,7 +183,7 @@ def configure(ctx, config): s3tests_conf['DEFAULT']['host'] = 'localhost' if properties is not None and 'slow_backend' in properties: - s3tests_conf['fixtures']['slow backend'] = properties['slow_backend'] + s3tests_conf['fixtures']['slow backend'] = properties['slow_backend'] (remote,) = ctx.cluster.only(client).remotes.keys() remote.run( @@ -204,8 +204,8 @@ def configure(ctx, config): log.info('Configuring boto...') boto_src = os.path.join(os.path.dirname(__file__), 'boto.cfg.template') - for client, properties in config['clients'].iteritems(): - with file(boto_src, 'rb') as f: + for client, properties in config['clients'].items(): + with open(boto_src, 'rb') as f: (remote,) = ctx.cluster.only(client).remotes.keys() conf = f.read().format( idle_timeout=config.get('idle_timeout', 30) diff --git a/qa/tasks/samba.py b/qa/tasks/samba.py index 319c6d5e5bf..1dd62d8624c 100644 --- a/qa/tasks/samba.py +++ b/qa/tasks/samba.py @@ -6,6 +6,8 @@ import logging import sys import time +import six + from teuthology import misc as teuthology from teuthology.orchestra import run from teuthology.orchestra.daemon import DaemonGroup @@ -22,7 +24,7 @@ def get_sambas(ctx, roles): :param roles: roles for this test (extracted from yaml files) """ for role in roles: - assert isinstance(role, basestring) + assert isinstance(role, six.string_types) PREFIX = 'samba.' 
         assert role.startswith(PREFIX)
         id_ = role[len(PREFIX):]
@@ -196,7 +198,7 @@ def task(ctx, config):
             exc_info = sys.exc_info()
             log.exception('Saw exception from %s.%s', d.role, d.id_)
         if exc_info != (None, None, None):
-            raise exc_info[0], exc_info[1], exc_info[2]
+            six.reraise(exc_info[0], exc_info[1], exc_info[2])
 
         for id_, remote in samba_servers:
             remote.run(
diff --git a/qa/tasks/tempest.py b/qa/tasks/tempest.py
index a8f1d82c32a..10a682da24a 100644
--- a/qa/tasks/tempest.py
+++ b/qa/tasks/tempest.py
@@ -133,7 +133,7 @@ def configure_instance(ctx, config):
         to_config(cconfig, params, 'identity', cpar)
         to_config(cconfig, params, 'object-storage', cpar)
         to_config(cconfig, params, 'object-storage-feature-enabled', cpar)
-        cpar.write(file(local_conf, 'w+'))
+        cpar.write(open(local_conf, 'w+'))
 
         remote.put_file(local_conf, tetcdir + '/tempest.conf')
     yield
diff --git a/qa/tasks/vstart_runner.py b/qa/tasks/vstart_runner.py
index bf156446027..79548737146 100644
--- a/qa/tasks/vstart_runner.py
+++ b/qa/tasks/vstart_runner.py
@@ -50,7 +50,7 @@ from teuthology import misc
 from teuthology.orchestra.run import Raw, quote
 from teuthology.orchestra.daemon import DaemonGroup
 from teuthology.config import config as teuth_config
-
+import six
 import logging
 
 log = logging.getLogger(__name__)
@@ -271,7 +271,7 @@ class LocalRemote(object):
         else:
             # Sanity check that we've got a list of strings
             for arg in args:
-                if not isinstance(arg, basestring):
+                if not isinstance(arg, six.string_types):
                     raise RuntimeError("Oops, can't handle arg {0} type {1}".format(
                         arg, arg.__class__
                     ))
@@ -284,7 +284,7 @@ class LocalRemote(object):
                                 env=env)
 
         if stdin:
-            if not isinstance(stdin, basestring):
+            if not isinstance(stdin, six.string_types):
                 raise RuntimeError("Can't handle non-string stdins on a vstart cluster")
 
             # Hack: writing to stdin is not deadlock-safe, but it "always" works
diff --git a/qa/tasks/watch_notify_same_primary.py b/qa/tasks/watch_notify_same_primary.py
index 3cb7eeb704a..7160979119b 100644
--- a/qa/tasks/watch_notify_same_primary.py
+++ b/qa/tasks/watch_notify_same_primary.py
@@ -6,6 +6,8 @@ from io import BytesIO
 import contextlib
 import logging
 
+import six
+
 from teuthology.orchestra import run
 from teuthology.contextutil import safe_while
 
@@ -41,7 +43,7 @@ def task(ctx, config):
     clients = config.get('clients', ['client.0'])
     assert len(clients) == 1
     role = clients[0]
-    assert isinstance(role, basestring)
+    assert isinstance(role, six.string_types)
     PREFIX = 'client.'
     assert role.startswith(PREFIX)
     (remote,) = ctx.cluster.only(role).remotes.keys()
diff --git a/qa/tasks/watch_notify_stress.py b/qa/tasks/watch_notify_stress.py
index 1e10e99b62a..e5e380492e0 100644
--- a/qa/tasks/watch_notify_stress.py
+++ b/qa/tasks/watch_notify_stress.py
@@ -4,6 +4,7 @@ test_stress_watch task
 import contextlib
 import logging
 
+import six
 from teuthology.orchestra import run
 from teuthology.task import proc_thrasher
 
@@ -36,7 +37,7 @@ def task(ctx, config):
     remotes = []
 
     for role in config.get('clients', ['client.0']):
-        assert isinstance(role, basestring)
+        assert isinstance(role, six.string_types)
         PREFIX = 'client.'
         assert role.startswith(PREFIX)
         id_ = role[len(PREFIX):]
diff --git a/qa/tasks/workunit.py b/qa/tasks/workunit.py
index 4372c2549f0..6ef08a43015 100644
--- a/qa/tasks/workunit.py
+++ b/qa/tasks/workunit.py
@@ -6,8 +6,10 @@ import pipes
 import os
 import re
 
-from tasks.util import get_remote_for_role
-from tasks.util.workunit import get_refspec_after_overrides
+import six
+
+from util import get_remote_for_role
+from util.workunit import get_refspec_after_overrides
 
 from teuthology import misc
 from teuthology.config import config as teuth_config
@@ -103,7 +105,7 @@ def task(ctx, config):
     # Create scratch dirs for any non-all workunits
     log.info('Making a separate scratch dir for every client...')
     for role in clients.keys():
-        assert isinstance(role, basestring)
+        assert isinstance(role, six.string_types)
         if role == "all":
             continue
 
@@ -311,7 +313,7 @@ def _run_tests(ctx, refspec, role, tests, env, basedir,
     to False is passed, the 'timeout' command is not used.
     """
    testdir = misc.get_testdir(ctx)
-    assert isinstance(role, basestring)
+    assert isinstance(role, six.string_types)
     cluster, type_, id_ = misc.split_role(role)
     assert type_ == 'client'
     remote = get_remote_for_role(ctx, role)
diff --git a/qa/tox.ini b/qa/tox.ini
index c5826ecb6ec..5088e120315 100644
--- a/qa/tox.ini
+++ b/qa/tox.ini
@@ -1,8 +1,15 @@
 [tox]
-envlist = flake8
+envlist = flake8-py2, flake8-py3
 skipsdist = True
 
-[testenv:flake8]
+[testenv:flake8-py2]
+basepython = python2
+deps=
+  flake8
+commands=flake8 --select=F,E9 --exclude=venv,.tox
+
+[testenv:flake8-py3]
+basepython = python3
 deps=
   flake8
 commands=flake8 --select=F,E9 --exclude=venv,.tox
diff --git a/qa/workunits/mon/caps.py b/qa/workunits/mon/caps.py
index 454bea37da6..1eb0cb658d6 100644
--- a/qa/workunits/mon/caps.py
+++ b/qa/workunits/mon/caps.py
@@ -10,6 +10,7 @@ import os
 import io
 import re
 
+import six
 from ceph_argparse import *  # noqa
 
@@ -20,7 +21,7 @@ class UnexpectedReturn(Exception):
         if isinstance(cmd, list):
             self.cmd = ' '.join(cmd)
         else:
-            assert isinstance(cmd, str) or isinstance(cmd, unicode), \
+            assert isinstance(cmd, str) or isinstance(cmd, six.text_type), \
                 'cmd needs to be either a list or a str'
             self.cmd = cmd
         self.cmd = str(self.cmd)
@@ -35,7 +36,7 @@ class UnexpectedReturn(Exception):
 def call(cmd):
     if isinstance(cmd, list):
         args = cmd
-    elif isinstance(cmd, str) or isinstance(cmd, unicode):
+    elif isinstance(cmd, str) or isinstance(cmd, six.text_type):
         args = shlex.split(cmd)
     else:
         assert False, 'cmd is not a string/unicode nor a list!'
@@ -71,7 +72,7 @@ def expect_to_file(cmd, expected_ret, out_file, mode='a'):
         'expected result doesn\'t match and no exception was thrown!'
 
     with io.open(out_file, mode) as file:
-        file.write(unicode(p.stdout.read()))
+        file.write(six.text_type(p.stdout.read()))
 
     return p
@@ -85,7 +86,7 @@ class Command:
         self.args = []
         for s in j['sig']:
             if not isinstance(s, dict):
-                assert isinstance(s, str) or isinstance(s,unicode), \
+                assert isinstance(s, str) or isinstance(s,six.text_type), \
                     'malformatted signature cid {0}: {1}\n{2}'.format(cid,s,j)
                 if len(self.sig) > 0:
                     self.sig += ' '
diff --git a/qa/workunits/mon/ping.py b/qa/workunits/mon/ping.py
index 1773c736931..f39da885f6f 100755
--- a/qa/workunits/mon/ping.py
+++ b/qa/workunits/mon/ping.py
@@ -3,14 +3,8 @@
 import json
 import shlex
 import subprocess
-import sys
 
-if sys.version_info[0] == 2:
-    string = basestring
-    unicode = unicode
-elif sys.version_info[0] == 3:
-    string = str
-    unicode = str
+import six
 
 
 class UnexpectedReturn(Exception):
@@ -18,7 +12,7 @@ class UnexpectedReturn(Exception):
         if isinstance(cmd, list):
             self.cmd = ' '.join(cmd)
         else:
-            assert isinstance(cmd, string) or isinstance(cmd, unicode), \
+            assert isinstance(cmd, six.string_types) or isinstance(cmd, six.text_type), \
                 'cmd needs to be either a list or a str'
             self.cmd = cmd
         self.cmd = str(self.cmd)
@@ -34,7 +28,7 @@ def call(cmd):
     if isinstance(cmd, list):
         args = cmd
-    elif isinstance(cmd, string) or isinstance(cmd, unicode):
+    elif isinstance(cmd, six.string_types) or isinstance(cmd, six.text_type):
         args = shlex.split(cmd)
     else:
         assert False, 'cmd is not a string/unicode nor a list!'
diff --git a/qa/workunits/rados/test_large_omap_detection.py b/qa/workunits/rados/test_large_omap_detection.py
index 7b09dfd9071..c6cf195d9f3 100755
--- a/qa/workunits/rados/test_large_omap_detection.py
+++ b/qa/workunits/rados/test_large_omap_detection.py
@@ -59,7 +59,7 @@ def init():
     keys = []
     values = []
 
-    for x in xrange(20000):
+    for x in range(20000):
         keys.append(str(x))
         values.append(buffer)
diff --git a/qa/workunits/restart/test-backtraces.py b/qa/workunits/restart/test-backtraces.py
index 07fe8845f4e..86048e6d883 100755
--- a/qa/workunits/restart/test-backtraces.py
+++ b/qa/workunits/restart/test-backtraces.py
@@ -9,7 +9,7 @@ import time
 import sys
 
 if sys.version_info[0] == 2:
-    range = xrange
+    range = xrange  # noqa
 elif sys.version_info[0] == 3:
     range = range