From c8c8c4bfa13e28b48a1c2886f600eed29b63e496 Mon Sep 17 00:00:00 2001 From: Thomas Bechtold Date: Mon, 9 Dec 2019 16:17:23 +0100 Subject: [PATCH] qa: Enable flake8 tox and fix failures There were a couple of problems found by flake8 in the qa/ directory (most of them fixed now). Enabling flake8 during the usual check runs hopefully avoids adding new issues in the future. Signed-off-by: Thomas Bechtold (cherry picked from commit 0127cd1e8817b05b1c3150540b021f9a24b47089) Conflicts: qa/tasks/barbican.py qa/tasks/cephadm.py qa/tasks/cephfs/test_cephfs_shell.py qa/tasks/cephfs/xfstests_dev.py qa/tasks/daemonwatchdog.py qa/tasks/mgr/dashboard/test_cephfs.py qa/tasks/mgr/dashboard/test_orchestrator.py qa/tasks/mgr/dashboard/test_rbd.py qa/tasks/mgr/mgr_test_case.py qa/tasks/mgr/test_orchestrator_cli.py qa/tasks/s3tests.py qa/tasks/s3tests_java.py qa/tasks/vstart_runner.py qa/workunits/mon/caps.py: trivial resolutions, and drop the change to qa/CMakeLists.txt, as we don't have add_tox_test() back in nautilus --- qa/standalone/special/ceph_objectstore_tool.py | 8 ++++---- qa/tasks/cbt.py | 1 - qa/tasks/ceph.py | 12 +++++------- qa/tasks/ceph_fuse.py | 1 - qa/tasks/ceph_manager.py | 7 +++---- qa/tasks/cephfs/filesystem.py | 2 +- qa/tasks/cephfs/fuse_mount.py | 4 +--- qa/tasks/cephfs/test_cephfs_shell.py | 2 -- qa/tasks/cephfs/test_client_recovery.py | 2 +- qa/tasks/cephfs/test_exports.py | 1 - qa/tasks/cephfs/test_failover.py | 8 +++----- qa/tasks/cephfs/test_misc.py | 6 ++---- qa/tasks/cephfs/test_recovery_pool.py | 8 ++------ qa/tasks/cephfs/test_scrub.py | 5 +---- qa/tasks/cephfs/test_scrub_checks.py | 8 ++++---- qa/tasks/cephfs/test_sessionmap.py | 1 - qa/tasks/cephfs/test_snapshots.py | 4 +--- qa/tasks/cephfs/test_strays.py | 5 ++--- qa/tasks/cephfs/test_volume_client.py | 3 +-- qa/tasks/cephfs/test_volumes.py | 2 +- qa/tasks/check_counter.py | 1 - qa/tasks/create_verify_lfn_objects.py | 2 +- qa/tasks/divergent_priors.py | 2 +- qa/tasks/divergent_priors2.py | 2 +- 
qa/tasks/dump_stuck.py | 1 - qa/tasks/exec_on_cleanup.py | 1 - qa/tasks/fs.py | 2 -- qa/tasks/keystone.py | 2 -- qa/tasks/mds_creation_failure.py | 3 ++- qa/tasks/mgr/dashboard/test_ganesha.py | 3 +-- qa/tasks/mgr/mgr_test_case.py | 6 +++--- qa/tasks/mgr/test_orchestrator_cli.py | 1 - qa/tasks/mgr/test_ssh_orchestrator.py | 3 --- qa/tasks/mon_clock_skew_check.py | 3 --- qa/tasks/netem.py | 7 ++----- qa/tasks/osd_max_pg_per_osd.py | 2 -- qa/tasks/radosbench.py | 4 ++-- qa/tasks/radosgw_admin.py | 6 ------ qa/tasks/radosgw_admin_rest.py | 3 +-- qa/tasks/ragweed.py | 2 -- qa/tasks/rbd_fsx.py | 2 +- qa/tasks/rbd_mirror_thrash.py | 2 -- qa/tasks/reg11184.py | 2 +- qa/tasks/rgw.py | 10 +++------- qa/tasks/rgw_multisite.py | 3 +-- qa/tasks/rgw_multisite_tests.py | 1 - qa/tasks/s3a_hadoop.py | 1 - qa/tasks/swift.py | 1 - qa/tasks/tempest.py | 3 +-- qa/tasks/tox.py | 1 - qa/tasks/util/rgw.py | 6 ------ qa/tasks/vstart_runner.py | 3 +-- qa/workunits/fs/multiclient_sync_read_eof.py | 2 -- qa/workunits/mon/caps.py | 7 +++---- qa/workunits/restart/test-backtraces.py | 8 +------- 55 files changed, 58 insertions(+), 140 deletions(-) diff --git a/qa/standalone/special/ceph_objectstore_tool.py b/qa/standalone/special/ceph_objectstore_tool.py index b058c247c6104..496ae417e5303 100755 --- a/qa/standalone/special/ceph_objectstore_tool.py +++ b/qa/standalone/special/ceph_objectstore_tool.py @@ -45,7 +45,7 @@ if sys.version_info[0] >= 3: def decode(s): return s.decode('utf-8') - def check_output(*args, **kwargs): + def check_output(*args, **kwargs): # noqa return decode(subprocess.check_output(*args, **kwargs)) else: def decode(s): @@ -336,7 +336,7 @@ def check_entry_transactions(entry, enum): def check_transaction_ops(ops, enum, tnum): - if len(ops) is 0: + if len(ops) == 0: logging.warning("No ops found in entry {e} trans {t}".format(e=enum, t=tnum)) errors = 0 for onum in range(len(ops)): @@ -375,7 +375,7 @@ def test_dump_journal(CFSD_PREFIX, osds): os.unlink(TMPFILE) journal_errors 
= check_journal(jsondict) - if journal_errors is not 0: + if journal_errors != 0: logging.error(jsondict) ERRORS += journal_errors @@ -519,7 +519,7 @@ def get_osd_weights(CFSD_PREFIX, osd_ids, osd_path): for line in output.strip().split('\n'): print(line) linev = re.split('\s+', line) - if linev[0] is '': + if linev[0] == '': linev.pop(0) print('linev %s' % linev) weights.append(float(linev[2])) diff --git a/qa/tasks/cbt.py b/qa/tasks/cbt.py index e0a1720dd32ba..941694802cf62 100644 --- a/qa/tasks/cbt.py +++ b/qa/tasks/cbt.py @@ -3,7 +3,6 @@ import os import yaml from teuthology import misc -from teuthology.config import config as teuth_config from teuthology.orchestra import run from teuthology.task import Task diff --git a/qa/tasks/ceph.py b/qa/tasks/ceph.py index ab2747175f231..040508255e602 100644 --- a/qa/tasks/ceph.py +++ b/qa/tasks/ceph.py @@ -84,18 +84,18 @@ def ceph_crash(ctx, config): path = os.path.join(ctx.archive, 'remote') try: os.makedirs(path) - except OSError as e: + except OSError: pass for remote in ctx.cluster.remotes.keys(): sub = os.path.join(path, remote.shortname) try: os.makedirs(sub) - except OSError as e: + except OSError: pass try: teuthology.pull_directory(remote, '/var/lib/ceph/crash', os.path.join(sub, 'crash')) - except ReadError as e: + except ReadError: pass @@ -269,13 +269,13 @@ def ceph_log(ctx, config): path = os.path.join(ctx.archive, 'remote') try: os.makedirs(path) - except OSError as e: + except OSError: pass for remote in ctx.cluster.remotes.keys(): sub = os.path.join(path, remote.shortname) try: os.makedirs(sub) - except OSError as e: + except OSError: pass teuthology.pull_directory(remote, '/var/log/ceph', os.path.join(sub, 'log')) @@ -396,8 +396,6 @@ def create_rbd_pool(ctx, config): @contextlib.contextmanager def cephfs_setup(ctx, config): cluster_name = config['cluster'] - testdir = teuthology.get_testdir(ctx) - coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir) first_mon = teuthology.get_first_mon(ctx, 
config, cluster_name) (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys() diff --git a/qa/tasks/ceph_fuse.py b/qa/tasks/ceph_fuse.py index 08254fed3c3d0..03f5a56e4a701 100644 --- a/qa/tasks/ceph_fuse.py +++ b/qa/tasks/ceph_fuse.py @@ -7,7 +7,6 @@ import logging from teuthology import misc as teuthology from cephfs.fuse_mount import FuseMount -from tasks.cephfs.filesystem import Filesystem log = logging.getLogger(__name__) diff --git a/qa/tasks/ceph_manager.py b/qa/tasks/ceph_manager.py index b1f4ab4e3f2b3..72da3c606135a 100644 --- a/qa/tasks/ceph_manager.py +++ b/qa/tasks/ceph_manager.py @@ -656,7 +656,7 @@ class Thrasher: Decrease the size of the pool """ pool = self.ceph_manager.get_pool() - orig_pg_num = self.ceph_manager.get_pool_pg_num(pool) + _ = self.ceph_manager.get_pool_pg_num(pool) self.log("Shrinking pool %s" % (pool,)) if self.ceph_manager.contract_pool( pool, @@ -807,7 +807,6 @@ class Thrasher: Random action selector. """ chance_down = self.config.get('chance_down', 0.4) - chance_test_min_size = self.config.get('chance_test_min_size', 0) chance_test_backfill_full = \ self.config.get('chance_test_backfill_full', 0) if isinstance(chance_down, int): @@ -840,7 +839,7 @@ class Thrasher: actions.append((self.fix_pgp_num, self.config.get('chance_pgpnum_fix', 0),)) actions.append((self.test_pool_min_size, - chance_test_min_size,)) + self.config.get('chance_test_min_size', 0),)) actions.append((self.test_backfill_full, chance_test_backfill_full,)) if self.chance_thrash_cluster_full > 0: @@ -1480,7 +1479,7 @@ class CephManager: while True: proc = self.admin_socket(service_type, service_id, args, check_status=False, stdout=stdout) - if proc.exitstatus is 0: + if proc.exitstatus == 0: return proc else: tries += 1 diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py index 09dfc121af28c..2e1a0d854af2b 100644 --- a/qa/tasks/cephfs/filesystem.py +++ b/qa/tasks/cephfs/filesystem.py @@ -1195,7 +1195,7 @@ class Filesystem(MDSCluster): def 
dirfrag_exists(self, ino, frag): try: self.rados(["stat", "{0:x}.{1:08x}".format(ino, frag)]) - except CommandFailedError as e: + except CommandFailedError: return False else: return True diff --git a/qa/tasks/cephfs/fuse_mount.py b/qa/tasks/cephfs/fuse_mount.py index bbd56b3c57c0c..d0665270aba25 100644 --- a/qa/tasks/cephfs/fuse_mount.py +++ b/qa/tasks/cephfs/fuse_mount.py @@ -1,4 +1,3 @@ - from StringIO import StringIO import json import time @@ -10,7 +9,6 @@ from teuthology.contextutil import MaxWhileTries from teuthology.orchestra import run from teuthology.orchestra.run import CommandFailedError from .mount import CephFSMount -from tasks.cephfs.filesystem import Filesystem log = logging.getLogger(__name__) @@ -170,7 +168,7 @@ class FuseMount(CephFSMount): try: self.inst = status['inst_str'] self.addr = status['addr_str'] - except KeyError as e: + except KeyError: sessions = self.fs.rank_asok(['session', 'ls']) for s in sessions: if s['id'] == self.id: diff --git a/qa/tasks/cephfs/test_cephfs_shell.py b/qa/tasks/cephfs/test_cephfs_shell.py index 31f16b44a2570..8cf8474e2699d 100644 --- a/qa/tasks/cephfs/test_cephfs_shell.py +++ b/qa/tasks/cephfs/test_cephfs_shell.py @@ -3,8 +3,6 @@ import crypt import logging from StringIO import StringIO from tasks.cephfs.cephfs_test_case import CephFSTestCase -from tasks.cephfs.fuse_mount import FuseMount -from teuthology.exceptions import CommandFailedError log = logging.getLogger(__name__) diff --git a/qa/tasks/cephfs/test_client_recovery.py b/qa/tasks/cephfs/test_client_recovery.py index 91ef54415fc9b..73bd815c4f70a 100644 --- a/qa/tasks/cephfs/test_client_recovery.py +++ b/qa/tasks/cephfs/test_client_recovery.py @@ -593,7 +593,7 @@ class TestClientRecovery(CephFSTestCase): SESSION_AUTOCLOSE = 50 time_at_beg = time.time() mount_a_gid = self.mount_a.get_global_id() - mount_a_pid = self.mount_a.client_pid + _ = self.mount_a.client_pid self.fs.set_var('session_timeout', SESSION_TIMEOUT) self.fs.set_var('session_autoclose', 
SESSION_AUTOCLOSE) self.assert_session_count(2, self.fs.mds_asok(['session', 'ls'])) diff --git a/qa/tasks/cephfs/test_exports.py b/qa/tasks/cephfs/test_exports.py index 3ffdb553cf03d..7d2a3425a894b 100644 --- a/qa/tasks/cephfs/test_exports.py +++ b/qa/tasks/cephfs/test_exports.py @@ -150,7 +150,6 @@ class TestExports(CephFSTestCase): status = self.fs.wait_for_daemons() rank1 = self.fs.get_rank(rank=1, status=status) - name1 = 'mds.'+rank1['name'] # Create a directory that is pre-exported to rank 1 self.mount_a.run_shell(["mkdir", "-p", "a/aa"]) diff --git a/qa/tasks/cephfs/test_failover.py b/qa/tasks/cephfs/test_failover.py index 07702435afd2c..07431213a2cb2 100644 --- a/qa/tasks/cephfs/test_failover.py +++ b/qa/tasks/cephfs/test_failover.py @@ -1,13 +1,11 @@ import time import signal -import json import logging from unittest import case, SkipTest from random import randint from cephfs_test_case import CephFSTestCase from teuthology.exceptions import CommandFailedError -from teuthology import misc as teuthology from tasks.cephfs.fuse_mount import FuseMount log = logging.getLogger(__name__) @@ -25,7 +23,7 @@ class TestClusterResize(CephFSTestCase): log.info("status = {0}".format(status)) original_ranks = set([info['gid'] for info in status.get_ranks(fscid)]) - original_standbys = set([info['gid'] for info in status.get_standbys()]) + _ = set([info['gid'] for info in status.get_standbys()]) oldmax = self.fs.get_var('max_mds') self.assertTrue(n > oldmax) @@ -45,7 +43,7 @@ class TestClusterResize(CephFSTestCase): log.info("status = {0}".format(status)) original_ranks = set([info['gid'] for info in status.get_ranks(fscid)]) - original_standbys = set([info['gid'] for info in status.get_standbys()]) + _ = set([info['gid'] for info in status.get_standbys()]) oldmax = self.fs.get_var('max_mds') self.assertTrue(n < oldmax) @@ -361,7 +359,7 @@ class TestStandbyReplay(CephFSTestCase): def _confirm_no_replay(self): status = self.fs.status() - standby_count = 
len(list(status.get_standbys())) + _ = len(list(status.get_standbys())) self.assertEqual(0, len(list(self.fs.get_replays(status=status)))) return status diff --git a/qa/tasks/cephfs/test_misc.py b/qa/tasks/cephfs/test_misc.py index 6c4eab82157bf..887290dbb1ff9 100644 --- a/qa/tasks/cephfs/test_misc.py +++ b/qa/tasks/cephfs/test_misc.py @@ -7,7 +7,6 @@ import errno import time import json import logging -import time log = logging.getLogger(__name__) @@ -78,7 +77,7 @@ class TestMisc(CephFSTestCase): def get_pool_df(fs, name): try: return fs.get_pool_df(name)['objects'] > 0 - except RuntimeError as e: + except RuntimeError: return False self.wait_until_true(lambda: get_pool_df(self.fs, self.fs.metadata_pool_name), timeout=30) @@ -172,8 +171,7 @@ class TestMisc(CephFSTestCase): out = self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'get', pool_name, 'size', '-f', 'json-pretty') - j = json.loads(out) - pool_size = int(j['size']) + _ = json.loads(out) proc = self.mount_a.run_shell(['df', '.']) output = proc.stdout.getvalue() diff --git a/qa/tasks/cephfs/test_recovery_pool.py b/qa/tasks/cephfs/test_recovery_pool.py index 1684d170c8e31..36b4e58ec8c17 100644 --- a/qa/tasks/cephfs/test_recovery_pool.py +++ b/qa/tasks/cephfs/test_recovery_pool.py @@ -1,17 +1,13 @@ - """ Test our tools for recovering metadata from the data pool into an alternate pool """ -import json import logging -import os -from textwrap import dedent import traceback -from collections import namedtuple, defaultdict +from collections import namedtuple from teuthology.orchestra.run import CommandFailedError -from tasks.cephfs.cephfs_test_case import CephFSTestCase, for_teuthology +from tasks.cephfs.cephfs_test_case import CephFSTestCase log = logging.getLogger(__name__) diff --git a/qa/tasks/cephfs/test_scrub.py b/qa/tasks/cephfs/test_scrub.py index d96f5691ba21e..e4f0cb9beb32b 100644 --- a/qa/tasks/cephfs/test_scrub.py +++ b/qa/tasks/cephfs/test_scrub.py @@ -2,12 +2,9 @@ Test CephFS scrub (distinct from 
OSD scrub) functionality """ import logging -import os -import traceback from collections import namedtuple -from teuthology.orchestra.run import CommandFailedError -from tasks.cephfs.cephfs_test_case import CephFSTestCase, for_teuthology +from tasks.cephfs.cephfs_test_case import CephFSTestCase log = logging.getLogger(__name__) diff --git a/qa/tasks/cephfs/test_scrub_checks.py b/qa/tasks/cephfs/test_scrub_checks.py index 87b759e5d7a2a..80c69e3c29c21 100644 --- a/qa/tasks/cephfs/test_scrub_checks.py +++ b/qa/tasks/cephfs/test_scrub_checks.py @@ -45,7 +45,7 @@ class TestScrubControls(CephFSTestCase): log.info("client_path: {0}".format(client_path)) log.info("Cloning repo into place") - repo_path = TestScrubChecks.clone_repo(self.mount_a, client_path) + TestScrubChecks.clone_repo(self.mount_a, client_path) out_json = self.fs.rank_tell(["scrub", "start", abs_test_path, "recursive"]) self.assertNotEqual(out_json, None) @@ -68,7 +68,7 @@ class TestScrubControls(CephFSTestCase): log.info("client_path: {0}".format(client_path)) log.info("Cloning repo into place") - repo_path = TestScrubChecks.clone_repo(self.mount_a, client_path) + _ = TestScrubChecks.clone_repo(self.mount_a, client_path) out_json = self.fs.rank_tell(["scrub", "start", abs_test_path, "recursive"]) self.assertNotEqual(out_json, None) @@ -96,7 +96,7 @@ class TestScrubControls(CephFSTestCase): log.info("client_path: {0}".format(client_path)) log.info("Cloning repo into place") - repo_path = TestScrubChecks.clone_repo(self.mount_a, client_path) + _ = TestScrubChecks.clone_repo(self.mount_a, client_path) out_json = self.fs.rank_tell(["scrub", "start", abs_test_path, "recursive"]) self.assertNotEqual(out_json, None) @@ -337,7 +337,7 @@ class TestScrubChecks(CephFSTestCase): success, errstring = validator(jout, 0) if not success: - raise AsokCommandFailedError(command, rout, jout, errstring) + raise AsokCommandFailedError(command, 0, jout, errstring) return jout def asok_command(self, mds_rank, command, 
validator): diff --git a/qa/tasks/cephfs/test_sessionmap.py b/qa/tasks/cephfs/test_sessionmap.py index 1a771461431cb..c16851719c4dc 100644 --- a/qa/tasks/cephfs/test_sessionmap.py +++ b/qa/tasks/cephfs/test_sessionmap.py @@ -1,4 +1,3 @@ -from StringIO import StringIO import time import json import logging diff --git a/qa/tasks/cephfs/test_snapshots.py b/qa/tasks/cephfs/test_snapshots.py index 7f81a76b992de..f627c4932a75d 100644 --- a/qa/tasks/cephfs/test_snapshots.py +++ b/qa/tasks/cephfs/test_snapshots.py @@ -1,8 +1,6 @@ import sys import logging import signal -import time -import errno from textwrap import dedent from tasks.cephfs.fuse_mount import FuseMount from tasks.cephfs.cephfs_test_case import CephFSTestCase @@ -528,7 +526,7 @@ class TestSnapshots(CephFSTestCase): self.fs.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit)]) try: self.create_snap_dir(sname) - except CommandFailedError as e: + except CommandFailedError: # after reducing limit we expect the new snapshot creation to fail pass self.delete_dir_and_snaps("accounts", new_limit + 1) diff --git a/qa/tasks/cephfs/test_strays.py b/qa/tasks/cephfs/test_strays.py index 00033eb06c10a..d7be983c570be 100644 --- a/qa/tasks/cephfs/test_strays.py +++ b/qa/tasks/cephfs/test_strays.py @@ -4,7 +4,6 @@ import logging from textwrap import dedent import datetime import gevent -import datetime from teuthology.orchestra.run import CommandFailedError, Raw from tasks.cephfs.cephfs_test_case import CephFSTestCase, for_teuthology @@ -137,7 +136,7 @@ class TestStrays(CephFSTestCase): size_unit = 1024 # small, numerous files file_multiplier = 200 else: - raise NotImplemented(throttle_type) + raise NotImplementedError(throttle_type) # Pick up config changes self.fs.mds_fail_restart() @@ -222,7 +221,7 @@ class TestStrays(CephFSTestCase): num_strays_purging, mds_max_purge_files )) else: - raise NotImplemented(throttle_type) + raise NotImplementedError(throttle_type) log.info("Waiting for purge to complete 
{0}/{1}, {2}/{3}".format( num_strays_purging, num_strays, diff --git a/qa/tasks/cephfs/test_volume_client.py b/qa/tasks/cephfs/test_volume_client.py index fcf3085444248..8687e910be1dd 100644 --- a/qa/tasks/cephfs/test_volume_client.py +++ b/qa/tasks/cephfs/test_volume_client.py @@ -1,6 +1,5 @@ import json import logging -import time import os from textwrap import dedent from tasks.cephfs.cephfs_test_case import CephFSTestCase @@ -1111,7 +1110,7 @@ vc.disconnect() volume_prefix = "/myprefix" group_id = "grpid" volume_id = "volid" - mount_path = self._volume_client_python(vc_mount, dedent(""" + self._volume_client_python(vc_mount, dedent(""" vp = VolumePath("{group_id}", "{volume_id}") create_result = vc.create_volume(vp, 1024*1024*10, namespace_isolated=False) print(create_result['mount_path']) diff --git a/qa/tasks/cephfs/test_volumes.py b/qa/tasks/cephfs/test_volumes.py index 7aa2182f07255..2b94583b7b1f5 100644 --- a/qa/tasks/cephfs/test_volumes.py +++ b/qa/tasks/cephfs/test_volumes.py @@ -459,7 +459,7 @@ class TestVolumes(CephFSTestCase): nsize = usedsize/2 try: self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize)) - except CommandFailedError as ce: + except CommandFailedError: raise RuntimeError("expected the 'fs subvolume resize' command to succeed") # verify the quota diff --git a/qa/tasks/check_counter.py b/qa/tasks/check_counter.py index b15dc6fe21cb6..fc877f285b6c8 100644 --- a/qa/tasks/check_counter.py +++ b/qa/tasks/check_counter.py @@ -4,7 +4,6 @@ import json from teuthology.task import Task from teuthology import misc -import ceph_manager log = logging.getLogger(__name__) diff --git a/qa/tasks/create_verify_lfn_objects.py b/qa/tasks/create_verify_lfn_objects.py index 01ab1a370b73e..5325415812879 100644 --- a/qa/tasks/create_verify_lfn_objects.py +++ b/qa/tasks/create_verify_lfn_objects.py @@ -35,7 +35,7 @@ def task(ctx, config): for ns in namespace: def object_name(i): nslength = 0 - if namespace is not '': + if namespace != '': 
nslength = len(namespace) numstr = str(i) fillerlen = l - nslength - len(prefix) - len(numstr) diff --git a/qa/tasks/divergent_priors.py b/qa/tasks/divergent_priors.py index 7a4d1327020a8..b565c774c441b 100644 --- a/qa/tasks/divergent_priors.py +++ b/qa/tasks/divergent_priors.py @@ -155,6 +155,6 @@ def task(ctx, config): for i in range(DIVERGENT_WRITE + DIVERGENT_REMOVE): exit_status = rados(ctx, mon, ['-p', 'foo', 'get', 'existing_%d' % i, '/tmp/existing']) - assert exit_status is 0 + assert exit_status == 0 log.info("success") diff --git a/qa/tasks/divergent_priors2.py b/qa/tasks/divergent_priors2.py index 49bc8e469252d..dda358b045fd3 100644 --- a/qa/tasks/divergent_priors2.py +++ b/qa/tasks/divergent_priors2.py @@ -185,7 +185,7 @@ def task(ctx, config): for i in range(DIVERGENT_WRITE + DIVERGENT_REMOVE): exit_status = rados(ctx, mon, ['-p', 'foo', 'get', 'existing_%d' % i, '/tmp/existing']) - assert exit_status is 0 + assert exit_status == 0 cmd = 'rm {file}'.format(file=expfile) exp_remote.run(args=cmd, wait=True) diff --git a/qa/tasks/dump_stuck.py b/qa/tasks/dump_stuck.py index 76a5317f7c608..e503035f87772 100644 --- a/qa/tasks/dump_stuck.py +++ b/qa/tasks/dump_stuck.py @@ -2,7 +2,6 @@ Dump_stuck command """ import logging -import re import time import ceph_manager diff --git a/qa/tasks/exec_on_cleanup.py b/qa/tasks/exec_on_cleanup.py index 6e40e4044b036..6431197e10664 100644 --- a/qa/tasks/exec_on_cleanup.py +++ b/qa/tasks/exec_on_cleanup.py @@ -5,7 +5,6 @@ import logging import contextlib from teuthology import misc as teuthology -from teuthology import contextutil log = logging.getLogger(__name__) diff --git a/qa/tasks/fs.py b/qa/tasks/fs.py index 4286318527e79..4b47e754bfa24 100644 --- a/qa/tasks/fs.py +++ b/qa/tasks/fs.py @@ -2,10 +2,8 @@ CephFS sub-tasks. 
""" -import contextlib import logging import re -import time from tasks.cephfs.filesystem import Filesystem diff --git a/qa/tasks/keystone.py b/qa/tasks/keystone.py index c61320096bc1a..673547aa5b914 100644 --- a/qa/tasks/keystone.py +++ b/qa/tasks/keystone.py @@ -9,7 +9,6 @@ from cStringIO import StringIO from teuthology import misc as teuthology from teuthology import contextutil from teuthology.orchestra import run -from teuthology.orchestra.connection import split_user from teuthology.packaging import install_package from teuthology.packaging import remove_package @@ -193,7 +192,6 @@ def run_keystone(ctx, config): # start the public endpoint client_public_with_id = 'keystone.public' + '.' + client_id - client_public_with_cluster = cluster_name + '.' + client_public_with_id public_host, public_port = ctx.keystone.public_endpoints[client] run_cmd = get_keystone_venved_cmd(ctx, 'keystone-wsgi-public', diff --git a/qa/tasks/mds_creation_failure.py b/qa/tasks/mds_creation_failure.py index 2647eba761c87..29e2c35134619 100644 --- a/qa/tasks/mds_creation_failure.py +++ b/qa/tasks/mds_creation_failure.py @@ -1,4 +1,5 @@ - +# FIXME: this file has many undefined vars which are accessed! 
+# flake8: noqa import logging import contextlib import time diff --git a/qa/tasks/mgr/dashboard/test_ganesha.py b/qa/tasks/mgr/dashboard/test_ganesha.py index 6b89ca508fbf8..cd869a00e405b 100644 --- a/qa/tasks/mgr/dashboard/test_ganesha.py +++ b/qa/tasks/mgr/dashboard/test_ganesha.py @@ -3,9 +3,8 @@ from __future__ import absolute_import -import time -from .helper import DashboardTestCase, JObj, JLeaf, JList +from .helper import DashboardTestCase class GaneshaTest(DashboardTestCase): diff --git a/qa/tasks/mgr/mgr_test_case.py b/qa/tasks/mgr/mgr_test_case.py index 7684a95565af7..44d25a7af80fa 100644 --- a/qa/tasks/mgr/mgr_test_case.py +++ b/qa/tasks/mgr/mgr_test_case.py @@ -101,9 +101,9 @@ class MgrTestCase(CephTestCase): assert cls.mgr_cluster is not None if len(cls.mgr_cluster.mgr_ids) < cls.MGRS_REQUIRED: - raise case.SkipTest("Only have {0} manager daemons, " - "{1} are required".format( - len(cls.mgr_cluster.mgr_ids), cls.MGRS_REQUIRED)) + cls.skipTest( + "Only have {0} manager daemons, {1} are required".format( + len(cls.mgr_cluster.mgr_ids), cls.MGRS_REQUIRED)) cls.setup_mgrs() diff --git a/qa/tasks/mgr/test_orchestrator_cli.py b/qa/tasks/mgr/test_orchestrator_cli.py index 86f72678926b1..1516d44cb4537 100644 --- a/qa/tasks/mgr/test_orchestrator_cli.py +++ b/qa/tasks/mgr/test_orchestrator_cli.py @@ -1,7 +1,6 @@ import errno import json import logging -from tempfile import NamedTemporaryFile from teuthology.exceptions import CommandFailedError diff --git a/qa/tasks/mgr/test_ssh_orchestrator.py b/qa/tasks/mgr/test_ssh_orchestrator.py index 76a31dd332c6a..6fce66365c439 100644 --- a/qa/tasks/mgr/test_ssh_orchestrator.py +++ b/qa/tasks/mgr/test_ssh_orchestrator.py @@ -1,7 +1,4 @@ -import json import logging -from tempfile import NamedTemporaryFile -from teuthology.exceptions import CommandFailedError from mgr_test_case import MgrTestCase log = logging.getLogger(__name__) diff --git a/qa/tasks/mon_clock_skew_check.py b/qa/tasks/mon_clock_skew_check.py index 
5c4088c7369cd..f7862cb1354d7 100644 --- a/qa/tasks/mon_clock_skew_check.py +++ b/qa/tasks/mon_clock_skew_check.py @@ -2,11 +2,8 @@ Handle clock skews in monitors. """ import logging -import contextlib import ceph_manager import time -import gevent -from StringIO import StringIO from teuthology import misc as teuthology log = logging.getLogger(__name__) diff --git a/qa/tasks/netem.py b/qa/tasks/netem.py index 95018150da93b..4fa08bbc0a0b8 100644 --- a/qa/tasks/netem.py +++ b/qa/tasks/netem.py @@ -6,10 +6,7 @@ Reference:https://wiki.linuxfoundation.org/networking/netem. import logging import contextlib -from teuthology import misc as teuthology from cStringIO import StringIO -from teuthology.orchestra import run -from teuthology import contextutil from paramiko import SSHException import socket import time @@ -168,7 +165,7 @@ class Toggle: try: self.packet_drop() log.info('link down') - except SSHException as e: + except SSHException: log.debug('Failed to run command') self.stop_event.wait(timeout=self.interval) @@ -176,7 +173,7 @@ class Toggle: try: delete_dev(self.remote, self.interface) log.info('link up') - except SSHException as e: + except SSHException: log.debug('Failed to run command') def begin(self, gname): diff --git a/qa/tasks/osd_max_pg_per_osd.py b/qa/tasks/osd_max_pg_per_osd.py index 03ea218f5690a..739959e2fbdcb 100644 --- a/qa/tasks/osd_max_pg_per_osd.py +++ b/qa/tasks/osd_max_pg_per_osd.py @@ -76,7 +76,6 @@ def test_create_from_peer(ctx, config): 4. delete a pool, verify pgs go active. 
""" pg_num = config.get('pg_num', 1) - pool_size = config.get('pool_size', 2) from_primary = config.get('from_primary', True) manager = ctx.managers['ceph'] @@ -121,7 +120,6 @@ def test_create_from_peer(ctx, config): def task(ctx, config): assert isinstance(config, dict), \ 'osd_max_pg_per_osd task only accepts a dict for config' - manager = ctx.managers['ceph'] if config.get('test_create_from_mon', True): test_create_from_mon(ctx, config) else: diff --git a/qa/tasks/radosbench.py b/qa/tasks/radosbench.py index c4f1ab62d85d2..6de1bbacf5933 100644 --- a/qa/tasks/radosbench.py +++ b/qa/tasks/radosbench.py @@ -79,7 +79,7 @@ def task(ctx, config): concurrency = config.get('concurrency', 16) osize = config.get('objectsize', 65536) - if osize is 0: + if osize == 0: objectsize = [] else: objectsize = ['-O', str(osize)] @@ -134,5 +134,5 @@ def task(ctx, config): log.info('joining radosbench (timing out after %ss)', timeout) run.wait(radosbench.itervalues(), timeout=timeout) - if pool is not 'data' and create_pool: + if pool != 'data' and create_pool: manager.remove_pool(pool) diff --git a/qa/tasks/radosgw_admin.py b/qa/tasks/radosgw_admin.py index 81906bea0a2b7..a89fcaa9db712 100644 --- a/qa/tasks/radosgw_admin.py +++ b/qa/tasks/radosgw_admin.py @@ -10,7 +10,6 @@ Rgw admin testing against a running instance # python qa/tasks/radosgw_admin.py [USER] HOSTNAME # -import copy import json import logging import time @@ -24,11 +23,9 @@ from cStringIO import StringIO import boto.exception import boto.s3.connection import boto.s3.acl -from boto.utils import RequestHook import httplib2 -import util.rgw as rgw_utils from util.rgw import rgwadmin, get_user_summary, get_user_successful_ops @@ -288,7 +285,6 @@ def task(ctx, config): display_name1='Foo' display_name2='Fud' email='foo@foo.com' - email2='bar@bar.com' access_key='9te6NH5mcdcq0Tc5i8i1' secret_key='Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu' access_key2='p5YnriCv1nAtykxBrupQ' @@ -920,8 +916,6 @@ def task(ctx, config): # 
TESTCASE 'zonegroup-info', 'zonegroup', 'get', 'get zonegroup info', 'succeeds' (err, out) = rgwadmin(ctx, client, ['zonegroup', 'get'], check_status=True) -import sys -from tasks.radosgw_admin import task from teuthology.config import config from teuthology.orchestra import cluster, remote import argparse; diff --git a/qa/tasks/radosgw_admin_rest.py b/qa/tasks/radosgw_admin_rest.py index 12d3ac046cfc8..1edf05fd3736a 100644 --- a/qa/tasks/radosgw_admin_rest.py +++ b/qa/tasks/radosgw_admin_rest.py @@ -7,9 +7,8 @@ To extract the inventory (in csv format) use the command: grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //' """ -from cStringIO import StringIO import logging -import json + import boto.exception import boto.s3.connection diff --git a/qa/tasks/ragweed.py b/qa/tasks/ragweed.py index 5b6c7939e31cc..bb72267e5e4ca 100644 --- a/qa/tasks/ragweed.py +++ b/qa/tasks/ragweed.py @@ -10,8 +10,6 @@ import os import random import string -import util.rgw as rgw_utils - from teuthology import misc as teuthology from teuthology import contextutil from teuthology.config import config as teuth_config diff --git a/qa/tasks/rbd_fsx.py b/qa/tasks/rbd_fsx.py index 12e50d98b05b3..396d8fed2a21a 100644 --- a/qa/tasks/rbd_fsx.py +++ b/qa/tasks/rbd_fsx.py @@ -4,7 +4,7 @@ Run fsx on an rbd image import contextlib import logging -from teuthology.orchestra import run +from teuthology.exceptions import ConfigError from teuthology.parallel import parallel from teuthology import misc as teuthology diff --git a/qa/tasks/rbd_mirror_thrash.py b/qa/tasks/rbd_mirror_thrash.py index 081b353d92bdc..67e1c332c648b 100644 --- a/qa/tasks/rbd_mirror_thrash.py +++ b/qa/tasks/rbd_mirror_thrash.py @@ -13,9 +13,7 @@ from gevent import sleep from gevent.greenlet import Greenlet from gevent.event import Event -from teuthology import misc from teuthology.exceptions import CommandFailedError -from teuthology.task import Task from teuthology.orchestra import run log = logging.getLogger(__name__) diff --git 
a/qa/tasks/reg11184.py b/qa/tasks/reg11184.py index 73fedb966ec48..1059fda712539 100644 --- a/qa/tasks/reg11184.py +++ b/qa/tasks/reg11184.py @@ -233,7 +233,7 @@ def task(ctx, config): for i in range(DIVERGENT_WRITE + DIVERGENT_REMOVE): exit_status = rados(ctx, mon, ['-p', 'foo', 'get', 'existing_%d' % i, '/tmp/existing']) - assert exit_status is 0 + assert exit_status == 0 (remote,) = ctx.\ cluster.only('osd.{o}'.format(o=divergent)).remotes.keys() diff --git a/qa/tasks/rgw.py b/qa/tasks/rgw.py index 4502e5e6994a2..44478fa209b80 100644 --- a/qa/tasks/rgw.py +++ b/qa/tasks/rgw.py @@ -3,11 +3,7 @@ rgw routines """ import argparse import contextlib -import json import logging -import os -import errno -import util.rgw as rgw_utils from teuthology.orchestra import run from teuthology import misc as teuthology @@ -15,9 +11,9 @@ from teuthology import contextutil from teuthology.exceptions import ConfigError from util import get_remote_for_role from util.rgw import rgwadmin, wait_for_radosgw -from util.rados import (rados, create_ec_pool, - create_replicated_pool, - create_cache_pool) +from util.rados import (create_ec_pool, + create_replicated_pool, + create_cache_pool) log = logging.getLogger(__name__) diff --git a/qa/tasks/rgw_multisite.py b/qa/tasks/rgw_multisite.py index 9dea39312dee3..08d52891cdabf 100644 --- a/qa/tasks/rgw_multisite.py +++ b/qa/tasks/rgw_multisite.py @@ -2,7 +2,6 @@ rgw multisite configuration routines """ import argparse -import contextlib import logging import random import string @@ -223,7 +222,7 @@ class Gateway(multisite.Gateway): # insert zone args before the first | pipe = args.index(run.Raw('|')) args = args[0:pipe] + zone.zone_args() + args[pipe:] - except ValueError, e: + except ValueError: args += zone.zone_args() self.daemon.command_kwargs['args'] = args diff --git a/qa/tasks/rgw_multisite_tests.py b/qa/tasks/rgw_multisite_tests.py index dade6e47483e1..dee6bfaa30396 100644 --- a/qa/tasks/rgw_multisite_tests.py +++ 
b/qa/tasks/rgw_multisite_tests.py @@ -2,7 +2,6 @@ rgw multisite testing """ import logging -import sys import nose.core import nose.config diff --git a/qa/tasks/s3a_hadoop.py b/qa/tasks/s3a_hadoop.py index 143abc34c1f60..b0c4ede60028b 100644 --- a/qa/tasks/s3a_hadoop.py +++ b/qa/tasks/s3a_hadoop.py @@ -1,6 +1,5 @@ import contextlib import logging -import time from teuthology import misc from teuthology.orchestra import run diff --git a/qa/tasks/swift.py b/qa/tasks/swift.py index 96d586d6a65d3..bfd032c7572d0 100644 --- a/qa/tasks/swift.py +++ b/qa/tasks/swift.py @@ -13,7 +13,6 @@ from teuthology import misc as teuthology from teuthology import contextutil from teuthology.config import config as teuth_config from teuthology.orchestra import run -from teuthology.orchestra.connection import split_user log = logging.getLogger(__name__) diff --git a/qa/tasks/tempest.py b/qa/tasks/tempest.py index 6d4a38ad0baec..a8f1d82c32ad1 100644 --- a/qa/tasks/tempest.py +++ b/qa/tasks/tempest.py @@ -6,7 +6,7 @@ import logging from teuthology import misc as teuthology from teuthology import contextutil -from teuthology.config import config as teuth_config +from teuthology.exceptions import ConfigError from teuthology.orchestra import run log = logging.getLogger(__name__) @@ -238,7 +238,6 @@ def task(ctx, config): config = all_clients if isinstance(config, list): config = dict.fromkeys(config) - clients = config.keys() overrides = ctx.config.get('overrides', {}) # merge each client section, not the top level. 
diff --git a/qa/tasks/tox.py b/qa/tasks/tox.py index 46b4f565dc063..81d712f44b193 100644 --- a/qa/tasks/tox.py +++ b/qa/tasks/tox.py @@ -3,7 +3,6 @@ import contextlib import logging from teuthology import misc as teuthology -from teuthology import contextutil from teuthology.orchestra import run log = logging.getLogger(__name__) diff --git a/qa/tasks/util/rgw.py b/qa/tasks/util/rgw.py index d3abf1ced3b97..d1ea39d1cd05c 100644 --- a/qa/tasks/util/rgw.py +++ b/qa/tasks/util/rgw.py @@ -1,14 +1,8 @@ from cStringIO import StringIO import logging import json -import requests import time -from requests.packages.urllib3 import PoolManager -from requests.packages.urllib3.util import Retry -from urlparse import urlparse - -from teuthology.orchestra.connection import split_user from teuthology import misc as teuthology log = logging.getLogger(__name__) diff --git a/qa/tasks/vstart_runner.py b/qa/tasks/vstart_runner.py index d2be17ee831c1..70780dea451e8 100644 --- a/qa/tasks/vstart_runner.py +++ b/qa/tasks/vstart_runner.py @@ -41,7 +41,6 @@ import shutil import re import os import time -import json import sys import errno from unittest import suite, loader @@ -833,7 +832,7 @@ def scan_tests(modules): max_required_mgr = 0 require_memstore = False - for suite, case in enumerate_methods(overall_suite): + for suite_, case in enumerate_methods(overall_suite): max_required_mds = max(max_required_mds, getattr(case, "MDSS_REQUIRED", 0)) max_required_clients = max(max_required_clients, diff --git a/qa/workunits/fs/multiclient_sync_read_eof.py b/qa/workunits/fs/multiclient_sync_read_eof.py index d3e0f8e652e96..1d5bb6506217a 100755 --- a/qa/workunits/fs/multiclient_sync_read_eof.py +++ b/qa/workunits/fs/multiclient_sync_read_eof.py @@ -2,8 +2,6 @@ import argparse import os -import sys -import time def main(): parser = argparse.ArgumentParser() diff --git a/qa/workunits/mon/caps.py b/qa/workunits/mon/caps.py index cca170ac4edce..e83bcad9e7e60 100644 --- a/qa/workunits/mon/caps.py +++ 
b/qa/workunits/mon/caps.py @@ -1,9 +1,9 @@ #!/usr/bin/python -import json +from __future__ import print_function + import subprocess import shlex -from StringIO import StringIO import errno import sys import os @@ -11,8 +11,7 @@ import io import re -import rados -from ceph_argparse import * +from ceph_argparse import * # noqa keyring_base = '/tmp/cephtest-caps.keyring' diff --git a/qa/workunits/restart/test-backtraces.py b/qa/workunits/restart/test-backtraces.py index 2fa67a23f3852..07fe8845f4e63 100755 --- a/qa/workunits/restart/test-backtraces.py +++ b/qa/workunits/restart/test-backtraces.py @@ -9,13 +9,9 @@ import time import sys if sys.version_info[0] == 2: - from cStringIO import StringIO - range = xrange elif sys.version_info[0] == 3: - from io import StringIO - range = range import rados as rados @@ -47,8 +43,6 @@ def set_mds_config_param(ceph, param): if r != 0: raise Exception -import ConfigParser -import contextlib class _TrimIndentFile(object): def __init__(self, fp): @@ -150,10 +144,10 @@ def verify(rados_ioctx, ino, values, pool): bt = decode(binbt) + ind = 0 if bt['ino'] != ino: raise VerifyFailure('inode mismatch: {bi} != {ino}\n\tbacktrace:\n\t\t{bt}\n\tfailed verify against:\n\t\t{i}, {v}'.format( bi=bt['ancestors'][ind]['dname'], ino=ino, bt=bt, i=ino, v=values)) - ind = 0 for (n, i) in values: if bt['ancestors'][ind]['dirino'] != i: raise VerifyFailure('ancestor dirino mismatch: {b} != {ind}\n\tbacktrace:\n\t\t{bt}\n\tfailed verify against:\n\t\t{i}, {v}'.format( -- 2.39.5