qa: Enable flake8 tox and fix failures
author    Thomas Bechtold <tbechtold@suse.com>
          Mon, 9 Dec 2019 15:17:23 +0000 (16:17 +0100)
committer Kefu Chai <kchai@redhat.com>
          Tue, 2 Jun 2020 02:32:22 +0000 (10:32 +0800)
flake8 found a number of problems in the qa/ directory (most of them
are fixed now). Enabling flake8 during the usual check runs hopefully
avoids introducing new issues in the future.
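
For reference, a minimal sketch of the kind of tox environment this change
enables (the env name, interpreter, and selected checks below are assumptions
for illustration, not taken from this diff):

    # hypothetical qa/tox.ini flake8 environment; exact settings may differ
    [tox]
    envlist = flake8
    skipsdist = True

    [testenv:flake8]
    basepython = python2
    deps = flake8
    commands = flake8 --select=F,E9 --exclude=venv,.tox

Most of the fixes below fall into a handful of flake8 categories: identity
comparisons against literals (F632: "len(ops) is 0" is only true by accident
of CPython's small-integer caching, so it becomes "len(ops) == 0"), unused
imports and local variables (F401/F841, silenced by dropping the import or
assigning to "_"), and raising NotImplemented where NotImplementedError was
meant (F901).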

Signed-off-by: Thomas Bechtold <tbechtold@suse.com>
(cherry picked from commit 0127cd1e8817b05b1c3150540b021f9a24b47089)

Conflicts:
qa/tasks/barbican.py
qa/tasks/cephadm.py
qa/tasks/cephfs/test_cephfs_shell.py
qa/tasks/cephfs/xfstests_dev.py
qa/tasks/daemonwatchdog.py
qa/tasks/mgr/dashboard/test_cephfs.py
qa/tasks/mgr/dashboard/test_orchestrator.py
qa/tasks/mgr/dashboard/test_rbd.py
qa/tasks/mgr/mgr_test_case.py
qa/tasks/mgr/test_orchestrator_cli.py
qa/tasks/s3tests.py
qa/tasks/s3tests_java.py
qa/tasks/vstart_runner.py
qa/workunits/mon/caps.py: trivial resolutions,
          and drop the change to qa/CMakeLists.txt, as we don't have
          add_tox_test() back in nautilus

55 files changed:
qa/standalone/special/ceph_objectstore_tool.py
qa/tasks/cbt.py
qa/tasks/ceph.py
qa/tasks/ceph_fuse.py
qa/tasks/ceph_manager.py
qa/tasks/cephfs/filesystem.py
qa/tasks/cephfs/fuse_mount.py
qa/tasks/cephfs/test_cephfs_shell.py
qa/tasks/cephfs/test_client_recovery.py
qa/tasks/cephfs/test_exports.py
qa/tasks/cephfs/test_failover.py
qa/tasks/cephfs/test_misc.py
qa/tasks/cephfs/test_recovery_pool.py
qa/tasks/cephfs/test_scrub.py
qa/tasks/cephfs/test_scrub_checks.py
qa/tasks/cephfs/test_sessionmap.py
qa/tasks/cephfs/test_snapshots.py
qa/tasks/cephfs/test_strays.py
qa/tasks/cephfs/test_volume_client.py
qa/tasks/cephfs/test_volumes.py
qa/tasks/check_counter.py
qa/tasks/create_verify_lfn_objects.py
qa/tasks/divergent_priors.py
qa/tasks/divergent_priors2.py
qa/tasks/dump_stuck.py
qa/tasks/exec_on_cleanup.py
qa/tasks/fs.py
qa/tasks/keystone.py
qa/tasks/mds_creation_failure.py
qa/tasks/mgr/dashboard/test_ganesha.py
qa/tasks/mgr/mgr_test_case.py
qa/tasks/mgr/test_orchestrator_cli.py
qa/tasks/mgr/test_ssh_orchestrator.py
qa/tasks/mon_clock_skew_check.py
qa/tasks/netem.py
qa/tasks/osd_max_pg_per_osd.py
qa/tasks/radosbench.py
qa/tasks/radosgw_admin.py
qa/tasks/radosgw_admin_rest.py
qa/tasks/ragweed.py
qa/tasks/rbd_fsx.py
qa/tasks/rbd_mirror_thrash.py
qa/tasks/reg11184.py
qa/tasks/rgw.py
qa/tasks/rgw_multisite.py
qa/tasks/rgw_multisite_tests.py
qa/tasks/s3a_hadoop.py
qa/tasks/swift.py
qa/tasks/tempest.py
qa/tasks/tox.py
qa/tasks/util/rgw.py
qa/tasks/vstart_runner.py
qa/workunits/fs/multiclient_sync_read_eof.py
qa/workunits/mon/caps.py
qa/workunits/restart/test-backtraces.py

index b058c247c6104f08818a5cf81870cb0b8c9b12fa..496ae417e53031d44db336cad5b50fbbfc441a47 100755 (executable)
@@ -45,7 +45,7 @@ if sys.version_info[0] >= 3:
     def decode(s):
         return s.decode('utf-8')
 
-    def check_output(*args, **kwargs):
+    def check_output(*args, **kwargs): # noqa
         return decode(subprocess.check_output(*args, **kwargs))
 else:
     def decode(s):
@@ -336,7 +336,7 @@ def check_entry_transactions(entry, enum):
 
 
 def check_transaction_ops(ops, enum, tnum):
-    if len(ops) is 0:
+    if len(ops) == 0:
         logging.warning("No ops found in entry {e} trans {t}".format(e=enum, t=tnum))
     errors = 0
     for onum in range(len(ops)):
@@ -375,7 +375,7 @@ def test_dump_journal(CFSD_PREFIX, osds):
         os.unlink(TMPFILE)
 
         journal_errors = check_journal(jsondict)
-        if journal_errors is not 0:
+        if journal_errors != 0:
             logging.error(jsondict)
         ERRORS += journal_errors
 
@@ -519,7 +519,7 @@ def get_osd_weights(CFSD_PREFIX, osd_ids, osd_path):
     for line in output.strip().split('\n'):
         print(line)
         linev = re.split('\s+', line)
-        if linev[0] is '':
+        if linev[0] == '':
             linev.pop(0)
         print('linev %s' % linev)
         weights.append(float(linev[2]))
index e0a1720dd32ba57db7a43119f2ef3cbf18a95aed..941694802cf6200c9eb8c58a315511cc16a2a667 100644 (file)
@@ -3,7 +3,6 @@ import os
 import yaml
 
 from teuthology import misc
-from teuthology.config import config as teuth_config
 from teuthology.orchestra import run
 from teuthology.task import Task
 
index ab2747175f23142f03ca0a6c92494ac1be830ae8..040508255e602cb3a902ff410c24e925fefff01f 100644 (file)
@@ -84,18 +84,18 @@ def ceph_crash(ctx, config):
             path = os.path.join(ctx.archive, 'remote')
             try:
                 os.makedirs(path)
-            except OSError as e:
+            except OSError:
                 pass
             for remote in ctx.cluster.remotes.keys():
                 sub = os.path.join(path, remote.shortname)
                 try:
                     os.makedirs(sub)
-                except OSError as e:
+                except OSError:
                     pass
                 try:
                     teuthology.pull_directory(remote, '/var/lib/ceph/crash',
                                               os.path.join(sub, 'crash'))
-                except ReadError as e:
+                except ReadError:
                     pass
 
 
@@ -269,13 +269,13 @@ def ceph_log(ctx, config):
             path = os.path.join(ctx.archive, 'remote')
             try:
                 os.makedirs(path)
-            except OSError as e:
+            except OSError:
                 pass
             for remote in ctx.cluster.remotes.keys():
                 sub = os.path.join(path, remote.shortname)
                 try:
                     os.makedirs(sub)
-                except OSError as e:
+                except OSError:
                     pass
                 teuthology.pull_directory(remote, '/var/log/ceph',
                                           os.path.join(sub, 'log'))
@@ -396,8 +396,6 @@ def create_rbd_pool(ctx, config):
 @contextlib.contextmanager
 def cephfs_setup(ctx, config):
     cluster_name = config['cluster']
-    testdir = teuthology.get_testdir(ctx)
-    coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
 
     first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
     (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys()
index 08254fed3c3d0001bc669688a8ea911c72589091..03f5a56e4a7015f819e8bd0ad076052e35c547fb 100644 (file)
@@ -7,7 +7,6 @@ import logging
 
 from teuthology import misc as teuthology
 from cephfs.fuse_mount import FuseMount
-from tasks.cephfs.filesystem import Filesystem
 
 log = logging.getLogger(__name__)
 
index b1f4ab4e3f2b3368f626142aee08a4e93e3300a1..72da3c606135a4f4756dc0d94f5f63036a25346f 100644 (file)
@@ -656,7 +656,7 @@ class Thrasher:
         Decrease the size of the pool
         """
         pool = self.ceph_manager.get_pool()
-        orig_pg_num = self.ceph_manager.get_pool_pg_num(pool)
+        _ = self.ceph_manager.get_pool_pg_num(pool)
         self.log("Shrinking pool %s" % (pool,))
         if self.ceph_manager.contract_pool(
                 pool,
@@ -807,7 +807,6 @@ class Thrasher:
         Random action selector.
         """
         chance_down = self.config.get('chance_down', 0.4)
-        chance_test_min_size = self.config.get('chance_test_min_size', 0)
         chance_test_backfill_full = \
             self.config.get('chance_test_backfill_full', 0)
         if isinstance(chance_down, int):
@@ -840,7 +839,7 @@ class Thrasher:
         actions.append((self.fix_pgp_num,
                         self.config.get('chance_pgpnum_fix', 0),))
         actions.append((self.test_pool_min_size,
-                        chance_test_min_size,))
+                        self.config.get('chance_test_min_size', 0),))
         actions.append((self.test_backfill_full,
                         chance_test_backfill_full,))
         if self.chance_thrash_cluster_full > 0:
@@ -1480,7 +1479,7 @@ class CephManager:
         while True:
             proc = self.admin_socket(service_type, service_id,
                                      args, check_status=False, stdout=stdout)
-            if proc.exitstatus is 0:
+            if proc.exitstatus == 0:
                 return proc
             else:
                 tries += 1
index 09dfc121af28c0cd955e042793d50d53861a34fd..2e1a0d854af2bf3e52c6b5ab12bc2fed7765c5ab 100644 (file)
@@ -1195,7 +1195,7 @@ class Filesystem(MDSCluster):
     def dirfrag_exists(self, ino, frag):
         try:
             self.rados(["stat", "{0:x}.{1:08x}".format(ino, frag)])
-        except CommandFailedError as e:
+        except CommandFailedError:
             return False
         else:
             return True
index bbd56b3c57c0c7446ccfe480ad44cd92f796ee82..d0665270aba25fe2c8059b58e13625c5456135d1 100644 (file)
@@ -1,4 +1,3 @@
-
 from StringIO import StringIO
 import json
 import time
@@ -10,7 +9,6 @@ from teuthology.contextutil import MaxWhileTries
 from teuthology.orchestra import run
 from teuthology.orchestra.run import CommandFailedError
 from .mount import CephFSMount
-from tasks.cephfs.filesystem import Filesystem
 
 log = logging.getLogger(__name__)
 
@@ -170,7 +168,7 @@ class FuseMount(CephFSMount):
         try:
             self.inst = status['inst_str']
             self.addr = status['addr_str']
-        except KeyError as e:
+        except KeyError:
             sessions = self.fs.rank_asok(['session', 'ls'])
             for s in sessions:
                 if s['id'] == self.id:
index 31f16b44a257057b4e9db8e01f2c80a4fc617074..8cf8474e2699d559dbbcbb88e3b0640f90a5d04f 100644 (file)
@@ -3,8 +3,6 @@ import crypt
 import logging
 from StringIO import StringIO
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
-from tasks.cephfs.fuse_mount import FuseMount
-from teuthology.exceptions import CommandFailedError
 
 log = logging.getLogger(__name__)
 
index 91ef54415fc9b8de32273d73c56666f5393bb28b..73bd815c4f70a239f00e45a53d30cda903dab154 100644 (file)
@@ -593,7 +593,7 @@ class TestClientRecovery(CephFSTestCase):
         SESSION_AUTOCLOSE = 50
         time_at_beg = time.time()
         mount_a_gid = self.mount_a.get_global_id()
-        mount_a_pid = self.mount_a.client_pid
+        _ = self.mount_a.client_pid
         self.fs.set_var('session_timeout', SESSION_TIMEOUT)
         self.fs.set_var('session_autoclose', SESSION_AUTOCLOSE)
         self.assert_session_count(2, self.fs.mds_asok(['session', 'ls']))
index 3ffdb553cf03d2980e859806cd6c80e0dc94338c..7d2a3425a894b74933f3595748d7e9ff34613ae1 100644 (file)
@@ -150,7 +150,6 @@ class TestExports(CephFSTestCase):
         status = self.fs.wait_for_daemons()
 
         rank1 = self.fs.get_rank(rank=1, status=status)
-        name1 = 'mds.'+rank1['name']
 
         # Create a directory that is pre-exported to rank 1
         self.mount_a.run_shell(["mkdir", "-p", "a/aa"])
index 07702435afd2c1e6ecb7d2bf5bb0a86c81572568..07431213a2cb27adc01ae3a02b1e8fd652ca3ca2 100644 (file)
@@ -1,13 +1,11 @@
 import time
 import signal
-import json
 import logging
 from unittest import case, SkipTest
 from random import randint
 
 from cephfs_test_case import CephFSTestCase
 from teuthology.exceptions import CommandFailedError
-from teuthology import misc as teuthology
 from tasks.cephfs.fuse_mount import FuseMount
 
 log = logging.getLogger(__name__)
@@ -25,7 +23,7 @@ class TestClusterResize(CephFSTestCase):
         log.info("status = {0}".format(status))
 
         original_ranks = set([info['gid'] for info in status.get_ranks(fscid)])
-        original_standbys = set([info['gid'] for info in status.get_standbys()])
+        _ = set([info['gid'] for info in status.get_standbys()])
 
         oldmax = self.fs.get_var('max_mds')
         self.assertTrue(n > oldmax)
@@ -45,7 +43,7 @@ class TestClusterResize(CephFSTestCase):
         log.info("status = {0}".format(status))
 
         original_ranks = set([info['gid'] for info in status.get_ranks(fscid)])
-        original_standbys = set([info['gid'] for info in status.get_standbys()])
+        _ = set([info['gid'] for info in status.get_standbys()])
 
         oldmax = self.fs.get_var('max_mds')
         self.assertTrue(n < oldmax)
@@ -361,7 +359,7 @@ class TestStandbyReplay(CephFSTestCase):
 
     def _confirm_no_replay(self):
         status = self.fs.status()
-        standby_count = len(list(status.get_standbys()))
+        _ = len(list(status.get_standbys()))
         self.assertEqual(0, len(list(self.fs.get_replays(status=status))))
         return status
 
index 6c4eab82157bf2a1956b8efb10c82f8165ad959b..887290dbb1ff917ff5aff775587f0b55d136cbb1 100644 (file)
@@ -7,7 +7,6 @@ import errno
 import time
 import json
 import logging
-import time
 
 log = logging.getLogger(__name__)
 
@@ -78,7 +77,7 @@ class TestMisc(CephFSTestCase):
         def get_pool_df(fs, name):
             try:
                 return fs.get_pool_df(name)['objects'] > 0
-            except RuntimeError as e:
+            except RuntimeError:
                 return False
 
         self.wait_until_true(lambda: get_pool_df(self.fs, self.fs.metadata_pool_name), timeout=30)
@@ -172,8 +171,7 @@ class TestMisc(CephFSTestCase):
         out = self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'get',
                                                   pool_name, 'size',
                                                   '-f', 'json-pretty')
-        j = json.loads(out)
-        pool_size = int(j['size'])
+        _ = json.loads(out)
 
         proc = self.mount_a.run_shell(['df', '.'])
         output = proc.stdout.getvalue()
index 1684d170c8e310b7f59be4a7bfb064f69bb81728..36b4e58ec8c17f985e5824575abe5fb04e4843b7 100644 (file)
@@ -1,17 +1,13 @@
-
 """
 Test our tools for recovering metadata from the data pool into an alternate pool
 """
-import json
 
 import logging
-import os
-from textwrap import dedent
 import traceback
-from collections import namedtuple, defaultdict
+from collections import namedtuple
 
 from teuthology.orchestra.run import CommandFailedError
-from tasks.cephfs.cephfs_test_case import CephFSTestCase, for_teuthology
+from tasks.cephfs.cephfs_test_case import CephFSTestCase
 
 log = logging.getLogger(__name__)
 
index d96f5691ba21e13093e673f8bc54c4e35fafde51..e4f0cb9beb32b9c02e0a7f74bf289452de2a974d 100644 (file)
@@ -2,12 +2,9 @@
 Test CephFS scrub (distinct from OSD scrub) functionality
 """
 import logging
-import os
-import traceback
 from collections import namedtuple
 
-from teuthology.orchestra.run import CommandFailedError
-from tasks.cephfs.cephfs_test_case import CephFSTestCase, for_teuthology
+from tasks.cephfs.cephfs_test_case import CephFSTestCase
 
 log = logging.getLogger(__name__)
 
index 87b759e5d7a2a7f28a992551c33702babf439c35..80c69e3c29c216e445f7ca62759ec78d33c58727 100644 (file)
@@ -45,7 +45,7 @@ class TestScrubControls(CephFSTestCase):
         log.info("client_path: {0}".format(client_path))
 
         log.info("Cloning repo into place")
-        repo_path = TestScrubChecks.clone_repo(self.mount_a, client_path)
+        TestScrubChecks.clone_repo(self.mount_a, client_path)
 
         out_json = self.fs.rank_tell(["scrub", "start", abs_test_path, "recursive"])
         self.assertNotEqual(out_json, None)
@@ -68,7 +68,7 @@ class TestScrubControls(CephFSTestCase):
         log.info("client_path: {0}".format(client_path))
 
         log.info("Cloning repo into place")
-        repo_path = TestScrubChecks.clone_repo(self.mount_a, client_path)
+        _ = TestScrubChecks.clone_repo(self.mount_a, client_path)
 
         out_json = self.fs.rank_tell(["scrub", "start", abs_test_path, "recursive"])
         self.assertNotEqual(out_json, None)
@@ -96,7 +96,7 @@ class TestScrubControls(CephFSTestCase):
         log.info("client_path: {0}".format(client_path))
 
         log.info("Cloning repo into place")
-        repo_path = TestScrubChecks.clone_repo(self.mount_a, client_path)
+        _ = TestScrubChecks.clone_repo(self.mount_a, client_path)
 
         out_json = self.fs.rank_tell(["scrub", "start", abs_test_path, "recursive"])
         self.assertNotEqual(out_json, None)
@@ -337,7 +337,7 @@ class TestScrubChecks(CephFSTestCase):
 
         success, errstring = validator(jout, 0)
         if not success:
-            raise AsokCommandFailedError(command, rout, jout, errstring)
+            raise AsokCommandFailedError(command, 0, jout, errstring)
         return jout
 
     def asok_command(self, mds_rank, command, validator):
index 1a771461431cb264ff397a993fe5242a485d660a..c16851719c4dcc97455039b740753fefe8b31a7e 100644 (file)
@@ -1,4 +1,3 @@
-from StringIO import StringIO
 import time
 import json
 import logging
index 7f81a76b992dea93eb20ecd542b1771cf651ba68..f627c4932a75dc67f9437a55eb6c4cf17385be6f 100644 (file)
@@ -1,8 +1,6 @@
 import sys
 import logging
 import signal
-import time
-import errno
 from textwrap import dedent
 from tasks.cephfs.fuse_mount import FuseMount
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
@@ -528,7 +526,7 @@ class TestSnapshots(CephFSTestCase):
         self.fs.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit)])
         try:
             self.create_snap_dir(sname)
-        except CommandFailedError as e:
+        except CommandFailedError:
             # after reducing limit we expect the new snapshot creation to fail
             pass
         self.delete_dir_and_snaps("accounts", new_limit + 1)
index 00033eb06c10a2a084161e8d52c7d47377efb348..d7be983c570be0a1f89346fb6c2be48b6784d663 100644 (file)
@@ -4,7 +4,6 @@ import logging
 from textwrap import dedent
 import datetime
 import gevent
-import datetime
 
 from teuthology.orchestra.run import CommandFailedError, Raw
 from tasks.cephfs.cephfs_test_case import CephFSTestCase, for_teuthology
@@ -137,7 +136,7 @@ class TestStrays(CephFSTestCase):
             size_unit = 1024  # small, numerous files
             file_multiplier = 200
         else:
-            raise NotImplemented(throttle_type)
+            raise NotImplementedError(throttle_type)
 
         # Pick up config changes
         self.fs.mds_fail_restart()
@@ -222,7 +221,7 @@ class TestStrays(CephFSTestCase):
                             num_strays_purging, mds_max_purge_files
                         ))
                 else:
-                    raise NotImplemented(throttle_type)
+                    raise NotImplementedError(throttle_type)
 
                 log.info("Waiting for purge to complete {0}/{1}, {2}/{3}".format(
                     num_strays_purging, num_strays,
index fcf30854442482b17a0e77f069eb7d52d5de5298..8687e910be1ddb8377c7674b0ec5f6b9118b86cb 100644 (file)
@@ -1,6 +1,5 @@
 import json
 import logging
-import time
 import os
 from textwrap import dedent
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
@@ -1111,7 +1110,7 @@ vc.disconnect()
         volume_prefix = "/myprefix"
         group_id = "grpid"
         volume_id = "volid"
-        mount_path = self._volume_client_python(vc_mount, dedent("""
+        self._volume_client_python(vc_mount, dedent("""
             vp = VolumePath("{group_id}", "{volume_id}")
             create_result = vc.create_volume(vp, 1024*1024*10, namespace_isolated=False)
             print(create_result['mount_path'])
index 7aa2182f072552fc93f31c9b1a8504c282b503e6..2b94583b7b1f5de9ac79f84f991485c594f0bc79 100644 (file)
@@ -459,7 +459,7 @@ class TestVolumes(CephFSTestCase):
         nsize = usedsize/2
         try:
             self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
-        except CommandFailedError as ce:
+        except CommandFailedError:
             raise RuntimeError("expected the 'fs subvolume resize' command to succeed")
 
         # verify the quota
index b15dc6fe21cb60845845b5df8b1fc929f72bdcaa..fc877f285b6c8174204ef23f2c4b084f2f7e22f5 100644 (file)
@@ -4,7 +4,6 @@ import json
 
 from teuthology.task import Task
 from teuthology import misc
-import ceph_manager
 
 log = logging.getLogger(__name__)
 
index 01ab1a370b73e8721db029b79da190c48c8a6037..53254158128794330ba328369be80288210412f9 100644 (file)
@@ -35,7 +35,7 @@ def task(ctx, config):
         for ns in namespace:
             def object_name(i):
                 nslength = 0
-                if namespace is not '':
+                if namespace != '':
                     nslength = len(namespace)
                 numstr = str(i)
                 fillerlen = l - nslength - len(prefix) - len(numstr)
index 7a4d1327020a87fe84def7da26568b9918976e7a..b565c774c441b60b156451a73ebe832b6be6f69d 100644 (file)
@@ -155,6 +155,6 @@ def task(ctx, config):
     for i in range(DIVERGENT_WRITE + DIVERGENT_REMOVE):
         exit_status = rados(ctx, mon, ['-p', 'foo', 'get', 'existing_%d' % i,
                                        '/tmp/existing'])
-        assert exit_status is 0
+        assert exit_status == 0
 
     log.info("success")
index 49bc8e469252df968d5a9275b10c7aa5e6087033..dda358b045fd39f0543a516ee186cf2412b2e0f4 100644 (file)
@@ -185,7 +185,7 @@ def task(ctx, config):
     for i in range(DIVERGENT_WRITE + DIVERGENT_REMOVE):
         exit_status = rados(ctx, mon, ['-p', 'foo', 'get', 'existing_%d' % i,
                                        '/tmp/existing'])
-        assert exit_status is 0
+        assert exit_status == 0
 
     cmd = 'rm {file}'.format(file=expfile)
     exp_remote.run(args=cmd, wait=True)
index 76a5317f7c608bbca0798beeade4728689f35427..e503035f877728caf052e2678e79574d0fde7318 100644 (file)
@@ -2,7 +2,6 @@
 Dump_stuck command
 """
 import logging
-import re
 import time
 
 import ceph_manager
index 6e40e4044b0363b74dea99c8b71b6f2ec21f90ed..6431197e10664654e93c19ab316e9adf6c6e4718 100644 (file)
@@ -5,7 +5,6 @@ import logging
 import contextlib
 
 from teuthology import misc as teuthology
-from teuthology import contextutil
 
 log = logging.getLogger(__name__)
 
index 4286318527e7900207943e4265120e7c2ed1fb21..4b47e754bfa2460e81651685f065bb423dc0cd32 100644 (file)
@@ -2,10 +2,8 @@
 CephFS sub-tasks.
 """
 
-import contextlib
 import logging
 import re
-import time
 
 from tasks.cephfs.filesystem import Filesystem
 
index c61320096bc1afa9079d28cc3e4736ad0e20972e..673547aa5b914b1ccf8b5d69654a689c991a0c62 100644 (file)
@@ -9,7 +9,6 @@ from cStringIO import StringIO
 from teuthology import misc as teuthology
 from teuthology import contextutil
 from teuthology.orchestra import run
-from teuthology.orchestra.connection import split_user
 from teuthology.packaging import install_package
 from teuthology.packaging import remove_package
 
@@ -193,7 +192,6 @@ def run_keystone(ctx, config):
 
         # start the public endpoint
         client_public_with_id = 'keystone.public' + '.' + client_id
-        client_public_with_cluster = cluster_name + '.' + client_public_with_id
 
         public_host, public_port = ctx.keystone.public_endpoints[client]
         run_cmd = get_keystone_venved_cmd(ctx, 'keystone-wsgi-public',
index 2647eba761c87f048e880d0eec1b2b7e70dbc6c2..29e2c351346195a7e528daad89c1c465fd4f96bf 100644 (file)
@@ -1,4 +1,5 @@
-
+# FIXME: this file has many undefined vars which are accessed!
+# flake8: noqa
 import logging
 import contextlib
 import time
index 6b89ca508fbf883ca32c7136db67f7da5a58452c..cd869a00e405b2f251fc88f81e4f98fd74e040c2 100644 (file)
@@ -3,9 +3,8 @@
 
 from __future__ import absolute_import
 
-import time
 
-from .helper import DashboardTestCase, JObj, JLeaf, JList
+from .helper import DashboardTestCase
 
 
 class GaneshaTest(DashboardTestCase):
index 7684a95565af7cfe480e1cbca4b90f61904f1d82..44d25a7af80fad99d4e6a15ea720ea76ebb75c50 100644 (file)
@@ -101,9 +101,9 @@ class MgrTestCase(CephTestCase):
         assert cls.mgr_cluster is not None
 
         if len(cls.mgr_cluster.mgr_ids) < cls.MGRS_REQUIRED:
-            raise case.SkipTest("Only have {0} manager daemons, "
-                                "{1} are required".format(
-                len(cls.mgr_cluster.mgr_ids), cls.MGRS_REQUIRED))
+            cls.skipTest(
+                "Only have {0} manager daemons, {1} are required".format(
+                    len(cls.mgr_cluster.mgr_ids), cls.MGRS_REQUIRED))
 
         cls.setup_mgrs()
 
index 86f72678926b1bfd465eee700df8cf3b7698f768..1516d44cb45377bdb472a80bf4a78dd0f9157321 100644 (file)
@@ -1,7 +1,6 @@
 import errno
 import json
 import logging
-from tempfile import NamedTemporaryFile
 
 from teuthology.exceptions import CommandFailedError
 
index 76a31dd332c6a2eb57fe2ea1b6a268824d8d2c45..6fce66365c43914e0594a0d5dbbb46faab292393 100644 (file)
@@ -1,7 +1,4 @@
-import json
 import logging
-from tempfile import NamedTemporaryFile
-from teuthology.exceptions import CommandFailedError
 from mgr_test_case import MgrTestCase
 
 log = logging.getLogger(__name__)
index 5c4088c7369cde19863a947440c87bb149a1e872..f7862cb1354d7b9f9f176d1caf81ca796b9cacd1 100644 (file)
@@ -2,11 +2,8 @@
 Handle clock skews in monitors.
 """
 import logging
-import contextlib
 import ceph_manager
 import time
-import gevent
-from StringIO import StringIO
 from teuthology import misc as teuthology
 
 log = logging.getLogger(__name__)
index 95018150da93b38a2afb74d9425bb1337a8f4b60..4fa08bbc0a0b8683c96ca0b14b558b1075713c8a 100644 (file)
@@ -6,10 +6,7 @@ Reference:https://wiki.linuxfoundation.org/networking/netem.
 
 import logging
 import contextlib
-from teuthology import misc as teuthology
 from cStringIO import StringIO
-from teuthology.orchestra import run
-from teuthology import contextutil
 from paramiko import SSHException
 import socket
 import time
@@ -168,7 +165,7 @@ class Toggle:
             try:
                 self.packet_drop()
                 log.info('link down')
-            except SSHException as e:
+            except SSHException:
                 log.debug('Failed to run command')
 
             self.stop_event.wait(timeout=self.interval)
@@ -176,7 +173,7 @@ class Toggle:
             try:
                 delete_dev(self.remote, self.interface)
                 log.info('link up')
-            except SSHException as e:
+            except SSHException:
                 log.debug('Failed to run command')
 
     def begin(self, gname):
index 03ea218f5690adba4c1e2fa3093b908ddd551cc0..739959e2fbdcb0fcca2e440a80c27f4e5b383804 100644 (file)
@@ -76,7 +76,6 @@ def test_create_from_peer(ctx, config):
     4. delete a pool, verify pgs go active.
     """
     pg_num = config.get('pg_num', 1)
-    pool_size = config.get('pool_size', 2)
     from_primary = config.get('from_primary', True)
 
     manager = ctx.managers['ceph']
@@ -121,7 +120,6 @@ def test_create_from_peer(ctx, config):
 def task(ctx, config):
     assert isinstance(config, dict), \
         'osd_max_pg_per_osd task only accepts a dict for config'
-    manager = ctx.managers['ceph']
     if config.get('test_create_from_mon', True):
         test_create_from_mon(ctx, config)
     else:
index c4f1ab62d85d2001b044fb17eaf83560ed00ab3e..6de1bbacf5933f729aec6c5d0140a3bb2f0c6b27 100644 (file)
@@ -79,7 +79,7 @@ def task(ctx, config):
 
         concurrency = config.get('concurrency', 16)
         osize = config.get('objectsize', 65536)
-        if osize is 0:
+        if osize == 0:
             objectsize = []
         else:
             objectsize = ['-O', str(osize)]
@@ -134,5 +134,5 @@ def task(ctx, config):
         log.info('joining radosbench (timing out after %ss)', timeout)
         run.wait(radosbench.itervalues(), timeout=timeout)
 
-        if pool is not 'data' and create_pool:
+        if pool != 'data' and create_pool:
             manager.remove_pool(pool)
index 81906bea0a2b7d73f20a36ba0f1a0bb381ed7992..a89fcaa9db712adffb3b9f2a0604f67cd08b8cda 100644 (file)
@@ -10,7 +10,6 @@ Rgw admin testing against a running instance
 #      python qa/tasks/radosgw_admin.py [USER] HOSTNAME
 #
 
-import copy
 import json
 import logging
 import time
@@ -24,11 +23,9 @@ from cStringIO import StringIO
 import boto.exception
 import boto.s3.connection
 import boto.s3.acl
-from boto.utils import RequestHook
 
 import httplib2
 
-import util.rgw as rgw_utils
 
 from util.rgw import rgwadmin, get_user_summary, get_user_successful_ops
 
@@ -288,7 +285,6 @@ def task(ctx, config):
     display_name1='Foo'
     display_name2='Fud'
     email='foo@foo.com'
-    email2='bar@bar.com'
     access_key='9te6NH5mcdcq0Tc5i8i1'
     secret_key='Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu'
     access_key2='p5YnriCv1nAtykxBrupQ'
@@ -920,8 +916,6 @@ def task(ctx, config):
     # TESTCASE 'zonegroup-info', 'zonegroup', 'get', 'get zonegroup info', 'succeeds'
     (err, out) = rgwadmin(ctx, client, ['zonegroup', 'get'], check_status=True)
 
-import sys
-from tasks.radosgw_admin import task
 from teuthology.config import config
 from teuthology.orchestra import cluster, remote
 import argparse;
index 12d3ac046cfc8570c5687da239273a7bd2dbd2ed..1edf05fd3736ace9788bc292923e055d7ad7131c 100644 (file)
@@ -7,9 +7,8 @@ To extract the inventory (in csv format) use the command:
    grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //'
 
 """
-from cStringIO import StringIO
 import logging
-import json
+
 
 import boto.exception
 import boto.s3.connection
index 5b6c7939e31ccb5d335097933dfb3631f7bc9c43..bb72267e5e4ca37e4fc79c6fcaf96d34a5a9f783 100644 (file)
@@ -10,8 +10,6 @@ import os
 import random
 import string
 
-import util.rgw as rgw_utils
-
 from teuthology import misc as teuthology
 from teuthology import contextutil
 from teuthology.config import config as teuth_config
index 12e50d98b05b3852d51b5aa9fb5adf4bd9e6c3cb..396d8fed2a21a4f539d8dabd473cbc23027c7758 100644 (file)
@@ -4,7 +4,7 @@ Run fsx on an rbd image
 import contextlib
 import logging
 
-from teuthology.orchestra import run
+from teuthology.exceptions import ConfigError
 from teuthology.parallel import parallel
 from teuthology import misc as teuthology
 
index 081b353d92bdc4fc63268eb76232af925228d7da..67e1c332c648b483ab09ed085d58d40307c80904 100644 (file)
@@ -13,9 +13,7 @@ from gevent import sleep
 from gevent.greenlet import Greenlet
 from gevent.event import Event
 
-from teuthology import misc
 from teuthology.exceptions import CommandFailedError
-from teuthology.task import Task
 from teuthology.orchestra import run
 
 log = logging.getLogger(__name__)
index 73fedb966ec489777d96fd8e4799bb3714d46a71..1059fda712539978c815540e4ffc0068de27839b 100644 (file)
@@ -233,7 +233,7 @@ def task(ctx, config):
     for i in range(DIVERGENT_WRITE + DIVERGENT_REMOVE):
         exit_status = rados(ctx, mon, ['-p', 'foo', 'get', 'existing_%d' % i,
                                        '/tmp/existing'])
-        assert exit_status is 0
+        assert exit_status == 0
 
     (remote,) = ctx.\
         cluster.only('osd.{o}'.format(o=divergent)).remotes.keys()
index 4502e5e6994a2cba4ddc9c714da04eea7fa417a3..44478fa209b80a3def12ce7d96c471cda14b92b6 100644 (file)
@@ -3,11 +3,7 @@ rgw routines
 """
 import argparse
 import contextlib
-import json
 import logging
-import os
-import errno
-import util.rgw as rgw_utils
 
 from teuthology.orchestra import run
 from teuthology import misc as teuthology
@@ -15,9 +11,9 @@ from teuthology import contextutil
 from teuthology.exceptions import ConfigError
 from util import get_remote_for_role
 from util.rgw import rgwadmin, wait_for_radosgw
-from util.rados import (rados, create_ec_pool,
-                                        create_replicated_pool,
-                                        create_cache_pool)
+from util.rados import (create_ec_pool,
+                        create_replicated_pool,
+                        create_cache_pool)
 
 log = logging.getLogger(__name__)
 
index 9dea39312dee37d229aa059781db42d119c9a829..08d52891cdabf914fde2a7ac9877c5c6d38294e0 100644 (file)
@@ -2,7 +2,6 @@
 rgw multisite configuration routines
 """
 import argparse
-import contextlib
 import logging
 import random
 import string
@@ -223,7 +222,7 @@ class Gateway(multisite.Gateway):
             # insert zone args before the first |
             pipe = args.index(run.Raw('|'))
             args = args[0:pipe] + zone.zone_args() + args[pipe:]
-        except ValueError, e:
+        except ValueError:
             args += zone.zone_args()
         self.daemon.command_kwargs['args'] = args
 
index dade6e47483e13bfb9830a02402451469761ac60..dee6bfaa303966cd9569fe660433b6e81b6362d8 100644 (file)
@@ -2,7 +2,6 @@
 rgw multisite testing
 """
 import logging
-import sys
 import nose.core
 import nose.config
 
index 143abc34c1f607dcdd5c4eea0f80478291577907..b0c4ede60028bb71d959de62af21fe3a2b9dd8a6 100644 (file)
@@ -1,6 +1,5 @@
 import contextlib
 import logging
-import time
 from teuthology import misc
 from teuthology.orchestra import run
 
index 96d586d6a65d3f4383cf7e16f3e8b20a0c8d88cf..bfd032c7572d0b3b64cbeb3bc56a53bd0a07384f 100644 (file)
@@ -13,7 +13,6 @@ from teuthology import misc as teuthology
 from teuthology import contextutil
 from teuthology.config import config as teuth_config
 from teuthology.orchestra import run
-from teuthology.orchestra.connection import split_user
 
 log = logging.getLogger(__name__)
 
index 6d4a38ad0baec3d71e2e5af588ce3de032f005f4..a8f1d82c32ad1b9246dc171e4e864bc094dfc5dc 100644 (file)
@@ -6,7 +6,7 @@ import logging
 
 from teuthology import misc as teuthology
 from teuthology import contextutil
-from teuthology.config import config as teuth_config
+from teuthology.exceptions import ConfigError
 from teuthology.orchestra import run
 
 log = logging.getLogger(__name__)
@@ -238,7 +238,6 @@ def task(ctx, config):
         config = all_clients
     if isinstance(config, list):
         config = dict.fromkeys(config)
-    clients = config.keys()
 
     overrides = ctx.config.get('overrides', {})
     # merge each client section, not the top level.
index 46b4f565dc0637ff973525fb097cbe2fa64117f1..81d712f44b193bba811322e4c301a171c23bfd9a 100644 (file)
@@ -3,7 +3,6 @@ import contextlib
 import logging
 
 from teuthology import misc as teuthology
-from teuthology import contextutil
 from teuthology.orchestra import run
 
 log = logging.getLogger(__name__)
index d3abf1ced3b971ea997598422492e0d3ac6b967e..d1ea39d1cd05c203252e0a3fcc166cdba8323584 100644 (file)
@@ -1,14 +1,8 @@
 from cStringIO import StringIO
 import logging
 import json
-import requests
 import time
 
-from requests.packages.urllib3 import PoolManager
-from requests.packages.urllib3.util import Retry
-from urlparse import urlparse
-
-from teuthology.orchestra.connection import split_user
 from teuthology import misc as teuthology
 
 log = logging.getLogger(__name__)
index d2be17ee831c1e0990fa7b8521de26ac6644fbe7..70780dea451e8d59f76703c326d177f9cf5e3fb7 100644 (file)
@@ -41,7 +41,6 @@ import shutil
 import re
 import os
 import time
-import json
 import sys
 import errno
 from unittest import suite, loader
@@ -833,7 +832,7 @@ def scan_tests(modules):
     max_required_mgr = 0
     require_memstore = False
 
-    for suite, case in enumerate_methods(overall_suite):
+    for suite_, case in enumerate_methods(overall_suite):
         max_required_mds = max(max_required_mds,
                                getattr(case, "MDSS_REQUIRED", 0))
         max_required_clients = max(max_required_clients,
index d3e0f8e652e9658c88c4e891ed77ef019cc33402..1d5bb6506217a7ccd3b4dc3dca57cb6530e77ee1 100755 (executable)
@@ -2,8 +2,6 @@
 
 import argparse
 import os
-import sys
-import time
 
 def main():
     parser = argparse.ArgumentParser()
index cca170ac4edce064ed35171a986c6b085a54054b..e83bcad9e7e6054d8fb6f81fa421baa0657f2f95 100644 (file)
@@ -1,9 +1,9 @@
 #!/usr/bin/python
 
-import json
+from __future__ import print_function
+
 import subprocess
 import shlex
-from StringIO import StringIO
 import errno
 import sys
 import os
@@ -11,8 +11,7 @@ import io
 import re
 
 
-import rados
-from ceph_argparse import *
+from ceph_argparse import * # noqa
 
 keyring_base = '/tmp/cephtest-caps.keyring'
 
index 2fa67a23f385215f85282340a3cc15d1180e8721..07fe8845f4e63fba2788f48c6d200bc70e96d3b3 100755 (executable)
@@ -9,13 +9,9 @@ import time
 import sys
 
 if sys.version_info[0] == 2:
-    from cStringIO import StringIO
-
     range = xrange
 
 elif sys.version_info[0] == 3:
-    from io import StringIO
-
     range = range
 
 import rados as rados
@@ -47,8 +43,6 @@ def set_mds_config_param(ceph, param):
         if r != 0:
             raise Exception
 
-import ConfigParser
-import contextlib
 
 class _TrimIndentFile(object):
     def __init__(self, fp):
@@ -150,10 +144,10 @@ def verify(rados_ioctx, ino, values, pool):
 
     bt = decode(binbt)
 
+    ind = 0
     if bt['ino'] != ino:
         raise VerifyFailure('inode mismatch: {bi} != {ino}\n\tbacktrace:\n\t\t{bt}\n\tfailed verify against:\n\t\t{i}, {v}'.format(
                     bi=bt['ancestors'][ind]['dname'], ino=ino, bt=bt, i=ino, v=values))
-    ind = 0
     for (n, i) in values:
         if bt['ancestors'][ind]['dirino'] != i:
             raise VerifyFailure('ancestor dirino mismatch: {b} != {ind}\n\tbacktrace:\n\t\t{bt}\n\tfailed verify against:\n\t\t{i}, {v}'.format(