qa: Enable flake8 tox and fix failures (32129/head)
author     Thomas Bechtold <tbechtold@suse.com>
           Mon, 9 Dec 2019 15:17:23 +0000 (16:17 +0100)
committer  Thomas Bechtold <tbechtold@suse.com>
           Thu, 12 Dec 2019 09:21:01 +0000 (10:21 +0100)
flake8 found a number of problems in the qa/ directory; most of them are
fixed now. Enabling flake8 during the usual check runs should prevent new
issues from being introduced in the future.

Signed-off-by: Thomas Bechtold <tbechtold@suse.com>
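
The new qa/CMakeLists.txt below wires flake8 into the build via
add_tox_test(qa flake8), which registers a tox-based test that is run as part
of the check target. The qa/tox.ini defining that environment is not shown in
this excerpt; a minimal sketch of what such an environment typically looks
like (the deps, selected checks, and target paths are illustrative
assumptions, not taken from this commit):

    ; qa/tox.ini -- illustrative sketch only, not the file from this commit
    [tox]
    envlist = flake8
    skipsdist = True

    [testenv:flake8]
    deps = flake8
    commands = flake8 --select=F,E9 tasks standalone workunits

With an environment like this in place, the same check can also be run
locally from the qa/ directory with "tox -e flake8", without going through
cmake.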
66 files changed:
CMakeLists.txt
qa/CMakeLists.txt [new file with mode: 0644]
qa/standalone/special/ceph_objectstore_tool.py
qa/tasks/barbican.py
qa/tasks/cbt.py
qa/tasks/ceph.py
qa/tasks/ceph_fuse.py
qa/tasks/ceph_manager.py
qa/tasks/cephadm.py
qa/tasks/cephfs/filesystem.py
qa/tasks/cephfs/fuse_mount.py
qa/tasks/cephfs/test_cephfs_shell.py
qa/tasks/cephfs/test_client_recovery.py
qa/tasks/cephfs/test_exports.py
qa/tasks/cephfs/test_failover.py
qa/tasks/cephfs/test_misc.py
qa/tasks/cephfs/test_recovery_pool.py
qa/tasks/cephfs/test_scrub.py
qa/tasks/cephfs/test_scrub_checks.py
qa/tasks/cephfs/test_sessionmap.py
qa/tasks/cephfs/test_snapshots.py
qa/tasks/cephfs/test_strays.py
qa/tasks/cephfs/test_volume_client.py
qa/tasks/cephfs/test_volumes.py
qa/tasks/cephfs/xfstests_dev.py
qa/tasks/check_counter.py
qa/tasks/create_verify_lfn_objects.py
qa/tasks/daemonwatchdog.py
qa/tasks/divergent_priors.py
qa/tasks/divergent_priors2.py
qa/tasks/dump_stuck.py
qa/tasks/exec_on_cleanup.py
qa/tasks/fs.py
qa/tasks/keystone.py
qa/tasks/mds_creation_failure.py
qa/tasks/mgr/dashboard/test_cephfs.py
qa/tasks/mgr/dashboard/test_ganesha.py
qa/tasks/mgr/dashboard/test_orchestrator.py
qa/tasks/mgr/dashboard/test_rbd.py
qa/tasks/mgr/mgr_test_case.py
qa/tasks/mgr/test_cephadm_orchestrator.py
qa/tasks/mgr/test_orchestrator_cli.py
qa/tasks/mon_clock_skew_check.py
qa/tasks/netem.py
qa/tasks/osd_max_pg_per_osd.py
qa/tasks/radosbench.py
qa/tasks/radosgw_admin.py
qa/tasks/radosgw_admin_rest.py
qa/tasks/ragweed.py
qa/tasks/rbd_fsx.py
qa/tasks/rbd_mirror_thrash.py
qa/tasks/reg11184.py
qa/tasks/rgw.py
qa/tasks/rgw_multisite.py
qa/tasks/rgw_multisite_tests.py
qa/tasks/s3a_hadoop.py
qa/tasks/s3tests.py
qa/tasks/s3tests_java.py
qa/tasks/swift.py
qa/tasks/tempest.py
qa/tasks/tox.py
qa/tasks/util/rgw.py
qa/tasks/vstart_runner.py
qa/workunits/fs/multiclient_sync_read_eof.py
qa/workunits/mon/caps.py
qa/workunits/restart/test-backtraces.py

index 2fe35f0091a712371387661ec5b23757ecf7ee7c..bab40cc4965d0dbd052c6e5e5471ef5315d06d55 100644 (file)
@@ -628,6 +628,8 @@ add_custom_target(check
 
 add_subdirectory(src)
 
+add_subdirectory(qa)
+
 add_subdirectory(doc)
 if(WITH_MANPAGE)
   add_subdirectory(man)
diff --git a/qa/CMakeLists.txt b/qa/CMakeLists.txt
new file mode 100644 (file)
index 0000000..06de662
--- /dev/null
@@ -0,0 +1,9 @@
+set(CEPH_BUILD_VIRTUALENV $ENV{TMPDIR})
+if(NOT CEPH_BUILD_VIRTUALENV)
+  set(CEPH_BUILD_VIRTUALENV ${CMAKE_BINARY_DIR})
+endif()
+
+if(WITH_TESTS)
+  include(AddCephTest)
+  add_tox_test(qa flake8)
+endif()
index 952eda3b3ae9527acf1fd6b46c3d8060a52b0d4b..21cf5e3bb99935d6c990eb8e91faa0c7602bdb79 100755 (executable)
@@ -46,7 +46,7 @@ if sys.version_info[0] >= 3:
     def decode(s):
         return s.decode('utf-8')
 
-    def check_output(*args, **kwargs):
+    def check_output(*args, **kwargs): # noqa
         return decode(subprocess.check_output(*args, **kwargs))
 else:
     def decode(s):
@@ -337,7 +337,7 @@ def check_entry_transactions(entry, enum):
 
 
 def check_transaction_ops(ops, enum, tnum):
-    if len(ops) is 0:
+    if len(ops) == 0:
         logging.warning("No ops found in entry {e} trans {t}".format(e=enum, t=tnum))
     errors = 0
     for onum in range(len(ops)):
@@ -376,7 +376,7 @@ def test_dump_journal(CFSD_PREFIX, osds):
         os.unlink(TMPFILE)
 
         journal_errors = check_journal(jsondict)
-        if journal_errors is not 0:
+        if journal_errors != 0:
             logging.error(jsondict)
         ERRORS += journal_errors
 
@@ -520,7 +520,7 @@ def get_osd_weights(CFSD_PREFIX, osd_ids, osd_path):
     for line in output.strip().split('\n'):
         print(line)
         linev = re.split('\s+', line)
-        if linev[0] is '':
+        if linev[0] == '':
             linev.pop(0)
         print('linev %s' % linev)
         weights.append(float(linev[2]))
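
Several hunks above (and more below) replace identity comparisons such as
"len(ops) is 0" or "linev[0] is ''" with value comparisons. This is a
correctness fix, not just style: "is" tests object identity and only works by
accident when CPython happens to reuse the same object for small interned
integers or strings. A standalone illustration of the difference (not code
from the repository):

    # "is" checks object identity, "==" checks value.
    n = int("1024")      # built at runtime, not a cached small int
    print(n == 1024)     # True  -- value equality, always the right test here
    print(n is 1024)     # usually False -- two distinct int objects
    s = "".join(["a", "b"])
    print(s == "ab")     # True
    print(s is "ab")     # implementation-dependent; flake8 flags this pattern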
index 37c8f34aa43d7968f33f5db6c2a8b5fc4ca2a1e6..0ce4aefb7d0b04809d20c248d58dc38f30d13632 100644 (file)
@@ -4,14 +4,12 @@ Deploy and configure Barbican for Teuthology
 import argparse
 import contextlib
 import logging
-import string
 import httplib
 from urlparse import urlparse
 import json
 
 from teuthology import misc as teuthology
 from teuthology import contextutil
-from teuthology import safepath
 from teuthology.orchestra import run
 from teuthology.exceptions import ConfigError
 
@@ -201,7 +199,6 @@ def run_barbican(ctx, config):
 
         # start the public endpoint
         client_public_with_id = 'barbican.public' + '.' + client_id
-        client_public_with_cluster = cluster_name + '.' + client_public_with_id
 
         run_cmd = ['cd', get_barbican_dir(ctx), run.Raw('&&'),
                    '.', '.barbicanenv/bin/activate', run.Raw('&&'),
@@ -248,8 +245,6 @@ def create_secrets(ctx, config):
 
     keystone_role = cconfig.get('use-keystone-role', None)
     keystone_host, keystone_port = ctx.keystone.public_endpoints[keystone_role]
-    keystone_url = 'http://{host}:{port}/v2.0'.format(host=keystone_host,
-                                                      port=keystone_port)
     barbican_host, barbican_port = ctx.barbican.endpoints[cclient]
     barbican_url = 'http://{host}:{port}'.format(host=barbican_host,
                                                  port=barbican_port)
@@ -482,7 +477,6 @@ def task(ctx, config):
         config = all_clients
     if isinstance(config, list):
         config = dict.fromkeys(config)
-    clients = config.keys()
 
     overrides = ctx.config.get('overrides', {})
     # merge each client section, not the top level.
index 606a0c4db3d421006aca3d7525460768ce352470..c529dd6eef952680a8a8275999e6de5aea3bd419 100644 (file)
@@ -3,7 +3,6 @@ import os
 import yaml
 
 from teuthology import misc
-from teuthology.config import config as teuth_config
 from teuthology.orchestra import run
 from teuthology.task import Task
 
index 2baca41c4fa749d022e1d7f69248809550655bae..364c6aceac44f6c93e20f4061ba579f74462f6db 100644 (file)
@@ -85,18 +85,18 @@ def ceph_crash(ctx, config):
             path = os.path.join(ctx.archive, 'remote')
             try:
                 os.makedirs(path)
-            except OSError as e:
+            except OSError:
                 pass
             for remote in ctx.cluster.remotes.keys():
                 sub = os.path.join(path, remote.shortname)
                 try:
                     os.makedirs(sub)
-                except OSError as e:
+                except OSError:
                     pass
                 try:
                     teuthology.pull_directory(remote, '/var/lib/ceph/crash',
                                               os.path.join(sub, 'crash'))
-                except ReadError as e:
+                except ReadError:
                     pass
 
 
@@ -270,13 +270,13 @@ def ceph_log(ctx, config):
             path = os.path.join(ctx.archive, 'remote')
             try:
                 os.makedirs(path)
-            except OSError as e:
+            except OSError:
                 pass
             for remote in ctx.cluster.remotes.keys():
                 sub = os.path.join(path, remote.shortname)
                 try:
                     os.makedirs(sub)
-                except OSError as e:
+                except OSError:
                     pass
                 teuthology.pull_directory(remote, '/var/log/ceph',
                                           os.path.join(sub, 'log'))
@@ -397,8 +397,6 @@ def create_rbd_pool(ctx, config):
 @contextlib.contextmanager
 def cephfs_setup(ctx, config):
     cluster_name = config['cluster']
-    testdir = teuthology.get_testdir(ctx)
-    coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
 
     first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
     (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys()
index 08254fed3c3d0001bc669688a8ea911c72589091..03f5a56e4a7015f819e8bd0ad076052e35c547fb 100644 (file)
@@ -7,7 +7,6 @@ import logging
 
 from teuthology import misc as teuthology
 from cephfs.fuse_mount import FuseMount
-from tasks.cephfs.filesystem import Filesystem
 
 log = logging.getLogger(__name__)
 
index 6d8bebeb07d0fb46fdfd1eb555c59595bc22e179..d2d7f43632fd40dfcd5690005db9e189bf455df4 100644 (file)
@@ -679,7 +679,7 @@ class OSDThrasher(Thrasher):
         Decrease the size of the pool
         """
         pool = self.ceph_manager.get_pool()
-        orig_pg_num = self.ceph_manager.get_pool_pg_num(pool)
+        _ = self.ceph_manager.get_pool_pg_num(pool)
         self.log("Shrinking pool %s" % (pool,))
         if self.ceph_manager.contract_pool(
                 pool,
@@ -906,7 +906,7 @@ class OSDThrasher(Thrasher):
         Random action selector.
         """
         chance_down = self.config.get('chance_down', 0.4)
-        chance_test_min_size = self.config.get('chance_test_min_size', 0)
+        _ = self.config.get('chance_test_min_size', 0)
         chance_test_backfill_full = \
             self.config.get('chance_test_backfill_full', 0)
         if isinstance(chance_down, int):
@@ -1653,7 +1653,7 @@ class CephManager:
         while True:
             proc = self.admin_socket(service_type, service_id,
                                      args, check_status=False, stdout=stdout)
-            if proc.exitstatus is 0:
+            if proc.exitstatus == 0:
                 return proc
             else:
                 tries += 1
index fb2d24d22f4daa9de57f977005e1bf58e925402b..911068fe5f4626bf0091e9397a890e911ab593d1 100644 (file)
@@ -6,27 +6,17 @@ from cStringIO import StringIO
 import argparse
 import configobj
 import contextlib
-import errno
 import logging
 import os
 import json
-import time
-import gevent
-import re
-import socket
 import uuid
 
-from paramiko import SSHException
-from ceph_manager import CephManager, write_conf
+from ceph_manager import CephManager
 from tarfile import ReadError
-from tasks.cephfs.filesystem import Filesystem
 from teuthology import misc as teuthology
 from teuthology import contextutil
-from teuthology import exceptions
 from teuthology.orchestra import run
-import ceph_client as cclient
 from teuthology.orchestra.daemon import DaemonGroup
-from tasks.daemonwatchdog import DaemonWatchdog
 from teuthology.config import config as teuth_config
 
 # these items we use from ceph.py should probably eventually move elsewhere
@@ -93,7 +83,6 @@ def normalize_hostnames(ctx):
 @contextlib.contextmanager
 def download_cephadm(ctx, config, ref):
     cluster_name = config['cluster']
-    testdir = teuthology.get_testdir(ctx)
 
     if config.get('cephadm_mode') != 'cephadm-package':
         ref = config.get('cephadm_branch', ref)
@@ -179,13 +168,13 @@ def ceph_log(ctx, config):
             path = os.path.join(ctx.archive, 'remote')
             try:
                 os.makedirs(path)
-            except OSError as e:
+            except OSError:
                 pass
             for remote in ctx.cluster.remotes.keys():
                 sub = os.path.join(path, remote.name)
                 try:
                     os.makedirs(sub)
-                except OSError as e:
+                except OSError:
                     pass
                 teuthology.pull_directory(remote, '/var/log/ceph/' + fsid,
                                           os.path.join(sub, 'log'))
@@ -207,19 +196,19 @@ def ceph_crash(ctx, config):
             path = os.path.join(ctx.archive, 'remote')
             try:
                 os.makedirs(path)
-            except OSError as e:
+            except OSError:
                 pass
             for remote in ctx.cluster.remotes.keys():
                 sub = os.path.join(path, remote.name)
                 try:
                     os.makedirs(sub)
-                except OSError as e:
+                except OSError:
                     pass
                 try:
                     teuthology.pull_directory(remote,
                                               '/var/lib/ceph/%s/crash' % fsid,
                                               os.path.join(sub, 'crash'))
-                except ReadError as e:
+                except ReadError:
                     pass
 
 @contextlib.contextmanager
@@ -387,7 +376,6 @@ def ceph_mons(ctx, config):
     """
     cluster_name = config['cluster']
     fsid = ctx.ceph[cluster_name].fsid
-    testdir = teuthology.get_testdir(ctx)
     num_mons = 1
 
     try:
@@ -457,7 +445,6 @@ def ceph_mgrs(ctx, config):
     """
     cluster_name = config['cluster']
     fsid = ctx.ceph[cluster_name].fsid
-    testdir = teuthology.get_testdir(ctx)
 
     try:
         nodes = []
@@ -549,7 +536,6 @@ def ceph_mdss(ctx, config):
     """
     cluster_name = config['cluster']
     fsid = ctx.ceph[cluster_name].fsid
-    testdir = teuthology.get_testdir(ctx)
 
     nodes = []
     daemons = {}
@@ -628,7 +614,6 @@ def shell(ctx, config):
     """
     Execute (shell) commands
     """
-    testdir = teuthology.get_testdir(ctx)
     cluster_name = config.get('cluster', 'ceph')
 
     if 'all' in config and len(config) == 1:
@@ -724,7 +709,7 @@ def restart(ctx, config):
             healthy(ctx=ctx, config=dict(cluster=cluster))
     if config.get('wait-for-osds-up', False):
         for cluster in clusters:
-            wait_for_osds_up(ctx=ctx, config=dict(cluster=cluster))
+            ctx.managers[cluster].wait_for_all_osds_up()
     yield
 
 @contextlib.contextmanager
index be0fc9197a7b098425ffa31f533046e5e1abbb90..5c778231f0b0ba28624995b10e9bced8d25ce312 100644 (file)
@@ -1191,7 +1191,7 @@ class Filesystem(MDSCluster):
     def dirfrag_exists(self, ino, frag):
         try:
             self.rados(["stat", "{0:x}.{1:08x}".format(ino, frag)])
-        except CommandFailedError as e:
+        except CommandFailedError:
             return False
         else:
             return True
index 3392b1762c5b5362c30669f35498a469e426a75f..c7eb0ff7ef2974331ac98f0a519c509232735d02 100644 (file)
@@ -1,4 +1,3 @@
-
 from StringIO import StringIO
 import json
 import time
@@ -10,7 +9,6 @@ from teuthology.contextutil import MaxWhileTries
 from teuthology.orchestra import run
 from teuthology.orchestra.run import CommandFailedError
 from .mount import CephFSMount
-from tasks.cephfs.filesystem import Filesystem
 
 log = logging.getLogger(__name__)
 
@@ -166,7 +164,7 @@ class FuseMount(CephFSMount):
         try:
             self.inst = status['inst_str']
             self.addr = status['addr_str']
-        except KeyError as e:
+        except KeyError:
             sessions = self.fs.rank_asok(['session', 'ls'])
             for s in sessions:
                 if s['id'] == self.id:
index 527301bbfad775a60515d4c84de7299677d56d3b..5835e3f1ac8911d809b1eb3efee9fe1511a5674a 100644 (file)
@@ -12,8 +12,6 @@ from re import search as re_search
 from time import sleep
 from StringIO import StringIO
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
-from tasks.cephfs.fuse_mount import FuseMount
-from teuthology.exceptions import CommandFailedError
 from teuthology.misc import sudo_write_file
 
 log = logging.getLogger(__name__)
@@ -608,8 +606,8 @@ class TestDU(TestCephFSShell):
             path_prefix='')
 
         args = ['du', '/']
-        for path in path_to_files:
-            args.append(path)
+        for p in path_to_files:
+            args.append(p)
         du_output = self.get_cephfs_shell_cmd_output(args)
 
         for expected_output in expected_patterns_in_output:
index 2bad19d8afe844e5f95e87c1804a6259069fd7ce..fdee8fc7cf2e621201676d5360ad360936268d30 100644 (file)
@@ -591,7 +591,7 @@ class TestClientRecovery(CephFSTestCase):
         SESSION_AUTOCLOSE = 50
         time_at_beg = time.time()
         mount_a_gid = self.mount_a.get_global_id()
-        mount_a_pid = self.mount_a.client_pid
+        _ = self.mount_a.client_pid
         self.fs.set_var('session_timeout', SESSION_TIMEOUT)
         self.fs.set_var('session_autoclose', SESSION_AUTOCLOSE)
         self.assert_session_count(2, self.fs.mds_asok(['session', 'ls']))
index 3ffdb553cf03d2980e859806cd6c80e0dc94338c..7d2a3425a894b74933f3595748d7e9ff34613ae1 100644 (file)
@@ -150,7 +150,6 @@ class TestExports(CephFSTestCase):
         status = self.fs.wait_for_daemons()
 
         rank1 = self.fs.get_rank(rank=1, status=status)
-        name1 = 'mds.'+rank1['name']
 
         # Create a directory that is pre-exported to rank 1
         self.mount_a.run_shell(["mkdir", "-p", "a/aa"])
index 786980ba08763a559d1eae4faa161257c4d10064..f3ce939328de6d3e196e4023934d7afda98fa140 100644 (file)
@@ -1,12 +1,10 @@
 import time
 import signal
-import json
 import logging
 from random import randint
 
 from cephfs_test_case import CephFSTestCase
 from teuthology.exceptions import CommandFailedError
-from teuthology import misc as teuthology
 from tasks.cephfs.fuse_mount import FuseMount
 
 log = logging.getLogger(__name__)
@@ -24,7 +22,7 @@ class TestClusterResize(CephFSTestCase):
         log.info("status = {0}".format(status))
 
         original_ranks = set([info['gid'] for info in status.get_ranks(fscid)])
-        original_standbys = set([info['gid'] for info in status.get_standbys()])
+        _ = set([info['gid'] for info in status.get_standbys()])
 
         oldmax = self.fs.get_var('max_mds')
         self.assertTrue(n > oldmax)
@@ -44,7 +42,7 @@ class TestClusterResize(CephFSTestCase):
         log.info("status = {0}".format(status))
 
         original_ranks = set([info['gid'] for info in status.get_ranks(fscid)])
-        original_standbys = set([info['gid'] for info in status.get_standbys()])
+        _ = set([info['gid'] for info in status.get_standbys()])
 
         oldmax = self.fs.get_var('max_mds')
         self.assertTrue(n < oldmax)
@@ -360,7 +358,7 @@ class TestStandbyReplay(CephFSTestCase):
 
     def _confirm_no_replay(self):
         status = self.fs.status()
-        standby_count = len(list(status.get_standbys()))
+        _ = len(list(status.get_standbys()))
         self.assertEqual(0, len(list(self.fs.get_replays(status=status))))
         return status
 
index cca9bb617d42186c1cd535dc97a3991bc23b2b2c..a22169071d94b931594cae6a54dbd91821602da3 100644 (file)
@@ -6,7 +6,6 @@ import errno
 import time
 import json
 import logging
-import time
 
 log = logging.getLogger(__name__)
 
@@ -77,7 +76,7 @@ class TestMisc(CephFSTestCase):
         def get_pool_df(fs, name):
             try:
                 return fs.get_pool_df(name)['objects'] > 0
-            except RuntimeError as e:
+            except RuntimeError:
                 return False
 
         self.wait_until_true(lambda: get_pool_df(self.fs, self.fs.metadata_pool_name), timeout=30)
@@ -171,8 +170,7 @@ class TestMisc(CephFSTestCase):
         out = self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'get',
                                                   pool_name, 'size',
                                                   '-f', 'json-pretty')
-        j = json.loads(out)
-        pool_size = int(j['size'])
+        _ = json.loads(out)
 
         proc = self.mount_a.run_shell(['df', '.'])
         output = proc.stdout.getvalue()
index 1684d170c8e310b7f59be4a7bfb064f69bb81728..36b4e58ec8c17f985e5824575abe5fb04e4843b7 100644 (file)
@@ -1,17 +1,13 @@
-
 """
 Test our tools for recovering metadata from the data pool into an alternate pool
 """
-import json
 
 import logging
-import os
-from textwrap import dedent
 import traceback
-from collections import namedtuple, defaultdict
+from collections import namedtuple
 
 from teuthology.orchestra.run import CommandFailedError
-from tasks.cephfs.cephfs_test_case import CephFSTestCase, for_teuthology
+from tasks.cephfs.cephfs_test_case import CephFSTestCase
 
 log = logging.getLogger(__name__)
 
index d96f5691ba21e13093e673f8bc54c4e35fafde51..e4f0cb9beb32b9c02e0a7f74bf289452de2a974d 100644 (file)
@@ -2,12 +2,9 @@
 Test CephFS scrub (distinct from OSD scrub) functionality
 """
 import logging
-import os
-import traceback
 from collections import namedtuple
 
-from teuthology.orchestra.run import CommandFailedError
-from tasks.cephfs.cephfs_test_case import CephFSTestCase, for_teuthology
+from tasks.cephfs.cephfs_test_case import CephFSTestCase
 
 log = logging.getLogger(__name__)
 
index d6947805347dac92a7f5fa8888e5dc3a23596089..3b67c36b0224c2cb73ab60700a8f5bd2a615e636 100644 (file)
@@ -43,7 +43,7 @@ class TestScrubControls(CephFSTestCase):
         log.info("client_path: {0}".format(client_path))
 
         log.info("Cloning repo into place")
-        repo_path = TestScrubChecks.clone_repo(self.mount_a, client_path)
+        TestScrubChecks.clone_repo(self.mount_a, client_path)
 
         out_json = self.fs.rank_tell(["scrub", "start", abs_test_path, "recursive"])
         self.assertNotEqual(out_json, None)
@@ -66,7 +66,7 @@ class TestScrubControls(CephFSTestCase):
         log.info("client_path: {0}".format(client_path))
 
         log.info("Cloning repo into place")
-        repo_path = TestScrubChecks.clone_repo(self.mount_a, client_path)
+        _ = TestScrubChecks.clone_repo(self.mount_a, client_path)
 
         out_json = self.fs.rank_tell(["scrub", "start", abs_test_path, "recursive"])
         self.assertNotEqual(out_json, None)
@@ -94,7 +94,7 @@ class TestScrubControls(CephFSTestCase):
         log.info("client_path: {0}".format(client_path))
 
         log.info("Cloning repo into place")
-        repo_path = TestScrubChecks.clone_repo(self.mount_a, client_path)
+        _ = TestScrubChecks.clone_repo(self.mount_a, client_path)
 
         out_json = self.fs.rank_tell(["scrub", "start", abs_test_path, "recursive"])
         self.assertNotEqual(out_json, None)
@@ -307,7 +307,7 @@ class TestScrubChecks(CephFSTestCase):
 
         success, errstring = validator(jout, 0)
         if not success:
-            raise AsokCommandFailedError(command, rout, jout, errstring)
+            raise AsokCommandFailedError(command, 0, jout, errstring)
         return jout
 
     def asok_command(self, mds_rank, command, validator):
index db3d5dfe6cd0098c6d8a626c17b125c151d5bacc..8bacffdfbfb000f579ad94648850d906fedcf636 100644 (file)
@@ -1,4 +1,3 @@
-from StringIO import StringIO
 import time
 import json
 import logging
index ae5a58ca898f2ec2f395a1933e0070a68d45461c..067c7b1fb92d148dd23d1f37f783b69a72e9cfed 100644 (file)
@@ -1,8 +1,6 @@
 import sys
 import logging
 import signal
-import time
-import errno
 from textwrap import dedent
 from tasks.cephfs.fuse_mount import FuseMount
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
@@ -527,7 +525,7 @@ class TestSnapshots(CephFSTestCase):
         self.fs.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit)])
         try:
             self.create_snap_dir(sname)
-        except CommandFailedError as e:
+        except CommandFailedError:
             # after reducing limit we expect the new snapshot creation to fail
             pass
         self.delete_dir_and_snaps("accounts", new_limit + 1)
index 56964880e50eabac21e49f4099eb431994295854..a294cc46087813dd347028d6c79e3642d4389fec 100644 (file)
@@ -4,7 +4,6 @@ import logging
 from textwrap import dedent
 import datetime
 import gevent
-import datetime
 
 from teuthology.orchestra.run import CommandFailedError, Raw
 from tasks.cephfs.cephfs_test_case import CephFSTestCase, for_teuthology
@@ -138,7 +137,7 @@ class TestStrays(CephFSTestCase):
             size_unit = 1024  # small, numerous files
             file_multiplier = 200
         else:
-            raise NotImplemented(throttle_type)
+            raise NotImplementedError(throttle_type)
 
         # Pick up config changes
         self.fs.mds_fail_restart()
@@ -225,7 +224,7 @@ class TestStrays(CephFSTestCase):
                             num_strays_purging, mds_max_purge_files
                         ))
                 else:
-                    raise NotImplemented(throttle_type)
+                    raise NotImplementedError(throttle_type)
 
                 log.info("Waiting for purge to complete {0}/{1}, {2}/{3}".format(
                     num_strays_purging, num_strays,
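
The two test_strays.py hunks above are a behavioral fix rather than a lint
cleanup: NotImplemented is the sentinel value that rich-comparison methods
return, not an exception class, so "raise NotImplemented(throttle_type)"
fails with a TypeError instead of signalling the unsupported throttle type.
A standalone illustration (not code from the repository):

    # NotImplemented is a sentinel object, not an exception type.
    try:
        raise NotImplemented("unknown throttle type")   # the old, broken form
    except TypeError as e:
        print("old form fails:", e)   # 'NotImplementedType' object is not callable

    try:
        raise NotImplementedError("unknown throttle type")  # the corrected form
    except NotImplementedError as e:
        print("raises as intended:", e)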
index d33223b2bc962806e2272dcae17412fbf120761f..0c59225dfc76984696c83b50cc1c1f57c9f85cb6 100644 (file)
@@ -1,6 +1,5 @@
 import json
 import logging
-import time
 import os
 from textwrap import dedent
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
@@ -1078,7 +1077,7 @@ vc.disconnect()
         volume_prefix = "/myprefix"
         group_id = "grpid"
         volume_id = "volid"
-        mount_path = self._volume_client_python(vc_mount, dedent("""
+        self._volume_client_python(vc_mount, dedent("""
             vp = VolumePath("{group_id}", "{volume_id}")
             create_result = vc.create_volume(vp, 1024*1024*10, namespace_isolated=False)
             print(create_result['mount_path'])
index a0fece8d5e42c06899c3d261a00044ad62722cb7..19ea2418c4f2be0e5fe9d1e14e3dca47226374b8 100644 (file)
@@ -291,7 +291,7 @@ class TestVolumes(CephFSTestCase):
         nsize = usedsize/2
         try:
             self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
-        except CommandFailedError as ce:
+        except CommandFailedError:
             raise RuntimeError("expected the 'fs subvolume resize' command to succeed")
 
         # verify the quota
index bf15b02a3a0c05de377cead1bfa5c73f6e0aac1c..d8520d3bf215b3ea853a1b4d473b3c7f01ef9941 100644 (file)
@@ -118,7 +118,7 @@ class XFSTestsDev(CephFSTestCase):
         else:
             raise RuntimeError('expected a yum based or a apt based system')
 
-        proc = self.mount_a.client_remote.run(args=args, omit_sudo=False)
+        self.mount_a.client_remote.run(args=args, omit_sudo=False)
 
     def create_reqd_users(self):
         self.mount_a.client_remote.run(args=['sudo', 'useradd', 'fsgqa'],
index b15dc6fe21cb60845845b5df8b1fc929f72bdcaa..fc877f285b6c8174204ef23f2c4b084f2f7e22f5 100644 (file)
@@ -4,7 +4,6 @@ import json
 
 from teuthology.task import Task
 from teuthology import misc
-import ceph_manager
 
 log = logging.getLogger(__name__)
 
index 01ab1a370b73e8721db029b79da190c48c8a6037..53254158128794330ba328369be80288210412f9 100644 (file)
@@ -35,7 +35,7 @@ def task(ctx, config):
         for ns in namespace:
             def object_name(i):
                 nslength = 0
-                if namespace is not '':
+                if namespace != '':
                     nslength = len(namespace)
                 numstr = str(i)
                 fillerlen = l - nslength - len(prefix) - len(numstr)
index b0212db1a00ad2b5a762f1983c85bceb072a6bc2..fcb1bffb2e3c9a82b1efdcb3d07408570d884699 100644 (file)
@@ -1,7 +1,6 @@
 import logging
 import signal
 import time
-import random
 
 from gevent import sleep
 from gevent.greenlet import Greenlet
index 7a4d1327020a87fe84def7da26568b9918976e7a..b565c774c441b60b156451a73ebe832b6be6f69d 100644 (file)
@@ -155,6 +155,6 @@ def task(ctx, config):
     for i in range(DIVERGENT_WRITE + DIVERGENT_REMOVE):
         exit_status = rados(ctx, mon, ['-p', 'foo', 'get', 'existing_%d' % i,
                                        '/tmp/existing'])
-        assert exit_status is 0
+        assert exit_status == 0
 
     log.info("success")
index fa2fae9e7dc3244dc1c6a96d305ac98813ca5c6e..12a9fd4a9afc0d1c3dc1ec33040fd5158cf39090 100644 (file)
@@ -183,7 +183,7 @@ def task(ctx, config):
     for i in range(DIVERGENT_WRITE + DIVERGENT_REMOVE):
         exit_status = rados(ctx, mon, ['-p', 'foo', 'get', 'existing_%d' % i,
                                        '/tmp/existing'])
-        assert exit_status is 0
+        assert exit_status == 0
 
     cmd = 'rm {file}'.format(file=expfile)
     exp_remote.run(args=cmd, wait=True)
index 8b6d2c7d5404b4f748f1dbfefdf6cc5dfd9edb4d..237d9127fc1487589dd1d114f4cd3cc2af8ce5ee 100644 (file)
@@ -2,7 +2,6 @@
 Dump_stuck command
 """
 import logging
-import re
 import time
 
 import ceph_manager
index 0aecf78e5b3e139c23ffb08cf26e16023ee0b4c9..a7c7ee5dae92e67ddac8e026622bc278df697262 100644 (file)
@@ -5,7 +5,6 @@ import logging
 import contextlib
 
 from teuthology import misc as teuthology
-from teuthology import contextutil
 
 log = logging.getLogger(__name__)
 
index 4286318527e7900207943e4265120e7c2ed1fb21..4b47e754bfa2460e81651685f065bb423dc0cd32 100644 (file)
@@ -2,10 +2,8 @@
 CephFS sub-tasks.
 """
 
-import contextlib
 import logging
 import re
-import time
 
 from tasks.cephfs.filesystem import Filesystem
 
index 5961165eb0d4070cf643653969e358ba7bda86a9..522f1673da2c87993c25cffe9630572b059a2f0d 100644 (file)
@@ -9,7 +9,6 @@ from cStringIO import StringIO
 from teuthology import misc as teuthology
 from teuthology import contextutil
 from teuthology.orchestra import run
-from teuthology.orchestra.connection import split_user
 from teuthology.packaging import install_package
 from teuthology.packaging import remove_package
 from teuthology.exceptions import ConfigError
@@ -209,7 +208,6 @@ def run_keystone(ctx, config):
 
         # start the public endpoint
         client_public_with_id = 'keystone.public' + '.' + client_id
-        client_public_with_cluster = cluster_name + '.' + client_public_with_id
 
         public_host, public_port = ctx.keystone.public_endpoints[client]
         run_cmd = get_keystone_venved_cmd(ctx, 'keystone-wsgi-public',
index 2647eba761c87f048e880d0eec1b2b7e70dbc6c2..29e2c351346195a7e528daad89c1c465fd4f96bf 100644 (file)
@@ -1,4 +1,5 @@
-
+# FIXME: this file has many undefined vars which are accessed!
+# flake8: noqa
 import logging
 import contextlib
 import time
index bd8666ea49906be4ce4693a9a54c3a981a5a4adc..47a39d18bf6f1110409aa26b68b494c81c50b86c 100644 (file)
@@ -62,7 +62,7 @@ class CephfsTest(DashboardTestCase):
 
     def test_cephfs_evict_client_does_not_exist(self):
         fs_id = self.get_fs_id()
-        data = self._delete("/api/cephfs/{}/client/1234".format(fs_id))
+        self._delete("/api/cephfs/{}/client/1234".format(fs_id))
         self.assertStatus(404)
 
     def test_cephfs_get(self):
index 6b89ca508fbf883ca32c7136db67f7da5a58452c..cd869a00e405b2f251fc88f81e4f98fd74e040c2 100644 (file)
@@ -3,9 +3,8 @@
 
 from __future__ import absolute_import
 
-import time
 
-from .helper import DashboardTestCase, JObj, JLeaf, JList
+from .helper import DashboardTestCase
 
 
 class GaneshaTest(DashboardTestCase):
index 348bde14b720f561656476e12d9a15ddf6e9f77d..99c004711bd49942183281b0c1aa145d63f1df7b 100644 (file)
@@ -1,6 +1,5 @@
 # -*- coding: utf-8 -*-
 from __future__ import absolute_import
-import os
 import json
 
 from .helper import DashboardTestCase
index 7ac313a565008eebeb09a535e619aba1ad574e43..4623873a61db4fcb432c890ff9e742232af2b4ad 100644 (file)
@@ -764,7 +764,7 @@ class RbdTest(DashboardTestCase):
         id = self.create_image_in_trash('rbd', 'test_rbd')
         self.assertStatus(200)
 
-        img = self.get_image('rbd', None, 'test_rbd')
+        self.get_image('rbd', None, 'test_rbd')
         self.assertStatus(404)
 
         time.sleep(1)
index a1fa5515e5fbb512e6b12070976df98ccc3ad163..2830829e72506ba0c9e731c136efe7870a99c6f4 100644 (file)
@@ -97,9 +97,9 @@ class MgrTestCase(CephTestCase):
         assert cls.mgr_cluster is not None
 
         if len(cls.mgr_cluster.mgr_ids) < cls.MGRS_REQUIRED:
-            self.skipTest("Only have {0} manager daemons, "
-                                "{1} are required".format(
-                len(cls.mgr_cluster.mgr_ids), cls.MGRS_REQUIRED))
+            cls.skipTest(
+                "Only have {0} manager daemons, {1} are required".format(
+                    len(cls.mgr_cluster.mgr_ids), cls.MGRS_REQUIRED))
 
         cls.setup_mgrs()
 
index 7c1bc826d8c6cc9f4909915f643c8389054d12f7..16966b2afbc20c3e670eb31d6a51ac44dbe39748 100644 (file)
@@ -1,7 +1,4 @@
-import json
 import logging
-from tempfile import NamedTemporaryFile
-from teuthology.exceptions import CommandFailedError
 from mgr_test_case import MgrTestCase
 
 log = logging.getLogger(__name__)
index e62af60ceab66ec3b2f2748dc846ed05833fa09b..0de542ff7383a585c013f7ccd260981b2455797a 100644 (file)
@@ -1,7 +1,6 @@
 import errno
 import json
 import logging
-from tempfile import NamedTemporaryFile
 from time import sleep
 
 from teuthology.exceptions import CommandFailedError
index 5c4088c7369cde19863a947440c87bb149a1e872..f7862cb1354d7b9f9f176d1caf81ca796b9cacd1 100644 (file)
@@ -2,11 +2,8 @@
 Handle clock skews in monitors.
 """
 import logging
-import contextlib
 import ceph_manager
 import time
-import gevent
-from StringIO import StringIO
 from teuthology import misc as teuthology
 
 log = logging.getLogger(__name__)
index 95018150da93b38a2afb74d9425bb1337a8f4b60..4fa08bbc0a0b8683c96ca0b14b558b1075713c8a 100644 (file)
@@ -6,10 +6,7 @@ Reference:https://wiki.linuxfoundation.org/networking/netem.
 
 import logging
 import contextlib
-from teuthology import misc as teuthology
 from cStringIO import StringIO
-from teuthology.orchestra import run
-from teuthology import contextutil
 from paramiko import SSHException
 import socket
 import time
@@ -168,7 +165,7 @@ class Toggle:
             try:
                 self.packet_drop()
                 log.info('link down')
-            except SSHException as e:
+            except SSHException:
                 log.debug('Failed to run command')
 
             self.stop_event.wait(timeout=self.interval)
@@ -176,7 +173,7 @@ class Toggle:
             try:
                 delete_dev(self.remote, self.interface)
                 log.info('link up')
-            except SSHException as e:
+            except SSHException:
                 log.debug('Failed to run command')
 
     def begin(self, gname):
index 03ea218f5690adba4c1e2fa3093b908ddd551cc0..739959e2fbdcb0fcca2e440a80c27f4e5b383804 100644 (file)
@@ -76,7 +76,6 @@ def test_create_from_peer(ctx, config):
     4. delete a pool, verify pgs go active.
     """
     pg_num = config.get('pg_num', 1)
-    pool_size = config.get('pool_size', 2)
     from_primary = config.get('from_primary', True)
 
     manager = ctx.managers['ceph']
@@ -121,7 +120,6 @@ def test_create_from_peer(ctx, config):
 def task(ctx, config):
     assert isinstance(config, dict), \
         'osd_max_pg_per_osd task only accepts a dict for config'
-    manager = ctx.managers['ceph']
     if config.get('test_create_from_mon', True):
         test_create_from_mon(ctx, config)
     else:
index 948620903222083db141524d86c20aff05ce4d6a..d73b9476314e4f9c1fe062b6950588b67ebebece 100644 (file)
@@ -81,7 +81,7 @@ def task(ctx, config):
                 pool = manager.create_pool_with_unique_name(erasure_code_profile_name=profile_name)
 
         osize = config.get('objectsize', 65536)
-        if osize is 0:
+        if osize == 0:
             objectsize = []
         else:
             objectsize = ['-O', str(osize)]
@@ -135,5 +135,5 @@ def task(ctx, config):
         log.info('joining radosbench (timing out after %ss)', timeout)
         run.wait(radosbench.itervalues(), timeout=timeout)
 
-        if pool is not 'data' and create_pool:
+        if pool != 'data' and create_pool:
             manager.remove_pool(pool)
index 6dab13f058fcf564fb5a6dbfa2c5774390837aae..e9cfc36966836e0d0e0c0be00d646c35dfcbbf70 100644 (file)
@@ -10,7 +10,6 @@ Rgw admin testing against a running instance
 #      python qa/tasks/radosgw_admin.py [USER] HOSTNAME
 #
 
-import copy
 import json
 import logging
 import time
@@ -24,11 +23,9 @@ from cStringIO import StringIO
 import boto.exception
 import boto.s3.connection
 import boto.s3.acl
-from boto.utils import RequestHook
 
 import httplib2
 
-import util.rgw as rgw_utils
 
 from util.rgw import rgwadmin, get_user_summary, get_user_successful_ops
 
@@ -291,7 +288,6 @@ def task(ctx, config):
     display_name2='Fud'
     display_name3='Bar'
     email='foo@foo.com'
-    email2='bar@bar.com'
     access_key='9te6NH5mcdcq0Tc5i8i1'
     secret_key='Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu'
     access_key2='p5YnriCv1nAtykxBrupQ'
@@ -1052,8 +1048,6 @@ def task(ctx, config):
     # TESTCASE 'zonegroup-info', 'zonegroup', 'get', 'get zonegroup info', 'succeeds'
     (err, out) = rgwadmin(ctx, client, ['zonegroup', 'get'], check_status=True)
 
-import sys
-from tasks.radosgw_admin import task
 from teuthology.config import config
 from teuthology.orchestra import cluster, remote
 import argparse;
index 5d8bf18687b3515f5b0cc482a5a3e8d5a057cdfb..50f88ea85700890f025a9c5e71a7c204b11840f1 100644 (file)
@@ -7,9 +7,8 @@ To extract the inventory (in csv format) use the command:
    grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //'
 
 """
-from cStringIO import StringIO
 import logging
-import json
+
 
 import boto.exception
 import boto.s3.connection
index 67b16851b7bdfb1d7d08cdb71ff144795f5313d9..dffd10a0dc847c77aa716f8f2c6729bf3e9bacf7 100644 (file)
@@ -10,13 +10,10 @@ import os
 import random
 import string
 
-import util.rgw as rgw_utils
-
 from teuthology import misc as teuthology
 from teuthology import contextutil
 from teuthology.config import config as teuth_config
 from teuthology.orchestra import run
-from teuthology.orchestra.connection import split_user
 
 log = logging.getLogger(__name__)
 
index 12e50d98b05b3852d51b5aa9fb5adf4bd9e6c3cb..396d8fed2a21a4f539d8dabd473cbc23027c7758 100644 (file)
@@ -4,7 +4,7 @@ Run fsx on an rbd image
 import contextlib
 import logging
 
-from teuthology.orchestra import run
+from teuthology.exceptions import ConfigError
 from teuthology.parallel import parallel
 from teuthology import misc as teuthology
 
index d7fdf5607f9fdaeb6d95ca2c0008531e8ee21da4..a42d19e7083723b24b9b2778b1caa1051f7d5db5 100644 (file)
@@ -13,9 +13,7 @@ from gevent import sleep
 from gevent.greenlet import Greenlet
 from gevent.event import Event
 
-from teuthology import misc
 from teuthology.exceptions import CommandFailedError
-from teuthology.task import Task
 from teuthology.orchestra import run
 from tasks.thrasher import Thrasher
 
index 7bb304608b0b5318cc7f5acd06ca92093f374824..3a1daa0a385013eba48e42505de720ab806c1bc4 100644 (file)
@@ -232,7 +232,7 @@ def task(ctx, config):
     for i in range(DIVERGENT_WRITE + DIVERGENT_REMOVE):
         exit_status = rados(ctx, mon, ['-p', 'foo', 'get', 'existing_%d' % i,
                                        '/tmp/existing'])
-        assert exit_status is 0
+        assert exit_status == 0
 
     (remote,) = ctx.\
         cluster.only('osd.{o}'.format(o=divergent)).remotes.keys()
index d666a5e6a01a014c8303650256cb94d8afe6c6c2..aceef8894f8d3d5652a09723ff0958111274e350 100644 (file)
@@ -3,11 +3,7 @@ rgw routines
 """
 import argparse
 import contextlib
-import json
 import logging
-import os
-import errno
-import util.rgw as rgw_utils
 
 from teuthology.orchestra import run
 from teuthology import misc as teuthology
@@ -15,9 +11,9 @@ from teuthology import contextutil
 from teuthology.exceptions import ConfigError
 from util import get_remote_for_role
 from util.rgw import rgwadmin, wait_for_radosgw
-from util.rados import (rados, create_ec_pool,
-                                        create_replicated_pool,
-                                        create_cache_pool)
+from util.rados import (create_ec_pool,
+                        create_replicated_pool,
+                        create_cache_pool)
 
 log = logging.getLogger(__name__)
 
index 3c4c4da142e075f1ac66ee7a0e357ad3f9791449..5400020db751b233e173a3c8757c817063a6e824 100644 (file)
@@ -2,7 +2,6 @@
 rgw multisite configuration routines
 """
 import argparse
-import contextlib
 import logging
 import random
 import string
@@ -223,7 +222,7 @@ class Gateway(multisite.Gateway):
             # insert zone args before the first |
             pipe = args.index(run.Raw('|'))
             args = args[0:pipe] + zone.zone_args() + args[pipe:]
-        except ValueError, e:
+        except ValueError:
             args += zone.zone_args()
         self.daemon.command_kwargs['args'] = args
 
index dade6e47483e13bfb9830a02402451469761ac60..dee6bfaa303966cd9569fe660433b6e81b6362d8 100644 (file)
@@ -2,7 +2,6 @@
 rgw multisite testing
 """
 import logging
-import sys
 import nose.core
 import nose.config
 
index d3c503b0d07eb17956e2cb7aadb4c1d705b00058..6756f2cdbbfa350f64246161859035a584731531 100644 (file)
@@ -1,6 +1,5 @@
 import contextlib
 import logging
-import time
 from teuthology import misc
 from teuthology.orchestra import run
 
index 52b7915d9d524ef4ea08e3ae69d75d86360b2cbc..95ee8be5a8c547d5aaf6bd3430ddf87905370998 100644 (file)
@@ -14,7 +14,6 @@ from teuthology import misc as teuthology
 from teuthology import contextutil
 from teuthology.config import config as teuth_config
 from teuthology.orchestra import run
-from teuthology.orchestra.connection import split_user
 from teuthology.exceptions import ConfigError
 
 log = logging.getLogger(__name__)
index 47f9de1d2fcddbdcc75dc166d997a288a285a664..e14bb5ea7dd102f7129cdf86fda6ca3ac303c25b 100644 (file)
@@ -9,14 +9,11 @@ import os
 import random
 import string
 import yaml
-import socket
 import getpass
 
 from teuthology import misc as teuthology
-from teuthology.exceptions import ConfigError
 from teuthology.task import Task
 from teuthology.orchestra import run
-from teuthology.orchestra.remote import Remote
 
 log = logging.getLogger(__name__)
 
@@ -352,7 +349,7 @@ class S3tests_java(Task):
                         stdout=StringIO()
                     )
 
-                if gr is not 'All':
+                if gr != 'All':
                     self.ctx.cluster.only(client).run(
                         args=args + ['--tests'] + [gr] + extra_args,
                         stdout=StringIO()
index e89fcf03e480940cdb7c3a60950cfc47b0fab63e..402172c8cdcc93c64281728dcfa2c6864107042e 100644 (file)
@@ -13,7 +13,6 @@ from teuthology import misc as teuthology
 from teuthology import contextutil
 from teuthology.config import config as teuth_config
 from teuthology.orchestra import run
-from teuthology.orchestra.connection import split_user
 
 log = logging.getLogger(__name__)
 
index 4dfaa44d4451d8d856b327851bfb1a89983c6585..71e80bc6471c3851ec3e55bd679e1cd5bf6855dc 100644 (file)
@@ -6,7 +6,7 @@ import logging
 
 from teuthology import misc as teuthology
 from teuthology import contextutil
-from teuthology.config import config as teuth_config
+from teuthology.exceptions import ConfigError
 from teuthology.orchestra import run
 
 log = logging.getLogger(__name__)
@@ -247,7 +247,6 @@ def task(ctx, config):
         config = all_clients
     if isinstance(config, list):
         config = dict.fromkeys(config)
-    clients = config.keys()
 
     overrides = ctx.config.get('overrides', {})
     # merge each client section, not the top level.
index 46b4f565dc0637ff973525fb097cbe2fa64117f1..81d712f44b193bba811322e4c301a171c23bfd9a 100644 (file)
@@ -3,7 +3,6 @@ import contextlib
 import logging
 
 from teuthology import misc as teuthology
-from teuthology import contextutil
 from teuthology.orchestra import run
 
 log = logging.getLogger(__name__)
index d3abf1ced3b971ea997598422492e0d3ac6b967e..d1ea39d1cd05c203252e0a3fcc166cdba8323584 100644 (file)
@@ -1,14 +1,8 @@
 from cStringIO import StringIO
 import logging
 import json
-import requests
 import time
 
-from requests.packages.urllib3 import PoolManager
-from requests.packages.urllib3.util import Retry
-from urlparse import urlparse
-
-from teuthology.orchestra.connection import split_user
 from teuthology import misc as teuthology
 
 log = logging.getLogger(__name__)
index 965ad9717f6e3d72b37d40faa71cd4a0125c3b92..e5d1c4c6ea65d57005a4da144e5e76b4995023b3 100644 (file)
@@ -41,7 +41,6 @@ import shutil
 import re
 import os
 import time
-import json
 import sys
 import errno
 from unittest import suite, loader
@@ -623,9 +622,9 @@ class LocalKernelMount(KernelMount):
         except Exception as e:
             self.client_remote.run(args=[
                 'sudo',
-                run.Raw('PATH=/usr/sbin:$PATH'),
+                Raw('PATH=/usr/sbin:$PATH'),
                 'lsof',
-                run.Raw(';'),
+                Raw(';'),
                 'ps', 'auxf',
             ], timeout=(15*60), omit_sudo=False)
             raise e
@@ -1215,7 +1214,7 @@ def scan_tests(modules):
     max_required_mgr = 0
     require_memstore = False
 
-    for suite, case in enumerate_methods(overall_suite):
+    for suite_, case in enumerate_methods(overall_suite):
         max_required_mds = max(max_required_mds,
                                getattr(case, "MDSS_REQUIRED", 0))
         max_required_clients = max(max_required_clients,
@@ -1292,7 +1291,6 @@ def exec_test():
     opt_teardown_cluster = False
     global opt_log_ps_output
     opt_log_ps_output = False
-    opt_clear_old_log = False
     use_kernel_client = False
 
     args = sys.argv[1:]
@@ -1312,7 +1310,6 @@ def exec_test():
         elif f == '--log-ps-output':
             opt_log_ps_output = True
         elif f == '--clear-old-log':
-            opt_clear_old_log = True
             clear_old_log()
         elif f == "--kclient":
             use_kernel_client = True
index d3e0f8e652e9658c88c4e891ed77ef019cc33402..1d5bb6506217a7ccd3b4dc3dca57cb6530e77ee1 100755 (executable)
@@ -2,8 +2,6 @@
 
 import argparse
 import os
-import sys
-import time
 
 def main():
     parser = argparse.ArgumentParser()
index 6b26d7a296f4262f80066fbd36d9a6937c57258f..20bb9e912dca8909d7db8b3c8db3dd6251f2a10a 100644 (file)
@@ -2,10 +2,8 @@
 
 from __future__ import print_function
 
-import json
 import subprocess
 import shlex
-from StringIO import StringIO
 import errno
 import sys
 import os
@@ -13,8 +11,7 @@ import io
 import re
 
 
-import rados
-from ceph_argparse import *
+from ceph_argparse import * # noqa
 
 keyring_base = '/tmp/cephtest-caps.keyring'
 
index 2fa67a23f385215f85282340a3cc15d1180e8721..07fe8845f4e63fba2788f48c6d200bc70e96d3b3 100755 (executable)
@@ -9,13 +9,9 @@ import time
 import sys
 
 if sys.version_info[0] == 2:
-    from cStringIO import StringIO
-
     range = xrange
 
 elif sys.version_info[0] == 3:
-    from io import StringIO
-
     range = range
 
 import rados as rados
@@ -47,8 +43,6 @@ def set_mds_config_param(ceph, param):
         if r != 0:
             raise Exception
 
-import ConfigParser
-import contextlib
 
 class _TrimIndentFile(object):
     def __init__(self, fp):
@@ -150,10 +144,10 @@ def verify(rados_ioctx, ino, values, pool):
 
     bt = decode(binbt)
 
+    ind = 0
     if bt['ino'] != ino:
         raise VerifyFailure('inode mismatch: {bi} != {ino}\n\tbacktrace:\n\t\t{bt}\n\tfailed verify against:\n\t\t{i}, {v}'.format(
                     bi=bt['ancestors'][ind]['dname'], ino=ino, bt=bt, i=ino, v=values))
-    ind = 0
     for (n, i) in values:
         if bt['ancestors'][ind]['dirino'] != i:
             raise VerifyFailure('ancestor dirino mismatch: {b} != {ind}\n\tbacktrace:\n\t\t{bt}\n\tfailed verify against:\n\t\t{i}, {v}'.format(