From 1071f73c76c2c8f627be6cef9f33661c6754338b Mon Sep 17 00:00:00 2001
From: Patrick Donnelly
Date: Wed, 24 Apr 2019 09:35:46 -0700
Subject: [PATCH] qa: use skipTest method instead of exception

This is the recommended method to skip a test according to [1]. It also
lets us avoid an unnecessary import.

[1] https://docs.python.org/2/library/unittest.html#unittest.TestCase.skipTest

Signed-off-by: Patrick Donnelly
---
 qa/tasks/ceph_test_case.py              |  3 +--
 qa/tasks/cephfs/cephfs_test_case.py     | 11 +++++------
 qa/tasks/cephfs/test_cap_flush.py       |  3 +--
 qa/tasks/cephfs/test_client_limits.py   |  7 +++----
 qa/tasks/cephfs/test_client_recovery.py |  6 ++----
 qa/tasks/cephfs/test_config_commands.py |  3 +--
 qa/tasks/cephfs/test_failover.py        |  5 ++---
 qa/tasks/cephfs/test_misc.py            |  3 +--
 qa/tasks/cephfs/test_sessionmap.py      |  3 +--
 qa/tasks/cephfs/test_snapshots.py       |  3 +--
 qa/tasks/mgr/mgr_test_case.py           |  4 +---
 qa/tasks/mgr/test_progress.py           |  3 +--
 12 files changed, 20 insertions(+), 34 deletions(-)

diff --git a/qa/tasks/ceph_test_case.py b/qa/tasks/ceph_test_case.py
index 41a087abd8491..dd8f515a2ffc1 100644
--- a/qa/tasks/ceph_test_case.py
+++ b/qa/tasks/ceph_test_case.py
@@ -1,6 +1,5 @@
 import unittest
-from unittest import case
 import time
 import logging
 
@@ -40,7 +39,7 @@ class CephTestCase(unittest.TestCase):
         if objectstore != "memstore":
             # You certainly *could* run this on a real OSD, but you don't want to sit
             # here for hours waiting for the test to fill up a 1TB drive!
-            raise case.SkipTest("Require `memstore` OSD backend (test " \
+            raise self.skipTest("Require `memstore` OSD backend (test " \
                                 "would take too long on full sized OSDs")
diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
index 2b069a22730a7..37d2759cff602 100644
--- a/qa/tasks/cephfs/cephfs_test_case.py
+++ b/qa/tasks/cephfs/cephfs_test_case.py
@@ -1,6 +1,5 @@
 import json
 import logging
-from unittest import case
 from tasks.ceph_test_case import CephTestCase
 import os
 import re
@@ -64,12 +63,12 @@ class CephFSTestCase(CephTestCase):
         super(CephFSTestCase, self).setUp()
 
         if len(self.mds_cluster.mds_ids) < self.MDSS_REQUIRED:
-            raise case.SkipTest("Only have {0} MDSs, require {1}".format(
+            self.skipTest("Only have {0} MDSs, require {1}".format(
                 len(self.mds_cluster.mds_ids), self.MDSS_REQUIRED
             ))
 
         if len(self.mounts) < self.CLIENTS_REQUIRED:
-            raise case.SkipTest("Only have {0} clients, require {1}".format(
+            self.skipTest("Only have {0} clients, require {1}".format(
                 len(self.mounts), self.CLIENTS_REQUIRED
             ))
 
@@ -78,11 +77,11 @@ class CephFSTestCase(CephTestCase):
             # kclient kill() power cycles nodes, so requires clients to each be on
             # their own node
             if self.mounts[0].client_remote.hostname == self.mounts[1].client_remote.hostname:
-                raise case.SkipTest("kclient clients must be on separate nodes")
+                self.skipTest("kclient clients must be on separate nodes")
 
         if self.REQUIRE_ONE_CLIENT_REMOTE:
             if self.mounts[0].client_remote.hostname in self.mds_cluster.get_mds_hostnames():
-                raise case.SkipTest("Require first client to be on separate server from MDSs")
+                self.skipTest("Require first client to be on separate server from MDSs")
 
         # Create friendly mount_a, mount_b attrs
         for i in range(0, self.CLIENTS_REQUIRED):
@@ -150,7 +149,7 @@ class CephFSTestCase(CephTestCase):
 
         if self.REQUIRE_RECOVERY_FILESYSTEM:
             if not self.REQUIRE_FILESYSTEM:
-                raise case.SkipTest("Recovery filesystem requires a primary filesystem as well")
+                self.skipTest("Recovery filesystem requires a primary filesystem as well")
             self.fs.mon_manager.raw_cluster_cmd('fs', 'flag', 'set',
                     'enable_multiple', 'true',
                     '--yes-i-really-mean-it')
diff --git a/qa/tasks/cephfs/test_cap_flush.py b/qa/tasks/cephfs/test_cap_flush.py
index 1cd102f3aa693..cea6ff3f9b0c3 100644
--- a/qa/tasks/cephfs/test_cap_flush.py
+++ b/qa/tasks/cephfs/test_cap_flush.py
@@ -2,7 +2,6 @@
 import os
 import time
 from textwrap import dedent
-from unittest import SkipTest
 from tasks.cephfs.fuse_mount import FuseMount
 from tasks.cephfs.cephfs_test_case import CephFSTestCase, for_teuthology
 
@@ -17,7 +16,7 @@ class TestCapFlush(CephFSTestCase):
         """
 
         if not isinstance(self.mount_a, FuseMount):
-            raise SkipTest("Require FUSE client to inject client release failure")
+            self.skipTest("Require FUSE client to inject client release failure")
 
         dir_path = os.path.join(self.mount_a.mountpoint, "testdir")
         py_script = dedent("""
diff --git a/qa/tasks/cephfs/test_client_limits.py b/qa/tasks/cephfs/test_client_limits.py
index 706f4af0addd3..cd9a9a6635a0f 100644
--- a/qa/tasks/cephfs/test_client_limits.py
+++ b/qa/tasks/cephfs/test_client_limits.py
@@ -6,7 +6,6 @@ exceed the limits of how many caps/inodes they should hold.
 
 import logging
 from textwrap import dedent
-from unittest import SkipTest
 from teuthology.orchestra.run import CommandFailedError
 from tasks.cephfs.cephfs_test_case import CephFSTestCase, needs_trimming
 from tasks.cephfs.fuse_mount import FuseMount
@@ -116,7 +115,7 @@ class TestClientLimits(CephFSTestCase):
 
         # The debug hook to inject the failure only exists in the fuse client
         if not isinstance(self.mount_a, FuseMount):
-            raise SkipTest("Require FUSE client to inject client release failure")
+            self.skipTest("Require FUSE client to inject client release failure")
 
         self.set_conf('client.{0}'.format(self.mount_a.client_id), 'client inject release failure', 'true')
         self.mount_a.teardown()
@@ -158,7 +157,7 @@ class TestClientLimits(CephFSTestCase):
 
         # The debug hook to inject the failure only exists in the fuse client
         if not isinstance(self.mount_a, FuseMount):
-            raise SkipTest("Require FUSE client to inject client release failure")
+            self.skipTest("Require FUSE client to inject client release failure")
 
         self.set_conf('client', 'client inject fixed oldest tid', 'true')
         self.mount_a.teardown()
@@ -183,7 +182,7 @@ class TestClientLimits(CephFSTestCase):
 
         # The debug hook to inject the failure only exists in the fuse client
         if not isinstance(self.mount_a, FuseMount):
-            raise SkipTest("Require FUSE client to inject client release failure")
+            self.skipTest("Require FUSE client to inject client release failure")
 
         if mount_subdir:
             # fuse assigns a fix inode number (1) to root inode. But in mounting into
diff --git a/qa/tasks/cephfs/test_client_recovery.py b/qa/tasks/cephfs/test_client_recovery.py
index 9b363267652b0..d2ebaa30bc170 100644
--- a/qa/tasks/cephfs/test_client_recovery.py
+++ b/qa/tasks/cephfs/test_client_recovery.py
@@ -14,8 +14,6 @@
 from teuthology.orchestra.run import CommandFailedError, ConnectionLostError
 from tasks.cephfs.fuse_mount import FuseMount
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
 from teuthology.packaging import get_package_version
-from unittest import SkipTest
-
 
 log = logging.getLogger(__name__)
@@ -488,7 +486,7 @@ class TestClientRecovery(CephFSTestCase):
 
     def test_stale_renew(self):
         if not isinstance(self.mount_a, FuseMount):
-            raise SkipTest("Require FUSE client to handle signal STOP/CONT")
+            self.skipTest("Require FUSE client to handle signal STOP/CONT")
 
         session_timeout = self.fs.get_var("session_timeout")
 
@@ -525,7 +523,7 @@ class TestClientRecovery(CephFSTestCase):
         Check that abort_conn() skips closing mds sessions.
         """
         if not isinstance(self.mount_a, FuseMount):
-            raise SkipTest("Testing libcephfs function")
+            self.skipTest("Testing libcephfs function")
 
         session_timeout = self.fs.get_var("session_timeout")
 
diff --git a/qa/tasks/cephfs/test_config_commands.py b/qa/tasks/cephfs/test_config_commands.py
index ce0619fe4a201..51bf40f2ccbfb 100644
--- a/qa/tasks/cephfs/test_config_commands.py
+++ b/qa/tasks/cephfs/test_config_commands.py
@@ -1,5 +1,4 @@
 
-from unittest import case
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
 from tasks.cephfs.fuse_mount import FuseMount
 
@@ -21,7 +20,7 @@ class TestConfigCommands(CephFSTestCase):
         """
 
         if not isinstance(self.mount_a, FuseMount):
-            raise case.SkipTest("Test only applies to FUSE clients")
+            self.skipTest("Test only applies to FUSE clients")
 
         test_key = "client_cache_size"
         test_val = "123"
diff --git a/qa/tasks/cephfs/test_failover.py b/qa/tasks/cephfs/test_failover.py
index f1bb0f22a5b37..527386006fb99 100644
--- a/qa/tasks/cephfs/test_failover.py
+++ b/qa/tasks/cephfs/test_failover.py
@@ -2,7 +2,6 @@ import time
 import signal
 import json
 import logging
-from unittest import case, SkipTest
 from random import randint
 
 from cephfs_test_case import CephFSTestCase
@@ -242,11 +241,11 @@ class TestFailover(CephFSTestCase):
         """
 
         if not isinstance(self.mount_a, FuseMount):
-            raise SkipTest("Requires FUSE client to inject client metadata")
+            self.skipTest("Requires FUSE client to inject client metadata")
 
         require_active = self.fs.get_config("fuse_require_active_mds", service_type="mon").lower() == "true"
         if not require_active:
-            raise case.SkipTest("fuse_require_active_mds is not set")
+            self.skipTest("fuse_require_active_mds is not set")
 
         grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))
 
diff --git a/qa/tasks/cephfs/test_misc.py b/qa/tasks/cephfs/test_misc.py
index dd1ff2b752873..9cd3469b88c30 100644
--- a/qa/tasks/cephfs/test_misc.py
+++ b/qa/tasks/cephfs/test_misc.py
@@ -1,5 +1,4 @@
 
-from unittest import SkipTest
 from tasks.cephfs.fuse_mount import FuseMount
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
 from teuthology.orchestra.run import CommandFailedError, ConnectionLostError
@@ -21,7 +20,7 @@ class TestMisc(CephFSTestCase):
         """
 
         if not isinstance(self.mount_a, FuseMount):
-            raise SkipTest("Require FUSE client")
+            self.skipTest("Require FUSE client")
 
         # Enable debug. Client will requests CEPH_CAP_XATTR_SHARED
         # on lookup/open
diff --git a/qa/tasks/cephfs/test_sessionmap.py b/qa/tasks/cephfs/test_sessionmap.py
index e7de6ef057696..cadfccb5d2ed8 100644
--- a/qa/tasks/cephfs/test_sessionmap.py
+++ b/qa/tasks/cephfs/test_sessionmap.py
@@ -1,7 +1,6 @@
 from StringIO import StringIO
 import json
 import logging
-from unittest import SkipTest
 
 from tasks.cephfs.fuse_mount import FuseMount
 from teuthology.exceptions import CommandFailedError
@@ -193,7 +192,7 @@ class TestSessionMap(CephFSTestCase):
 
     def test_session_reject(self):
         if not isinstance(self.mount_a, FuseMount):
-            raise SkipTest("Requires FUSE client to inject client metadata")
+            self.skipTest("Requires FUSE client to inject client metadata")
 
         self.mount_a.run_shell(["mkdir", "foo"])
         self.mount_a.run_shell(["mkdir", "foo/bar"])
diff --git a/qa/tasks/cephfs/test_snapshots.py b/qa/tasks/cephfs/test_snapshots.py
index 7c45ed877eedf..5f18d32cbc564 100644
--- a/qa/tasks/cephfs/test_snapshots.py
+++ b/qa/tasks/cephfs/test_snapshots.py
@@ -5,7 +5,6 @@ from textwrap import dedent
 from tasks.cephfs.fuse_mount import FuseMount
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
 from teuthology.orchestra.run import CommandFailedError, Raw
-from unittest import SkipTest
 
 log = logging.getLogger(__name__)
 
@@ -44,7 +43,7 @@ class TestSnapshots(CephFSTestCase):
         check snaptable transcation
         """
         if not isinstance(self.mount_a, FuseMount):
-            raise SkipTest("Require FUSE client to forcibly kill mount")
+            self.skipTest("Require FUSE client to forcibly kill mount")
 
         self.fs.set_allow_new_snaps(True);
         self.fs.set_max_mds(2)
diff --git a/qa/tasks/mgr/mgr_test_case.py b/qa/tasks/mgr/mgr_test_case.py
index 7684a95565af7..2c2a8e936fa4f 100644
--- a/qa/tasks/mgr/mgr_test_case.py
+++ b/qa/tasks/mgr/mgr_test_case.py
@@ -1,5 +1,3 @@
-
-from unittest import case
 import json
 import logging
 
@@ -101,7 +99,7 @@ class MgrTestCase(CephTestCase):
 
         assert cls.mgr_cluster is not None
 
         if len(cls.mgr_cluster.mgr_ids) < cls.MGRS_REQUIRED:
-            raise case.SkipTest("Only have {0} manager daemons, "
+            self.skipTest("Only have {0} manager daemons, "
                                 "{1} are required".format(
                                     len(cls.mgr_cluster.mgr_ids), cls.MGRS_REQUIRED))
diff --git a/qa/tasks/mgr/test_progress.py b/qa/tasks/mgr/test_progress.py
index bd9bc89eb7ae4..421ccf6e45b40 100644
--- a/qa/tasks/mgr/test_progress.py
+++ b/qa/tasks/mgr/test_progress.py
@@ -2,7 +2,6 @@
 import json
 import logging
 import time
-from unittest import SkipTest
 
 from mgr_test_case import MgrTestCase
 
@@ -63,7 +62,7 @@ class TestProgress(MgrTestCase):
 
     def setUp(self):
         # Ensure we have at least four OSDs
        if self._osd_count() < 4:
-            raise SkipTest("Not enough OSDS!")
+            self.skipTest("Not enough OSDS!")
 
         # Remove any filesystems so that we can remove their pools
         if self.mds_cluster:
-- 
2.39.5
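
For reference, a minimal, self-contained sketch of the two skip styles this
patch converts between. The ExampleSkip class and its "fake_requirement"
check are invented for illustration and are not part of the Ceph QA tree;
only the skip mechanism mirrors the change above.

    import unittest


    class ExampleSkip(unittest.TestCase):

        def test_old_style(self):
            # Old pattern (removed by this patch): raise the exception class
            # directly, which needs an extra import such as
            # "from unittest import SkipTest" or "from unittest import case".
            if not hasattr(self, "fake_requirement"):
                raise unittest.SkipTest("requirement not met (old style)")

        def test_new_style(self):
            # New pattern (introduced by this patch): TestCase.skipTest()
            # raises unittest.SkipTest internally, so no extra import is
            # needed.
            if not hasattr(self, "fake_requirement"):
                self.skipTest("requirement not met (new style)")


    if __name__ == "__main__":
        unittest.main()

Running this module reports both tests as skipped. Because skipTest() raises
unittest.SkipTest itself, prefixing the call with "raise" (as the
ceph_test_case.py hunk still does) behaves the same: the exception is raised
inside skipTest() before the enclosing raise statement is ever evaluated.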