import unittest
-from unittest import case
import time
import logging
if objectstore != "memstore":
# You certainly *could* run this on a real OSD, but you don't want to sit
# here for hours waiting for the test to fill up a 1TB drive!
- raise case.SkipTest("Require `memstore` OSD backend (test " \
+ self.skipTest("Require `memstore` OSD backend (test " \
"would take too long on full sized OSDs")
import json
import logging
-from unittest import case
from tasks.ceph_test_case import CephTestCase
import os
import re
super(CephFSTestCase, self).setUp()
if len(self.mds_cluster.mds_ids) < self.MDSS_REQUIRED:
- raise case.SkipTest("Only have {0} MDSs, require {1}".format(
+ self.skipTest("Only have {0} MDSs, require {1}".format(
len(self.mds_cluster.mds_ids), self.MDSS_REQUIRED
))
if len(self.mounts) < self.CLIENTS_REQUIRED:
- raise case.SkipTest("Only have {0} clients, require {1}".format(
+ self.skipTest("Only have {0} clients, require {1}".format(
len(self.mounts), self.CLIENTS_REQUIRED
))
# kclient kill() power cycles nodes, so requires clients to each be on
# their own node
if self.mounts[0].client_remote.hostname == self.mounts[1].client_remote.hostname:
- raise case.SkipTest("kclient clients must be on separate nodes")
+ self.skipTest("kclient clients must be on separate nodes")
if self.REQUIRE_ONE_CLIENT_REMOTE:
if self.mounts[0].client_remote.hostname in self.mds_cluster.get_mds_hostnames():
- raise case.SkipTest("Require first client to be on separate server from MDSs")
+ self.skipTest("Require first client to be on separate server from MDSs")
# Create friendly mount_a, mount_b attrs
for i in range(0, self.CLIENTS_REQUIRED):
if self.REQUIRE_RECOVERY_FILESYSTEM:
if not self.REQUIRE_FILESYSTEM:
- raise case.SkipTest("Recovery filesystem requires a primary filesystem as well")
+ self.skipTest("Recovery filesystem requires a primary filesystem as well")
self.fs.mon_manager.raw_cluster_cmd('fs', 'flag', 'set',
'enable_multiple', 'true',
'--yes-i-really-mean-it')
import os
import time
from textwrap import dedent
-from unittest import SkipTest
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase, for_teuthology
"""
if not isinstance(self.mount_a, FuseMount):
- raise SkipTest("Require FUSE client to inject client release failure")
+ self.skipTest("Require FUSE client to inject client release failure")
dir_path = os.path.join(self.mount_a.mountpoint, "testdir")
py_script = dedent("""
import logging
from textwrap import dedent
-from unittest import SkipTest
from teuthology.orchestra.run import CommandFailedError
from tasks.cephfs.cephfs_test_case import CephFSTestCase, needs_trimming
from tasks.cephfs.fuse_mount import FuseMount
# The debug hook to inject the failure only exists in the fuse client
if not isinstance(self.mount_a, FuseMount):
- raise SkipTest("Require FUSE client to inject client release failure")
+ self.skipTest("Require FUSE client to inject client release failure")
self.set_conf('client.{0}'.format(self.mount_a.client_id), 'client inject release failure', 'true')
self.mount_a.teardown()
# The debug hook to inject the failure only exists in the fuse client
if not isinstance(self.mount_a, FuseMount):
- raise SkipTest("Require FUSE client to inject client release failure")
+ self.skipTest("Require FUSE client to inject client release failure")
self.set_conf('client', 'client inject fixed oldest tid', 'true')
self.mount_a.teardown()
# The debug hook to inject the failure only exists in the fuse client
if not isinstance(self.mount_a, FuseMount):
- raise SkipTest("Require FUSE client to inject client release failure")
+ self.skipTest("Require FUSE client to inject client release failure")
if mount_subdir:
# fuse assigns a fixed inode number (1) to root inode. But in mounting into
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.packaging import get_package_version
-from unittest import SkipTest
-
log = logging.getLogger(__name__)
def test_stale_renew(self):
if not isinstance(self.mount_a, FuseMount):
- raise SkipTest("Require FUSE client to handle signal STOP/CONT")
+ self.skipTest("Require FUSE client to handle signal STOP/CONT")
session_timeout = self.fs.get_var("session_timeout")
Check that abort_conn() skips closing mds sessions.
"""
if not isinstance(self.mount_a, FuseMount):
- raise SkipTest("Testing libcephfs function")
+ self.skipTest("Testing libcephfs function")
session_timeout = self.fs.get_var("session_timeout")
-from unittest import case
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from tasks.cephfs.fuse_mount import FuseMount
"""
if not isinstance(self.mount_a, FuseMount):
- raise case.SkipTest("Test only applies to FUSE clients")
+ self.skipTest("Test only applies to FUSE clients")
test_key = "client_cache_size"
test_val = "123"
import signal
import json
import logging
-from unittest import case, SkipTest
from random import randint
from cephfs_test_case import CephFSTestCase
"""
if not isinstance(self.mount_a, FuseMount):
- raise SkipTest("Requires FUSE client to inject client metadata")
+ self.skipTest("Requires FUSE client to inject client metadata")
require_active = self.fs.get_config("fuse_require_active_mds", service_type="mon").lower() == "true"
if not require_active:
- raise case.SkipTest("fuse_require_active_mds is not set")
+ self.skipTest("fuse_require_active_mds is not set")
grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon"))
-from unittest import SkipTest
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.orchestra.run import CommandFailedError, ConnectionLostError
"""
if not isinstance(self.mount_a, FuseMount):
- raise SkipTest("Require FUSE client")
+ self.skipTest("Require FUSE client")
# Enable debug. Client will request CEPH_CAP_XATTR_SHARED
# on lookup/open
from StringIO import StringIO
import json
import logging
-from unittest import SkipTest
from tasks.cephfs.fuse_mount import FuseMount
from teuthology.exceptions import CommandFailedError
def test_session_reject(self):
if not isinstance(self.mount_a, FuseMount):
- raise SkipTest("Requires FUSE client to inject client metadata")
+ self.skipTest("Requires FUSE client to inject client metadata")
self.mount_a.run_shell(["mkdir", "foo"])
self.mount_a.run_shell(["mkdir", "foo/bar"])
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.orchestra.run import CommandFailedError, Raw
-from unittest import SkipTest
log = logging.getLogger(__name__)
check snaptable transaction
"""
if not isinstance(self.mount_a, FuseMount):
- raise SkipTest("Require FUSE client to forcibly kill mount")
+ self.skipTest("Require FUSE client to forcibly kill mount")
self.fs.set_allow_new_snaps(True);
self.fs.set_max_mds(2)
-
-from unittest import case
+from unittest import SkipTest
import json
import logging
assert cls.mgr_cluster is not None
if len(cls.mgr_cluster.mgr_ids) < cls.MGRS_REQUIRED:
- raise case.SkipTest("Only have {0} manager daemons, "
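+ # skipTest() is an instance method; this classmethod only has cls in scope, so raise SkipTest directly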
+ raise SkipTest("Only have {0} manager daemons, "
"{1} are required".format(
len(cls.mgr_cluster.mgr_ids), cls.MGRS_REQUIRED))
import json
import logging
import time
-from unittest import SkipTest
from mgr_test_case import MgrTestCase
def setUp(self):
# Ensure we have at least four OSDs
if self._osd_count() < 4:
- raise SkipTest("Not enough OSDS!")
+ self.skipTest("Not enough OSDS!")
# Remove any filesystems so that we can remove their pools
if self.mds_cluster: