qa: generalise REQUIRE_MEMSTORE
author    John Spray <john.spray@redhat.com>
          Wed, 14 Nov 2018 14:10:56 +0000 (09:10 -0500)
committer Sage Weil <sage@redhat.com>
          Tue, 18 Dec 2018 19:30:54 +0000 (13:30 -0600)
Move it up into CephTestCase so that mgr tests can
use it too, and pick it up in vstart_runner.py so
that these tests will work neatly there.

Signed-off-by: John Spray <john.spray@redhat.com>
qa/tasks/ceph_test_case.py
qa/tasks/cephfs/cephfs_test_case.py
qa/tasks/vstart_runner.py
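
To illustrate the effect, a minimal sketch of a suite opting in to the now-shared flag (the class name is hypothetical and the import path is assumed from the tree layout above; not part of this commit):

    from tasks.ceph_test_case import CephTestCase

    class TestFillsUpOSDs(CephTestCase):
        # With REQUIRE_MEMSTORE promoted to CephTestCase, mgr and cephfs suites
        # alike can declare it.  On a cluster whose osd_objectstore is not
        # "memstore", setUp() raises SkipTest and the test is reported as
        # skipped rather than left to grind against full-sized drives.
        REQUIRE_MEMSTORE = True

        def test_write_until_full(self):
            pass  # placeholder body, illustration only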

diff --git a/qa/tasks/ceph_test_case.py b/qa/tasks/ceph_test_case.py
index 05e22722deb0c9a8348b57a7768591a230c463c3..adcf3fbecb33a104c7fbee89cdaee0c2d077489c 100644
--- a/qa/tasks/ceph_test_case.py
+++ b/qa/tasks/ceph_test_case.py
@@ -1,5 +1,6 @@
 
 import unittest
+from unittest import case
 import time
 import logging
 
@@ -26,10 +27,24 @@ class CephTestCase(unittest.TestCase):
 
     mon_manager = None
 
+    # Declarative test requirements: subclasses should override these to indicate
+    # their special needs.  If not met, tests will be skipped.
+    REQUIRE_MEMSTORE = False
+
     def setUp(self):
         self.ceph_cluster.mon_manager.raw_cluster_cmd("log",
             "Starting test {0}".format(self.id()))
 
+        if self.REQUIRE_MEMSTORE:
+            objectstore = self.ceph_cluster.get_config("osd_objectstore", "osd")
+            if objectstore != "memstore":
+                # You certainly *could* run this on a real OSD, but you don't want to sit
+                # here for hours waiting for the test to fill up a 1TB drive!
+                raise case.SkipTest("Require `memstore` OSD backend (test " \
+                        "would take too long on full-sized OSDs)")
+
+
+
     def tearDown(self):
         self.ceph_cluster.mon_manager.raw_cluster_cmd("log",
             "Ended test {0}".format(self.id()))
diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
index b1ce3e6c40abb34db54b8e7db91ab3c9879cff37..2b069a22730a7fbcfbf4ad992347b001878a9791 100644
--- a/qa/tasks/cephfs/cephfs_test_case.py
+++ b/qa/tasks/cephfs/cephfs_test_case.py
@@ -51,7 +51,6 @@ class CephFSTestCase(CephTestCase):
     MDSS_REQUIRED = 1
     REQUIRE_KCLIENT_REMOTE = False
     REQUIRE_ONE_CLIENT_REMOTE = False
-    REQUIRE_MEMSTORE = False
 
     # Whether to create the default filesystem during setUp
     REQUIRE_FILESYSTEM = True
@@ -85,13 +84,6 @@ class CephFSTestCase(CephTestCase):
             if self.mounts[0].client_remote.hostname in self.mds_cluster.get_mds_hostnames():
                 raise case.SkipTest("Require first client to be on separate server from MDSs")
 
-        if self.REQUIRE_MEMSTORE:
-            objectstore = self.mds_cluster.get_config("osd_objectstore", "osd")
-            if objectstore != "memstore":
-                # You certainly *could* run this on a real OSD, but you don't want to sit
-                # here for hours waiting for the test to fill up a 1TB drive!
-                raise case.SkipTest("Require `memstore` OSD backend to simulate full drives")
-
         # Create friendly mount_a, mount_b attrs
         for i in range(0, self.CLIENTS_REQUIRED):
             setattr(self, "mount_{0}".format(chr(ord('a') + i)), self.mounts[i])
diff --git a/qa/tasks/vstart_runner.py b/qa/tasks/vstart_runner.py
index bf54af4921938ff11bdce3e3ca650bbb3cea29db..e0d805010991a6177fc8539c81d31defafba6fba 100644
--- a/qa/tasks/vstart_runner.py
+++ b/qa/tasks/vstart_runner.py
@@ -825,6 +825,7 @@ def scan_tests(modules):
     max_required_mds = 0
     max_required_clients = 0
     max_required_mgr = 0
+    require_memstore = False
 
     for suite, case in enumerate_methods(overall_suite):
         max_required_mds = max(max_required_mds,
@@ -833,8 +834,11 @@ def scan_tests(modules):
                                getattr(case, "CLIENTS_REQUIRED", 0))
         max_required_mgr = max(max_required_mgr,
                                getattr(case, "MGRS_REQUIRED", 0))
+        require_memstore = getattr(case, "REQUIRE_MEMSTORE", False) \
+                               or require_memstore
 
-    return max_required_mds, max_required_clients, max_required_mgr
+    return max_required_mds, max_required_clients, \
+            max_required_mgr, require_memstore
 
 
 class LocalCluster(object):
@@ -896,7 +900,8 @@ def exec_test():
         log.error("Some ceph binaries missing, please build them: {0}".format(" ".join(missing_binaries)))
         sys.exit(-1)
 
-    max_required_mds, max_required_clients, max_required_mgr = scan_tests(modules)
+    max_required_mds, max_required_clients, \
+            max_required_mgr, require_memstore = scan_tests(modules)
 
     remote = LocalRemote()
 
@@ -924,8 +929,12 @@ def exec_test():
         vstart_env["OSD"] = "4"
         vstart_env["MGR"] = max(max_required_mgr, 1).__str__()
 
-        remote.run([os.path.join(SRC_PREFIX, "vstart.sh"), "-n", "-d", "--nolockdep"],
-                   env=vstart_env)
+        args = [os.path.join(SRC_PREFIX, "vstart.sh"), "-n", "-d",
+                    "--nolockdep"]
+        if require_memstore:
+            args.append("--memstore")
+
+        remote.run(args, env=vstart_env)
 
         # Wait for OSD to come up so that subsequent injectargs etc will
         # definitely succeed
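
With both pieces in place, a local run of a suite that sets REQUIRE_MEMSTORE should get the right backend automatically: scan_tests() now reports require_memstore, and exec_test() appends --memstore so the cluster comes up as "vstart.sh -n -d --nolockdep --memstore" instead of with the default objectstore. A plausible invocation (the module name is just an example of such a suite; adjust paths for your build tree):

    # run from the build directory of a compiled source tree
    python ../qa/tasks/vstart_runner.py tasks.cephfs.test_full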