From: John Spray
Date: Wed, 14 Nov 2018 14:10:56 +0000 (-0500)
Subject: qa: generalise REQUIRE_MEMSTORE
X-Git-Tag: v14.1.0~582^2~23
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=e5fb5a1dddf8230e0611def6e202d773af549b83;p=ceph.git

qa: generalise REQUIRE_MEMSTORE

Move it up into CephTestCase so that mgr tests can use it too,
and pick it up in vstart_runner.py so that these tests will work
neatly there.

Signed-off-by: John Spray
---

diff --git a/qa/tasks/ceph_test_case.py b/qa/tasks/ceph_test_case.py
index 05e22722deb0..adcf3fbecb33 100644
--- a/qa/tasks/ceph_test_case.py
+++ b/qa/tasks/ceph_test_case.py
@@ -1,5 +1,6 @@
 import unittest
+from unittest import case
 import time
 import logging
 
@@ -26,10 +27,24 @@ class CephTestCase(unittest.TestCase):
 
     mon_manager = None
 
+    # Declarative test requirements: subclasses should override these to indicate
+    # their special needs.  If not met, tests will be skipped.
+    REQUIRE_MEMSTORE = False
+
     def setUp(self):
         self.ceph_cluster.mon_manager.raw_cluster_cmd("log",
             "Starting test {0}".format(self.id()))
 
+        if self.REQUIRE_MEMSTORE:
+            objectstore = self.ceph_cluster.get_config("osd_objectstore", "osd")
+            if objectstore != "memstore":
+                # You certainly *could* run this on a real OSD, but you don't want to sit
+                # here for hours waiting for the test to fill up a 1TB drive!
+                raise case.SkipTest("Require `memstore` OSD backend (test " \
+                    "would take too long on full sized OSDs)")
+
+
+
     def tearDown(self):
         self.ceph_cluster.mon_manager.raw_cluster_cmd("log",
             "Ended test {0}".format(self.id()))

diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
index b1ce3e6c40ab..2b069a22730a 100644
--- a/qa/tasks/cephfs/cephfs_test_case.py
+++ b/qa/tasks/cephfs/cephfs_test_case.py
@@ -51,7 +51,6 @@ class CephFSTestCase(CephTestCase):
     MDSS_REQUIRED = 1
     REQUIRE_KCLIENT_REMOTE = False
     REQUIRE_ONE_CLIENT_REMOTE = False
-    REQUIRE_MEMSTORE = False
 
     # Whether to create the default filesystem during setUp
     REQUIRE_FILESYSTEM = True
@@ -85,13 +84,6 @@ class CephFSTestCase(CephTestCase):
             if self.mounts[0].client_remote.hostname in self.mds_cluster.get_mds_hostnames():
                 raise case.SkipTest("Require first client to be on separate server from MDSs")
 
-        if self.REQUIRE_MEMSTORE:
-            objectstore = self.mds_cluster.get_config("osd_objectstore", "osd")
-            if objectstore != "memstore":
-                # You certainly *could* run this on a real OSD, but you don't want to sit
-                # here for hours waiting for the test to fill up a 1TB drive!
-                raise case.SkipTest("Require `memstore` OSD backend to simulate full drives")
-
         # Create friendly mount_a, mount_b attrs
         for i in range(0, self.CLIENTS_REQUIRED):
             setattr(self, "mount_{0}".format(chr(ord('a') + i)), self.mounts[i])

diff --git a/qa/tasks/vstart_runner.py b/qa/tasks/vstart_runner.py
index bf54af492193..e0d805010991 100644
--- a/qa/tasks/vstart_runner.py
+++ b/qa/tasks/vstart_runner.py
@@ -825,6 +825,7 @@ def scan_tests(modules):
     max_required_mds = 0
     max_required_clients = 0
     max_required_mgr = 0
+    require_memstore = False
 
     for suite, case in enumerate_methods(overall_suite):
         max_required_mds = max(max_required_mds,
@@ -833,8 +834,11 @@ def scan_tests(modules):
                                    getattr(case, "CLIENTS_REQUIRED", 0))
         max_required_mgr = max(max_required_mgr,
                                getattr(case, "MGRS_REQUIRED", 0))
+        require_memstore = getattr(case, "REQUIRE_MEMSTORE", False) \
+            or require_memstore
 
-    return max_required_mds, max_required_clients, max_required_mgr
+    return max_required_mds, max_required_clients, \
+        max_required_mgr, require_memstore
 
 
 class LocalCluster(object):
@@ -896,7 +900,8 @@ def exec_test():
         log.error("Some ceph binaries missing, please build them: {0}".format(" ".join(missing_binaries)))
         sys.exit(-1)
 
-    max_required_mds, max_required_clients, max_required_mgr = scan_tests(modules)
+    max_required_mds, max_required_clients, \
+        max_required_mgr, require_memstore = scan_tests(modules)
 
     remote = LocalRemote()
 
@@ -924,8 +929,12 @@ def exec_test():
     vstart_env["OSD"] = "4"
     vstart_env["MGR"] = max(max_required_mgr, 1).__str__()
 
-    remote.run([os.path.join(SRC_PREFIX, "vstart.sh"), "-n", "-d", "--nolockdep"],
-               env=vstart_env)
+    args = [os.path.join(SRC_PREFIX, "vstart.sh"), "-n", "-d",
+            "--nolockdep"]
+    if require_memstore:
+        args.append("--memstore")
+
+    remote.run(args, env=vstart_env)
 
     # Wait for OSD to come up so that subsequent injectargs etc will
     # definitely succeed