import unittest
+from unittest import case
import time
import logging
mon_manager = None
+ # Declarative test requirements: subclasses should override these to indicate
+ # their special needs. If not met, tests will be skipped.
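+ #
+ # For example, a test that deliberately fills up the OSDs could declare
+ # (illustrative subclass name, not part of this change):
+ #
+ #     class TestClusterFull(CephFSTestCase):
+ #         REQUIRE_MEMSTORE = True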
+ REQUIRE_MEMSTORE = False
+
def setUp(self):
self.ceph_cluster.mon_manager.raw_cluster_cmd("log",
"Starting test {0}".format(self.id()))
+ if self.REQUIRE_MEMSTORE:
+ objectstore = self.ceph_cluster.get_config("osd_objectstore", "osd")
+ if objectstore != "memstore":
+ # You certainly *could* run this on a real OSD, but you don't want to sit
+ # here for hours waiting for the test to fill up a 1TB drive!
+ raise case.SkipTest("Require `memstore` OSD backend (test "
+                     "would take too long on full-sized OSDs)")
+
def tearDown(self):
self.ceph_cluster.mon_manager.raw_cluster_cmd("log",
"Ended test {0}".format(self.id()))
MDSS_REQUIRED = 1
REQUIRE_KCLIENT_REMOTE = False
REQUIRE_ONE_CLIENT_REMOTE = False
- REQUIRE_MEMSTORE = False
# Whether to create the default filesystem during setUp
REQUIRE_FILESYSTEM = True
if self.mounts[0].client_remote.hostname in self.mds_cluster.get_mds_hostnames():
raise case.SkipTest("Require first client to be on separate server from MDSs")
- if self.REQUIRE_MEMSTORE:
- objectstore = self.mds_cluster.get_config("osd_objectstore", "osd")
- if objectstore != "memstore":
- # You certainly *could* run this on a real OSD, but you don't want to sit
- # here for hours waiting for the test to fill up a 1TB drive!
- raise case.SkipTest("Require `memstore` OSD backend to simulate full drives")
-
# Create friendly mount_a, mount_b attrs
for i in range(0, self.CLIENTS_REQUIRED):
setattr(self, "mount_{0}".format(chr(ord('a') + i)), self.mounts[i])
max_required_mds = 0
max_required_clients = 0
max_required_mgr = 0
+ require_memstore = False
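+ # If any test case in the run declares REQUIRE_MEMSTORE, the whole vstart
+ # cluster is brought up on the memstore OSD backend.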
for suite, case in enumerate_methods(overall_suite):
        max_required_mds = max(max_required_mds,
                               getattr(case, "MDSS_REQUIRED", 0))
        max_required_clients = max(max_required_clients,
                               getattr(case, "CLIENTS_REQUIRED", 0))
max_required_mgr = max(max_required_mgr,
getattr(case, "MGRS_REQUIRED", 0))
+ require_memstore = getattr(case, "REQUIRE_MEMSTORE", False) \
+ or require_memstore
- return max_required_mds, max_required_clients, max_required_mgr
+ return max_required_mds, max_required_clients, \
+ max_required_mgr, require_memstore
class LocalCluster(object):
log.error("Some ceph binaries missing, please build them: {0}".format(" ".join(missing_binaries)))
sys.exit(-1)
- max_required_mds, max_required_clients, max_required_mgr = scan_tests(modules)
+ max_required_mds, max_required_clients, \
+ max_required_mgr, require_memstore = scan_tests(modules)
remote = LocalRemote()
vstart_env["OSD"] = "4"
vstart_env["MGR"] = max(max_required_mgr, 1).__str__()
- remote.run([os.path.join(SRC_PREFIX, "vstart.sh"), "-n", "-d", "--nolockdep"],
- env=vstart_env)
+ args = [os.path.join(SRC_PREFIX, "vstart.sh"), "-n", "-d",
+ "--nolockdep"]
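+ # Ask vstart.sh for the memstore (in-memory) OSD backend when any
+ # selected test declares REQUIRE_MEMSTORE.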
+ if require_memstore:
+ args.append("--memstore")
+
+ remote.run(args, env=vstart_env)
# Wait for OSD to come up so that subsequent injectargs etc will
# definitely succeed