# Test-fixture knobs read by the shared setUp logic; subclasses override as needed.
MDSS_REQUIRED = 1
REQUIRE_KCLIENT_REMOTE = False
REQUIRE_ONE_CLIENT_REMOTE = False
# Opt-in: tests that deliberately fill OSDs set this True so setUp can skip
# runs that are not backed by the (small, fast-to-fill) memstore backend.
REQUIRE_MEMSTORE = False
LOAD_SETTINGS = []
# Skip if the first client shares a host with an MDS: these tests need the
# client to observe the MDS from a separate machine.
if self.mounts[0].client_remote.hostname in self.fs.get_mds_hostnames():
    raise case.SkipTest("Require first client to be on separate server from MDSs")

if self.REQUIRE_MEMSTORE:
    objectstore = self.fs.get_config("osd_objectstore", "osd")
    if objectstore != "memstore":
        # You certainly *could* run this on a real OSD, but you don't want to sit
        # here for hours waiting for the test to fill up a 1TB drive!
        raise case.SkipTest("Require `memstore` OSD backend to simulate full drives")

# Unmount all surplus clients
for i in range(self.CLIENTS_REQUIRED, len(self.mounts)):
    mount = self.mounts[i]
    # NOTE(review): the loop body appears truncated in this chunk — presumably
    # the surplus mount is unmounted here; confirm against the full file.
from textwrap import dedent
import time
from teuthology.orchestra.run import CommandFailedError
-from unittest import case
from tasks.cephfs.cephfs_test_case import CephFSTestCase
"""
Test cluster-wide fullness, which indicates that an OSD has become too full
"""

# NOTE(review): appears to be populated elsewhere (lazily, as a class
# attribute) before setUp divides by it — confirm against the full file.
pool_capacity = None
# Filling OSDs is the whole point of this test, so require memstore-backed
# OSDs (the base setUp skips the run otherwise).
REQUIRE_MEMSTORE = True
def setUp(self):
    """Compute fill_mb: the data volume (MB) that pushes the cluster past full.

    The memstore/objectstore skip check that used to live here is marked
    removed in this chunk (it now runs in the shared base-class setUp via
    REQUIRE_MEMSTORE), so it is dropped rather than duplicated.
    """
    super(TestClusterFull, self).setUp()

    mon_osd_full_ratio = float(self.fs.get_config("mon_osd_full_ratio"))
    # 1.05 fudge factor: overshoot the configured full ratio slightly so the
    # cluster is guaranteed to actually hit the full condition.
    # NOTE(review): self.pool_capacity (presumably bytes) must be set before
    # this line; its assignment is not visible in this chunk — confirm.
    TestClusterFull.fill_mb = int(1.05 * mon_osd_full_ratio * (self.pool_capacity / (1024.0 * 1024.0)))
def is_full(self):
    """Report whether the cluster is currently flagged full.

    Simply delegates the decision to the Filesystem helper.
    """
    fs = self.fs
    return fs.is_full()