self.fs = None # is now invalid!
self.recovery_fs = None
- # In case the previous filesystem had filled up the RADOS cluster, wait for that
- # flag to pass.
- osd_mon_report_interval_max = int(self.mds_cluster.get_config("osd_mon_report_interval_max", service_type='osd'))
- self.wait_until_true(lambda: not self.mds_cluster.is_full(),
- timeout=osd_mon_report_interval_max * 5)
-
# In case anything is in the OSD blacklist, clear it out. This is to avoid
# the OSD map changing in the background (due to blacklist expiry) while tests run.
try:
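
The body of this `try` block is cut off by the diff context. A minimal sketch of how a blacklist wipe could look, assuming the `osd blacklist clear` mon command is available, with a one-entry-at-a-time fallback for older clusters (the fallback parsing here is an assumption, not necessarily this change's code):

```python
try:
    # Fast path: wipe the whole blacklist in one call (assumes the
    # 'osd blacklist clear' mon command exists on this cluster).
    self.mds_cluster.mon_manager.raw_cluster_cmd("osd", "blacklist", "clear")
except CommandFailedError:
    # Fallback sketch for older clusters: read the blacklist out of the
    # OSD map and remove each entry individually.
    blacklist = json.loads(self.mds_cluster.mon_manager.raw_cluster_cmd(
        "osd", "dump", "--format=json-pretty"))['blacklist']
    for addr in blacklist.keys():
        self.mds_cluster.mon_manager.raw_cluster_cmd(
            "osd", "blacklist", "rm", addr)
```
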
def get_mds_info(self, mds_id):
return FSStatus(self.mon_manager).get_mds(mds_id)
- def is_full(self):
- flags = json.loads(self.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['flags']
- return 'full' in flags
-
def is_pool_full(self, pool_name):
pools = json.loads(self.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']
for pool in pools:
return workers[0].value
else:
return None
+
+ def is_full(self):
+ return self.is_pool_full(self.get_data_pool_name())
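
The new `is_full` delegates to `is_pool_full`, whose body is truncated above. A sketch of the per-pool check it relies on, assuming `osd dump` reports a comma-separated `flags_names` string for each pool:

```python
def is_pool_full(self, pool_name):
    # Look the pool up in the OSD map and check its flags for 'full'.
    pools = json.loads(self.mon_manager.raw_cluster_cmd(
        "osd", "dump", "--format=json-pretty"))['pools']
    for pool in pools:
        if pool['pool_name'] == pool_name:
            # 'flags_names' is comma-separated, e.g. "hashpspool,full"
            return "full" in pool['flags_names'].split(",")
    raise RuntimeError("Pool '{0}' not found in OSD map".format(pool_name))
```
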
"max_bytes", "{0}".format(self.pool_capacity))
def is_full(self):
- return self.fs.is_pool_full(self.fs.get_data_pool_name())
+ return self.fs.is_full()
class TestClusterFull(FullnessTestCase):
"""
- Test cluster-wide fullness, which indicates that an OSD has become too full
+ Test data pool fullness, which indicates that an OSD has become too full
"""
pool_capacity = None
REQUIRE_MEMSTORE = True
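
With `pool_capacity` left as `None`, the test presumably discovers the data pool's capacity at runtime. A hedged sketch of one way a `setUp` override could do that via `ceph df` (caching the result on the class is an assumption, as is relying on the `max_avail` field):

```python
def setUp(self):
    super(TestClusterFull, self).setUp()
    if self.pool_capacity is None:
        # Read free space for the data pool out of 'ceph df' and cache it
        # on the class so later tests in this case reuse the same figure.
        df = json.loads(self.fs.mon_manager.raw_cluster_cmd(
            "df", "--format=json-pretty"))
        data_pool = self.fs.get_data_pool_name()
        TestClusterFull.pool_capacity = next(
            p['stats']['max_avail']
            for p in df['pools'] if p['name'] == data_pool)
```
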