    def status(self):
        return FSStatus(self.mon_manager)

-    def delete_all_filesystems(self):
-        """
-        Remove all filesystems that exist, and any pools in use by them.
-        """
-        pools = json.loads(self.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']
-        pool_id_name = {}
-        for pool in pools:
-            pool_id_name[pool['pool']] = pool['pool_name']
-
-        # mark cluster down for each fs to prevent churn during deletion
-        status = self.status()
-        for fs in status.get_filesystems():
-            self.mon_manager.raw_cluster_cmd("fs", "fail", str(fs['mdsmap']['fs_name']))
-
-        # get a new copy as actives may have since changed
-        status = self.status()
-        for fs in status.get_filesystems():
-            mdsmap = fs['mdsmap']
-            metadata_pool = pool_id_name[mdsmap['metadata_pool']]
-
-            self.mon_manager.raw_cluster_cmd('fs', 'rm', mdsmap['fs_name'], '--yes-i-really-mean-it')
-            self.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
-                                             metadata_pool, metadata_pool,
-                                             '--yes-i-really-really-mean-it')
-            for data_pool in mdsmap['data_pools']:
-                data_pool = pool_id_name[data_pool]
-                try:
-                    self.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
-                                                     data_pool, data_pool,
-                                                     '--yes-i-really-really-mean-it')
-                except CommandFailedError as e:
-                    if e.exitstatus == 16: # EBUSY, this data pool is used
-                        pass                # by two metadata pools, let the 2nd
-                    else:                   # pass delete it
-                        raise
-
    def get_standby_daemons(self):
        return set([s['name'] for s in self.status().get_standbys()])

        raise RuntimeError("Pool not found '{0}'".format(pool_name))
+    def delete_all_filesystems(self):
+        """
+        Remove all filesystems that exist, and any pools in use by them.
+        """
+        for fs in self.status().get_filesystems():
+            Filesystem(ctx=self._ctx, fscid=fs['id']).destroy()
+
+
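For reference, the teardown that each `Filesystem(...).destroy()` call presumably performs is the same sequence the removed block above did by hand: fail the file system, remove it, then delete its metadata and data pools, tolerating EBUSY when a data pool is shared with another file system. A minimal standalone sketch of that sequence against the `ceph` CLI, with a hypothetical `run_ceph()` helper standing in for `mon_manager.raw_cluster_cmd()`:

import subprocess

def run_ceph(*args):
    # Hypothetical stand-in for mon_manager.raw_cluster_cmd(): run `ceph <args>`
    # and raise CalledProcessError (carrying the command's exit status) on failure.
    return subprocess.run(("ceph",) + args, check=True,
                          capture_output=True, text=True).stdout

def teardown_filesystem(fs_name, metadata_pool, data_pools):
    """Fail and remove one file system, then delete the pools backing it."""
    run_ceph("fs", "fail", fs_name)  # stop MDS activity before removal
    run_ceph("fs", "rm", fs_name, "--yes-i-really-mean-it")
    run_ceph("osd", "pool", "delete", metadata_pool, metadata_pool,
             "--yes-i-really-really-mean-it")
    for pool in data_pools:
        try:
            run_ceph("osd", "pool", "delete", pool, pool,
                     "--yes-i-really-really-mean-it")
        except subprocess.CalledProcessError as e:
            if e.returncode != 16:  # 16 == EBUSY: pool still referenced, as in the old code
                raise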
class Filesystem(MDSCluster):
    """
    This object is for driving a CephFS filesystem. The MDS daemons driven by

        That a new file system warns/fails with an EC default data pool.
        """
-        self.fs.delete_all_filesystems()
+        self.mds_cluster.delete_all_filesystems()
        n = "test_new_default_ec"
        self._setup_ec_pools(n)
        try:
        That a new file system succeeds with an EC default data pool with --force.
        """
-        self.fs.delete_all_filesystems()
+        self.mds_cluster.delete_all_filesystems()
        n = "test_new_default_ec_force"
        self._setup_ec_pools(n)
        self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
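The `_setup_ec_pools()` helper these tests call is outside the hunks shown; the sketch below, with assumed pool and profile names derived from `n` as in the `fs new` call above, illustrates the kind of setup involved: a replicated metadata pool plus an erasure-coded data pool, optionally with `allow_ec_overwrites` enabled. That flag is what makes an EC pool usable as a CephFS data pool at all, which is why the no-overwrite variant below expects `fs new` to fail, while the EC-with-overwrites case still needs `--force` to be accepted as the default data pool.

import subprocess

def setup_ec_pools(n, overwrites=True):
    # Sketch only (not the test helper itself): create <n>-meta (replicated)
    # and <n>-data (erasure-coded), mirroring the names used by the tests.
    def ceph(*args):
        subprocess.run(("ceph",) + args, check=True)

    ceph("osd", "pool", "create", n + "-meta", "8")
    ceph("osd", "erasure-code-profile", "set", n + "-profile",
         "k=2", "m=2", "crush-failure-domain=osd")
    ceph("osd", "pool", "create", n + "-data", "8", "erasure", n + "-profile")
    if overwrites:
        ceph("osd", "pool", "set", n + "-data", "allow_ec_overwrites", "true")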
        That a new file system fails with an EC default data pool without overwrite.
        """
-        self.fs.delete_all_filesystems()
+        self.mds_cluster.delete_all_filesystems()
        n = "test_new_default_ec_no_overwrite"
        self._setup_ec_pools(n, overwrites=False)
        try:
"""
That the application metadata set on the pools of a newly created filesystem are as expected.
"""
- self.fs.delete_all_filesystems()
+ self.mds_cluster.delete_all_filesystems()
fs_name = "test_fs_new_pool_application"
keys = ['metadata', 'data']
pool_names = [fs_name+'-'+key for key in keys]
        # To avoid any issues with e.g. unlink bugs, we destroy and recreate
        # the filesystem rather than just doing a rm -rf of files
-        cls.mds_cluster.mds_stop()
-        cls.mds_cluster.mds_fail()
        cls.mds_cluster.delete_all_filesystems()
        cls.fs = None  # is now invalid!
class LocalMDSCluster(LocalCephCluster, MDSCluster):
    def __init__(self, ctx):
        super(LocalMDSCluster, self).__init__(ctx)
-
        self.mds_ids = ctx.daemons.daemons['ceph.mds'].keys()
        self.mds_daemons = dict([(id_, LocalDaemon("mds", id_)) for id_ in self.mds_ids])

    def newfs(self, name='cephfs', create=True):
        return LocalFilesystem(self._ctx, name=name, create=create)
+    def delete_all_filesystems(self):
+        """
+        Remove all filesystems that exist, and any pools in use by them.
+        """
+        for fs in self.status().get_filesystems():
+            LocalFilesystem(ctx=self._ctx, fscid=fs['id']).destroy()
+
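This override mirrors `MDSCluster.delete_all_filesystems()` above but constructs `LocalFilesystem` objects, since the vstart-based runner drives its daemons through `LocalDaemon` rather than teuthology remotes. A hedged usage sketch, assuming a `CephFSTestCase`-style test (the `TestExample` class is hypothetical; `mds_cluster` and `newfs()` are the attributes used elsewhere in this diff):

from tasks.cephfs.cephfs_test_case import CephFSTestCase

class TestExample(CephFSTestCase):
    def setUp(self):
        super(TestExample, self).setUp()
        # Start from a clean slate: drop every file system and its pools,
        # then create a fresh default file system for this test to use.
        self.mds_cluster.delete_all_filesystems()
        self.fs = self.mds_cluster.newfs(name="cephfs", create=True)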

class LocalMgrCluster(LocalCephCluster, MgrCluster):
    def __init__(self, ctx):
class LocalFilesystem(Filesystem, LocalMDSCluster):
-    def __init__(self, ctx, fscid=None, name='cephfs', create=False, ec_profile=None):
+    def __init__(self, ctx, fscid=None, name=None, create=False, ec_profile=None):
        # Deliberately skip calling parent constructor
        self._ctx = ctx
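Changing the default `name` from `'cephfs'` to `None` presumably lets a `LocalFilesystem` constructed from an `fscid` alone (as `delete_all_filesystems()` does above) resolve its name from the FS map rather than assuming it is called `cephfs`. A sketch of that lookup, reading the same `id` and `mdsmap.fs_name` fields the methods above take from `self.status()` (the `fs_name_from_id()` helper is hypothetical):

import json
import subprocess

def fs_name_from_id(fscid):
    # Hypothetical helper: map an fscid to its fs_name via `ceph fs dump`,
    # using the same fields (fs['id'], fs['mdsmap']['fs_name']) as above.
    out = subprocess.run(("ceph", "fs", "dump", "--format=json"), check=True,
                         capture_output=True, text=True).stdout
    for fs in json.loads(out)['filesystems']:
        if fs['id'] == fscid:
            return fs['mdsmap']['fs_name']
    raise RuntimeError("File system not found: id {0}".format(fscid))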