From: Rishabh Dave
Date: Wed, 24 Jun 2020 13:30:56 +0000 (+0530)
Subject: qa/cephfs: modify delete_all_filesystems() in filesystem.py
X-Git-Tag: v17.0.0~1150^2~4
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=04ed58fec2580002452cf5891a7311b1181c9df6;p=ceph.git

qa/cephfs: modify delete_all_filesystems() in filesystem.py

Modify filesystem.Filesystem.delete_all_filesystems() method to make it
more succinct, move it to class MDSCluster instead and update every
call to it accordingly.

Signed-off-by: Rishabh Dave
---

diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py
index c93ac27ee3220..084866f22ae0a 100644
--- a/qa/tasks/cephfs/filesystem.py
+++ b/qa/tasks/cephfs/filesystem.py
@@ -337,42 +337,6 @@ class MDSCluster(CephCluster):
     def status(self):
         return FSStatus(self.mon_manager)
 
-    def delete_all_filesystems(self):
-        """
-        Remove all filesystems that exist, and any pools in use by them.
-        """
-        pools = json.loads(self.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']
-        pool_id_name = {}
-        for pool in pools:
-            pool_id_name[pool['pool']] = pool['pool_name']
-
-        # mark cluster down for each fs to prevent churn during deletion
-        status = self.status()
-        for fs in status.get_filesystems():
-            self.mon_manager.raw_cluster_cmd("fs", "fail", str(fs['mdsmap']['fs_name']))
-
-        # get a new copy as actives may have since changed
-        status = self.status()
-        for fs in status.get_filesystems():
-            mdsmap = fs['mdsmap']
-            metadata_pool = pool_id_name[mdsmap['metadata_pool']]
-
-            self.mon_manager.raw_cluster_cmd('fs', 'rm', mdsmap['fs_name'], '--yes-i-really-mean-it')
-            self.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
-                                             metadata_pool, metadata_pool,
-                                             '--yes-i-really-really-mean-it')
-            for data_pool in mdsmap['data_pools']:
-                data_pool = pool_id_name[data_pool]
-                try:
-                    self.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
-                                                     data_pool, data_pool,
-                                                     '--yes-i-really-really-mean-it')
-                except CommandFailedError as e:
-                    if e.exitstatus == 16:  # EBUSY, this data pool is used
-                        pass                # by two metadata pools, let the 2nd
-                    else:                   # pass delete it
-                        raise
-
     def get_standby_daemons(self):
         return set([s['name'] for s in self.status().get_standbys()])
 
@@ -425,6 +389,14 @@ class MDSCluster(CephCluster):
 
         raise RuntimeError("Pool not found '{0}'".format(pool_name))
 
+    def delete_all_filesystems(self):
+        """
+        Remove all filesystems that exist, and any pools in use by them.
+        """
+        for fs in self.status().get_filesystems():
+            Filesystem(ctx=self._ctx, fscid=fs['id']).destroy()
+
+
 class Filesystem(MDSCluster):
     """
     This object is for driving a CephFS filesystem.  The MDS daemons driven by
diff --git a/qa/tasks/cephfs/test_admin.py b/qa/tasks/cephfs/test_admin.py
index 84ace1a9b2755..7dc3951bc1fb2 100644
--- a/qa/tasks/cephfs/test_admin.py
+++ b/qa/tasks/cephfs/test_admin.py
@@ -120,7 +120,7 @@ class TestAdminCommands(CephFSTestCase):
         That a new file system warns/fails with an EC default data pool.
         """
 
-        self.fs.delete_all_filesystems()
+        self.mds_cluster.delete_all_filesystems()
         n = "test_new_default_ec"
         self._setup_ec_pools(n)
         try:
@@ -138,7 +138,7 @@ class TestAdminCommands(CephFSTestCase):
         That a new file system succeeds with an EC default data pool with --force.
         """
 
-        self.fs.delete_all_filesystems()
+        self.mds_cluster.delete_all_filesystems()
         n = "test_new_default_ec_force"
         self._setup_ec_pools(n)
         self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
@@ -148,7 +148,7 @@ class TestAdminCommands(CephFSTestCase):
         That a new file system fails with an EC default data pool without overwrite.
         """
 
-        self.fs.delete_all_filesystems()
+        self.mds_cluster.delete_all_filesystems()
         n = "test_new_default_ec_no_overwrite"
         self._setup_ec_pools(n, overwrites=False)
         try:
@@ -175,7 +175,7 @@ class TestAdminCommands(CephFSTestCase):
         """
         That the application metadata set on the pools of a newly created filesystem are as expected.
         """
-        self.fs.delete_all_filesystems()
+        self.mds_cluster.delete_all_filesystems()
         fs_name = "test_fs_new_pool_application"
         keys = ['metadata', 'data']
         pool_names = [fs_name+'-'+key for key in keys]
diff --git a/qa/tasks/mgr/dashboard/helper.py b/qa/tasks/mgr/dashboard/helper.py
index d65f23b04110c..ea4268da8469e 100644
--- a/qa/tasks/mgr/dashboard/helper.py
+++ b/qa/tasks/mgr/dashboard/helper.py
@@ -155,8 +155,6 @@ class DashboardTestCase(MgrTestCase):
 
             # To avoid any issues with e.g. unlink bugs, we destroy and recreate
             # the filesystem rather than just doing a rm -rf of files
-            cls.mds_cluster.mds_stop()
-            cls.mds_cluster.mds_fail()
             cls.mds_cluster.delete_all_filesystems()
             cls.fs = None  # is now invalid!
 
diff --git a/qa/tasks/vstart_runner.py b/qa/tasks/vstart_runner.py
index bf7002c905629..0d278356d4e16 100644
--- a/qa/tasks/vstart_runner.py
+++ b/qa/tasks/vstart_runner.py
@@ -1082,7 +1082,6 @@ class LocalCephCluster(CephCluster):
 class LocalMDSCluster(LocalCephCluster, MDSCluster):
     def __init__(self, ctx):
         super(LocalMDSCluster, self).__init__(ctx)
-
         self.mds_ids = ctx.daemons.daemons['ceph.mds'].keys()
         self.mds_daemons = dict([(id_, LocalDaemon("mds", id_)) for id_ in self.mds_ids])
 
@@ -1093,6 +1092,13 @@ class LocalMDSCluster(LocalCephCluster, MDSCluster):
     def newfs(self, name='cephfs', create=True):
         return LocalFilesystem(self._ctx, name=name, create=create)
 
+    def delete_all_filesystems(self):
+        """
+        Remove all filesystems that exist, and any pools in use by them.
+        """
+        for fs in self.status().get_filesystems():
+            LocalFilesystem(ctx=self._ctx, fscid=fs['id']).destroy()
+
 
 class LocalMgrCluster(LocalCephCluster, MgrCluster):
     def __init__(self, ctx):
@@ -1103,7 +1109,7 @@ class LocalMgrCluster(LocalCephCluster, MgrCluster):
 
 
 class LocalFilesystem(Filesystem, LocalMDSCluster):
-    def __init__(self, ctx, fscid=None, name='cephfs', create=False, ec_profile=None):
+    def __init__(self, ctx, fscid=None, name=None, create=False, ec_profile=None):
         # Deliberately skip calling parent constructor
         self._ctx = ctx