qa/cephfs: modify delete_all_filesystems() in filesystem.py
author Rishabh Dave <ridave@redhat.com>
Wed, 24 Jun 2020 13:30:56 +0000 (19:00 +0530)
committer Rishabh Dave <ridave@redhat.com>
Thu, 10 Sep 2020 18:26:59 +0000 (23:56 +0530)
Modify the filesystem.Filesystem.delete_all_filesystems() method to make it
more succinct, move it to the MDSCluster class instead, and update every call
to it accordingly.

Signed-off-by: Rishabh Dave <ridave@redhat.com>
qa/tasks/cephfs/filesystem.py
qa/tasks/cephfs/test_admin.py
qa/tasks/mgr/dashboard/helper.py
qa/tasks/vstart_runner.py

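For orientation, the class hierarchy touched by this commit is sketched below. This is a simplified, self-contained skeleton based only on the class definitions visible in the diff; method bodies and unrelated members are elided.

# Simplified skeleton of the classes this commit touches (the real classes
# live in qa/tasks/cephfs/filesystem.py and qa/tasks/vstart_runner.py).
class CephCluster: ...
class LocalCephCluster(CephCluster): ...

class MDSCluster(CephCluster):
    # delete_all_filesystems() now lives here ...
    ...

class Filesystem(MDSCluster):
    # ... instead of here; per-filesystem teardown stays in Filesystem.destroy()
    ...

class LocalMDSCluster(LocalCephCluster, MDSCluster):
    # local override constructs LocalFilesystem objects by fscid
    ...

class LocalFilesystem(Filesystem, LocalMDSCluster):
    ...
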
index c93ac27ee32204a1ba9bdeefaf6ea0e11ffdd3ae..084866f22ae0ab76ad7689bfab70c6b31ae78049 100644 (file)
@@ -337,42 +337,6 @@ class MDSCluster(CephCluster):
     def status(self):
         return FSStatus(self.mon_manager)
 
-    def delete_all_filesystems(self):
-        """
-        Remove all filesystems that exist, and any pools in use by them.
-        """
-        pools = json.loads(self.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']
-        pool_id_name = {}
-        for pool in pools:
-            pool_id_name[pool['pool']] = pool['pool_name']
-
-        # mark cluster down for each fs to prevent churn during deletion
-        status = self.status()
-        for fs in status.get_filesystems():
-            self.mon_manager.raw_cluster_cmd("fs", "fail", str(fs['mdsmap']['fs_name']))
-
-        # get a new copy as actives may have since changed
-        status = self.status()
-        for fs in status.get_filesystems():
-            mdsmap = fs['mdsmap']
-            metadata_pool = pool_id_name[mdsmap['metadata_pool']]
-
-            self.mon_manager.raw_cluster_cmd('fs', 'rm', mdsmap['fs_name'], '--yes-i-really-mean-it')
-            self.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
-                                             metadata_pool, metadata_pool,
-                                             '--yes-i-really-really-mean-it')
-            for data_pool in mdsmap['data_pools']:
-                data_pool = pool_id_name[data_pool]
-                try:
-                    self.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
-                                                     data_pool, data_pool,
-                                                     '--yes-i-really-really-mean-it')
-                except CommandFailedError as e:
-                    if e.exitstatus == 16: # EBUSY, this data pool is used
-                        pass               # by two metadata pools, let the 2nd
-                    else:                  # pass delete it
-                        raise
-
     def get_standby_daemons(self):
         return set([s['name'] for s in self.status().get_standbys()])
 
@@ -425,6 +389,14 @@ class MDSCluster(CephCluster):
 
         raise RuntimeError("Pool not found '{0}'".format(pool_name))
 
+    def delete_all_filesystems(self):
+        """
+        Remove all filesystems that exist, and any pools in use by them.
+        """
+        for fs in self.status().get_filesystems():
+            Filesystem(ctx=self._ctx, fscid=fs['id']).destroy()
+
+
 class Filesystem(MDSCluster):
     """
     This object is for driving a CephFS filesystem.  The MDS daemons driven by
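The consolidated method delegates per-filesystem cleanup to Filesystem.destroy(), whose body is not shown in this diff. As a rough sketch only, the equivalent teardown for a single filesystem, reusing the monitor commands from the block removed above, might look like this (destroy_one and its parameters are illustrative, not the actual destroy() implementation):

# Illustrative only: roughly what tearing down one filesystem involves,
# based on the raw_cluster_cmd calls in the code removed above.
def destroy_one(mon_manager, fs_info, pool_id_name):
    mdsmap = fs_info['mdsmap']
    fs_name = mdsmap['fs_name']
    # mark the filesystem down first to prevent MDS churn during deletion
    mon_manager.raw_cluster_cmd('fs', 'fail', fs_name)
    mon_manager.raw_cluster_cmd('fs', 'rm', fs_name, '--yes-i-really-mean-it')
    # remove the metadata pool, then each data pool
    metadata_pool = pool_id_name[mdsmap['metadata_pool']]
    mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                metadata_pool, metadata_pool,
                                '--yes-i-really-really-mean-it')
    for data_pool_id in mdsmap['data_pools']:
        data_pool = pool_id_name[data_pool_id]
        mon_manager.raw_cluster_cmd('osd', 'pool', 'delete',
                                    data_pool, data_pool,
                                    '--yes-i-really-really-mean-it')
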
index 84ace1a9b27556178a1a8755acb9cb250ed7a492..7dc3951bc1fb287bb66b4822a02d28dfa7097b27 100644 (file)
@@ -120,7 +120,7 @@ class TestAdminCommands(CephFSTestCase):
         That a new file system warns/fails with an EC default data pool.
         """
 
-        self.fs.delete_all_filesystems()
+        self.mds_cluster.delete_all_filesystems()
         n = "test_new_default_ec"
         self._setup_ec_pools(n)
         try:
@@ -138,7 +138,7 @@ class TestAdminCommands(CephFSTestCase):
         That a new file system succeeds with an EC default data pool with --force.
         """
 
-        self.fs.delete_all_filesystems()
+        self.mds_cluster.delete_all_filesystems()
         n = "test_new_default_ec_force"
         self._setup_ec_pools(n)
         self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force")
@@ -148,7 +148,7 @@ class TestAdminCommands(CephFSTestCase):
         That a new file system fails with an EC default data pool without overwrite.
         """
 
-        self.fs.delete_all_filesystems()
+        self.mds_cluster.delete_all_filesystems()
         n = "test_new_default_ec_no_overwrite"
         self._setup_ec_pools(n, overwrites=False)
         try:
@@ -175,7 +175,7 @@ class TestAdminCommands(CephFSTestCase):
         """
         That the application metadata set on the pools of a newly created filesystem are as expected.
         """
-        self.fs.delete_all_filesystems()
+        self.mds_cluster.delete_all_filesystems()
         fs_name = "test_fs_new_pool_application"
         keys = ['metadata', 'data']
         pool_names = [fs_name+'-'+key for key in keys]
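The call-site change above follows one pattern everywhere: tests now reach the helper through self.mds_cluster rather than self.fs. A minimal, hypothetical test sketch (the test name is illustrative, and MDSCluster.newfs() is assumed to mirror the LocalMDSCluster.newfs() shown later in this diff):

# Hypothetical test sketch showing the updated call pattern.
from tasks.cephfs.cephfs_test_case import CephFSTestCase

class TestCleanSlate(CephFSTestCase):
    def test_recreate_filesystem(self):
        # tear down every filesystem and its pools via the cluster-level helper
        self.mds_cluster.delete_all_filesystems()
        # recreate a fresh filesystem for the test
        self.fs = self.mds_cluster.newfs(name='test_recreate_filesystem')
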
index d65f23b04110c91cddf1c8ef0b833874bca45e35..ea4268da8469e548dbab1c5897cb76214d89531b 100644 (file)
@@ -155,8 +155,6 @@ class DashboardTestCase(MgrTestCase):
 
             # To avoid any issues with e.g. unlink bugs, we destroy and recreate
             # the filesystem rather than just doing a rm -rf of files
-            cls.mds_cluster.mds_stop()
-            cls.mds_cluster.mds_fail()
             cls.mds_cluster.delete_all_filesystems()
             cls.fs = None  # is now invalid!
 
index bf7002c90562914fdeb37fe6e0ce411256c9976a..0d278356d4e1636c14604ae8edad52499e74390b 100644 (file)
@@ -1082,7 +1082,6 @@ class LocalCephCluster(CephCluster):
 class LocalMDSCluster(LocalCephCluster, MDSCluster):
     def __init__(self, ctx):
         super(LocalMDSCluster, self).__init__(ctx)
-
         self.mds_ids = ctx.daemons.daemons['ceph.mds'].keys()
         self.mds_daemons = dict([(id_, LocalDaemon("mds", id_)) for id_ in self.mds_ids])
 
@@ -1093,6 +1092,13 @@ class LocalMDSCluster(LocalCephCluster, MDSCluster):
     def newfs(self, name='cephfs', create=True):
         return LocalFilesystem(self._ctx, name=name, create=create)
 
+    def delete_all_filesystems(self):
+        """
+        Remove all filesystems that exist, and any pools in use by them.
+        """
+        for fs in self.status().get_filesystems():
+            LocalFilesystem(ctx=self._ctx, fscid=fs['id']).destroy()
+
 
 class LocalMgrCluster(LocalCephCluster, MgrCluster):
     def __init__(self, ctx):
@@ -1103,7 +1109,7 @@ class LocalMgrCluster(LocalCephCluster, MgrCluster):
 
 
 class LocalFilesystem(Filesystem, LocalMDSCluster):
-    def __init__(self, ctx, fscid=None, name='cephfs', create=False, ec_profile=None):
+    def __init__(self, ctx, fscid=None, name=None, create=False, ec_profile=None):
         # Deliberately skip calling parent constructor
         self._ctx = ctx
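The change of the name default to None presumably matters for the new LocalMDSCluster.delete_all_filesystems(): a LocalFilesystem constructed only from an fscid should resolve its name from the cluster's FS map rather than assuming 'cephfs'. A small illustrative helper (not part of the commit) showing the by-fscid construction path:

# Illustrative: look each filesystem up by fscid (no name assumed up front)
# and destroy it along with its pools, mirroring the new method above.
def destroy_all_local_filesystems(mds_cluster):
    for fs_info in mds_cluster.status().get_filesystems():
        LocalFilesystem(ctx=mds_cluster._ctx, fscid=fs_info['id']).destroy()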