qa: add test for fs rm idempotency 38835/head
author Patrick Donnelly <pdonnell@redhat.com>
Fri, 8 Jan 2021 20:23:21 +0000 (12:23 -0800)
committer Patrick Donnelly <pdonnell@redhat.com>
Tue, 12 Jan 2021 15:24:33 +0000 (07:24 -0800)
Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
qa/tasks/cephfs/filesystem.py
qa/tasks/cephfs/test_admin.py

index 3b195afed0f3fbe97e601dadfcc86a8864693b8d..226eb02d1922d94462c9855c3f87c7464d014674 100644 (file)
@@ -680,27 +680,19 @@ class Filesystem(MDSCluster):
         m.run_shell_payload(cmd)
         m.umount_wait(require_clean=True)
 
-    def destroy(self, reset_obj_attrs=True):
-        log.info(f'Destroying file system {self.name} and related pools')
+    def _remove_pool(self, name, **kwargs):
+        c = f'osd pool rm {name} {name} --yes-i-really-really-mean-it'
+        return self.mon_manager.ceph(c, **kwargs)
 
-        if self.dead():
-            log.debug('already dead...')
-            return
+    def rm(self, **kwargs):
+        c = f'fs rm {self.name} --yes-i-really-mean-it'
+        return self.mon_manager.ceph(c, **kwargs)
 
-        data_pools = self.get_data_pool_names(refresh=True)
-
-        # make sure no MDSs are attached to given FS.
-        self.mon_manager.raw_cluster_cmd('fs', 'fail', self.name)
-        self.mon_manager.raw_cluster_cmd(
-            'fs', 'rm', self.name, '--yes-i-really-mean-it')
-
-        self.mon_manager.raw_cluster_cmd('osd', 'pool', 'rm',
-            self.get_metadata_pool_name(), self.get_metadata_pool_name(),
-            '--yes-i-really-really-mean-it')
+    def remove_pools(self, data_pools):
+        self._remove_pool(self.get_metadata_pool_name())
         for poolname in data_pools:
             try:
-                self.mon_manager.raw_cluster_cmd('osd', 'pool', 'rm', poolname,
-                    poolname, '--yes-i-really-really-mean-it')
+                self._remove_pool(poolname)
             except CommandFailedError as e:
                 # EBUSY, this data pool is used by two metadata pools, let the
                 # 2nd pass delete it
@@ -709,6 +701,21 @@ class Filesystem(MDSCluster):
                 else:
                     raise
 
+    def destroy(self, reset_obj_attrs=True):
+        log.info(f'Destroying file system {self.name} and related pools')
+
+        if self.dead():
+            log.debug('already dead...')
+            return
+
+        data_pools = self.get_data_pool_names(refresh=True)
+
+        # make sure no MDSs are attached to given FS.
+        self.fail()
+        self.rm()
+
+        self.remove_pools(data_pools)
+
         if reset_obj_attrs:
             self.id = None
             self.name = None
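
The net effect of these two hunks is that destroy() becomes a thin composition of fail(), rm() and remove_pools(), and that the removal helpers hand back whatever mon_manager.ceph() returns instead of discarding it. The sketch below illustrates why that matters for the new test; StubMonManager and its Result type are purely illustrative stand-ins, not the real teuthology objects:

    from io import StringIO


    class StubMonManager:
        """Illustrative stand-in for the teuthology mon_manager: it hands back
        an object with a stderr buffer, loosely mimicking the real helper."""

        class Result:
            def __init__(self, stderr_text=""):
                self.stderr = StringIO(stderr_text)

        def __init__(self):
            self.removed = set()

        def ceph(self, cmd, **kwargs):
            if cmd.startswith("fs rm "):
                name = cmd.split()[2]
                if name in self.removed:
                    # A second removal still succeeds, but the monitor notes
                    # the file system is already gone -- the behaviour under test.
                    return self.Result(f"filesystem '{name}' does not exist\n")
                self.removed.add(name)
            return self.Result()


    class FilesystemSketch:
        """Mirrors the helper layout introduced in the hunks above."""

        def __init__(self, name, mon_manager):
            self.name = name
            self.mon_manager = mon_manager

        def rm(self, **kwargs):
            # Returning the command result (instead of discarding it) is what
            # lets callers inspect stderr of a repeated `fs rm`.
            return self.mon_manager.ceph(
                f'fs rm {self.name} --yes-i-really-mean-it', **kwargs)

        def _remove_pool(self, name, **kwargs):
            return self.mon_manager.ceph(
                f'osd pool rm {name} {name} --yes-i-really-really-mean-it', **kwargs)


    if __name__ == "__main__":
        fs = FilesystemSketch("cephfs", StubMonManager())
        fs.rm()                      # first removal
        p = fs.rm()                  # second removal still returns a result
        assert "does not exist" in p.stderr.getvalue()

Because rm() and _remove_pool() now return the command result, the idempotency test added below can reuse the exact same code paths as destroy() and assert on the stderr of the second removal.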
index 37bfaf5c31f81a9a841fc8613b4d977e94d1086c..744f0fdcfc242f0331a0910923e938e406134dee 100644 (file)
@@ -5,7 +5,7 @@ from os.path import join as os_path_join
 from teuthology.orchestra.run import CommandFailedError, Raw
 
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
-from tasks.cephfs.filesystem import FileLayout
+from tasks.cephfs.filesystem import FileLayout, FSMissing
 from tasks.cephfs.fuse_mount import FuseMount
 from tasks.cephfs.caps_helper import CapsHelper
 
@@ -576,3 +576,29 @@ class TestSubCmdFsAuthorize(CapsHelper):
         mounts = (self.mount_a, )
 
         return filepaths, filedata, mounts, keyring
+
+class TestAdminCommandIdempotency(CephFSTestCase):
+    """
+    Tests for administration command idempotency.
+    """
+
+    CLIENTS_REQUIRED = 0
+    MDSS_REQUIRED = 1
+
+    def test_rm_idempotency(self):
+        """
+        That removing a fs twice is idempotent.
+        """
+
+        data_pools = self.fs.get_data_pool_names(refresh=True)
+        self.fs.fail()
+        self.fs.rm()
+        try:
+            self.fs.get_mds_map()
+        except FSMissing:
+            pass
+        else:
+            self.fail("get_mds_map should raise")
+        p = self.fs.rm()
+        self.assertIn("does not exist", p.stderr.getvalue())
+        self.fs.remove_pools(data_pools)
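
For reference outside teuthology, the same idempotency contract can be exercised from the command line. The script below is only a manual sanity-check sketch, not part of the commit; it assumes a reachable, disposable test cluster with a file system named `cephfs` that is safe to remove:

    #!/usr/bin/env python3
    """Manual sanity check mirroring TestAdminCommandIdempotency.test_rm_idempotency.
    Only run against a disposable test cluster."""
    import subprocess


    def ceph(*args):
        # Capture both streams so the "does not exist" note can be inspected.
        return subprocess.run(("ceph",) + args, capture_output=True, text=True)


    def main():
        name = "cephfs"  # assumed: a file system that is safe to remove
        ceph("fs", "fail", name)
        first = ceph("fs", "rm", name, "--yes-i-really-mean-it")
        assert first.returncode == 0, first.stderr
        second = ceph("fs", "rm", name, "--yes-i-really-mean-it")
        # The behaviour this commit tests: a repeated `fs rm` is still a success,
        # and the monitor merely notes that the file system is already gone.
        assert second.returncode == 0, second.stderr
        assert "does not exist" in second.stderr


    if __name__ == "__main__":
        main()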