From 780a1dd5ff14b862b50f136c67d919a953cd5313 Mon Sep 17 00:00:00 2001
From: Patrick Donnelly
Date: Fri, 8 Jan 2021 12:23:21 -0800
Subject: [PATCH] qa: add test for fs rm idempotency

Signed-off-by: Patrick Donnelly
---
 qa/tasks/cephfs/filesystem.py | 41 ++++++++++++++++++++---------------
 qa/tasks/cephfs/test_admin.py | 28 +++++++++++++++++++++++-
 2 files changed, 51 insertions(+), 18 deletions(-)

diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py
index 3b195afed0f..226eb02d192 100644
--- a/qa/tasks/cephfs/filesystem.py
+++ b/qa/tasks/cephfs/filesystem.py
@@ -680,27 +680,19 @@ class Filesystem(MDSCluster):
         m.run_shell_payload(cmd)
         m.umount_wait(require_clean=True)
 
-    def destroy(self, reset_obj_attrs=True):
-        log.info(f'Destroying file system {self.name} and related pools')
+    def _remove_pool(self, name, **kwargs):
+        c = f'osd pool rm {name} {name} --yes-i-really-really-mean-it'
+        return self.mon_manager.ceph(c, **kwargs)
 
-        if self.dead():
-            log.debug('already dead...')
-            return
+    def rm(self, **kwargs):
+        c = f'fs rm {self.name} --yes-i-really-mean-it'
+        return self.mon_manager.ceph(c, **kwargs)
 
-        data_pools = self.get_data_pool_names(refresh=True)
-
-        # make sure no MDSs are attached to given FS.
-        self.mon_manager.raw_cluster_cmd('fs', 'fail', self.name)
-        self.mon_manager.raw_cluster_cmd(
-            'fs', 'rm', self.name, '--yes-i-really-mean-it')
-
-        self.mon_manager.raw_cluster_cmd('osd', 'pool', 'rm',
-            self.get_metadata_pool_name(), self.get_metadata_pool_name(),
-            '--yes-i-really-really-mean-it')
+    def remove_pools(self, data_pools):
+        self._remove_pool(self.get_metadata_pool_name())
         for poolname in data_pools:
             try:
-                self.mon_manager.raw_cluster_cmd('osd', 'pool', 'rm', poolname,
-                    poolname, '--yes-i-really-really-mean-it')
+                self._remove_pool(poolname)
             except CommandFailedError as e:
                 # EBUSY, this data pool is used by two metadata pools, let the
                 # 2nd pass delete it
@@ -709,6 +701,21 @@ class Filesystem(MDSCluster):
             else:
                 raise
 
+    def destroy(self, reset_obj_attrs=True):
+        log.info(f'Destroying file system {self.name} and related pools')
+
+        if self.dead():
+            log.debug('already dead...')
+            return
+
+        data_pools = self.get_data_pool_names(refresh=True)
+
+        # make sure no MDSs are attached to given FS.
+        self.fail()
+        self.rm()
+
+        self.remove_pools(data_pools)
+
         if reset_obj_attrs:
             self.id = None
             self.name = None
diff --git a/qa/tasks/cephfs/test_admin.py b/qa/tasks/cephfs/test_admin.py
index 37bfaf5c31f..744f0fdcfc2 100644
--- a/qa/tasks/cephfs/test_admin.py
+++ b/qa/tasks/cephfs/test_admin.py
@@ -5,7 +5,7 @@ from os.path import join as os_path_join
 from teuthology.orchestra.run import CommandFailedError, Raw
 
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
-from tasks.cephfs.filesystem import FileLayout
+from tasks.cephfs.filesystem import FileLayout, FSMissing
 from tasks.cephfs.fuse_mount import FuseMount
 from tasks.cephfs.caps_helper import CapsHelper
 
@@ -576,3 +576,29 @@ class TestSubCmdFsAuthorize(CapsHelper):
         mounts = (self.mount_a, )
 
         return filepaths, filedata, mounts, keyring
+
+class TestAdminCommandIdempotency(CephFSTestCase):
+    """
+    Tests for administration command idempotency.
+    """
+
+    CLIENTS_REQUIRED = 0
+    MDSS_REQUIRED = 1
+
+    def test_rm_idempotency(self):
+        """
+        That removing a fs twice is idempotent.
+        """
+
+        data_pools = self.fs.get_data_pool_names(refresh=True)
+        self.fs.fail()
+        self.fs.rm()
+        try:
+            self.fs.get_mds_map()
+        except FSMissing:
+            pass
+        else:
+            self.fail("get_mds_map should raise")
+        p = self.fs.rm()
+        self.assertIn("does not exist", p.stderr.getvalue())
+        self.fs.remove_pools(data_pools)
-- 
2.47.3