From: Patrick Donnelly
Date: Mon, 5 Oct 2020 17:31:23 +0000 (-0700)
Subject: qa: unmount all clients before deleting the file system
X-Git-Tag: v16.1.0~814^2~4
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=bc25bd70f2a357046b300135c1b32f1cf120daa5;p=ceph.git

qa: unmount all clients before deleting the file system

Otherwise we have unnecessary timeout waits.

Signed-off-by: Patrick Donnelly
---

diff --git a/qa/tasks/cephfs/test_admin.py b/qa/tasks/cephfs/test_admin.py
index 5708f7671bcd..f322fb4f8ccd 100644
--- a/qa/tasks/cephfs/test_admin.py
+++ b/qa/tasks/cephfs/test_admin.py
@@ -121,6 +121,7 @@ class TestAdminCommands(CephFSTestCase):
         That a new file system warns/fails with an EC default data pool.
         """
 
+        self.mount_a.umount_wait(require_clean=True)
         self.mds_cluster.delete_all_filesystems()
         n = "test_new_default_ec"
         self._setup_ec_pools(n)
@@ -139,6 +140,7 @@ class TestAdminCommands(CephFSTestCase):
         That a new file system succeeds with an EC default data pool with --force.
         """
 
+        self.mount_a.umount_wait(require_clean=True)
         self.mds_cluster.delete_all_filesystems()
         n = "test_new_default_ec_force"
         self._setup_ec_pools(n)
@@ -149,6 +151,7 @@ class TestAdminCommands(CephFSTestCase):
         That a new file system fails with an EC default data pool without overwrite.
         """
 
+        self.mount_a.umount_wait(require_clean=True)
         self.mds_cluster.delete_all_filesystems()
         n = "test_new_default_ec_no_overwrite"
         self._setup_ec_pools(n, overwrites=False)
@@ -176,6 +179,7 @@ class TestAdminCommands(CephFSTestCase):
         """
         That the application metadata set on the pools of a newly created filesystem are as expected.
         """
+        self.mount_a.umount_wait(require_clean=True)
         self.mds_cluster.delete_all_filesystems()
         fs_name = "test_fs_new_pool_application"
         keys = ['metadata', 'data']
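
The patch repeats the same unmount-then-delete preamble in each test. Below is a minimal sketch of how that step could be written as a single helper, assuming the CephFSTestCase conventions used in qa/tasks/cephfs (a self.mounts list of client mounts, umount_wait(), and mds_cluster.delete_all_filesystems()); the class name, helper name, and example test are hypothetical and not part of the commit above.

# Sketch only -- not part of the patch. Names below are illustrative.
from tasks.cephfs.cephfs_test_case import CephFSTestCase


class TestAdminCommandsSketch(CephFSTestCase):
    MDSS_REQUIRED = 1
    CLIENTS_REQUIRED = 1

    def _unmount_all_and_delete_filesystems(self):
        # Unmount every client first so that removing the file systems does
        # not leave mounted clients blocked on a dead MDS and waiting out
        # timeouts.
        for mount in self.mounts:
            mount.umount_wait(require_clean=True)
        self.mds_cluster.delete_all_filesystems()

    def test_example(self):
        self._unmount_all_and_delete_filesystems()
        # ... set up pools and create a new file system here ...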