From bc25bd70f2a357046b300135c1b32f1cf120daa5 Mon Sep 17 00:00:00 2001
From: Patrick Donnelly
Date: Mon, 5 Oct 2020 10:31:23 -0700
Subject: [PATCH] qa: unmount all clients before deleting the file system

Otherwise we have unnecessary timeout waits.

Signed-off-by: Patrick Donnelly
---
 qa/tasks/cephfs/test_admin.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/qa/tasks/cephfs/test_admin.py b/qa/tasks/cephfs/test_admin.py
index 5708f7671bc..f322fb4f8cc 100644
--- a/qa/tasks/cephfs/test_admin.py
+++ b/qa/tasks/cephfs/test_admin.py
@@ -121,6 +121,7 @@ class TestAdminCommands(CephFSTestCase):
         That a new file system warns/fails with an EC default data pool.
         """
 
+        self.mount_a.umount_wait(require_clean=True)
         self.mds_cluster.delete_all_filesystems()
         n = "test_new_default_ec"
         self._setup_ec_pools(n)
@@ -139,6 +140,7 @@ class TestAdminCommands(CephFSTestCase):
         That a new file system succeeds with an EC default data pool with --force.
         """
 
+        self.mount_a.umount_wait(require_clean=True)
         self.mds_cluster.delete_all_filesystems()
         n = "test_new_default_ec_force"
         self._setup_ec_pools(n)
@@ -149,6 +151,7 @@ class TestAdminCommands(CephFSTestCase):
         That a new file system fails with an EC default data pool without overwrite.
         """
 
+        self.mount_a.umount_wait(require_clean=True)
         self.mds_cluster.delete_all_filesystems()
         n = "test_new_default_ec_no_overwrite"
         self._setup_ec_pools(n, overwrites=False)
@@ -176,6 +179,7 @@ class TestAdminCommands(CephFSTestCase):
         """
         That the application metadata set on the pools of a newly created filesystem are as expected.
         """
+        self.mount_a.umount_wait(require_clean=True)
         self.mds_cluster.delete_all_filesystems()
         fs_name = "test_fs_new_pool_application"
         keys = ['metadata', 'data']
--
2.39.5
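
Note on the repeated hunk: each of the four tests gains the same
self.mount_a.umount_wait(require_clean=True) call immediately before
self.mds_cluster.delete_all_filesystems(), so that no client session is
attached when the file system is torn down and the teardown does not have
to wait for session timeouts. A minimal sketch of that ordering, assuming
a CephFSTestCase-style test with a mounted client (the helper name
_delete_filesystems_cleanly is hypothetical and not part of this patch):

    # Sketch only: illustrates the unmount-before-delete ordering this
    # patch applies inline in each test; the helper itself is hypothetical.
    def _delete_filesystems_cleanly(self):
        # Cleanly unmount the client first; require_clean=True makes the
        # test fail if the unmount does not complete cleanly.
        self.mount_a.umount_wait(require_clean=True)
        # With no client attached, deleting the file systems no longer
        # has to wait for the client session to time out.
        self.mds_cluster.delete_all_filesystems()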