From 5ec2060f7566e3ef0cfcd7dc3e9274a6600519e4 Mon Sep 17 00:00:00 2001
From: Patrick Donnelly
Date: Fri, 30 Oct 2020 14:40:25 -0700
Subject: [PATCH] qa: unmount volumes before removal

To avoid potential failures/hangs in umount.

Fixes: https://tracker.ceph.com/issues/23718
Signed-off-by: Patrick Donnelly
(cherry picked from commit 4ca8aaafa32bb7397eec80a8eb366d2dd781a72f)
---
 qa/tasks/cephfs/test_volumes.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/qa/tasks/cephfs/test_volumes.py b/qa/tasks/cephfs/test_volumes.py
index 7984cea9205c..344907d1316d 100644
--- a/qa/tasks/cephfs/test_volumes.py
+++ b/qa/tasks/cephfs/test_volumes.py
@@ -308,6 +308,8 @@ class TestVolumes(CephFSTestCase):
         That the volume can only be removed when --yes-i-really-mean-it is used
         and verify that the deleted volume is not listed anymore.
         """
+        for m in self.mounts:
+            m.umount_wait()
         try:
             self._fs_cmd("volume", "rm", self.volname)
         except CommandFailedError as ce:
@@ -330,6 +332,8 @@ class TestVolumes(CephFSTestCase):
         That the arbitrary pool added to the volume out of band is removed
         successfully on volume removal.
         """
+        for m in self.mounts:
+            m.umount_wait()
         new_pool = "new_pool"
         # add arbitrary data pool
         self.fs.add_data_pool(new_pool)
@@ -351,6 +355,8 @@ class TestVolumes(CephFSTestCase):
         That the volume can only be removed when mon_allowd_pool_delete is set to true
         and verify that the pools are removed after volume deletion.
         """
+        for m in self.mounts:
+            m.umount_wait()
         self.config_set('mon', 'mon_allow_pool_delete', False)
         try:
             self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
--
2.47.3
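
The patch adds the same two-line unmount loop to three tests. If more tests grow the same requirement, the pattern could be pulled into a small helper on the test class. Below is a minimal sketch of such a helper, using only names already visible in the diff (self.mounts, umount_wait(), self._fs_cmd(), self.volname); the helper name, the sketch class, the test name, and the import path are illustrative assumptions, not part of this patch or the upstream code.

    # Illustrative sketch only, not part of this patch. It factors the repeated
    # "unmount all clients, then remove the volume" pattern into one helper.
    from tasks.cephfs.cephfs_test_case import CephFSTestCase  # import path assumed


    class TestVolumesSketch(CephFSTestCase):
        def _rm_volume_with_clients_unmounted(self, *extra_args):
            # Unmount every client first so "fs volume rm" cannot race with,
            # or hang behind, an active mount of the volume being removed.
            for m in self.mounts:
                m.umount_wait()
            self._fs_cmd("volume", "rm", self.volname, *extra_args)

        def test_volume_rm_requires_confirmation(self):
            # Equivalent to the patched tests: all clients are unmounted
            # before the volume removal command runs.
            self._rm_volume_with_clients_unmounted("--yes-i-really-mean-it")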