From 9aaf6118a4f259c9450af75d7b879ae2616bf77c Mon Sep 17 00:00:00 2001
From: Patrick Donnelly
Date: Mon, 11 Mar 2019 09:10:57 -0700
Subject: [PATCH] qa: unmount clients prior to marking fs down

Evicted RHEL7.5 clients may hang.

Fixes: http://tracker.ceph.com/issues/38677
Signed-off-by: Patrick Donnelly
---
 qa/tasks/cephfs/test_failover.py | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/qa/tasks/cephfs/test_failover.py b/qa/tasks/cephfs/test_failover.py
index 63bfce6e8c2b7..f1bb0f22a5b37 100644
--- a/qa/tasks/cephfs/test_failover.py
+++ b/qa/tasks/cephfs/test_failover.py
@@ -96,6 +96,8 @@ class TestClusterResize(CephFSTestCase):
 
         That marking a FS down does not generate a health warning
         """
+        self.mount_a.umount_wait()
+
         self.fs.set_down()
         try:
             self.wait_for_health("", 30)
@@ -111,6 +113,8 @@ class TestClusterResize(CephFSTestCase):
 
         That marking a FS down twice does not wipe old_max_mds.
         """
+        self.mount_a.umount_wait()
+
         self.grow(2)
         self.fs.set_down()
         self.fs.wait_for_daemons()
@@ -123,6 +127,8 @@ class TestClusterResize(CephFSTestCase):
 
         That setting max_mds undoes down.
         """
+        self.mount_a.umount_wait()
+
         self.fs.set_down()
         self.fs.wait_for_daemons()
         self.grow(2)
@@ -133,6 +139,8 @@ class TestClusterResize(CephFSTestCase):
 
         That down setting toggles and sets max_mds appropriately.
         """
+        self.mount_a.umount_wait()
+
         self.fs.set_down()
         self.fs.wait_for_daemons()
         self.assertEqual(self.fs.get_var("max_mds"), 0)
-- 
2.39.5
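
For reference, a minimal sketch of how the last test in the diff reads once its hunk is applied. The method name is an assumption (it is not visible in the hunk); the class name, docstring, and body lines are taken from the diff, and CephFSTestCase, self.fs, and self.mount_a are provided by the Ceph qa framework rather than defined here.

    # Sketch only: reconstructed from the final hunk of this patch.
    # Anything not shown in the diff (the method name, surrounding
    # test setup) is assumed, not taken from the source tree.
    class TestClusterResize(CephFSTestCase):

        def test_down(self):  # method name assumed
            """
            That down setting toggles and sets max_mds appropriately.
            """
            # Added by this patch: unmount the client before marking the
            # fs down, so an evicted RHEL7.5 client cannot hang the test.
            self.mount_a.umount_wait()

            self.fs.set_down()
            self.fs.wait_for_daemons()
            self.assertEqual(self.fs.get_var("max_mds"), 0)

The same pattern is applied to all four tests touched by the patch: umount_wait() runs first, so that set_down() never has to evict a live client mount.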