git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
qa/cephfs: kill mount if it gets evicted by mds 17694/head
author	Yan, Zheng <zyan@redhat.com>
	Wed, 13 Sep 2017 08:34:40 +0000 (16:34 +0800)
committer	Yan, Zheng <zyan@redhat.com>
	Wed, 13 Sep 2017 13:30:51 +0000 (21:30 +0800)
otherwise, teardown() hangs at umount

Fixes: http://tracker.ceph.com/issues/21275
Signed-off-by: "Yan, Zheng" <zyan@redhat.com>
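
The reason for the try/finally: once the MDS evicts mount_a, that client can no
longer respond, so a normal umount in teardown() blocks. Killing the mount and
then remounting it avoids the hang. As a rough sketch of the new control flow
(helper names are taken from the diff below; the surrounding CephFSTestCase
fixture and the lock_taker greenlet are assumed from the existing test):

    try:
        # Evict the lock-holding client via the MDS admin socket.
        mount_a_client_id = self.mount_a.get_global_id()
        self.fs.mds_asok(['session', 'evict', "%s" % mount_a_client_id])

        # With mount_a evicted, the waiting client should acquire the lock.
        self.wait_until_true(lambda: lock_taker.finished, timeout=10)
    finally:
        # An evicted mount cannot be unmounted cleanly, so kill it
        # outright instead of letting teardown() hang in umount.
        self.mount_a.kill()
        self.mount_a.kill_cleanup()

    # Bring the client back so subsequent tests see a healthy mount.
    self.mount_a.mount()
    self.mount_a.wait_until_mounted()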
qa/tasks/cephfs/test_client_recovery.py

index 4940ab89b7cb2185a603b06f26d5b61315ecd050..fd58c1427338e1cf1f234ec55361e33b56782730 100644 (file)
@@ -395,14 +395,21 @@ class TestClientRecovery(CephFSTestCase):
         self.assertFalse(lock_holder.finished)
         self.assertFalse(lock_taker.finished)
 
-        mount_a_client_id = self.mount_a.get_global_id()
-        self.fs.mds_asok(['session', 'evict', "%s" % mount_a_client_id])
+        try:
+            mount_a_client_id = self.mount_a.get_global_id()
+            self.fs.mds_asok(['session', 'evict', "%s" % mount_a_client_id])
 
-        # Evicting mount_a should let mount_b's attepmt to take the lock
-        # suceed
-        self.wait_until_true(
-            lambda: lock_taker.finished,
-            timeout=10)
+            # Evicting mount_a should let mount_b's attempt to take the lock
+            # succeed
+            self.wait_until_true(lambda: lock_taker.finished, timeout=10)
+        finally:
+            # teardown() doesn't quite handle this case cleanly, so help it out
+            self.mount_a.kill()
+            self.mount_a.kill_cleanup()
+
+        # Bring the client back
+        self.mount_a.mount()
+        self.mount_a.wait_until_mounted()
 
     def test_dir_fsync(self):
        self._test_fsync(True);