qa/tasks/cephfs: add mount_wait() support to simplify the code
author    Xiubo Li <xiubli@redhat.com>
          Fri, 3 Apr 2020 09:26:22 +0000 (05:26 -0400)
committer Xiubo Li <xiubli@redhat.com>
          Tue, 14 Apr 2020 11:47:04 +0000 (07:47 -0400)
In most cases we should wait for the mountpoint to become ready before
using it; a fuse mountpoint in particular may take a few seconds to get
ready.

Fixes: https://tracker.ceph.com/issues/44044
Signed-off-by: Xiubo Li <xiubli@redhat.com>
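
For illustration, a minimal sketch of how a test would use the new helper
instead of the previous two-step pattern. The test class and test name below
are hypothetical; mount_a, umount_wait(), run_shell() and CephFSTestCase come
from the existing qa framework, and mount_wait() is the helper added by this
commit.

    from tasks.cephfs.cephfs_test_case import CephFSTestCase

    class TestMountWaitExample(CephFSTestCase):  # hypothetical example test
        CLIENTS_REQUIRED = 1

        def test_remount(self):
            self.mount_a.umount_wait()
            # Previously two calls were needed:
            #   self.mount_a.mount()
            #   self.mount_a.wait_until_mounted()
            # mount_wait() combines them and returns only once the
            # (possibly fuse) mountpoint is actually usable.
            self.mount_a.mount_wait()
            self.mount_a.run_shell(["ls", "."])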
21 files changed:
qa/tasks/cephfs/cephfs_test_case.py
qa/tasks/cephfs/mount.py
qa/tasks/cephfs/test_auto_repair.py
qa/tasks/cephfs/test_client_limits.py
qa/tasks/cephfs/test_client_recovery.py
qa/tasks/cephfs/test_damage.py
qa/tasks/cephfs/test_data_scan.py
qa/tasks/cephfs/test_failover.py
qa/tasks/cephfs/test_flush.py
qa/tasks/cephfs/test_forward_scrub.py
qa/tasks/cephfs/test_journal_repair.py
qa/tasks/cephfs/test_misc.py
qa/tasks/cephfs/test_pool_perm.py
qa/tasks/cephfs/test_readahead.py
qa/tasks/cephfs/test_recovery_pool.py
qa/tasks/cephfs/test_scrub_checks.py
qa/tasks/cephfs/test_sessionmap.py
qa/tasks/cephfs/test_snapshots.py
qa/tasks/cephfs/test_strays.py
qa/tasks/cephfs/test_volume_client.py
qa/tasks/cephfs/test_volumes.py

diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
index d18a081981042c28f01ace420e8abcfdd6814b23..5c78bef808eca079e3f307b9b4eb535c5275baaa 100644
@@ -142,8 +142,7 @@ class CephFSTestCase(CephTestCase):
 
             # Mount the requested number of clients
             for i in range(0, self.CLIENTS_REQUIRED):
-                self.mounts[i].mount()
-                self.mounts[i].wait_until_mounted()
+                self.mounts[i].mount_wait()
 
         if self.REQUIRE_RECOVERY_FILESYSTEM:
             if not self.REQUIRE_FILESYSTEM:
diff --git a/qa/tasks/cephfs/mount.py b/qa/tasks/cephfs/mount.py
index 3ebcd6fc2d704641d92425bab677a8e73a909e8d..dfea9cf154f37d7296d4b261765d7f7d124481ea 100644
@@ -415,6 +415,11 @@ class CephFSMount(object):
     def mount(self, mount_path=None, mount_fs_name=None, mountpoint=None, mount_options=[]):
         raise NotImplementedError()
 
+    def mount_wait(self, mount_path=None, mount_fs_name=None, mountpoint=None, mount_options=[]):
+        self.mount(mount_path=mount_path, mount_fs_name=mount_fs_name, mountpoint=mountpoint,
+                   mount_options=mount_options)
+        self.wait_until_mounted()
+
     def umount(self):
         raise NotImplementedError()
 
diff --git a/qa/tasks/cephfs/test_auto_repair.py b/qa/tasks/cephfs/test_auto_repair.py
index c0aa2e4c70fae8e085b28a900cd635e8beb86249..141be9883d02225d2c497338ff787c9abbdc2a24 100644
@@ -44,8 +44,7 @@ class TestMDSAutoRepair(CephFSTestCase):
         self.fs.rados(["rmxattr", dir_objname, "parent"])
 
         # readdir (fetch dirfrag) should fix testdir1's backtrace
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
         self.mount_a.run_shell(["ls", "testdir1"])
 
         # flush journal entries to dirfrag objects
diff --git a/qa/tasks/cephfs/test_client_limits.py b/qa/tasks/cephfs/test_client_limits.py
index f23c00b2a7bc77c4d367dfeabadbf1184ba35ecc..8491c3d7d888207ae6e1d2f084605e6c66ea1af1 100644
@@ -122,8 +122,7 @@ class TestClientLimits(CephFSTestCase):
 
         self.set_conf('client.{0}'.format(self.mount_a.client_id), 'client inject release failure', 'true')
         self.mount_a.teardown()
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
         mount_a_client_id = self.mount_a.get_global_id()
 
         # Client A creates a file.  He will hold the write caps on the file, and later (simulated bug) fail
@@ -164,8 +163,7 @@ class TestClientLimits(CephFSTestCase):
 
         self.set_conf('client', 'client inject fixed oldest tid', 'true')
         self.mount_a.teardown()
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
         self.fs.mds_asok(['config', 'set', 'mds_max_completed_requests', '{0}'.format(max_requests)])
 
@@ -195,8 +193,7 @@ class TestClientLimits(CephFSTestCase):
             self.mount_a.run_shell(["mkdir", "subdir"])
             self.mount_a.umount_wait()
             self.set_conf('client', 'client mountpoint', '/subdir')
-            self.mount_a.mount()
-            self.mount_a.wait_until_mounted()
+            self.mount_a.mount_wait()
             root_ino = self.mount_a.path_to_ino(".")
             self.assertEqual(root_ino, 1);
 
diff --git a/qa/tasks/cephfs/test_client_recovery.py b/qa/tasks/cephfs/test_client_recovery.py
index d28388028fda612802ae420b820e9ffbef750061..d1f9e245153c0de9796685a52e114eca498cf122 100644
@@ -103,8 +103,7 @@ class TestClientRecovery(CephFSTestCase):
 
         self.mount_b.check_files()
 
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
         # Check that the admin socket interface is correctly reporting
         # two sessions
@@ -169,8 +168,7 @@ class TestClientRecovery(CephFSTestCase):
 
         # Check that the client that timed out during reconnect can
         # mount again and do I/O
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
         self.mount_a.create_destroy()
 
         self.assert_session_count(2)
@@ -212,8 +210,7 @@ class TestClientRecovery(CephFSTestCase):
         self.mount_a.kill_cleanup()
 
         # Bring the client back
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
         self.mount_a.create_destroy()
 
     def _test_stale_caps(self, write):
@@ -226,8 +223,7 @@ class TestClientRecovery(CephFSTestCase):
         else:
             self.mount_a.run_shell(["touch", "background_file"])
             self.mount_a.umount_wait()
-            self.mount_a.mount()
-            self.mount_a.wait_until_mounted()
+            self.mount_a.mount_wait()
             cap_holder = self.mount_a.open_background(write=False)
 
         self.assert_session_count(2)
@@ -438,8 +434,7 @@ class TestClientRecovery(CephFSTestCase):
             self.mount_a.kill_cleanup()
 
         # Bring the client back
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
     def test_dir_fsync(self):
         self._test_fsync(True);
@@ -499,8 +494,7 @@ class TestClientRecovery(CephFSTestCase):
         log.info("Reached active...")
 
         # Is the child dentry visible from mount B?
-        self.mount_b.mount()
-        self.mount_b.wait_until_mounted()
+        self.mount_b.mount_wait()
         self.mount_b.run_shell(["ls", "subdir/childfile"])
 
     def test_unmount_for_evicted_client(self):
diff --git a/qa/tasks/cephfs/test_damage.py b/qa/tasks/cephfs/test_damage.py
index d03e027e7b61c532db51fa3fd772b65478d4b752..9a79392191b600cc5f50b6567801b6e904c08604 100644
@@ -305,8 +305,7 @@ class TestDamage(CephFSTestCase):
                 log.info("Daemons came up after mutation '{0}', proceeding to ls".format(mutation.desc))
 
             # MDS is up, should go damaged on ls or client mount
-            self.mount_a.mount()
-            self.mount_a.wait_until_mounted()
+            self.mount_a.mount_wait()
             if mutation.ls_path == ".":
                 proc = self.mount_a.run_shell(["ls", "-R", mutation.ls_path], wait=False)
             else:
@@ -401,8 +400,7 @@ class TestDamage(CephFSTestCase):
         self.fs.mds_restart()
         self.fs.wait_for_daemons()
 
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
         dentries = self.mount_a.ls("subdir/")
 
         # The damaged guy should have disappeared
@@ -461,8 +459,7 @@ class TestDamage(CephFSTestCase):
         self.assertEqual(scrub_json["raw_stats"]["passed"], False)
 
         # Check that the file count is now correct
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
         nfiles = self.mount_a.getfattr("./subdir", "ceph.dir.files")
         self.assertEqual(nfiles, "1")
 
@@ -505,7 +502,7 @@ class TestDamage(CephFSTestCase):
         self.mds_cluster.mds_fail_restart()
         self.fs.wait_for_daemons()
 
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
 
         # Case 1: un-decodeable backtrace
 
diff --git a/qa/tasks/cephfs/test_data_scan.py b/qa/tasks/cephfs/test_data_scan.py
index dcc48a2a2ff6047844b78ca6a5c4c839db8a6140..e83bca6c767c452854beb8a509a6efe3577feb52 100644
@@ -381,8 +381,7 @@ class TestDataScan(CephFSTestCase):
         log.info(str(self.mds_cluster.status()))
 
         # Mount a client
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
         # See that the files are present and correct
         errors = workload.validate()
@@ -471,8 +470,7 @@ class TestDataScan(CephFSTestCase):
         # Start filesystem back up, observe that the file appears to be gone in an `ls`
         self.fs.mds_restart()
         self.fs.wait_for_daemons()
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
         files = self.mount_a.run_shell(["ls", "subdir/"]).stdout.getvalue().strip().split("\n")
         self.assertListEqual(sorted(files), sorted(list(set(file_names) - set([victim_dentry]))))
 
@@ -491,8 +489,7 @@ class TestDataScan(CephFSTestCase):
         # and points to the correct file data.
         self.fs.mds_restart()
         self.fs.wait_for_daemons()
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
         out = self.mount_a.run_shell(["cat", "subdir/{0}".format(victim_dentry)]).stdout.getvalue().strip()
         self.assertEqual(out, victim_dentry)
 
@@ -594,8 +591,7 @@ class TestDataScan(CephFSTestCase):
         self.fs.mds_restart()
         self.fs.wait_for_daemons()
 
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
         # link count was adjusted?
         file1_nlink = self.mount_a.path_to_nlink("testdir1/file1")
diff --git a/qa/tasks/cephfs/test_failover.py b/qa/tasks/cephfs/test_failover.py
index c9eb8fbb68853976cf6c501134bf694986affdbd..ec21e61614a8ca43ef71b396f5384f8d5fca64aa 100644
@@ -414,8 +414,7 @@ class TestFailover(CephFSTestCase):
         self.mounts[0].umount_wait()
 
         # Control: that we can mount and unmount usually, while the cluster is healthy
-        self.mounts[0].mount()
-        self.mounts[0].wait_until_mounted()
+        self.mounts[0].mount_wait()
         self.mounts[0].umount_wait()
 
         # Stop the daemon processes
@@ -432,7 +431,7 @@ class TestFailover(CephFSTestCase):
 
         self.wait_until_true(laggy, grace * 2)
         with self.assertRaises(CommandFailedError):
-            self.mounts[0].mount()
+            self.mounts[0].mount_wait()
 
     def test_standby_count_wanted(self):
         """
diff --git a/qa/tasks/cephfs/test_flush.py b/qa/tasks/cephfs/test_flush.py
index ee0b1c92b1992e64bae7cfff36a31f205dad890a..17cb849700eb3c7f88fa31a38757b41f16246eca 100644
@@ -88,8 +88,7 @@ class TestFlush(CephFSTestCase):
         initial_purges = self.fs.mds_asok(['perf', 'dump', 'mds_cache'])['mds_cache']['strays_enqueued']
 
         # Use a client to delete a file
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
         self.mount_a.run_shell(["rm", "-rf", "mydir"])
 
         # Flush the journal so that the directory inode can be purged
diff --git a/qa/tasks/cephfs/test_forward_scrub.py b/qa/tasks/cephfs/test_forward_scrub.py
index cc861b385a40cf2ea38ebcb307639fd67f08954e..7ed8564d22887bed10b536ec1bfbbda8c69b5d63 100644
@@ -136,7 +136,7 @@ class TestForwardScrub(CephFSTestCase):
         # Create a new inode that's just in the log, i.e. would
         # look orphaned to backward scan if backward scan wisnae
         # respectin' tha scrub_tag xattr.
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
         self.mount_a.run_shell(["mkdir", "parent/unflushed"])
         self.mount_a.run_shell(["dd", "if=/dev/urandom",
                                 "of=./parent/unflushed/jfile",
@@ -159,7 +159,7 @@ class TestForwardScrub(CephFSTestCase):
         self.fs.wait_for_daemons()
 
         # See that the orphaned file is indeed missing from a client's POV
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
         damaged_state = self._get_paths_to_ino()
         self.assertNotIn("./parent/flushed/bravo", damaged_state)
         self.mount_a.umount_wait()
@@ -196,7 +196,7 @@ class TestForwardScrub(CephFSTestCase):
         # and no lost+found, and no extra inodes!
         self.fs.mds_restart()
         self.fs.wait_for_daemons()
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
         self._validate_linkage(inos)
 
     def _stash_inotable(self):
@@ -222,7 +222,7 @@ class TestForwardScrub(CephFSTestCase):
 
         inotable_copy = self._stash_inotable()
 
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
 
         self.mount_a.write_n_mb("file2_sixmegs", 6)
         self.mount_a.write_n_mb("file3_sixmegs", 6)
diff --git a/qa/tasks/cephfs/test_journal_repair.py b/qa/tasks/cephfs/test_journal_repair.py
index a52455d7100072895e23b18c3ac1018d24e62c62..61037b96d7320d3f4d48d67e2cfbf10bfdb8c324 100644
@@ -92,8 +92,7 @@ class TestJournalRepair(CephFSTestCase):
         self.fs.wait_for_daemons()
 
         # List files
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
         # First ls -R to populate MDCache, such that hardlinks will
         # resolve properly (recover_dentries does not create backtraces,
@@ -102,8 +101,7 @@ class TestJournalRepair(CephFSTestCase):
         # FIXME: hook in forward scrub here to regenerate backtraces
         proc = self.mount_a.run_shell(['ls', '-R'])
         self.mount_a.umount_wait()  # remount to clear client cache before our second ls
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
         proc = self.mount_a.run_shell(['ls', '-R'])
         self.assertEqual(proc.stdout.getvalue().strip(),
@@ -278,7 +276,7 @@ class TestJournalRepair(CephFSTestCase):
         self.fs.mds_fail_restart(active_mds_names[0])
         self.wait_until_equal(lambda: self.fs.get_active_names(), [active_mds_names[0]], 30,
                               reject_fn=lambda v: len(v) > 1)
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
         self.mount_a.run_shell(["ls", "-R"], wait=True)
 
     def test_table_tool(self):
@@ -434,7 +432,7 @@ class TestJournalRepair(CephFSTestCase):
         self.fs.mds_restart()
         self.fs.wait_for_daemons()
 
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
 
         # trivial sync moutn a
         workunit(self.ctx, {
diff --git a/qa/tasks/cephfs/test_misc.py b/qa/tasks/cephfs/test_misc.py
index 67e1498499553623df67e7bb1dc9de3fb53a3690..44d9ee420bedc5091edd09b8fbde2613bdcc2b35 100644
@@ -25,8 +25,7 @@ class TestMisc(CephFSTestCase):
         # on lookup/open
         self.mount_b.umount_wait()
         self.set_conf('client', 'client debug getattr caps', 'true')
-        self.mount_b.mount()
-        self.mount_b.wait_until_mounted()
+        self.mount_b.mount_wait()
 
         # create a file and hold it open. MDS will issue CEPH_CAP_EXCL_*
         # to mount_a
diff --git a/qa/tasks/cephfs/test_pool_perm.py b/qa/tasks/cephfs/test_pool_perm.py
index a1f234a22538bfae7086e5a0ea8eb36d7846fcd7..9912debedee3a1cb87daa823fcfa7480758e76d0 100644
@@ -35,8 +35,7 @@ class TestPoolPerm(CephFSTestCase):
             'allow r pool={0}'.format(self.fs.get_data_pool_name()))
 
         self.mount_a.umount_wait()
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
         # write should fail
         self.mount_a.run_python(remote_script.format(path=file_path, check_read=str(False)))
@@ -47,8 +46,7 @@ class TestPoolPerm(CephFSTestCase):
             'allow w pool={0}'.format(self.fs.get_data_pool_name()))
 
         self.mount_a.umount_wait()
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
         # read should fail
         self.mount_a.run_python(remote_script.format(path=file_path, check_read=str(True)))
@@ -77,8 +75,7 @@ class TestPoolPerm(CephFSTestCase):
             ))
 
         self.mount_a.umount_wait()
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
         with self.assertRaises(CommandFailedError):
             self.mount_a.setfattr("layoutfile", "ceph.file.layout.pool",
@@ -96,8 +93,7 @@ class TestPoolPerm(CephFSTestCase):
                 self.fs.get_data_pool_names()[0],
                 self.fs.get_data_pool_names()[1],
             ))
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
         self.mount_a.setfattr("layoutfile", "ceph.file.layout.pool",
                               new_pool_name)
         self.mount_a.setfattr("layoutdir", "ceph.dir.layout.pool",
diff --git a/qa/tasks/cephfs/test_readahead.py b/qa/tasks/cephfs/test_readahead.py
index 31e7bf187546a604e5359776c595ab6a7f820192..e936a94b9aa7ce78095b3db94432a12206dae611 100644
@@ -15,8 +15,7 @@ class TestReadahead(CephFSTestCase):
 
         # Unmount and remount the client to flush cache
         self.mount_a.umount_wait()
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
         initial_op_r = self.mount_a.admin_socket(['perf', 'dump', 'objecter'])['objecter']['op_r']
         self.mount_a.run_shell(["dd", "if=foo", "of=/dev/null", "bs=128k", "count=32"])
diff --git a/qa/tasks/cephfs/test_recovery_pool.py b/qa/tasks/cephfs/test_recovery_pool.py
index 36b4e58ec8c17f985e5824575abe5fb04e4843b7..0b3dc56bff933df86e39592481adcc7b57634ac9 100644
@@ -186,10 +186,8 @@ class TestRecoveryPool(CephFSTestCase):
         log.info(str(self.mds_cluster.status()))
 
         # Mount a client
-        self.mount_a.mount()
-        self.mount_b.mount(mount_fs_name=recovery_fs)
-        self.mount_a.wait_until_mounted()
-        self.mount_b.wait_until_mounted()
+        self.mount_a.mount_wait()
+        self.mount_b.mount_wait(mount_fs_name=recovery_fs)
 
         # See that the files are present and correct
         errors = workload.validate()
diff --git a/qa/tasks/cephfs/test_scrub_checks.py b/qa/tasks/cephfs/test_scrub_checks.py
index 012b6c009fda2884a278074bdac9dc8e3432359f..b6ca0b89802abe903b822593065e3f19522c6d98 100644
@@ -298,8 +298,7 @@ class TestScrubChecks(CephFSTestCase):
         self.fs.mds_fail_restart()
         self.fs.wait_for_daemons()
 
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
         # fragstat indicates the directory is not empty, rmdir should fail
         with self.assertRaises(CommandFailedError) as ar:
diff --git a/qa/tasks/cephfs/test_sessionmap.py b/qa/tasks/cephfs/test_sessionmap.py
index a4642de0b679e2de2ba92cf39e249ebaf12540d0..bdcde71d095eb571b8760abce07d75aa80378fb6 100644
@@ -62,8 +62,7 @@ class TestSessionMap(CephFSTestCase):
 
         status = self.fs.status()
         s = self._get_connection_count(status=status)
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
         self.assertGreater(self._get_connection_count(status=status), s)
         self.mount_a.umount_wait()
         e = self._get_connection_count(status=status)
@@ -85,8 +84,8 @@ class TestSessionMap(CephFSTestCase):
         status = self.fs.wait_for_daemons()
 
         # Bring the clients back
-        self.mount_a.mount()
-        self.mount_b.mount()
+        self.mount_a.mount_wait()
+        self.mount_b.mount_wait()
 
         # See that they've got sessions
         self.assert_session_count(2, mds_id=self.fs.get_rank(status=status)['name'])
@@ -178,8 +177,7 @@ class TestSessionMap(CephFSTestCase):
         # Configure a client that is limited to /foo/bar
         self._configure_auth(self.mount_b, "badguy", "allow rw path=/foo/bar")
         # Check he can mount that dir and do IO
-        self.mount_b.mount(mount_path="/foo/bar")
-        self.mount_b.wait_until_mounted()
+        self.mount_b.mount_wait(mount_path="/foo/bar")
         self.mount_b.create_destroy()
         self.mount_b.umount_wait()
 
@@ -225,5 +223,4 @@ class TestSessionMap(CephFSTestCase):
         self.assert_session_count(1, mds_id=self.fs.get_rank(rank=1, status=status)['name'])
 
         self.mount_a.kill_cleanup()
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
diff --git a/qa/tasks/cephfs/test_snapshots.py b/qa/tasks/cephfs/test_snapshots.py
index a1dcc23d340228768dee711e3757320d5acc1202..7cd91ce611f72f3e7c17ee0fc567942096f1f2e7 100644
@@ -131,8 +131,7 @@ class TestSnapshots(CephFSTestCase):
                 else:
                     self.assertGreater(self._get_last_created_snap(rank=0), last_created)
 
-            self.mount_a.mount()
-            self.mount_a.wait_until_mounted()
+            self.mount_a.mount_wait()
 
         self.mount_a.run_shell(["rmdir", Raw("d1/dir/.snap/*")])
 
@@ -173,8 +172,7 @@ class TestSnapshots(CephFSTestCase):
                 else:
                     self.assertGreater(self._get_last_created_snap(rank=0), last_created)
 
-            self.mount_a.mount()
-            self.mount_a.wait_until_mounted()
+            self.mount_a.mount_wait()
 
         self.mount_a.run_shell(["rmdir", Raw("d1/dir/.snap/*")])
 
@@ -216,8 +214,7 @@ class TestSnapshots(CephFSTestCase):
         self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)
         self.assertEqual(self._get_last_created_snap(rank=0), last_created)
 
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
     def test_snapclient_cache(self):
         """
@@ -345,8 +342,7 @@ class TestSnapshots(CephFSTestCase):
             self.assertEqual(snaps_dump["last_created"], rank0_cache["last_created"])
             self.assertTrue(_check_snapclient_cache(snaps_dump, cache_dump=rank0_cache));
 
-            self.mount_a.mount()
-            self.mount_a.wait_until_mounted()
+            self.mount_a.mount_wait()
 
         self.mount_a.run_shell(["rmdir", Raw("d0/d2/dir/.snap/*")])
 
diff --git a/qa/tasks/cephfs/test_strays.py b/qa/tasks/cephfs/test_strays.py
index 51732c195d96b20c5f06798366b3af9ac72bf044..a5058441e7cadbd92cdc65443c8d53d6eb5ed137 100644
@@ -373,7 +373,7 @@ class TestStrays(CephFSTestCase):
         self.fs.mds_asok(['flush', 'journal'])
         self.fs.mds_fail_restart()
         self.fs.wait_for_daemons()
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
 
         # Unlink file_a
         self.mount_a.run_shell(["rm", "-f", "dir_1/file_a"])
@@ -628,7 +628,7 @@ class TestStrays(CephFSTestCase):
         rank_0_id = active_mds_names[0]
         rank_1_id = active_mds_names[1]
 
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
 
         self.mount_a.run_shell(["rm", "-f", "dir_1/original"])
         self.mount_a.umount_wait()
@@ -772,7 +772,7 @@ class TestStrays(CephFSTestCase):
         # zero, but there's actually still a stray, so at the very
         # least the StrayManager stats code is slightly off
 
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
 
         # See that the data from the snapshotted revision of the file is still present
         # and correct
@@ -873,8 +873,7 @@ class TestStrays(CephFSTestCase):
         # remount+flush (release client caps)
         self.mount_a.umount_wait()
         self.fs.mds_asok(["flush", "journal"], mds_id)
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
         # Create 50% more files than the current fragment limit
         self.mount_a.run_python(dedent("""
diff --git a/qa/tasks/cephfs/test_volume_client.py b/qa/tasks/cephfs/test_volume_client.py
index 470e63d09fa1710e18bdd4eb88d39d993f8fad8c..b0b1b2e0fd44ee0dc014e8f502969b6896a82f43 100644
@@ -390,13 +390,13 @@ vc.disconnect()
             m.umount_wait()
 
         # Create a dir on mount A
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
         self.mount_a.run_shell(["mkdir", "parent1"])
         self.mount_a.run_shell(["mkdir", "parent2"])
         self.mount_a.run_shell(["mkdir", "parent1/mydir"])
 
         # Put some files in it from mount B
-        self.mount_b.mount()
+        self.mount_b.mount_wait()
         self.mount_b.run_shell(["touch", "parent1/mydir/afile"])
         self.mount_b.umount_wait()
 
diff --git a/qa/tasks/cephfs/test_volumes.py b/qa/tasks/cephfs/test_volumes.py
index 6f5bdec237a3ccf154499ed16b96af2fc309454e..794c15cedb5c6128c0af52e6860d0490561efa3a 100644
@@ -1280,7 +1280,7 @@ class TestVolumes(CephFSTestCase):
         for subvolume in subvolumes:
             self._fs_cmd("subvolume", "rm", self.volname, subvolume)
 
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
 
         # verify trash dir is clean
         self._wait_for_trash_empty(timeout=300)