From 5c24d91327f598ea9fee59ea87ddc6582d48bd96 Mon Sep 17 00:00:00 2001
From: Xiubo Li
Date: Fri, 3 Apr 2020 05:26:22 -0400
Subject: [PATCH] qa/tasks/cephfs: add mount_wait() support to simplify the code

In most cases we should wait for the mountpoint to become ready; this is
especially true for fuse mountpoints, which may take a few seconds to
become ready.

Fixes: https://tracker.ceph.com/issues/44044
Signed-off-by: Xiubo Li
---
 qa/tasks/cephfs/cephfs_test_case.py     |  3 +--
 qa/tasks/cephfs/mount.py                |  5 +++++
 qa/tasks/cephfs/test_auto_repair.py     |  3 +--
 qa/tasks/cephfs/test_client_limits.py   |  9 +++------
 qa/tasks/cephfs/test_client_recovery.py | 18 ++++++------------
 qa/tasks/cephfs/test_damage.py          | 11 ++++-------
 qa/tasks/cephfs/test_data_scan.py       | 12 ++++--------
 qa/tasks/cephfs/test_failover.py        |  5 ++---
 qa/tasks/cephfs/test_flush.py           |  3 +--
 qa/tasks/cephfs/test_forward_scrub.py   |  8 ++++----
 qa/tasks/cephfs/test_journal_repair.py  | 10 ++++------
 qa/tasks/cephfs/test_misc.py            |  3 +--
 qa/tasks/cephfs/test_pool_perm.py       | 12 ++++--------
 qa/tasks/cephfs/test_readahead.py       |  3 +--
 qa/tasks/cephfs/test_recovery_pool.py   |  6 ++----
 qa/tasks/cephfs/test_scrub_checks.py    |  3 +--
 qa/tasks/cephfs/test_sessionmap.py      | 13 +++++--------
 qa/tasks/cephfs/test_snapshots.py       | 12 ++++--------
 qa/tasks/cephfs/test_strays.py          |  9 ++++-----
 qa/tasks/cephfs/test_volume_client.py   |  4 ++--
 qa/tasks/cephfs/test_volumes.py         |  2 +-
 21 files changed, 60 insertions(+), 94 deletions(-)

diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
index d18a0819810..5c78bef808e 100644
--- a/qa/tasks/cephfs/cephfs_test_case.py
+++ b/qa/tasks/cephfs/cephfs_test_case.py
@@ -142,8 +142,7 @@ class CephFSTestCase(CephTestCase):
 
         # Mount the requested number of clients
         for i in range(0, self.CLIENTS_REQUIRED):
-            self.mounts[i].mount()
-            self.mounts[i].wait_until_mounted()
+            self.mounts[i].mount_wait()
 
         if self.REQUIRE_RECOVERY_FILESYSTEM:
             if not self.REQUIRE_FILESYSTEM:
diff --git a/qa/tasks/cephfs/mount.py b/qa/tasks/cephfs/mount.py
index 3ebcd6fc2d7..dfea9cf154f 100644
--- a/qa/tasks/cephfs/mount.py
+++ b/qa/tasks/cephfs/mount.py
@@ -415,6 +415,11 @@ class CephFSMount(object):
     def mount(self, mount_path=None, mount_fs_name=None, mountpoint=None, mount_options=[]):
         raise NotImplementedError()
 
+    def mount_wait(self, mount_path=None, mount_fs_name=None, mountpoint=None, mount_options=[]):
+        self.mount(mount_path=mount_path, mount_fs_name=mount_fs_name, mountpoint=mountpoint,
+                   mount_options=mount_options)
+        self.wait_until_mounted()
+
     def umount(self):
         raise NotImplementedError()
 
diff --git a/qa/tasks/cephfs/test_auto_repair.py b/qa/tasks/cephfs/test_auto_repair.py
index c0aa2e4c70f..141be9883d0 100644
--- a/qa/tasks/cephfs/test_auto_repair.py
+++ b/qa/tasks/cephfs/test_auto_repair.py
@@ -44,8 +44,7 @@ class TestMDSAutoRepair(CephFSTestCase):
         self.fs.rados(["rmxattr", dir_objname, "parent"])
 
         # readdir (fetch dirfrag) should fix testdir1's backtrace
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
         self.mount_a.run_shell(["ls", "testdir1"])
 
         # flush journal entries to dirfrag objects
diff --git a/qa/tasks/cephfs/test_client_limits.py b/qa/tasks/cephfs/test_client_limits.py
index f23c00b2a7b..8491c3d7d88 100644
--- a/qa/tasks/cephfs/test_client_limits.py
+++ b/qa/tasks/cephfs/test_client_limits.py
@@ -122,8 +122,7 @@ class TestClientLimits(CephFSTestCase):
         self.set_conf('client.{0}'.format(self.mount_a.client_id), 'client inject release failure', 'true')
 
         self.mount_a.teardown()
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
         mount_a_client_id = self.mount_a.get_global_id()
 
         # Client A creates a file. He will hold the write caps on the file, and later (simulated bug) fail
@@ -164,8 +163,7 @@ class TestClientLimits(CephFSTestCase):
 
         self.set_conf('client', 'client inject fixed oldest tid', 'true')
         self.mount_a.teardown()
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
         self.fs.mds_asok(['config', 'set', 'mds_max_completed_requests', '{0}'.format(max_requests)])
 
@@ -195,8 +193,7 @@ class TestClientLimits(CephFSTestCase):
         self.mount_a.run_shell(["mkdir", "subdir"])
         self.mount_a.umount_wait()
         self.set_conf('client', 'client mountpoint', '/subdir')
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
         root_ino = self.mount_a.path_to_ino(".")
         self.assertEqual(root_ino, 1);
 
diff --git a/qa/tasks/cephfs/test_client_recovery.py b/qa/tasks/cephfs/test_client_recovery.py
index d28388028fd..d1f9e245153 100644
--- a/qa/tasks/cephfs/test_client_recovery.py
+++ b/qa/tasks/cephfs/test_client_recovery.py
@@ -103,8 +103,7 @@ class TestClientRecovery(CephFSTestCase):
 
         self.mount_b.check_files()
 
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
         # Check that the admin socket interface is correctly reporting
         # two sessions
@@ -169,8 +168,7 @@ class TestClientRecovery(CephFSTestCase):
 
         # Check that the client that timed out during reconnect can
         # mount again and do I/O
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
         self.mount_a.create_destroy()
 
         self.assert_session_count(2)
@@ -212,8 +210,7 @@ class TestClientRecovery(CephFSTestCase):
         self.mount_a.kill_cleanup()
 
         # Bring the client back
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
         self.mount_a.create_destroy()
 
     def _test_stale_caps(self, write):
@@ -226,8 +223,7 @@ class TestClientRecovery(CephFSTestCase):
         else:
             self.mount_a.run_shell(["touch", "background_file"])
             self.mount_a.umount_wait()
-            self.mount_a.mount()
-            self.mount_a.wait_until_mounted()
+            self.mount_a.mount_wait()
             cap_holder = self.mount_a.open_background(write=False)
 
         self.assert_session_count(2)
@@ -438,8 +434,7 @@ class TestClientRecovery(CephFSTestCase):
         self.mount_a.kill_cleanup()
 
         # Bring the client back
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
     def test_dir_fsync(self):
         self._test_fsync(True);
@@ -499,8 +494,7 @@ class TestClientRecovery(CephFSTestCase):
         log.info("Reached active...")
 
         # Is the child dentry visible from mount B?
-        self.mount_b.mount()
-        self.mount_b.wait_until_mounted()
+        self.mount_b.mount_wait()
         self.mount_b.run_shell(["ls", "subdir/childfile"])
 
     def test_unmount_for_evicted_client(self):
diff --git a/qa/tasks/cephfs/test_damage.py b/qa/tasks/cephfs/test_damage.py
index d03e027e7b6..9a79392191b 100644
--- a/qa/tasks/cephfs/test_damage.py
+++ b/qa/tasks/cephfs/test_damage.py
@@ -305,8 +305,7 @@ class TestDamage(CephFSTestCase):
                 log.info("Daemons came up after mutation '{0}', proceeding to ls".format(mutation.desc))
 
             # MDS is up, should go damaged on ls or client mount
-            self.mount_a.mount()
-            self.mount_a.wait_until_mounted()
+            self.mount_a.mount_wait()
             if mutation.ls_path == ".":
                 proc = self.mount_a.run_shell(["ls", "-R", mutation.ls_path], wait=False)
             else:
@@ -401,8 +400,7 @@ class TestDamage(CephFSTestCase):
 
         self.fs.mds_restart()
         self.fs.wait_for_daemons()
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
         dentries = self.mount_a.ls("subdir/")
 
         # The damaged guy should have disappeared
@@ -461,8 +459,7 @@ class TestDamage(CephFSTestCase):
         self.assertEqual(scrub_json["raw_stats"]["passed"], False)
 
         # Check that the file count is now correct
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
         nfiles = self.mount_a.getfattr("./subdir", "ceph.dir.files")
         self.assertEqual(nfiles, "1")
 
@@ -505,7 +502,7 @@ class TestDamage(CephFSTestCase):
         self.mds_cluster.mds_fail_restart()
         self.fs.wait_for_daemons()
 
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
 
         # Case 1: un-decodeable backtrace
 
diff --git a/qa/tasks/cephfs/test_data_scan.py b/qa/tasks/cephfs/test_data_scan.py
index dcc48a2a2ff..e83bca6c767 100644
--- a/qa/tasks/cephfs/test_data_scan.py
+++ b/qa/tasks/cephfs/test_data_scan.py
@@ -381,8 +381,7 @@ class TestDataScan(CephFSTestCase):
         log.info(str(self.mds_cluster.status()))
 
         # Mount a client
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
         # See that the files are present and correct
         errors = workload.validate()
@@ -471,8 +470,7 @@ class TestDataScan(CephFSTestCase):
         # Start filesystem back up, observe that the file appears to be gone in an `ls`
         self.fs.mds_restart()
         self.fs.wait_for_daemons()
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
         files = self.mount_a.run_shell(["ls", "subdir/"]).stdout.getvalue().strip().split("\n")
         self.assertListEqual(sorted(files), sorted(list(set(file_names) - set([victim_dentry]))))
 
@@ -491,8 +489,7 @@ class TestDataScan(CephFSTestCase):
         # and points to the correct file data.
         self.fs.mds_restart()
         self.fs.wait_for_daemons()
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
         out = self.mount_a.run_shell(["cat", "subdir/{0}".format(victim_dentry)]).stdout.getvalue().strip()
         self.assertEqual(out, victim_dentry)
 
@@ -594,8 +591,7 @@ class TestDataScan(CephFSTestCase):
 
         self.fs.mds_restart()
         self.fs.wait_for_daemons()
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
         # link count was adjusted?
         file1_nlink = self.mount_a.path_to_nlink("testdir1/file1")
diff --git a/qa/tasks/cephfs/test_failover.py b/qa/tasks/cephfs/test_failover.py
index c9eb8fbb688..ec21e61614a 100644
--- a/qa/tasks/cephfs/test_failover.py
+++ b/qa/tasks/cephfs/test_failover.py
@@ -414,8 +414,7 @@ class TestFailover(CephFSTestCase):
         self.mounts[0].umount_wait()
 
         # Control: that we can mount and unmount usually, while the cluster is healthy
-        self.mounts[0].mount()
-        self.mounts[0].wait_until_mounted()
+        self.mounts[0].mount_wait()
         self.mounts[0].umount_wait()
 
         # Stop the daemon processes
@@ -432,7 +431,7 @@ class TestFailover(CephFSTestCase):
         self.wait_until_true(laggy, grace * 2)
 
         with self.assertRaises(CommandFailedError):
-            self.mounts[0].mount()
+            self.mounts[0].mount_wait()
 
     def test_standby_count_wanted(self):
         """
diff --git a/qa/tasks/cephfs/test_flush.py b/qa/tasks/cephfs/test_flush.py
index ee0b1c92b19..17cb849700e 100644
--- a/qa/tasks/cephfs/test_flush.py
+++ b/qa/tasks/cephfs/test_flush.py
@@ -88,8 +88,7 @@ class TestFlush(CephFSTestCase):
         initial_purges = self.fs.mds_asok(['perf', 'dump', 'mds_cache'])['mds_cache']['strays_enqueued']
 
         # Use a client to delete a file
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
         self.mount_a.run_shell(["rm", "-rf", "mydir"])
 
         # Flush the journal so that the directory inode can be purged
diff --git a/qa/tasks/cephfs/test_forward_scrub.py b/qa/tasks/cephfs/test_forward_scrub.py
index cc861b385a4..7ed8564d228 100644
--- a/qa/tasks/cephfs/test_forward_scrub.py
+++ b/qa/tasks/cephfs/test_forward_scrub.py
@@ -136,7 +136,7 @@ class TestForwardScrub(CephFSTestCase):
         # Create a new inode that's just in the log, i.e. would
         # look orphaned to backward scan if backward scan wisnae
         # respectin' tha scrub_tag xattr.
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
         self.mount_a.run_shell(["mkdir", "parent/unflushed"])
         self.mount_a.run_shell(["dd", "if=/dev/urandom",
                                 "of=./parent/unflushed/jfile",
@@ -159,7 +159,7 @@ class TestForwardScrub(CephFSTestCase):
         self.fs.wait_for_daemons()
 
         # See that the orphaned file is indeed missing from a client's POV
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
         damaged_state = self._get_paths_to_ino()
         self.assertNotIn("./parent/flushed/bravo", damaged_state)
         self.mount_a.umount_wait()
@@ -196,7 +196,7 @@ class TestForwardScrub(CephFSTestCase):
         # and no lost+found, and no extra inodes!
         self.fs.mds_restart()
         self.fs.wait_for_daemons()
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
         self._validate_linkage(inos)
 
     def _stash_inotable(self):
@@ -222,7 +222,7 @@ class TestForwardScrub(CephFSTestCase):
 
         inotable_copy = self._stash_inotable()
 
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
         self.mount_a.write_n_mb("file2_sixmegs", 6)
         self.mount_a.write_n_mb("file3_sixmegs", 6)
diff --git a/qa/tasks/cephfs/test_journal_repair.py b/qa/tasks/cephfs/test_journal_repair.py
index a52455d7100..61037b96d73 100644
--- a/qa/tasks/cephfs/test_journal_repair.py
+++ b/qa/tasks/cephfs/test_journal_repair.py
@@ -92,8 +92,7 @@ class TestJournalRepair(CephFSTestCase):
         self.fs.wait_for_daemons()
 
         # List files
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
         # First ls -R to populate MDCache, such that hardlinks will
         # resolve properly (recover_dentries does not create backtraces,
@@ -102,8 +101,7 @@ class TestJournalRepair(CephFSTestCase):
         # FIXME: hook in forward scrub here to regenerate backtraces
         proc = self.mount_a.run_shell(['ls', '-R'])
         self.mount_a.umount_wait()  # remount to clear client cache before our second ls
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
         proc = self.mount_a.run_shell(['ls', '-R'])
         self.assertEqual(proc.stdout.getvalue().strip(),
@@ -278,7 +276,7 @@ class TestJournalRepair(CephFSTestCase):
         self.fs.mds_fail_restart(active_mds_names[0])
         self.wait_until_equal(lambda: self.fs.get_active_names(), [active_mds_names[0]], 30,
                               reject_fn=lambda v: len(v) > 1)
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
         self.mount_a.run_shell(["ls", "-R"], wait=True)
 
     def test_table_tool(self):
@@ -434,7 +432,7 @@ class TestJournalRepair(CephFSTestCase):
 
         self.fs.mds_restart()
         self.fs.wait_for_daemons()
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
 
         # trivial sync moutn a
         workunit(self.ctx, {
diff --git a/qa/tasks/cephfs/test_misc.py b/qa/tasks/cephfs/test_misc.py
index 67e14984995..44d9ee420be 100644
--- a/qa/tasks/cephfs/test_misc.py
+++ b/qa/tasks/cephfs/test_misc.py
@@ -25,8 +25,7 @@ class TestMisc(CephFSTestCase):
         # on lookup/open
         self.mount_b.umount_wait()
         self.set_conf('client', 'client debug getattr caps', 'true')
-        self.mount_b.mount()
-        self.mount_b.wait_until_mounted()
+        self.mount_b.mount_wait()
 
         # create a file and hold it open. MDS will issue CEPH_CAP_EXCL_*
         # to mount_a
diff --git a/qa/tasks/cephfs/test_pool_perm.py b/qa/tasks/cephfs/test_pool_perm.py
index a1f234a2253..9912debedee 100644
--- a/qa/tasks/cephfs/test_pool_perm.py
+++ b/qa/tasks/cephfs/test_pool_perm.py
@@ -35,8 +35,7 @@ class TestPoolPerm(CephFSTestCase):
             'allow r pool={0}'.format(self.fs.get_data_pool_name()))
 
         self.mount_a.umount_wait()
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
         # write should fail
         self.mount_a.run_python(remote_script.format(path=file_path, check_read=str(False)))
@@ -47,8 +46,7 @@ class TestPoolPerm(CephFSTestCase):
             'allow w pool={0}'.format(self.fs.get_data_pool_name()))
 
         self.mount_a.umount_wait()
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
         # read should fail
         self.mount_a.run_python(remote_script.format(path=file_path, check_read=str(True)))
@@ -77,8 +75,7 @@ class TestPoolPerm(CephFSTestCase):
         ))
 
         self.mount_a.umount_wait()
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
         with self.assertRaises(CommandFailedError):
             self.mount_a.setfattr("layoutfile", "ceph.file.layout.pool",
@@ -96,8 +93,7 @@ class TestPoolPerm(CephFSTestCase):
             self.fs.get_data_pool_names()[0],
             self.fs.get_data_pool_names()[1],
         ))
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
         self.mount_a.setfattr("layoutfile", "ceph.file.layout.pool",
                               new_pool_name)
         self.mount_a.setfattr("layoutdir", "ceph.dir.layout.pool",
diff --git a/qa/tasks/cephfs/test_readahead.py b/qa/tasks/cephfs/test_readahead.py
index 31e7bf18754..e936a94b9aa 100644
--- a/qa/tasks/cephfs/test_readahead.py
+++ b/qa/tasks/cephfs/test_readahead.py
@@ -15,8 +15,7 @@ class TestReadahead(CephFSTestCase):
 
         # Unmount and remount the client to flush cache
         self.mount_a.umount_wait()
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
         initial_op_r = self.mount_a.admin_socket(['perf', 'dump', 'objecter'])['objecter']['op_r']
         self.mount_a.run_shell(["dd", "if=foo", "of=/dev/null", "bs=128k", "count=32"])
diff --git a/qa/tasks/cephfs/test_recovery_pool.py b/qa/tasks/cephfs/test_recovery_pool.py
index 36b4e58ec8c..0b3dc56bff9 100644
--- a/qa/tasks/cephfs/test_recovery_pool.py
+++ b/qa/tasks/cephfs/test_recovery_pool.py
@@ -186,10 +186,8 @@ class TestRecoveryPool(CephFSTestCase):
         log.info(str(self.mds_cluster.status()))
 
         # Mount a client
-        self.mount_a.mount()
-        self.mount_b.mount(mount_fs_name=recovery_fs)
-        self.mount_a.wait_until_mounted()
-        self.mount_b.wait_until_mounted()
+        self.mount_a.mount_wait()
+        self.mount_b.mount_wait(mount_fs_name=recovery_fs)
 
         # See that the files are present and correct
         errors = workload.validate()
diff --git a/qa/tasks/cephfs/test_scrub_checks.py b/qa/tasks/cephfs/test_scrub_checks.py
index 012b6c009fd..b6ca0b89802 100644
--- a/qa/tasks/cephfs/test_scrub_checks.py
+++ b/qa/tasks/cephfs/test_scrub_checks.py
@@ -298,8 +298,7 @@ class TestScrubChecks(CephFSTestCase):
         self.fs.mds_fail_restart()
         self.fs.wait_for_daemons()
 
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
         # fragstat indicates the directory is not empty, rmdir should fail
         with self.assertRaises(CommandFailedError) as ar:
diff --git a/qa/tasks/cephfs/test_sessionmap.py b/qa/tasks/cephfs/test_sessionmap.py
index a4642de0b67..bdcde71d095 100644
--- a/qa/tasks/cephfs/test_sessionmap.py
+++ b/qa/tasks/cephfs/test_sessionmap.py
@@ -62,8 +62,7 @@ class TestSessionMap(CephFSTestCase):
 
         status = self.fs.status()
         s = self._get_connection_count(status=status)
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
         self.assertGreater(self._get_connection_count(status=status), s)
         self.mount_a.umount_wait()
         e = self._get_connection_count(status=status)
@@ -85,8 +84,8 @@ class TestSessionMap(CephFSTestCase):
         status = self.fs.wait_for_daemons()
 
         # Bring the clients back
-        self.mount_a.mount()
-        self.mount_b.mount()
+        self.mount_a.mount_wait()
+        self.mount_b.mount_wait()
 
         # See that they've got sessions
         self.assert_session_count(2, mds_id=self.fs.get_rank(status=status)['name'])
@@ -178,8 +177,7 @@ class TestSessionMap(CephFSTestCase):
         # Configure a client that is limited to /foo/bar
         self._configure_auth(self.mount_b, "badguy", "allow rw path=/foo/bar")
         # Check he can mount that dir and do IO
-        self.mount_b.mount(mount_path="/foo/bar")
-        self.mount_b.wait_until_mounted()
+        self.mount_b.mount_wait(mount_path="/foo/bar")
         self.mount_b.create_destroy()
         self.mount_b.umount_wait()
 
@@ -225,5 +223,4 @@ class TestSessionMap(CephFSTestCase):
         self.assert_session_count(1, mds_id=self.fs.get_rank(rank=1, status=status)['name'])
 
         self.mount_a.kill_cleanup()
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
diff --git a/qa/tasks/cephfs/test_snapshots.py b/qa/tasks/cephfs/test_snapshots.py
index a1dcc23d340..7cd91ce611f 100644
--- a/qa/tasks/cephfs/test_snapshots.py
+++ b/qa/tasks/cephfs/test_snapshots.py
@@ -131,8 +131,7 @@ class TestSnapshots(CephFSTestCase):
             else:
                 self.assertGreater(self._get_last_created_snap(rank=0), last_created)
 
-            self.mount_a.mount()
-            self.mount_a.wait_until_mounted()
+            self.mount_a.mount_wait()
             self.mount_a.run_shell(["rmdir", Raw("d1/dir/.snap/*")])
 
@@ -173,8 +172,7 @@ class TestSnapshots(CephFSTestCase):
             else:
                 self.assertGreater(self._get_last_created_snap(rank=0), last_created)
 
-            self.mount_a.mount()
-            self.mount_a.wait_until_mounted()
+            self.mount_a.mount_wait()
             self.mount_a.run_shell(["rmdir", Raw("d1/dir/.snap/*")])
 
@@ -216,8 +214,7 @@ class TestSnapshots(CephFSTestCase):
             self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30)
             self.assertEqual(self._get_last_created_snap(rank=0), last_created)
 
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
     def test_snapclient_cache(self):
         """
@@ -345,8 +342,7 @@ class TestSnapshots(CephFSTestCase):
         self.assertEqual(snaps_dump["last_created"], rank0_cache["last_created"])
         self.assertTrue(_check_snapclient_cache(snaps_dump, cache_dump=rank0_cache));
 
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
         self.mount_a.run_shell(["rmdir", Raw("d0/d2/dir/.snap/*")])
 
diff --git a/qa/tasks/cephfs/test_strays.py b/qa/tasks/cephfs/test_strays.py
index 51732c195d9..a5058441e7c 100644
--- a/qa/tasks/cephfs/test_strays.py
+++ b/qa/tasks/cephfs/test_strays.py
@@ -373,7 +373,7 @@ class TestStrays(CephFSTestCase):
         self.fs.mds_asok(['flush', 'journal'])
         self.fs.mds_fail_restart()
         self.fs.wait_for_daemons()
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
 
         # Unlink file_a
         self.mount_a.run_shell(["rm", "-f", "dir_1/file_a"])
@@ -628,7 +628,7 @@ class TestStrays(CephFSTestCase):
         rank_0_id = active_mds_names[0]
         rank_1_id = active_mds_names[1]
 
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
         self.mount_a.run_shell(["rm", "-f", "dir_1/original"])
         self.mount_a.umount_wait()
 
@@ -772,7 +772,7 @@ class TestStrays(CephFSTestCase):
         # zero, but there's actually still a stray, so at the very
         # least the StrayManager stats code is slightly off
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
 
         # See that the data from the snapshotted revision of the file is still present
         # and correct
@@ -873,8 +873,7 @@ class TestStrays(CephFSTestCase):
         # remount+flush (release client caps)
         self.mount_a.umount_wait()
         self.fs.mds_asok(["flush", "journal"], mds_id)
-        self.mount_a.mount()
-        self.mount_a.wait_until_mounted()
+        self.mount_a.mount_wait()
 
         # Create 50% more files than the current fragment limit
         self.mount_a.run_python(dedent("""
diff --git a/qa/tasks/cephfs/test_volume_client.py b/qa/tasks/cephfs/test_volume_client.py
index 470e63d09fa..b0b1b2e0fd4 100644
--- a/qa/tasks/cephfs/test_volume_client.py
+++ b/qa/tasks/cephfs/test_volume_client.py
@@ -390,13 +390,13 @@ vc.disconnect()
             m.umount_wait()
 
         # Create a dir on mount A
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
         self.mount_a.run_shell(["mkdir", "parent1"])
         self.mount_a.run_shell(["mkdir", "parent2"])
         self.mount_a.run_shell(["mkdir", "parent1/mydir"])
 
         # Put some files in it from mount B
-        self.mount_b.mount()
+        self.mount_b.mount_wait()
         self.mount_b.run_shell(["touch", "parent1/mydir/afile"])
         self.mount_b.umount_wait()
 
diff --git a/qa/tasks/cephfs/test_volumes.py b/qa/tasks/cephfs/test_volumes.py
index 6f5bdec237a..794c15cedb5 100644
--- a/qa/tasks/cephfs/test_volumes.py
+++ b/qa/tasks/cephfs/test_volumes.py
@@ -1280,7 +1280,7 @@ class TestVolumes(CephFSTestCase):
         for subvolume in subvolumes:
             self._fs_cmd("subvolume", "rm", self.volname, subvolume)
 
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
 
         # verify trash dir is clean
         self._wait_for_trash_empty(timeout=300)
-- 
2.39.5
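
Note (not part of the patch above): a minimal sketch of the call pattern this change converts the suite to. The test class below is hypothetical and for illustration only; it uses just the CephFSTestCase fixtures (self.mount_a) and helpers (umount_wait, mount_wait, create_destroy) that the hunks above already exercise, and assumes the usual qa-suite import path.

    from tasks.cephfs.cephfs_test_case import CephFSTestCase

    class TestMountWaitExample(CephFSTestCase):
        # Hypothetical test, not part of this patch.
        CLIENTS_REQUIRED = 1

        def test_remount(self):
            # Old pattern:
            #     self.mount_a.mount()
            #     self.mount_a.wait_until_mounted()
            # New pattern: a single call that returns only once the
            # mountpoint is usable, which matters for ceph-fuse mounts
            # that can take a few seconds to come up.
            self.mount_a.umount_wait()
            self.mount_a.mount_wait()
            self.mount_a.create_destroy()

The keyword arguments of mount_wait() mirror mount(), so call sites that pass mount_path, mount_fs_name or mount_options convert the same way (e.g. mount_wait(mount_path="/foo/bar") in test_sessionmap.py, or mount_wait(mount_fs_name=recovery_fs) in test_recovery_pool.py).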