From 4fc4ccb7a112e16f564cbc01a3ced8d2b6be2d68 Mon Sep 17 00:00:00 2001 From: Patrick Donnelly Date: Tue, 29 Jun 2021 14:51:43 -0700 Subject: [PATCH] qa: convert mount calls to mount_wait These tests want to immediately use the mount anyway. But the main problem is, without waiting for the mount to complete, the command: chmod 1777 /path/to/mount is not run so the mount cannot be written to by normal users without sudo. Signed-off-by: Patrick Donnelly --- qa/tasks/cephfs/filesystem.py | 2 +- qa/tasks/cephfs/test_client_recovery.py | 4 ++-- qa/tasks/cephfs/test_failover.py | 4 ++-- qa/tasks/cephfs/test_fragment.py | 2 +- qa/tasks/cephfs/test_mirroring.py | 14 +++++++------- qa/tasks/cephfs/test_volumes.py | 10 +++++----- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py index 34dfdc9525fbf..a84f2bbd57b17 100644 --- a/qa/tasks/cephfs/filesystem.py +++ b/qa/tasks/cephfs/filesystem.py @@ -666,7 +666,7 @@ class Filesystem(MDSCluster): from tasks.cephfs.fuse_mount import FuseMount d = misc.get_testdir(self._ctx) m = FuseMount(self._ctx, {}, d, "admin", self.client_remote, cephfs_name=self.name) - m.mount() + m.mount_wait() m.run_shell_payload(cmd) m.umount_wait(require_clean=True) diff --git a/qa/tasks/cephfs/test_client_recovery.py b/qa/tasks/cephfs/test_client_recovery.py index 3ae208a69925b..e02088278449c 100644 --- a/qa/tasks/cephfs/test_client_recovery.py +++ b/qa/tasks/cephfs/test_client_recovery.py @@ -615,10 +615,10 @@ class TestClientRecovery(CephFSTestCase): self.mount_a.umount_wait() if isinstance(self.mount_a, FuseMount): - self.mount_a.mount(mntopts=['--client_reconnect_stale=1', '--fuse_disable_pagecache=1']) + self.mount_a.mount_wait(mntopts=['--client_reconnect_stale=1', '--fuse_disable_pagecache=1']) else: try: - self.mount_a.mount(mntopts=['recover_session=clean']) + self.mount_a.mount_wait(mntopts=['recover_session=clean']) except CommandFailedError: self.mount_a.kill_cleanup() 
self.skipTest("Not implemented in current kernel") diff --git a/qa/tasks/cephfs/test_failover.py b/qa/tasks/cephfs/test_failover.py index 45e343dcd2ae9..304d27c2c8cff 100644 --- a/qa/tasks/cephfs/test_failover.py +++ b/qa/tasks/cephfs/test_failover.py @@ -650,14 +650,14 @@ class TestMultiFilesystems(CephFSTestCase): fs_a, fs_b = self._setup_two() # Mount a client on fs_a - self.mount_a.mount(cephfs_name=fs_a.name) + self.mount_a.mount_wait(cephfs_name=fs_a.name) self.mount_a.write_n_mb("pad.bin", 1) self.mount_a.write_n_mb("test.bin", 2) a_created_ino = self.mount_a.path_to_ino("test.bin") self.mount_a.create_files() # Mount a client on fs_b - self.mount_b.mount(cephfs_name=fs_b.name) + self.mount_b.mount_wait(cephfs_name=fs_b.name) self.mount_b.write_n_mb("test.bin", 1) b_created_ino = self.mount_b.path_to_ino("test.bin") self.mount_b.create_files() diff --git a/qa/tasks/cephfs/test_fragment.py b/qa/tasks/cephfs/test_fragment.py index 6e2823b4a2156..41977ca202836 100644 --- a/qa/tasks/cephfs/test_fragment.py +++ b/qa/tasks/cephfs/test_fragment.py @@ -297,7 +297,7 @@ class TestFragmentation(CephFSTestCase): self.mount_a.run_shell(["ln", "testdir1/{0}".format(i), "testdir2/"]) self.mount_a.umount_wait() - self.mount_a.mount() + self.mount_a.mount_wait() self.mount_a.wait_until_mounted() # flush journal and restart mds. after restart, testdir2 is not in mds' cache diff --git a/qa/tasks/cephfs/test_mirroring.py b/qa/tasks/cephfs/test_mirroring.py index 6ea3d5d302255..ef2672c84e791 100644 --- a/qa/tasks/cephfs/test_mirroring.py +++ b/qa/tasks/cephfs/test_mirroring.py @@ -480,7 +480,7 @@ class TestMirroring(CephFSTestCase): log.debug(f'mounting filesystem {self.secondary_fs_name}') self.mount_b.umount_wait() - self.mount_b.mount(cephfs_name=self.secondary_fs_name) + self.mount_b.mount_wait(cephfs_name=self.secondary_fs_name) # create a bunch of files in a directory to snap self.mount_a.run_shell(["mkdir", "d0"]) @@ -546,7 +546,7 @@ class TestMirroring(CephFSTestCase): log.debug(f'mounting filesystem {self.secondary_fs_name}') self.mount_b.umount_wait() - self.mount_b.mount(cephfs_name=self.secondary_fs_name) + self.mount_b.mount_wait(cephfs_name=self.secondary_fs_name) # create a bunch of files in a directory to snap self.mount_a.run_shell(["mkdir", "d0"]) @@ -582,7 +582,7 @@ class TestMirroring(CephFSTestCase): log.debug(f'mounting filesystem {self.secondary_fs_name}') self.mount_b.umount_wait() - self.mount_b.mount(cephfs_name=self.secondary_fs_name) + self.mount_b.mount_wait(cephfs_name=self.secondary_fs_name) # create a bunch of files in a directory to snap self.mount_a.run_shell(["mkdir", "d0"]) @@ -818,7 +818,7 @@ class TestMirroring(CephFSTestCase): log.debug(f'mounting filesystem {self.secondary_fs_name}') self.mount_b.umount_wait() - self.mount_b.mount(cephfs_name=self.secondary_fs_name) + self.mount_b.mount_wait(cephfs_name=self.secondary_fs_name) # create a bunch of files w/ symbolic links in a directory to snap self.mount_a.run_shell(["mkdir", "d0"]) @@ -955,7 +955,7 @@ class TestMirroring(CephFSTestCase): self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name())) log.debug(f'mounting filesystem {self.secondary_fs_name}') self.mount_b.umount_wait() - self.mount_b.mount(cephfs_name=self.secondary_fs_name) + self.mount_b.mount_wait(cephfs_name=self.secondary_fs_name) repo = 'ceph-qa-suite' repo_dir = 'ceph_repo' @@ -1033,7 +1033,7 @@ class TestMirroring(CephFSTestCase): self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name())) log.debug(f'mounting filesystem {self.secondary_fs_name}') self.mount_b.umount_wait() - self.mount_b.mount(cephfs_name=self.secondary_fs_name) + self.mount_b.mount_wait(cephfs_name=self.secondary_fs_name) typs = deque(['reg', 'dir', 'sym']) def cleanup_and_create_with_type(dirname, fnames): @@ -1104,7 +1104,7 @@ class TestMirroring(CephFSTestCase): self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name())) log.debug(f'mounting filesystem {self.secondary_fs_name}') self.mount_b.umount_wait() - self.mount_b.mount(cephfs_name=self.secondary_fs_name) + self.mount_b.mount_wait(cephfs_name=self.secondary_fs_name) repo = 'ceph-qa-suite' repo_dir = 'ceph_repo' diff --git a/qa/tasks/cephfs/test_volumes.py b/qa/tasks/cephfs/test_volumes.py index 8f015106cff62..573d988e1b294 100644 --- a/qa/tasks/cephfs/test_volumes.py +++ b/qa/tasks/cephfs/test_volumes.py @@ -1216,7 +1216,7 @@ class TestSubvolumes(TestVolumesHelper): self._configure_guest_auth(guest_mount, authid, key) # mount the subvolume, and write to it - guest_mount.mount(cephfs_mntpt=mount_path) + guest_mount.mount_wait(cephfs_mntpt=mount_path) guest_mount.write_n_mb("data.bin", 1) # authorize guest authID read access to subvolume @@ -1226,7 +1226,7 @@ class TestSubvolumes(TestVolumesHelper): # guest client sees the change in access level to read only after a # remount of the subvolume.
guest_mount.umount_wait() - guest_mount.mount(cephfs_mntpt=mount_path) + guest_mount.mount_wait(cephfs_mntpt=mount_path) # read existing content of the subvolume self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"]) @@ -1272,7 +1272,7 @@ class TestSubvolumes(TestVolumesHelper): self._configure_guest_auth(guest_mount, authid, key) # mount the subvolume, and write to it - guest_mount.mount(cephfs_mntpt=mount_path) + guest_mount.mount_wait(cephfs_mntpt=mount_path) guest_mount.write_n_mb("data.bin", 1) # authorize guest authID read access to subvolume @@ -1282,7 +1282,7 @@ class TestSubvolumes(TestVolumesHelper): # guest client sees the change in access level to read only after a # remount of the subvolume. guest_mount.umount_wait() - guest_mount.mount(cephfs_mntpt=mount_path) + guest_mount.mount_wait(cephfs_mntpt=mount_path) # read existing content of the subvolume self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"]) @@ -1887,7 +1887,7 @@ class TestSubvolumes(TestVolumesHelper): self._configure_guest_auth(guest_mounts[i], auth_id, key) # mount the subvolume, and write to it - guest_mounts[i].mount(cephfs_mntpt=mount_path) + guest_mounts[i].mount_wait(cephfs_mntpt=mount_path) guest_mounts[i].write_n_mb("data.bin", 1) # Evict client, guest_mounts[0], using auth ID 'guest' and has mounted -- 2.47.3