qa: convert mount calls to mount_wait
author     Patrick Donnelly <pdonnell@redhat.com>
           Tue, 29 Jun 2021 21:51:43 +0000 (14:51 -0700)
committer  Patrick Donnelly <pdonnell@redhat.com>
           Fri, 2 Jul 2021 23:17:34 +0000 (16:17 -0700)
These tests want to use the mount immediately anyway. But the main
problem is that, without waiting for the mount to complete, the command:

    chmod 1777 /path/to/mount

is not run, so the mount cannot be written to by normal users without
sudo.

Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
(cherry picked from commit 4fc4ccb7a112e16f564cbc01a3ced8d2b6be2d68)
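For context: mount_wait() is, in essence, mount() followed by a wait until
the mountpoint actually responds. A minimal sketch of that relationship,
simplified rather than the verbatim CephFSMount code in
qa/tasks/cephfs/mount.py:

    # Simplified sketch, not the verbatim qa/tasks/cephfs/mount.py code.
    class CephFSMount:
        def mount(self, **kwargs):
            # Kicks off the client mount. May return before the mount
            # completes, i.e. before the post-mount `chmod 1777` has made
            # the mountpoint writable by unprivileged users.
            ...

        def wait_until_mounted(self):
            # Blocks until the mountpoint responds (e.g. a stat succeeds).
            ...

        def mount_wait(self, **kwargs):
            # mount() plus the wait: the mount is usable on return.
            self.mount(**kwargs)
            self.wait_until_mounted()

This also explains why the test_fragment.py hunk below can leave a
now-redundant wait_until_mounted() call in place after the conversion:
waiting on an already-completed mount simply returns.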

qa/tasks/cephfs/filesystem.py
qa/tasks/cephfs/test_client_recovery.py
qa/tasks/cephfs/test_failover.py
qa/tasks/cephfs/test_fragment.py
qa/tasks/cephfs/test_mirroring.py
qa/tasks/cephfs/test_volumes.py

diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py
index 34ee42f13ec6d0cb12c1744292698113129d4a24..3f7e612e09e85bdc456bbf84e9a57326c701e601 100644
--- a/qa/tasks/cephfs/filesystem.py
+++ b/qa/tasks/cephfs/filesystem.py
@@ -698,7 +698,7 @@ class Filesystem(MDSCluster):
         from tasks.cephfs.fuse_mount import FuseMount
         d = misc.get_testdir(self._ctx)
         m = FuseMount(self._ctx, {}, d, "admin", self.client_remote, cephfs_name=self.name)
-        m.mount()
+        m.mount_wait()
         m.run_shell_payload(cmd)
         m.umount_wait(require_clean=True)
 
diff --git a/qa/tasks/cephfs/test_client_recovery.py b/qa/tasks/cephfs/test_client_recovery.py
index 3ae208a69925b6706c7cee264238af471dd6b321..e02088278449c42ee702c9f315fa02360214f63a 100644
--- a/qa/tasks/cephfs/test_client_recovery.py
+++ b/qa/tasks/cephfs/test_client_recovery.py
@@ -615,10 +615,10 @@ class TestClientRecovery(CephFSTestCase):
         self.mount_a.umount_wait()
 
         if isinstance(self.mount_a, FuseMount):
-            self.mount_a.mount(mntopts=['--client_reconnect_stale=1', '--fuse_disable_pagecache=1'])
+            self.mount_a.mount_wait(mntopts=['--client_reconnect_stale=1', '--fuse_disable_pagecache=1'])
         else:
             try:
-                self.mount_a.mount(mntopts=['recover_session=clean'])
+                self.mount_a.mount_wait(mntopts=['recover_session=clean'])
             except CommandFailedError:
                 self.mount_a.kill_cleanup()
                 self.skipTest("Not implemented in current kernel")
diff --git a/qa/tasks/cephfs/test_failover.py b/qa/tasks/cephfs/test_failover.py
index 45e343dcd2ae902fa41c2d7363fdf0b142bc2b3a..304d27c2c8cff731e7b2bf006d268c9477a6ae31 100644
--- a/qa/tasks/cephfs/test_failover.py
+++ b/qa/tasks/cephfs/test_failover.py
@@ -650,14 +650,14 @@ class TestMultiFilesystems(CephFSTestCase):
         fs_a, fs_b = self._setup_two()
 
         # Mount a client on fs_a
-        self.mount_a.mount(cephfs_name=fs_a.name)
+        self.mount_a.mount_wait(cephfs_name=fs_a.name)
         self.mount_a.write_n_mb("pad.bin", 1)
         self.mount_a.write_n_mb("test.bin", 2)
         a_created_ino = self.mount_a.path_to_ino("test.bin")
         self.mount_a.create_files()
 
         # Mount a client on fs_b
-        self.mount_b.mount(cephfs_name=fs_b.name)
+        self.mount_b.mount_wait(cephfs_name=fs_b.name)
         self.mount_b.write_n_mb("test.bin", 1)
         b_created_ino = self.mount_b.path_to_ino("test.bin")
         self.mount_b.create_files()
diff --git a/qa/tasks/cephfs/test_fragment.py b/qa/tasks/cephfs/test_fragment.py
index 6e2823b4a215635fa9b375dd551d9b9980743850..41977ca202836d8e51642c7aafea640f1db65033 100644
--- a/qa/tasks/cephfs/test_fragment.py
+++ b/qa/tasks/cephfs/test_fragment.py
@@ -297,7 +297,7 @@ class TestFragmentation(CephFSTestCase):
             self.mount_a.run_shell(["ln", "testdir1/{0}".format(i), "testdir2/"])
 
         self.mount_a.umount_wait()
-        self.mount_a.mount()
+        self.mount_a.mount_wait()
         self.mount_a.wait_until_mounted()
 
         # flush journal and restart mds. after restart, testdir2 is not in mds' cache
diff --git a/qa/tasks/cephfs/test_mirroring.py b/qa/tasks/cephfs/test_mirroring.py
index 6ea3d5d3022550caa05e8eae63206609493bf399..ef2672c84e791744e7f91f3a7c3e3cf1adb646a2 100644
--- a/qa/tasks/cephfs/test_mirroring.py
+++ b/qa/tasks/cephfs/test_mirroring.py
@@ -480,7 +480,7 @@ class TestMirroring(CephFSTestCase):
 
         log.debug(f'mounting filesystem {self.secondary_fs_name}')
         self.mount_b.umount_wait()
-        self.mount_b.mount(cephfs_name=self.secondary_fs_name)
+        self.mount_b.mount_wait(cephfs_name=self.secondary_fs_name)
 
         # create a bunch of files in a directory to snap
         self.mount_a.run_shell(["mkdir", "d0"])
@@ -546,7 +546,7 @@ class TestMirroring(CephFSTestCase):
 
         log.debug(f'mounting filesystem {self.secondary_fs_name}')
         self.mount_b.umount_wait()
-        self.mount_b.mount(cephfs_name=self.secondary_fs_name)
+        self.mount_b.mount_wait(cephfs_name=self.secondary_fs_name)
 
         # create a bunch of files in a directory to snap
         self.mount_a.run_shell(["mkdir", "d0"])
@@ -582,7 +582,7 @@ class TestMirroring(CephFSTestCase):
 
         log.debug(f'mounting filesystem {self.secondary_fs_name}')
         self.mount_b.umount_wait()
-        self.mount_b.mount(cephfs_name=self.secondary_fs_name)
+        self.mount_b.mount_wait(cephfs_name=self.secondary_fs_name)
 
         # create a bunch of files in a directory to snap
         self.mount_a.run_shell(["mkdir", "d0"])
@@ -818,7 +818,7 @@ class TestMirroring(CephFSTestCase):
 
         log.debug(f'mounting filesystem {self.secondary_fs_name}')
         self.mount_b.umount_wait()
-        self.mount_b.mount(cephfs_name=self.secondary_fs_name)
+        self.mount_b.mount_wait(cephfs_name=self.secondary_fs_name)
 
         # create a bunch of files w/ symbolic links in a directory to snap
         self.mount_a.run_shell(["mkdir", "d0"])
@@ -955,7 +955,7 @@ class TestMirroring(CephFSTestCase):
                 self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
         log.debug(f'mounting filesystem {self.secondary_fs_name}')
         self.mount_b.umount_wait()
-        self.mount_b.mount(cephfs_name=self.secondary_fs_name)
+        self.mount_b.mount_wait(cephfs_name=self.secondary_fs_name)
 
         repo = 'ceph-qa-suite'
         repo_dir = 'ceph_repo'
@@ -1033,7 +1033,7 @@ class TestMirroring(CephFSTestCase):
                     self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
         log.debug(f'mounting filesystem {self.secondary_fs_name}')
         self.mount_b.umount_wait()
-        self.mount_b.mount(cephfs_name=self.secondary_fs_name)
+        self.mount_b.mount_wait(cephfs_name=self.secondary_fs_name)
 
         typs = deque(['reg', 'dir', 'sym'])
         def cleanup_and_create_with_type(dirname, fnames):
@@ -1104,7 +1104,7 @@ class TestMirroring(CephFSTestCase):
                 self.backup_fs.get_data_pool_name(), self.backup_fs.get_data_pool_name()))
         log.debug(f'mounting filesystem {self.secondary_fs_name}')
         self.mount_b.umount_wait()
-        self.mount_b.mount(cephfs_name=self.secondary_fs_name)
+        self.mount_b.mount_wait(cephfs_name=self.secondary_fs_name)
 
         repo = 'ceph-qa-suite'
         repo_dir = 'ceph_repo'
diff --git a/qa/tasks/cephfs/test_volumes.py b/qa/tasks/cephfs/test_volumes.py
index a759d55ec3a09849b3ee26d0d11ec7d1c4f23085..43be4c19c5f2deecd597e51d77480a77b6a074ac 100644
--- a/qa/tasks/cephfs/test_volumes.py
+++ b/qa/tasks/cephfs/test_volumes.py
@@ -1216,7 +1216,7 @@ class TestSubvolumes(TestVolumesHelper):
         self._configure_guest_auth(guest_mount, authid, key)
 
         # mount the subvolume, and write to it
-        guest_mount.mount(cephfs_mntpt=mount_path)
+        guest_mount.mount_wait(cephfs_mntpt=mount_path)
         guest_mount.write_n_mb("data.bin", 1)
 
         # authorize guest authID read access to subvolume
@@ -1226,7 +1226,7 @@ class TestSubvolumes(TestVolumesHelper):
         # guest client sees the change in access level to read only after a
         # remount of the subvolume.
         guest_mount.umount_wait()
-        guest_mount.mount(cephfs_mntpt=mount_path)
+        guest_mount.mount_wait(cephfs_mntpt=mount_path)
 
         # read existing content of the subvolume
         self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
@@ -1272,7 +1272,7 @@ class TestSubvolumes(TestVolumesHelper):
         self._configure_guest_auth(guest_mount, authid, key)
 
         # mount the subvolume, and write to it
-        guest_mount.mount(cephfs_mntpt=mount_path)
+        guest_mount.mount_wait(cephfs_mntpt=mount_path)
         guest_mount.write_n_mb("data.bin", 1)
 
         # authorize guest authID read access to subvolume
@@ -1282,7 +1282,7 @@ class TestSubvolumes(TestVolumesHelper):
         # guest client sees the change in access level to read only after a
         # remount of the subvolume.
         guest_mount.umount_wait()
-        guest_mount.mount(cephfs_mntpt=mount_path)
+        guest_mount.mount_wait(cephfs_mntpt=mount_path)
 
         # read existing content of the subvolume
         self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
@@ -1887,7 +1887,7 @@ class TestSubvolumes(TestVolumesHelper):
             self._configure_guest_auth(guest_mounts[i], auth_id, key)
 
             # mount the subvolume, and write to it
-            guest_mounts[i].mount(cephfs_mntpt=mount_path)
+            guest_mounts[i].mount_wait(cephfs_mntpt=mount_path)
             guest_mounts[i].write_n_mb("data.bin", 1)
 
         # Evict client, guest_mounts[0], using auth ID 'guest' and has mounted
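
For illustration, the failure mode the commit message describes, as a
hypothetical before/after (names borrowed from the hunks above; the EACCES
detail is an assumption consistent with the chmod 1777 explanation):

    # Before: mount() may return before the mount completes, so the
    # post-mount chmod 1777 may not have run yet and an unprivileged
    # write can fail (e.g. with EACCES).
    self.mount_a.mount()
    self.mount_a.write_n_mb("data.bin", 1)   # racy

    # After: mount_wait() blocks until the mountpoint is usable.
    self.mount_a.mount_wait()
    self.mount_a.write_n_mb("data.bin", 1)   # safe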