qa: add test for opening a file via a hard link that is not in the same MDS as the inode
author    Zhansong Gao <zhsgao@hotmail.com>
          Fri, 3 Feb 2023 13:22:50 +0000 (21:22 +0800)
committer Venky Shankar <vshankar@redhat.com>
          Tue, 23 May 2023 04:54:49 +0000 (10:24 +0530)
Signed-off-by: Zhansong Gao <zhsgao@hotmail.com>
(cherry picked from commit 9945f3b51fd10905987ecedba3a012cac6fdd57e)

qa/tasks/cephfs/filesystem.py
qa/tasks/cephfs/mount.py
qa/tasks/cephfs/test_exports.py

index 83babc13bb62e70aa81aa02d20bce2e8ca61785b..57f1d5d229b51dd9828813314693bf0601bbeac8 100644 (file)
@@ -368,6 +368,9 @@ class MDSCluster(CephCluster):
         """
         self.mds_daemons[mds_id].signal(sig, silent);
 
+    def mds_is_running(self, mds_id):
+        return self.mds_daemons[mds_id].running()
+
     def newfs(self, name='cephfs', create=True):
         return Filesystem(self._ctx, name=name, create=create)
 
@@ -1047,6 +1050,10 @@ class Filesystem(MDSCluster):
     def rank_fail(self, rank=0):
         self.mon_manager.raw_cluster_cmd("mds", "fail", "{}:{}".format(self.id, rank))
 
+    def rank_is_running(self, rank=0, status=None):
+        name = self.get_rank(rank=rank, status=status)['name']
+        return self.mds_is_running(name)
+
     def get_ranks(self, status=None):
         if status is None:
             status = self.getinfo()
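
Taken together, the two new helpers let a test ask whether the MDS daemon
currently holding a given rank is still alive: rank_is_running() resolves the
rank to a daemon name via the MDS map, and mds_is_running() polls that
daemon's process. A minimal usage sketch, assuming a CephFSTestCase-style
test with the usual self.fs and self.status fixtures:

    # Is the daemon currently serving rank 1 still alive?
    self.assertTrue(self.fs.rank_is_running(rank=1, status=self.status),
                    "rank 1 MDS daemon has died")
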
index d3e3e4587ace0495af7501c217ae832b35b46814..4b3a7b9f2e5cbff2b3878b0589755ad78d901aac 100644 (file)
@@ -782,6 +782,46 @@ class CephFSMount(object):
         self._verify(proc, retval, errmsg)
         return proc
 
+    def open_for_reading(self, basename):
+        """
+        Open a file for reading only.
+        """
+        assert(self.is_mounted())
+
+        path = os.path.join(self.hostfs_mntpt, basename)
+
+        return self._run_python(dedent(
+            """
+            import os
+            mode = os.O_RDONLY
+            fd = os.open("{path}", mode)
+            os.close(fd)
+            """.format(path=path)
+        ))
+
+    def open_for_writing(self, basename, creat=True, trunc=True, excl=False):
+        """
+        Open a file for writing only.
+        """
+        assert(self.is_mounted())
+
+        path = os.path.join(self.hostfs_mntpt, basename)
+
+        return self._run_python(dedent(
+            """
+            import os
+            mode = os.O_WRONLY
+            if {creat}:
+                mode |= os.O_CREAT
+            if {trunc}:
+                mode |= os.O_TRUNC
+            if {excl}:
+                mode |= os.O_EXCL
+            fd = os.open("{path}", mode)
+            os.close(fd)
+            """.format(path=path, creat=creat, trunc=trunc, excl=excl)
+        ))
+
     def open_no_data(self, basename):
         """
         A pure metadata operation
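
With the default arguments (creat=True, trunc=True, excl=False), the script
that open_for_writing() formats and ships to the client via _run_python()
reduces to the following. Note that str.format() renders the booleans as the
literals True/False, which the remote interpreter evaluates directly (the
mount path shown here is illustrative):

    import os
    mode = os.O_WRONLY
    if True:        # creat
        mode |= os.O_CREAT
    if True:        # trunc
        mode |= os.O_TRUNC
    if False:       # excl
        mode |= os.O_EXCL
    fd = os.open("/mnt/cephfs/link/test.txt", mode)
    os.close(fd)
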
index 4961edb520d015b205fe422b58ed78b7d3b6fa77..4b7e884ec33fc3cc9c7a380edcce99b7d9b9776e 100644 (file)
@@ -155,6 +155,69 @@ class TestExportPin(CephFSTestCase):
         # drop cache multiple times to clear replica pins
         self._wait_subtrees([], status=self.status, action=_drop)
 
+    def test_open_file(self):
+        """
+        Test opening a file via a hard link that is not in the same MDS as the inode.
+
+        See https://tracker.ceph.com/issues/58411
+        """
+
+        self.mount_a.run_shell_payload("mkdir -p target link")
+        self.mount_a.touch("target/test.txt")
+        self.mount_a.run_shell_payload("ln target/test.txt link/test.txt")
+        self.mount_a.setfattr("target", "ceph.dir.pin", "0")
+        self.mount_a.setfattr("link", "ceph.dir.pin", "1")
+        self._wait_subtrees([("/target", 0), ("/link", 1)], status=self.status)
+
+        # Drop the client cache; otherwise the bug may not be triggered even on a buggy MDS.
+        self.mount_a.remount()
+
+        # Open the file with access mode (O_CREAT|O_WRONLY|O_TRUNC);
+        # this should cause rank 1 to crash if the MDS is buggy.
+        # 'truncate -s 0 link/test.txt' would also work here: its access
+        # mode is (O_CREAT|O_WRONLY), which can trigger the bug as well.
+        log.info("test open mode (O_CREAT|O_WRONLY|O_TRUNC)")
+        proc = self.mount_a.open_for_writing("link/test.txt")
+        time.sleep(1)
+        success = proc.finished and self.fs.rank_is_running(rank=1)
+
+        # Test other write modes too.
+        if success:
+            self.mount_a.remount()
+            log.info("test open mode (O_WRONLY|O_TRUNC)")
+            proc = self.mount_a.open_for_writing("link/test.txt", creat=False)
+            time.sleep(1)
+            success = proc.finished and self.fs.rank_is_running(rank=1)
+        if success:
+            self.mount_a.remount()
+            log.info("test open mode (O_CREAT|O_WRONLY)")
+            proc = self.mount_a.open_for_writing("link/test.txt", trunc=False)
+            time.sleep(1)
+            success = proc.finished and self.fs.rank_is_running(rank=1)
+
+        # Test the read-only open mode too.
+        if success:
+            self.mount_a.remount()
+            log.info("test open mode (O_RDONLY)")
+            proc = self.mount_a.open_for_reading("link/test.txt")
+            time.sleep(1)
+            success = proc.finished and self.fs.rank_is_running(rank=1)
+
+        if success:
+            # All open modes passed and rank 1 didn't crash.
+            return
+
+        if not proc.finished:
+            log.warning("open operation is blocked, killing it")
+            proc.kill()
+
+        if not self.fs.rank_is_running(rank=1):
+            log.warning("rank 1 crashed")
+
+        self.mount_a.umount_wait(force=True)
+
+        self.assertTrue(success, "open operation failed or rank 1 crashed")
+
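
For quick reference, the helper invocations in the test above map to these
open(2) flag sets (a recap of the calls as written, not new behavior):

    self.mount_a.open_for_writing("link/test.txt")               # O_CREAT|O_WRONLY|O_TRUNC
    self.mount_a.open_for_writing("link/test.txt", creat=False)  # O_WRONLY|O_TRUNC
    self.mount_a.open_for_writing("link/test.txt", trunc=False)  # O_CREAT|O_WRONLY
    self.mount_a.open_for_reading("link/test.txt")               # O_RDONLY
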
 class TestEphemeralPins(CephFSTestCase):
     MDSS_REQUIRED = 3
     CLIENTS_REQUIRED = 1