"""
        self.mds_daemons[mds_id].signal(sig, silent);
+    def mds_is_running(self, mds_id):
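+        # Note: presumably this only reports whether the daemon process is
+        # alive; it says nothing about the daemon's state in the MDS map.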
+        return self.mds_daemons[mds_id].running()
+
    def newfs(self, name='cephfs', create=True):
        return Filesystem(self._ctx, name=name, create=create)
    def rank_fail(self, rank=0):
        self.mon_manager.raw_cluster_cmd("mds", "fail", "{}:{}".format(self.id, rank))
+    def rank_is_running(self, rank=0, status=None):
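+        # Map the rank to its daemon name via the MDS map, then check that
+        # daemon's process.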
+        name = self.get_rank(rank=rank, status=status)['name']
+        return self.mds_is_running(name)
+
    def get_ranks(self, status=None):
        if status is None:
            status = self.getinfo()
        self._verify(proc, retval, errmsg)
        return proc
+    def open_for_reading(self, basename):
+        """
+        Open a file for reading only.
+        """
+        assert self.is_mounted()
+
+        path = os.path.join(self.hostfs_mntpt, basename)
+
+        return self._run_python(dedent(
+            """
+            import os
+            mode = os.O_RDONLY
+            fd = os.open("{path}", mode)
+            os.close(fd)
+            """.format(path=path)
+        ))
+
+    def open_for_writing(self, basename, creat=True, trunc=True, excl=False):
+        """
+        Open a file for writing only.
+        """
+        assert self.is_mounted()
+
+        path = os.path.join(self.hostfs_mntpt, basename)
+
+        return self._run_python(dedent(
+            """
+            import os
+            mode = os.O_WRONLY
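+            # The creat/trunc/excl keyword arguments below are interpolated
+            # into this remote script by format() as the Python literals
+            # True or False.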
+            if {creat}:
+                mode |= os.O_CREAT
+            if {trunc}:
+                mode |= os.O_TRUNC
+            if {excl}:
+                mode |= os.O_EXCL
+            fd = os.open("{path}", mode)
+            os.close(fd)
+            """.format(path=path, creat=creat, trunc=trunc, excl=excl)
+        ))
+
    def open_no_data(self, basename):
        """
        A pure metadata operation
        # drop cache multiple times to clear replica pins
        self._wait_subtrees([], status=self.status, action=_drop)
+    def test_open_file(self):
+        """
+        Test opening a file via a hard link whose dentry is managed by a
+        different MDS rank than the file's inode.
+
+        See https://tracker.ceph.com/issues/58411
+        """
+
+        self.mount_a.run_shell_payload("mkdir -p target link")
+        self.mount_a.touch("target/test.txt")
+        self.mount_a.run_shell_payload("ln target/test.txt link/test.txt")
+        self.mount_a.setfattr("target", "ceph.dir.pin", "0")
+        self.mount_a.setfattr("link", "ceph.dir.pin", "1")
+        self._wait_subtrees([("/target", 0), ("/link", 1)], status=self.status)
+
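+        # At this point the primary dentry (target/test.txt) is on rank 0
+        # and the hard link's remote dentry (link/test.txt) is on rank 1.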
+        # Release the client cache; otherwise the bug may not be triggered
+        # even if the MDS is buggy.
+        self.mount_a.remount()
+
+        # Open the file with access mode (O_CREAT|O_WRONLY|O_TRUNC); this
+        # should cause rank 1 to crash if the bug is present. Running
+        # 'truncate -s 0 link/test.txt' would also work here, since its
+        # access mode (O_CREAT|O_WRONLY) triggers the same bug.
+        log.info("test open mode (O_CREAT|O_WRONLY|O_TRUNC)")
+        proc = self.mount_a.open_for_writing("link/test.txt")
+        time.sleep(1)
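+        # The open is healthy only if the client call returned and rank 1
+        # is still alive.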
+        success = proc.finished and self.fs.rank_is_running(rank=1)
+
+        # Test the other write modes too.
+        if success:
+            self.mount_a.remount()
+            log.info("test open mode (O_WRONLY|O_TRUNC)")
+            proc = self.mount_a.open_for_writing("link/test.txt", creat=False)
+            time.sleep(1)
+            success = proc.finished and self.fs.rank_is_running(rank=1)
+        if success:
+            self.mount_a.remount()
+            log.info("test open mode (O_CREAT|O_WRONLY)")
+            proc = self.mount_a.open_for_writing("link/test.txt", trunc=False)
+            time.sleep(1)
+            success = proc.finished and self.fs.rank_is_running(rank=1)
+
+        # Test the read-only open mode too.
+        if success:
+            self.mount_a.remount()
+            log.info("test open mode (O_RDONLY)")
+            proc = self.mount_a.open_for_reading("link/test.txt")
+            time.sleep(1)
+            success = proc.finished and self.fs.rank_is_running(rank=1)
+
+        if success:
+            # All tests passed; rank 1 did not crash.
+            return
+
+        if not proc.finished:
+            log.warning("open operation is blocked, killing it")
+            proc.kill()
+
+        if not self.fs.rank_is_running(rank=1):
+            log.warning("rank 1 crashed")
+
+        self.mount_a.umount_wait(force=True)
+
+        self.assertTrue(success, "open operation failed")
+
class TestEphemeralPins(CephFSTestCase):
    MDSS_REQUIRED = 3
    CLIENTS_REQUIRED = 1