MDSS_REQUIRED = 2
CLIENTS_REQUIRED = 2
- def test_export_pin(self):
+ def test_session_race(self):
+ """
+ Test session creation race.
+
+ See: https://tracker.ceph.com/issues/24072#change-113056
+ """
+
self.fs.set_max_mds(2)
- self.fs.wait_for_daemons()
+ status = self.fs.wait_for_daemons()
- status = self.fs.status()
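+ # remember which daemon (by gid) holds rank 1; we check at the end that it did not fail over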
+ rank1 = self.fs.get_rank(rank=1, status=status)
- self.mount_a.run_shell(["mkdir", "-p", "1/2/3"])
- self._wait_subtrees([], status=status)
+ # Create a directory that is pre-exported to rank 1
+ self.mount_a.run_shell(["mkdir", "-p", "a/aa"])
+ self.mount_a.setfattr("a", "ceph.dir.pin", "1")
+ self._wait_subtrees([('/a', 1)], status=status, rank=1)
- # NOP
- self.mount_a.setfattr("1", "ceph.dir.pin", "-1")
- self._wait_subtrees([], status=status)
+ # Now set the mds config to allow the race
+ self.fs.rank_asok(["config", "set", "mds_inject_migrator_session_race", "true"], rank=1)
- # NOP (rank < -1)
- self.mount_a.setfattr("1", "ceph.dir.pin", "-2341")
- self._wait_subtrees([], status=status)
+ # Now create another directory and try to export it
+ self.mount_b.run_shell(["mkdir", "-p", "b/bb"])
+ self.mount_b.setfattr("b", "ceph.dir.pin", "1")
- # pin /1 to rank 1
- self.mount_a.setfattr("1", "ceph.dir.pin", "1")
- self._wait_subtrees([('/1', 1)], status=status, rank=1)
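+ # give the export of /b time to start and reach the injected delay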
+ time.sleep(5)
- # Check export_targets is set properly
- status = self.fs.status()
- log.info(status)
- r0 = status.get_rank(self.fs.id, 0)
- self.assertTrue(sorted(r0['export_targets']) == [1])
+ # Now turn off the injection so that the migrator doesn't wait again
+ self.fs.rank_asok(["config", "set", "mds_inject_migrator_session_race", "false"], rank=1)
- # redundant pin /1/2 to rank 1
- self.mount_a.setfattr("1/2", "ceph.dir.pin", "1")
- self._wait_subtrees([('/1', 1), ('/1/2', 1)], status=status, rank=1)
+ # Now try to create a session with rank 1 by accessing a dir known to
+ # be there. If the bug is present, this should cause rank 1 to crash:
+ self.mount_b.run_shell(["ls", "a"])
- # change pin /1/2 to rank 0
- self.mount_a.setfattr("1/2", "ceph.dir.pin", "0")
- self._wait_subtrees([('/1', 1), ('/1/2', 0)], status=status, rank=1)
- self._wait_subtrees([('/1', 1), ('/1/2', 0)], status=status)
+ # Check whether rank 1 changed (did a standby take over?)
+ new_rank1 = self.fs.get_rank(rank=1)
+ self.assertEqual(rank1['gid'], new_rank1['gid'])
- # change pin /1/2/3 to (presently) non-existent rank 2
- self.mount_a.setfattr("1/2/3", "ceph.dir.pin", "2")
- self._wait_subtrees([('/1', 1), ('/1/2', 0)], status=status)
- self._wait_subtrees([('/1', 1), ('/1/2', 0)], status=status, rank=1)
+class TestExportPin(CephFSTestCase):
+ MDSS_REQUIRED = 3
+ CLIENTS_REQUIRED = 1
- # change pin /1/2 back to rank 1
- self.mount_a.setfattr("1/2", "ceph.dir.pin", "1")
- self._wait_subtrees([('/1', 1), ('/1/2', 1)], status=status, rank=1)
+ def setUp(self):
+ CephFSTestCase.setUp(self)
+
+ self.fs.set_max_mds(3)
+ self.status = self.fs.wait_for_daemons()
- # add another directory pinned to 1
- self.mount_a.run_shell(["mkdir", "-p", "1/4/5"])
- self.mount_a.setfattr("1/4/5", "ceph.dir.pin", "1")
- self._wait_subtrees([('/1', 1), ('/1/2', 1), ('/1/4/5', 1)], status=status, rank=1)
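+ # a common directory tree that the tests below pin in various ways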
+ self.mount_a.run_shell_payload("mkdir -p 1/2/3/4")
- # change pin /1 to 0
- self.mount_a.setfattr("1", "ceph.dir.pin", "0")
- self._wait_subtrees([('/1', 0), ('/1/2', 1), ('/1/4/5', 1)], status=status)
+ def test_noop(self):
+ self.mount_a.setfattr("1", "ceph.dir.pin", "-1")
+ time.sleep(30) # for something to not happen
+ self._wait_subtrees([], status=self.status)
- # change pin /1/2 to default (-1); does the subtree root properly respect it's parent pin?
- self.mount_a.setfattr("1/2", "ceph.dir.pin", "-1")
- self._wait_subtrees([('/1', 0), ('/1/4/5', 1)], status=status)
-
- if len(list(status.get_standbys())):
- self.fs.set_max_mds(3)
- self.fs.wait_for_state('up:active', rank=2)
- self._wait_subtrees([('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2)], status=status)
-
- # Check export_targets is set properly
- status = self.fs.status()
- log.info(status)
- r0 = status.get_rank(self.fs.id, 0)
- self.assertTrue(sorted(r0['export_targets']) == [1,2])
- r1 = status.get_rank(self.fs.id, 1)
- self.assertTrue(sorted(r1['export_targets']) == [0])
- r2 = status.get_rank(self.fs.id, 2)
- self.assertTrue(sorted(r2['export_targets']) == [])
-
- # Test rename
- self.mount_a.run_shell(["mkdir", "-p", "a/b", "aa/bb"])
- self.mount_a.setfattr("a", "ceph.dir.pin", "1")
- self.mount_a.setfattr("aa/bb", "ceph.dir.pin", "0")
- if (len(self.fs.get_active_names()) > 2):
- self._wait_subtrees([('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2), ('/a', 1), ('/aa/bb', 0)], status=status)
- else:
- self._wait_subtrees([('/1', 0), ('/1/4/5', 1), ('/a', 1), ('/aa/bb', 0)], status=status)
- self.mount_a.run_shell(["mv", "aa", "a/b/"])
- if (len(self.fs.get_active_names()) > 2):
- self._wait_subtrees([('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2), ('/a', 1), ('/a/b/aa/bb', 0)], status=status)
- else:
- self._wait_subtrees([('/1', 0), ('/1/4/5', 1), ('/a', 1), ('/a/b/aa/bb', 0)], status=status)
+ def test_negative(self):
+ self.mount_a.setfattr("1", "ceph.dir.pin", "-2341")
+ time.sleep(30) # for something to not happen
+ self._wait_subtrees([], status=self.status)
- def test_export_pin_getfattr(self):
- self.fs.set_max_mds(2)
- status = self.fs.wait_for_daemons()
+ def test_empty_pin(self):
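+ # pinning an empty (never populated) directory should not create a subtree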
+ self.mount_a.setfattr("1/2/3/4", "ceph.dir.pin", "1")
+ time.sleep(30) # for something to not happen
+ self._wait_subtrees([], status=self.status)
- self.mount_a.run_shell(["mkdir", "-p", "1/2/3"])
- self._wait_subtrees([], status=status)
+ def test_trivial(self):
+ self.mount_a.setfattr("1", "ceph.dir.pin", "1")
+ self._wait_subtrees([('/1', 1)], status=self.status, rank=1)
- # pin /1 to rank 0
+ def test_export_targets(self):
self.mount_a.setfattr("1", "ceph.dir.pin", "1")
- self._wait_subtrees([('/1', 1)], status=status, rank=1)
+ self._wait_subtrees([('/1', 1)], status=self.status, rank=1)
+ self.status = self.fs.status()
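+ # after exporting /1, rank 0 should list rank 1 as an export target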
+ r0 = self.status.get_rank(self.fs.id, 0)
+ self.assertEqual(sorted(r0['export_targets']), [1])
- # pin /1/2 to rank 1
+ def test_redundant(self):
+ # pin /1 to rank 1, then redundantly pin /1/2 to the same rank
+ self.mount_a.setfattr("1", "ceph.dir.pin", "1")
+ self._wait_subtrees([('/1', 1)], status=self.status, rank=1)
self.mount_a.setfattr("1/2", "ceph.dir.pin", "1")
- self._wait_subtrees([('/1', 1), ('/1/2', 1)], status=status, rank=1)
+ self._wait_subtrees([('/1', 1), ('/1/2', 1)], status=self.status, rank=1)
- # change pin /1/2 to rank 0
+ def test_reassignment(self):
+ self.mount_a.setfattr("1/2", "ceph.dir.pin", "1")
+ self._wait_subtrees([('/1/2', 1)], status=self.status, rank=1)
self.mount_a.setfattr("1/2", "ceph.dir.pin", "0")
- self._wait_subtrees([('/1', 1), ('/1/2', 0)], status=status, rank=1)
- self._wait_subtrees([('/1', 1), ('/1/2', 0)], status=status)
+ self._wait_subtrees([('/1/2', 0)], status=self.status, rank=0)
+
+ def test_phantom_rank(self):
+ self.mount_a.setfattr("1", "ceph.dir.pin", "0")
+ self.mount_a.setfattr("1/2", "ceph.dir.pin", "10")
+ time.sleep(30) # wait for nothing weird to happen
+ self._wait_subtrees([('/1', 0)], status=self.status)
- # change pin /1/2/3 to (presently) non-existent rank 2
+ def test_nested(self):
+ self.mount_a.setfattr("1", "ceph.dir.pin", "1")
+ self.mount_a.setfattr("1/2", "ceph.dir.pin", "0")
self.mount_a.setfattr("1/2/3", "ceph.dir.pin", "2")
- self._wait_subtrees([('/1', 1), ('/1/2', 0)], status=status)
+ self._wait_subtrees([('/1', 1), ('/1/2', 0), ('/1/2/3', 2)], status=self.status, rank=2)
+
+ def test_nested_unset(self):
+ self.mount_a.setfattr("1", "ceph.dir.pin", "1")
+ self.mount_a.setfattr("1/2", "ceph.dir.pin", "2")
+ self._wait_subtrees([('/1', 1), ('/1/2', 2)], status=self.status, rank=1)
+ self.mount_a.setfattr("1/2", "ceph.dir.pin", "-1")
+ self._wait_subtrees([('/1', 1)], status=self.status, rank=1)
+
+ def test_rename(self):
+ self.mount_a.setfattr("1", "ceph.dir.pin", "1")
+ self.mount_a.run_shell_payload("mkdir -p 9/8/7")
+ self.mount_a.setfattr("9/8", "ceph.dir.pin", "0")
+ self._wait_subtrees([('/1', 1), ("/9/8", 0)], status=self.status, rank=0)
+ self.mount_a.run_shell_payload("mv 9/8 1/2")
+ self._wait_subtrees([('/1', 1), ("/1/2/8", 0)], status=self.status, rank=0)
- if len(list(status.get_standbys())):
- self.fs.set_max_mds(3)
- self.fs.wait_for_state('up:active', rank=2)
- self._wait_subtrees([('/1', 1), ('/1/2', 0), ('/1/2/3', 2)], status=status)
+ def test_getfattr(self):
+ # pin /1 to rank 1 and /1/2 to rank 0
+ self.mount_a.setfattr("1", "ceph.dir.pin", "1")
+ self.mount_a.setfattr("1/2", "ceph.dir.pin", "0")
+ self._wait_subtrees([('/1', 1), ('/1/2', 0)], status=self.status, rank=1)
if not isinstance(self.mount_a, FuseMount):
    p = self.mount_a.client_remote.sh('uname -r', wait=True)
    # only skip when the kernel is old and the xattr is really unsupported
    dir_pin = self.mount_a.getfattr("1", "ceph.dir.pin")
    if str(p) < "5" and not dir_pin:
        self.skipTest("Kernel does not support getting the extended attribute ceph.dir.pin")
self.assertEqual(self.mount_a.getfattr("1", "ceph.dir.pin"), '1')
self.assertEqual(self.mount_a.getfattr("1/2", "ceph.dir.pin"), '0')
- if (len(self.fs.get_active_names()) > 2):
- self.assertEqual(self.mount_a.getfattr("1/2/3", "ceph.dir.pin"), '2')
def test_export_pin_cache_drop(self):
"""
That the export pin does not prevent empty (nothing in cache) subtree merging.
"""
- self.fs.set_max_mds(2)
- status = self.fs.wait_for_daemons()
- self.mount_a.run_shell_payload(f"mkdir -p foo")
- self.mount_a.setfattr(f"foo", "ceph.dir.pin", "0")
- self.mount_a.run_shell_payload(f"mkdir -p foo/bar/baz && setfattr -n ceph.dir.pin -v 1 foo/bar")
- self._wait_subtrees([('/foo/bar', 1), ('/foo', 0)], status=status)
+ self.mount_a.setfattr(f"1", "ceph.dir.pin", "0")
+ self.mount_a.setfattr(f"1/2", "ceph.dir.pin", "1")
+ self._wait_subtrees([('/1', 0), ('/1/2', 1)], status=self.status)
self.mount_a.umount_wait() # release all caps
def _drop():
- self.fs.ranks_tell(["cache", "drop"], status=status)
+ self.fs.ranks_tell(["cache", "drop"], status=self.status)
# drop cache multiple times to clear replica pins
- self._wait_subtrees([], status=status, action=_drop)
-
- def test_session_race(self):
- """
- Test session creation race.
-
- See: https://tracker.ceph.com/issues/24072#change-113056
- """
-
- self.fs.set_max_mds(2)
- status = self.fs.wait_for_daemons()
-
- rank1 = self.fs.get_rank(rank=1, status=status)
-
- # Create a directory that is pre-exported to rank 1
- self.mount_a.run_shell(["mkdir", "-p", "a/aa"])
- self.mount_a.setfattr("a", "ceph.dir.pin", "1")
- self._wait_subtrees([('/a', 1)], status=status, rank=1)
-
- # Now set the mds config to allow the race
- self.fs.rank_asok(["config", "set", "mds_inject_migrator_session_race", "true"], rank=1)
-
- # Now create another directory and try to export it
- self.mount_b.run_shell(["mkdir", "-p", "b/bb"])
- self.mount_b.setfattr("b", "ceph.dir.pin", "1")
-
- time.sleep(5)
-
- # Now turn off the race so that it doesn't wait again
- self.fs.rank_asok(["config", "set", "mds_inject_migrator_session_race", "false"], rank=1)
-
- # Now try to create a session with rank 1 by accessing a dir known to
- # be there, if buggy, this should cause the rank 1 to crash:
- self.mount_b.run_shell(["ls", "a"])
-
- # Check if rank1 changed (standby tookover?)
- new_rank1 = self.fs.get_rank(rank=1)
- self.assertEqual(rank1['gid'], new_rank1['gid'])
+ self._wait_subtrees([], status=self.status, action=_drop)
class TestEphemeralPins(CephFSTestCase):
MDSS_REQUIRED = 3