from tasks.cephfs.fuse_mount import FuseMount
+from teuthology import contextutil
from teuthology.orchestra import run
from teuthology.orchestra.run import CommandFailedError
from teuthology.contextutil import safe_while
else:
log.info("No core_pattern directory set, nothing to clear (internal.coredump not enabled?)")
- def _wait_subtrees(self, status, rank, test):
- timeout = 30
- pause = 2
+ def _get_subtrees(self, status=None, rank=None):
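+        # Ask the MDS via its admin socket for the subtree map, keeping only
+        # subtrees rooted under "/" (this drops internal subtrees such as
+        # ~mds0). Retried because the asok command can fail transiently.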
+ try:
+ with contextutil.safe_while(sleep=1, tries=3) as proceed:
+ while proceed():
+ try:
+ subtrees = self.fs.rank_asok(["get", "subtrees"], status=status, rank=rank)
+ subtrees = filter(lambda s: s['dir']['path'].startswith('/'), subtrees)
+ return list(subtrees)
+ except CommandFailedError as e:
+                        # Sometimes we get transient errors: EINVAL (exit status 22) while the rank is not yet ready
+ if e.exitstatus == 22:
+ pass
+ else:
+ raise
+ except contextutil.MaxWhileTries as e:
+ raise RuntimeError(f"could not get subtree state from rank {rank}") from e
+
+ def _wait_subtrees(self, test, status=None, rank=None, timeout=30, sleep=2, action=None):
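+        # Wait until the rank's subtree map matches `test`, a list of
+        # (path, auth rank) pairs, polling every `sleep` seconds for up to
+        # `timeout` seconds. `action`, if given, is a callback run after
+        # each failed poll; `status` and `rank` are passed to _get_subtrees.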
test = sorted(test)
- for i in range(timeout // pause):
- subtrees = self.fs.mds_asok(["get", "subtrees"], mds_id=status.get_rank(self.fs.id, rank)['name'])
- subtrees = filter(lambda s: s['dir']['path'].startswith('/'), subtrees)
- filtered = sorted([(s['dir']['path'], s['auth_first']) for s in subtrees])
- log.info("%s =?= %s", filtered, test)
- if filtered == test:
- # Confirm export_pin in output is correct:
- for s in subtrees:
- self.assertTrue(s['export_pin'] == s['auth_first'])
- return subtrees
- time.sleep(pause)
- raise RuntimeError("rank {0} failed to reach desired subtree state".format(rank))
+ try:
+ with contextutil.safe_while(sleep=sleep, tries=timeout//sleep) as proceed:
+ while proceed():
+ subtrees = self._get_subtrees(status=status, rank=rank)
+ filtered = sorted([(s['dir']['path'], s['auth_first']) for s in subtrees])
+ log.info("%s =?= %s", filtered, test)
+ if filtered == test:
+ # Confirm export_pin in output is correct:
+ for s in subtrees:
+                            if s['export_pin'] >= 0:
+                                self.assertEqual(s['export_pin'], s['auth_first'])
+ return subtrees
+ if action is not None:
+ action()
+ except contextutil.MaxWhileTries as e:
+            raise RuntimeError(f"rank {rank} failed to reach desired subtree state") from e
    def _wait_until_scrub_complete(self, path="/", recursive=True):
        # Parentheses matter: list "+" binds tighter than the conditional,
        # so without them "recursive" would always be appended.
        out_json = self.fs.rank_tell(["scrub", "start", path] + (["recursive"] if recursive else []))
        # Poll until the scrub finishes (interval/retry count are illustrative).
        with safe_while(sleep=10, tries=30) as proceed:
            while proceed():
                out_json = self.fs.rank_tell(["scrub", "status"])
                if out_json['status'] == "no active scrubs running":
                    break
-
import logging
import time
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase
+from teuthology.orchestra.run import CommandFailedError, Raw
log = logging.getLogger(__name__)
status = self.fs.status()
self.mount_a.run_shell(["mkdir", "-p", "1/2/3"])
- self._wait_subtrees(status, 0, [])
+ self._wait_subtrees([], status=status)
# NOP
self.mount_a.setfattr("1", "ceph.dir.pin", "-1")
- self._wait_subtrees(status, 0, [])
+ self._wait_subtrees([], status=status)
# NOP (rank < -1)
self.mount_a.setfattr("1", "ceph.dir.pin", "-2341")
- self._wait_subtrees(status, 0, [])
+ self._wait_subtrees([], status=status)
# pin /1 to rank 1
self.mount_a.setfattr("1", "ceph.dir.pin", "1")
- self._wait_subtrees(status, 1, [('/1', 1)])
+ self._wait_subtrees([('/1', 1)], status=status, rank=1)
# Check export_targets is set properly
status = self.fs.status()
# redundant pin /1/2 to rank 1
self.mount_a.setfattr("1/2", "ceph.dir.pin", "1")
- self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1)])
+ self._wait_subtrees([('/1', 1), ('/1/2', 1)], status=status, rank=1)
# change pin /1/2 to rank 0
self.mount_a.setfattr("1/2", "ceph.dir.pin", "0")
- self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 0)])
- self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)])
+ self._wait_subtrees([('/1', 1), ('/1/2', 0)], status=status, rank=1)
+ self._wait_subtrees([('/1', 1), ('/1/2', 0)], status=status)
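+        # both ranks should converge on the same authoritative subtree map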
# change pin /1/2/3 to (presently) non-existent rank 2
self.mount_a.setfattr("1/2/3", "ceph.dir.pin", "2")
- self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)])
- self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 0)])
+ self._wait_subtrees([('/1', 1), ('/1/2', 0)], status=status)
+ self._wait_subtrees([('/1', 1), ('/1/2', 0)], status=status, rank=1)
# change pin /1/2 back to rank 1
self.mount_a.setfattr("1/2", "ceph.dir.pin", "1")
- self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1)])
+ self._wait_subtrees([('/1', 1), ('/1/2', 1)], status=status, rank=1)
# add another directory pinned to 1
self.mount_a.run_shell(["mkdir", "-p", "1/4/5"])
self.mount_a.setfattr("1/4/5", "ceph.dir.pin", "1")
- self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1), ('/1/4/5', 1)])
+ self._wait_subtrees([('/1', 1), ('/1/2', 1), ('/1/4/5', 1)], status=status, rank=1)
# change pin /1 to 0
self.mount_a.setfattr("1", "ceph.dir.pin", "0")
- self._wait_subtrees(status, 0, [('/1', 0), ('/1/2', 1), ('/1/4/5', 1)])
+ self._wait_subtrees([('/1', 0), ('/1/2', 1), ('/1/4/5', 1)], status=status)
        # change pin /1/2 to default (-1); does the subtree root properly respect its parent pin?
self.mount_a.setfattr("1/2", "ceph.dir.pin", "-1")
- self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1)])
+ self._wait_subtrees([('/1', 0), ('/1/4/5', 1)], status=status)
if len(list(status.get_standbys())):
self.fs.set_max_mds(3)
self.fs.wait_for_state('up:active', rank=2)
- self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2)])
+ self._wait_subtrees([('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2)], status=status)
# Check export_targets is set properly
status = self.fs.status()
self.mount_a.setfattr("a", "ceph.dir.pin", "1")
self.mount_a.setfattr("aa/bb", "ceph.dir.pin", "0")
if (len(self.fs.get_active_names()) > 2):
- self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2), ('/a', 1), ('/aa/bb', 0)])
+ self._wait_subtrees([('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2), ('/a', 1), ('/aa/bb', 0)], status=status)
else:
- self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/a', 1), ('/aa/bb', 0)])
+ self._wait_subtrees([('/1', 0), ('/1/4/5', 1), ('/a', 1), ('/aa/bb', 0)], status=status)
self.mount_a.run_shell(["mv", "aa", "a/b/"])
if (len(self.fs.get_active_names()) > 2):
- self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2), ('/a', 1), ('/a/b/aa/bb', 0)])
+ self._wait_subtrees([('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2), ('/a', 1), ('/a/b/aa/bb', 0)], status=status)
else:
- self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/a', 1), ('/a/b/aa/bb', 0)])
+ self._wait_subtrees([('/1', 0), ('/1/4/5', 1), ('/a', 1), ('/a/b/aa/bb', 0)], status=status)
def test_export_pin_getfattr(self):
self.fs.set_max_mds(2)
status = self.fs.wait_for_daemons()
self.mount_a.run_shell(["mkdir", "-p", "1/2/3"])
- self._wait_subtrees(status, 0, [])
+ self._wait_subtrees([], status=status)
# pin /1 to rank 0
self.mount_a.setfattr("1", "ceph.dir.pin", "1")
- self._wait_subtrees(status, 1, [('/1', 1)])
+ self._wait_subtrees([('/1', 1)], status=status, rank=1)
# pin /1/2 to rank 1
self.mount_a.setfattr("1/2", "ceph.dir.pin", "1")
- self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1)])
+ self._wait_subtrees([('/1', 1), ('/1/2', 1)], status=status, rank=1)
# change pin /1/2 to rank 0
self.mount_a.setfattr("1/2", "ceph.dir.pin", "0")
- self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 0)])
- self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)])
+ self._wait_subtrees([('/1', 1), ('/1/2', 0)], status=status, rank=1)
+ self._wait_subtrees([('/1', 1), ('/1/2', 0)], status=status)
# change pin /1/2/3 to (presently) non-existent rank 2
self.mount_a.setfattr("1/2/3", "ceph.dir.pin", "2")
- self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)])
+ self._wait_subtrees([('/1', 1), ('/1/2', 0)], status=status)
if len(list(status.get_standbys())):
self.fs.set_max_mds(3)
self.fs.wait_for_state('up:active', rank=2)
- self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0), ('/1/2/3', 2)])
+ self._wait_subtrees([('/1', 1), ('/1/2', 0), ('/1/2/3', 2)], status=status)
if not isinstance(self.mount_a, FuseMount):
p = self.mount_a.client_remote.sh('uname -r', wait=True)
# Create a directory that is pre-exported to rank 1
self.mount_a.run_shell(["mkdir", "-p", "a/aa"])
self.mount_a.setfattr("a", "ceph.dir.pin", "1")
- self._wait_subtrees(status, 1, [('/a', 1)])
+ self._wait_subtrees([('/a', 1)], status=status, rank=1)
# Now set the mds config to allow the race
self.fs.rank_asok(["config", "set", "mds_inject_migrator_session_race", "true"], rank=1)
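        # The knob above is a debug-only injection point; it widens the
        # migrator's session-handling race window so this test can hit it.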