git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
qa: refactor _wait_subtrees and _get_subtrees
author Patrick Donnelly <pdonnell@redhat.com>
Fri, 5 Jun 2020 02:40:00 +0000 (19:40 -0700)
committer Patrick Donnelly <pdonnell@redhat.com>
Wed, 24 Jun 2020 22:43:30 +0000 (15:43 -0700)
Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
qa/tasks/cephfs/cephfs_test_case.py
qa/tasks/cephfs/mount.py
qa/tasks/cephfs/test_exports.py
qa/tasks/cephfs/test_sessionmap.py

diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
index e69941dfd849d7370464ccd7e0b2a1ecf3591c5e..69e1e141bd956db80485fd7d644888b3bed56f56 100644 (file)
--- a/qa/tasks/cephfs/cephfs_test_case.py
+++ b/qa/tasks/cephfs/cephfs_test_case.py
@@ -7,6 +7,7 @@ import re
 
 from tasks.cephfs.fuse_mount import FuseMount
 
+from teuthology import contextutil
 from teuthology.orchestra import run
 from teuthology.orchestra.run import CommandFailedError
 from teuthology.contextutil import safe_while
@@ -287,22 +288,41 @@ class CephFSTestCase(CephTestCase):
         else:
             log.info("No core_pattern directory set, nothing to clear (internal.coredump not enabled?)")
 
-    def _wait_subtrees(self, status, rank, test):
-        timeout = 30
-        pause = 2
+    def _get_subtrees(self, status=None, rank=None):
+        try:
+            with contextutil.safe_while(sleep=1, tries=3) as proceed:
+                while proceed():
+                    try:
+                        subtrees = self.fs.rank_asok(["get", "subtrees"], status=status, rank=rank)
+                        subtrees = filter(lambda s: s['dir']['path'].startswith('/'), subtrees)
+                        return list(subtrees)
+                    except CommandFailedError as e:
+                        # Sometimes we get transient errors
+                        if e.exitstatus == 22:
+                            pass
+                        else:
+                            raise
+        except contextutil.MaxWhileTries as e:
+            raise RuntimeError(f"could not get subtree state from rank {rank}") from e
+
+    def _wait_subtrees(self, test, status=None, rank=None, timeout=30, sleep=2, action=None):
         test = sorted(test)
-        for i in range(timeout // pause):
-            subtrees = self.fs.mds_asok(["get", "subtrees"], mds_id=status.get_rank(self.fs.id, rank)['name'])
-            subtrees = filter(lambda s: s['dir']['path'].startswith('/'), subtrees)
-            filtered = sorted([(s['dir']['path'], s['auth_first']) for s in subtrees])
-            log.info("%s =?= %s", filtered, test)
-            if filtered == test:
-                # Confirm export_pin in output is correct:
-                for s in subtrees:
-                    self.assertTrue(s['export_pin'] == s['auth_first'])
-                return subtrees
-            time.sleep(pause)
-        raise RuntimeError("rank {0} failed to reach desired subtree state".format(rank))
+        try:
+            with contextutil.safe_while(sleep=sleep, tries=timeout//sleep) as proceed:
+                while proceed():
+                    subtrees = self._get_subtrees(status=status, rank=rank)
+                    filtered = sorted([(s['dir']['path'], s['auth_first']) for s in subtrees])
+                    log.info("%s =?= %s", filtered, test)
+                    if filtered == test:
+                        # Confirm export_pin in output is correct:
+                        for s in subtrees:
+                            if s['export_pin'] >= 0:
+                                self.assertTrue(s['export_pin'] == s['auth_first'])
+                        return subtrees
+                    if action is not None:
+                        action()
+        except contextutil.MaxWhileTries as e:
+            raise RuntimeError("rank {0} failed to reach desired subtree state".format(rank)) from e
 
     def _wait_until_scrub_complete(self, path="/", recursive=True):
     out_json = self.fs.rank_tell(["scrub", "start", path] + (["recursive"] if recursive else []))
@@ -311,4 +331,3 @@ class CephFSTestCase(CephTestCase):
                 out_json = self.fs.rank_tell(["scrub", "status"])
                 if out_json['status'] == "no active scrubs running":
                     break;
-
diff --git a/qa/tasks/cephfs/mount.py b/qa/tasks/cephfs/mount.py
index 92e322f19d2f755fa6b35b7b0ec82bfb14567295..08a9f3a932ac1e1edc8aa172c11b329f54a4071a 100644 (file)
--- a/qa/tasks/cephfs/mount.py
+++ b/qa/tasks/cephfs/mount.py
@@ -10,7 +10,7 @@ import os
 import re
 from IPy import IP
 from teuthology.orchestra import run
-from teuthology.orchestra.run import CommandFailedError, ConnectionLostError
+from teuthology.orchestra.run import CommandFailedError, ConnectionLostError, Raw
 from tasks.cephfs.filesystem import Filesystem
 
 log = logging.getLogger(__name__)
@@ -513,6 +513,9 @@ class CephFSMount(object):
                                       stdout=StringIO(), stderr=StringIO(),
                                       cwd=cwd, check_status=check_status)
 
+    def run_shell_payload(self, payload, **kwargs):
+        return self.run_shell(["bash", "-c", Raw(f"'{payload}'")], **kwargs)
+
     def run_as_user(self, **kwargs):
         """
         Besides the arguments defined for run_shell() this method also
diff --git a/qa/tasks/cephfs/test_exports.py b/qa/tasks/cephfs/test_exports.py
index c56f6aaf867520e049fa7c00d21c69be00d393e7..d53a33ef3a11397896398e0910f0b5016375acd4 100644 (file)
--- a/qa/tasks/cephfs/test_exports.py
+++ b/qa/tasks/cephfs/test_exports.py
@@ -2,6 +2,7 @@ import logging
 import time
 from tasks.cephfs.fuse_mount import FuseMount
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
+from teuthology.orchestra.run import CommandFailedError, Raw
 
 log = logging.getLogger(__name__)
 
@@ -16,19 +17,19 @@ class TestExports(CephFSTestCase):
         status = self.fs.status()
 
         self.mount_a.run_shell(["mkdir", "-p", "1/2/3"])
-        self._wait_subtrees(status, 0, [])
+        self._wait_subtrees([], status=status)
 
         # NOP
         self.mount_a.setfattr("1", "ceph.dir.pin", "-1")
-        self._wait_subtrees(status, 0, [])
+        self._wait_subtrees([], status=status)
 
         # NOP (rank < -1)
         self.mount_a.setfattr("1", "ceph.dir.pin", "-2341")
-        self._wait_subtrees(status, 0, [])
+        self._wait_subtrees([], status=status)
 
         # pin /1 to rank 1
         self.mount_a.setfattr("1", "ceph.dir.pin", "1")
-        self._wait_subtrees(status, 1, [('/1', 1)])
+        self._wait_subtrees([('/1', 1)], status=status, rank=1)
 
         # Check export_targets is set properly
         status = self.fs.status()
@@ -38,39 +39,39 @@ class TestExports(CephFSTestCase):
 
         # redundant pin /1/2 to rank 1
         self.mount_a.setfattr("1/2", "ceph.dir.pin", "1")
-        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1)])
+        self._wait_subtrees([('/1', 1), ('/1/2', 1)], status=status, rank=1)
 
         # change pin /1/2 to rank 0
         self.mount_a.setfattr("1/2", "ceph.dir.pin", "0")
-        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 0)])
-        self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)])
+        self._wait_subtrees([('/1', 1), ('/1/2', 0)], status=status, rank=1)
+        self._wait_subtrees([('/1', 1), ('/1/2', 0)], status=status)
 
         # change pin /1/2/3 to (presently) non-existent rank 2
         self.mount_a.setfattr("1/2/3", "ceph.dir.pin", "2")
-        self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)])
-        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 0)])
+        self._wait_subtrees([('/1', 1), ('/1/2', 0)], status=status)
+        self._wait_subtrees([('/1', 1), ('/1/2', 0)], status=status, rank=1)
 
         # change pin /1/2 back to rank 1
         self.mount_a.setfattr("1/2", "ceph.dir.pin", "1")
-        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1)])
+        self._wait_subtrees([('/1', 1), ('/1/2', 1)], status=status, rank=1)
 
         # add another directory pinned to 1
         self.mount_a.run_shell(["mkdir", "-p", "1/4/5"])
         self.mount_a.setfattr("1/4/5", "ceph.dir.pin", "1")
-        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1), ('/1/4/5', 1)])
+        self._wait_subtrees([('/1', 1), ('/1/2', 1), ('/1/4/5', 1)], status=status, rank=1)
 
         # change pin /1 to 0
         self.mount_a.setfattr("1", "ceph.dir.pin", "0")
-        self._wait_subtrees(status, 0, [('/1', 0), ('/1/2', 1), ('/1/4/5', 1)])
+        self._wait_subtrees([('/1', 0), ('/1/2', 1), ('/1/4/5', 1)], status=status)
 
         # change pin /1/2 to default (-1); does the subtree root properly respect its parent pin?
         self.mount_a.setfattr("1/2", "ceph.dir.pin", "-1")
-        self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1)])
+        self._wait_subtrees([('/1', 0), ('/1/4/5', 1)], status=status)
 
         if len(list(status.get_standbys())):
             self.fs.set_max_mds(3)
             self.fs.wait_for_state('up:active', rank=2)
-            self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2)])
+            self._wait_subtrees([('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2)], status=status)
 
             # Check export_targets is set properly
             status = self.fs.status()
@@ -87,43 +88,43 @@ class TestExports(CephFSTestCase):
         self.mount_a.setfattr("a", "ceph.dir.pin", "1")
         self.mount_a.setfattr("aa/bb", "ceph.dir.pin", "0")
         if (len(self.fs.get_active_names()) > 2):
-            self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2), ('/a', 1), ('/aa/bb', 0)])
+            self._wait_subtrees([('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2), ('/a', 1), ('/aa/bb', 0)], status=status)
         else:
-            self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/a', 1), ('/aa/bb', 0)])
+            self._wait_subtrees([('/1', 0), ('/1/4/5', 1), ('/a', 1), ('/aa/bb', 0)], status=status)
         self.mount_a.run_shell(["mv", "aa", "a/b/"])
         if (len(self.fs.get_active_names()) > 2):
-            self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2), ('/a', 1), ('/a/b/aa/bb', 0)])
+            self._wait_subtrees([('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2), ('/a', 1), ('/a/b/aa/bb', 0)], status=status)
         else:
-            self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/a', 1), ('/a/b/aa/bb', 0)])
+            self._wait_subtrees([('/1', 0), ('/1/4/5', 1), ('/a', 1), ('/a/b/aa/bb', 0)], status=status)
 
     def test_export_pin_getfattr(self):
         self.fs.set_max_mds(2)
         status = self.fs.wait_for_daemons()
 
         self.mount_a.run_shell(["mkdir", "-p", "1/2/3"])
-        self._wait_subtrees(status, 0, [])
+        self._wait_subtrees([], status=status)
 
         # pin /1 to rank 1
         self.mount_a.setfattr("1", "ceph.dir.pin", "1")
-        self._wait_subtrees(status, 1, [('/1', 1)])
+        self._wait_subtrees([('/1', 1)], status=status, rank=1)
 
         # pin /1/2 to rank 1
         self.mount_a.setfattr("1/2", "ceph.dir.pin", "1")
-        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1)])
+        self._wait_subtrees([('/1', 1), ('/1/2', 1)], status=status, rank=1)
 
         # change pin /1/2 to rank 0
         self.mount_a.setfattr("1/2", "ceph.dir.pin", "0")
-        self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 0)])
-        self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)])
+        self._wait_subtrees([('/1', 1), ('/1/2', 0)], status=status, rank=1)
+        self._wait_subtrees([('/1', 1), ('/1/2', 0)], status=status)
 
          # change pin /1/2/3 to (presently) non-existent rank 2
         self.mount_a.setfattr("1/2/3", "ceph.dir.pin", "2")
-        self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)])
+        self._wait_subtrees([('/1', 1), ('/1/2', 0)], status=status)
 
         if len(list(status.get_standbys())):
             self.fs.set_max_mds(3)
             self.fs.wait_for_state('up:active', rank=2)
-            self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0), ('/1/2/3', 2)])
+            self._wait_subtrees([('/1', 1), ('/1/2', 0), ('/1/2/3', 2)], status=status)
 
         if not isinstance(self.mount_a, FuseMount):
             p = self.mount_a.client_remote.sh('uname -r', wait=True)
@@ -151,7 +152,7 @@ class TestExports(CephFSTestCase):
         # Create a directory that is pre-exported to rank 1
         self.mount_a.run_shell(["mkdir", "-p", "a/aa"])
         self.mount_a.setfattr("a", "ceph.dir.pin", "1")
-        self._wait_subtrees(status, 1, [('/a', 1)])
+        self._wait_subtrees([('/a', 1)], status=status, rank=1)
 
         # Now set the mds config to allow the race
         self.fs.rank_asok(["config", "set", "mds_inject_migrator_session_race", "true"], rank=1)
diff --git a/qa/tasks/cephfs/test_sessionmap.py b/qa/tasks/cephfs/test_sessionmap.py
index 4446a15c17d2c423e8b0c0d642d80b1f51ab6913..4628a4a1c9b6f7ec996ee1bb816a4749bda96958 100644 (file)
--- a/qa/tasks/cephfs/test_sessionmap.py
+++ b/qa/tasks/cephfs/test_sessionmap.py
@@ -202,7 +202,7 @@ class TestSessionMap(CephFSTestCase):
         self.mount_a.run_shell(["mkdir", "d0", "d1"])
         self.mount_a.setfattr("d0", "ceph.dir.pin", "0")
         self.mount_a.setfattr("d1", "ceph.dir.pin", "1")
-        self._wait_subtrees(status, 0, [('/d0', 0), ('/d1', 1)])
+        self._wait_subtrees([('/d0', 0), ('/d1', 1)], status=status)
 
         self.mount_a.run_shell(["touch", "d0/f0"])
         self.mount_a.run_shell(["touch", "d1/f0"])