qa: remove usage of mds dump
author Patrick Donnelly <pdonnell@redhat.com>
Tue, 24 Oct 2017 18:32:43 +0000 (11:32 -0700)
committer Patrick Donnelly <pdonnell@redhat.com>
Tue, 24 Oct 2017 18:32:43 +0000 (11:32 -0700)
Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
qa/tasks/ceph_manager.py
qa/tasks/cephfs/cephfs_test_case.py
qa/tasks/cephfs/test_failover.py
qa/tasks/mds_creation_failure.py
qa/tasks/vstart_runner.py

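The helpers removed below shelled out to "ceph mds dump --format=json" and scanned the decoded map by hand; the callers are switched to the filesystem status accessor instead. A minimal sketch of the new pattern, assuming a test context with a Filesystem-like object fs whose status() returns the FSStatus wrapper used in qa/tasks/cephfs (names here are illustrative, not an excerpt of that module):

    def mds_state(fs, name):
        # One snapshot of the map, then a by-name lookup; get_mds() returns
        # the per-daemon info dict, or None if no such daemon is in the map.
        info = fs.status().get_mds(name)
        return info['state'] if info else None   # e.g. 'up:active'
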
index 3cc74711e5768d7f743046647c38f73f9470bb07..5374378b0cfc0db24f2bee8124db4565580454d9 100644 (file)
@@ -2510,18 +2510,6 @@ class CephManager:
             self.log('health:\n{h}'.format(h=out))
         return json.loads(out)
 
-    def get_mds_status(self, mds):
-        """
-        Run cluster commands for the mds in order to get mds information
-        """
-        out = self.raw_cluster_cmd('mds', 'dump', '--format=json')
-        j = json.loads(' '.join(out.splitlines()[1:]))
-        # collate; for dup ids, larger gid wins.
-        for info in j['info'].itervalues():
-            if info['name'] == mds:
-                return info
-        return None
-
     def get_filepath(self):
         """
         Return path to osd data with {id} needing to be replaced
index 801d0d3114c4adb6971cee81cabce98d0921eeda..e6261ecccf294dae82865b4977dadbfc8a93cc82 100644 (file)
@@ -287,7 +287,7 @@ class CephFSTestCase(CephTestCase):
 
                 # Determine the PID of the crashed MDS by inspecting the MDSMap, it had
                 # to talk to the mons to get assigned a rank to reach the point of crashing
-                addr = self.mds_cluster.mon_manager.get_mds_status(daemon_id)['addr']
+                addr = self.mds_cluster.status().get_mds(daemon_id)['addr']
                 pid_str = addr.split("/")[1]
                 log.info("Determined crasher PID was {0}".format(pid_str))
 
index 3dfb29889904fa7d2ff56c32b4244896d13507fe..3306e9441c712a8462230a4c21e7db11ff5ed93a 100644 (file)
@@ -69,7 +69,7 @@ class TestFailover(CephFSTestCase):
 
         # Check it's not laggy to begin with
         (original_active, ) = self.fs.get_active_names()
-        self.assertNotIn("laggy_since", self.fs.mon_manager.get_mds_status(original_active))
+        self.assertNotIn("laggy_since", self.fs.status().get_mds(original_active))
 
         self.mounts[0].umount_wait()
 
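The assertion holds because a laggy_since timestamp only appears in a daemon's map entry once the monitors have stopped seeing its beacons; a healthy active MDS has no such key. The same check, written as a standalone sketch against the assumed fs/status objects from the earlier example:

    def is_laggy(fs, name):
        info = fs.status().get_mds(name)
        # Key presence is the signal: the mons add 'laggy_since' only after
        # the daemon has missed beacons.
        return info is not None and 'laggy_since' in info
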
index 16422929adb9a8f8070736013c64826198c8bfdf..aa2d6dbf2c062bd10cfee205402e0d6086d616ee 100644 (file)
@@ -54,30 +54,14 @@ def task(ctx, config):
     mds_remote.run(args=['rm', '-f', Raw("{archive}/coredump/*.core".format(archive=misc.get_archive_dir(ctx)))])
 
     # It should have left the MDS map state still in CREATING
-    status = manager.get_mds_status(mds_id)
+    status = self.fs.status().get_mds(mds_id)
     assert status['state'] == 'up:creating'
 
     # Start the MDS again without the kill flag set, it should proceed with creation successfully
     mds.restart()
 
     # Wait for state ACTIVE
-    t = 0
-    create_timeout = 120
-    while True:
-        status = manager.get_mds_status(mds_id)
-        if status['state'] == 'up:active':
-            log.info("MDS creation completed successfully")
-            break
-        elif status['state'] == 'up:creating':
-            log.info("MDS still in creating state")
-            if t > create_timeout:
-                log.error("Creating did not complete within %ss" % create_timeout)
-                raise RuntimeError("Creating did not complete within %ss" % create_timeout)
-            t += 1
-            time.sleep(1)
-        else:
-            log.error("Unexpected MDS state: %s" % status['state'])
-            assert(status['state'] in ['up:active', 'up:creating'])
+    self.fs.wait_for_state("up:active", timeout=120, mds_id=mds_id)
 
     # The system should be back up in a happy healthy state, go ahead and run any further tasks
     # inside this context.
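The removed block is a poll-until-state-or-timeout loop; the wait_for_state() call added in its place expresses the same idea. A simplified sketch of that style of helper (not the actual qa/tasks/cephfs/filesystem.py implementation), reusing the assumed fs object from the earlier examples:

    import time

    def wait_for_mds_state(fs, mds_id, goal_state, timeout=120):
        # Re-read the map once a second until the named daemon reaches
        # goal_state, raising if the timeout elapses first -- the same
        # shape as the loop this commit deletes.
        elapsed = 0
        while True:
            info = fs.status().get_mds(mds_id)
            if info and info['state'] == goal_state:
                return
            if elapsed > timeout:
                raise RuntimeError("MDS %s did not reach %s within %ds"
                                   % (mds_id, goal_state, timeout))
            time.sleep(1)
            elapsed += 1
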
index 1526a9414d4d9b382f6a17c75c741c9d85f6c4e4..47eb93800e6f0c8fcd327208d3775827d127d7d0 100644 (file)
@@ -572,40 +572,6 @@ class LocalCephManager(CephManager):
             args=[os.path.join(BIN_PREFIX, "ceph"), "daemon", "{0}.{1}".format(daemon_type, daemon_id)] + command, check_status=check_status
         )
 
-    # FIXME: copypasta
-    def get_mds_status(self, mds):
-        """
-        Run cluster commands for the mds in order to get mds information
-        """
-        out = self.raw_cluster_cmd('mds', 'dump', '--format=json')
-        j = json.loads(' '.join(out.splitlines()[1:]))
-        # collate; for dup ids, larger gid wins.
-        for info in j['info'].itervalues():
-            if info['name'] == mds:
-                return info
-        return None
-
-    # FIXME: copypasta
-    def get_mds_status_by_rank(self, rank):
-        """
-        Run cluster commands for the mds in order to get mds information
-        check rank.
-        """
-        j = self.get_mds_status_all()
-        # collate; for dup ids, larger gid wins.
-        for info in j['info'].itervalues():
-            if info['rank'] == rank:
-                return info
-        return None
-
-    def get_mds_status_all(self):
-        """
-        Run cluster command to extract all the mds status.
-        """
-        out = self.raw_cluster_cmd('mds', 'dump', '--format=json')
-        j = json.loads(' '.join(out.splitlines()[1:]))
-        return j
-
 
 class LocalCephCluster(CephCluster):
     def __init__(self, ctx):
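For reference, the by-rank scan that the removed get_mds_status_by_rank() performed, written against an already-decoded map dict rather than the raw command output (a sketch of the old logic without the Python-2-only itervalues(), not the accessor the remaining callers now use):

    def mds_info_by_rank(mdsmap, rank):
        # mdsmap is the decoded 'mds dump' JSON; scan its per-daemon info
        # table for the holder of the given rank.
        for info in mdsmap['info'].values():
            if info.get('rank') == rank:
                return info
        return None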