From 3a5f090a1e56e50f0468463b597df9f969dd5ac4 Mon Sep 17 00:00:00 2001
From: Patrick Donnelly
Date: Tue, 24 Oct 2017 11:32:43 -0700
Subject: [PATCH] qa: remove usage of mds dump

Signed-off-by: Patrick Donnelly
---
 qa/tasks/ceph_manager.py            | 12 ----------
 qa/tasks/cephfs/cephfs_test_case.py |  2 +-
 qa/tasks/cephfs/test_failover.py    |  2 +-
 qa/tasks/mds_creation_failure.py    | 20 ++---------------
 qa/tasks/vstart_runner.py           | 34 -----------------------------
 5 files changed, 4 insertions(+), 66 deletions(-)

diff --git a/qa/tasks/ceph_manager.py b/qa/tasks/ceph_manager.py
index 3cc74711e57..5374378b0cf 100644
--- a/qa/tasks/ceph_manager.py
+++ b/qa/tasks/ceph_manager.py
@@ -2510,18 +2510,6 @@ class CephManager:
         self.log('health:\n{h}'.format(h=out))
         return json.loads(out)
 
-    def get_mds_status(self, mds):
-        """
-        Run cluster commands for the mds in order to get mds information
-        """
-        out = self.raw_cluster_cmd('mds', 'dump', '--format=json')
-        j = json.loads(' '.join(out.splitlines()[1:]))
-        # collate; for dup ids, larger gid wins.
-        for info in j['info'].itervalues():
-            if info['name'] == mds:
-                return info
-        return None
-
     def get_filepath(self):
         """
         Return path to osd data with {id} needing to be replaced
diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
index 801d0d3114c..e6261ecccf2 100644
--- a/qa/tasks/cephfs/cephfs_test_case.py
+++ b/qa/tasks/cephfs/cephfs_test_case.py
@@ -287,7 +287,7 @@ class CephFSTestCase(CephTestCase):
 
         # Determine the PID of the crashed MDS by inspecting the MDSMap, it had
         # to talk to the mons to get assigned a rank to reach the point of crashing
-        addr = self.mds_cluster.mon_manager.get_mds_status(daemon_id)['addr']
+        addr = self.mds_cluster.status().get_mds(daemon_id)['addr']
         pid_str = addr.split("/")[1]
         log.info("Determined crasher PID was {0}".format(pid_str))
 
diff --git a/qa/tasks/cephfs/test_failover.py b/qa/tasks/cephfs/test_failover.py
index 3dfb2988990..3306e9441c7 100644
--- a/qa/tasks/cephfs/test_failover.py
+++ b/qa/tasks/cephfs/test_failover.py
@@ -69,7 +69,7 @@ class TestFailover(CephFSTestCase):
 
         # Check it's not laggy to begin with
         (original_active, ) = self.fs.get_active_names()
-        self.assertNotIn("laggy_since", self.fs.mon_manager.get_mds_status(original_active))
+        self.assertNotIn("laggy_since", self.fs.status().get_mds(original_active))
 
         self.mounts[0].umount_wait()
 
diff --git a/qa/tasks/mds_creation_failure.py b/qa/tasks/mds_creation_failure.py
index 16422929adb..aa2d6dbf2c0 100644
--- a/qa/tasks/mds_creation_failure.py
+++ b/qa/tasks/mds_creation_failure.py
@@ -54,30 +54,14 @@
     mds_remote.run(args=['rm', '-f', Raw("{archive}/coredump/*.core".format(archive=misc.get_archive_dir(ctx)))])
 
     # It should have left the MDS map state still in CREATING
-    status = manager.get_mds_status(mds_id)
+    status = self.fs.status().get_mds(mds_id)
     assert status['state'] == 'up:creating'
 
     # Start the MDS again without the kill flag set, it should proceed with creation successfully
     mds.restart()
 
     # Wait for state ACTIVE
-    t = 0
-    create_timeout = 120
-    while True:
-        status = manager.get_mds_status(mds_id)
-        if status['state'] == 'up:active':
-            log.info("MDS creation completed successfully")
-            break
-        elif status['state'] == 'up:creating':
-            log.info("MDS still in creating state")
-            if t > create_timeout:
-                log.error("Creating did not complete within %ss" % create_timeout)
-                raise RuntimeError("Creating did not complete within %ss" % create_timeout)
-            t += 1
-            time.sleep(1)
-        else:
-            log.error("Unexpected MDS state: %s" % status['state'])
-            assert(status['state'] in ['up:active', 'up:creating'])
+    self.fs.wait_for_state("up:active", timeout=120, mds_id=mds_id)
 
     # The system should be back up in a happy healthy state, go ahead and run any further tasks
     # inside this context.
diff --git a/qa/tasks/vstart_runner.py b/qa/tasks/vstart_runner.py
index 1526a9414d4..47eb93800e6 100644
--- a/qa/tasks/vstart_runner.py
+++ b/qa/tasks/vstart_runner.py
@@ -572,40 +572,6 @@
             args=[os.path.join(BIN_PREFIX, "ceph"), "daemon", "{0}.{1}".format(daemon_type, daemon_id)] + command, check_status=check_status
         )
 
-    # FIXME: copypasta
-    def get_mds_status(self, mds):
-        """
-        Run cluster commands for the mds in order to get mds information
-        """
-        out = self.raw_cluster_cmd('mds', 'dump', '--format=json')
-        j = json.loads(' '.join(out.splitlines()[1:]))
-        # collate; for dup ids, larger gid wins.
-        for info in j['info'].itervalues():
-            if info['name'] == mds:
-                return info
-        return None
-
-    # FIXME: copypasta
-    def get_mds_status_by_rank(self, rank):
-        """
-        Run cluster commands for the mds in order to get mds information
-        check rank.
-        """
-        j = self.get_mds_status_all()
-        # collate; for dup ids, larger gid wins.
-        for info in j['info'].itervalues():
-            if info['rank'] == rank:
-                return info
-        return None
-
-    def get_mds_status_all(self):
-        """
-        Run cluster command to extract all the mds status.
-        """
-        out = self.raw_cluster_cmd('mds', 'dump', '--format=json')
-        j = json.loads(' '.join(out.splitlines()[1:]))
-        return j
-
 
 class LocalCephCluster(CephCluster):
     def __init__(self, ctx):
-- 
2.39.5