From b89f3ca9417c3e14cb9291c38db77d2ec0706977 Mon Sep 17 00:00:00 2001
From: Patrick Donnelly
Date: Tue, 14 Jan 2020 16:10:34 -0800
Subject: [PATCH] qa: prefer rank_asok

This is a trivial refactor.

Signed-off-by: Patrick Donnelly
---
 qa/tasks/cephfs/test_full.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/qa/tasks/cephfs/test_full.py b/qa/tasks/cephfs/test_full.py
index 279c21ebe2d..93b583574ff 100644
--- a/qa/tasks/cephfs/test_full.py
+++ b/qa/tasks/cephfs/test_full.py
@@ -91,7 +91,7 @@ class FullnessTestCase(CephFSTestCase):
         self.assertEqual(mount_b_epoch, mount_b_initial_epoch)
 
         # Set a barrier on the MDS
-        self.fs.mds_asok(["osdmap", "barrier", new_epoch.__str__()], mds_id=self.active_mds_id)
+        self.fs.rank_asok(["osdmap", "barrier", new_epoch.__str__()])
 
         # Do an operation on client B, witness that it ends up with
         # the latest OSD map from the barrier.  This shouldn't generate any
@@ -161,7 +161,7 @@ class FullnessTestCase(CephFSTestCase):
         # while in the full state.
         osd_epoch = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['epoch']
         self.wait_until_true(
-            lambda: self.fs.mds_asok(['status'], mds_id=self.active_mds_id)['osdmap_epoch'] >= osd_epoch,
+            lambda: self.fs.rank_asok(['status'])['osdmap_epoch'] >= osd_epoch,
             timeout=10)
 
         if not self.data_only:
@@ -190,7 +190,7 @@ class FullnessTestCase(CephFSTestCase):
         # be applying the free space policy
         osd_epoch = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['epoch']
         self.wait_until_true(
-            lambda: self.fs.mds_asok(['status'], mds_id=self.active_mds_id)['osdmap_epoch'] >= osd_epoch,
+            lambda: self.fs.rank_asok(['status'])['osdmap_epoch'] >= osd_epoch,
             timeout=10)
 
         # Now I should be able to write again
-- 
2.39.5
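
Reviewer note on the refactor: mds_asok() addresses an MDS daemon by name, so
test code had to carry the active daemon's id around (mds_id=self.active_mds_id),
whereas rank_asok() resolves whichever daemon currently holds a rank (rank 0 by
default) at call time. The sketch below is a hypothetical approximation of that
difference, not the real qa/tasks/cephfs/filesystem.py implementation; the names
FilesystemSketch, _asok, and rank_to_daemon are invented for illustration only.

class FilesystemSketch:
    def __init__(self, rank_to_daemon):
        # Which daemon currently holds each rank, e.g. {0: "mds.a"}.
        self.rank_to_daemon = rank_to_daemon

    def _asok(self, daemon_name, command):
        # Stand-in for the real admin-socket call (hypothetical).
        return {"daemon": daemon_name, "command": command}

    def mds_asok(self, command, mds_id):
        # Old style: the caller must know and pass the active daemon's name.
        return self._asok(mds_id, command)

    def rank_asok(self, command, rank=0):
        # New style: resolve the daemon holding `rank` at call time, so
        # callers no longer need to track the active MDS id themselves.
        return self.mds_asok(command, mds_id=self.rank_to_daemon[rank])

fs = FilesystemSketch({0: "mds.a"})
assert fs.mds_asok(["status"], mds_id="mds.a") == fs.rank_asok(["status"])

Because the rank-to-daemon lookup happens on every call, a rank_asok-style
helper keeps working after an MDS failover without the test re-reading the
MDS map, which is why the patch can drop the self.active_mds_id bookkeeping.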