From 6a00ba0e26a562a55b11e7565db44533726d9f01 Mon Sep 17 00:00:00 2001
From: Sage Weil
Date: Tue, 20 Jun 2017 12:07:25 -0400
Subject: [PATCH] qa/tasks/ceph_manager: get osds all in after thrashing

Otherwise we might end up with some PGs remapped, which means they
won't get scrubbed.

Signed-off-by: Sage Weil
---
 qa/tasks/ceph_manager.py | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/qa/tasks/ceph_manager.py b/qa/tasks/ceph_manager.py
index d976795ec45..465da73d235 100644
--- a/qa/tasks/ceph_manager.py
+++ b/qa/tasks/ceph_manager.py
@@ -607,6 +607,17 @@ class Thrasher:
             self.log("inning osd")
             self.in_osd()
 
+    def all_up_in(self):
+        """
+        Make sure all osds are up and fully in.
+        """
+        self.all_up()
+        for osd in self.live_osds:
+            self.ceph_manager.raw_cluster_cmd('osd', 'reweight',
+                                              str(osd), str(1))
+            self.ceph_manager.raw_cluster_cmd('osd', 'primary-affinity',
+                                              str(osd), str(1))
+
     def do_join(self):
         """
         Break out of this Ceph loop
@@ -978,7 +989,7 @@ class Thrasher:
         for service, opt, saved_value in self.saved_options:
             self._set_config(service, '*', opt, saved_value)
         self.saved_options = []
-        self.all_up()
+        self.all_up_in()
 
 
 class ObjectStoreTool:
-- 
2.39.5
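
Note (not part of the patch): for readers outside the teuthology test suite, below is a minimal
sketch of what the new all_up_in() helper drives on the cluster. It assumes a
raw_cluster_cmd()-style wrapper that simply shells out to the `ceph` CLI and a hard-coded list of
osd ids; both are assumptions for illustration, and the all_up() step that revives any down osds
is omitted here.

    # Sketch only: a stand-in for CephManager.raw_cluster_cmd() that shells
    # out to the `ceph` CLI. The osd ids used below are hypothetical.
    import subprocess

    def raw_cluster_cmd(*args):
        # Equivalent of running: ceph <args...>
        return subprocess.check_output(('ceph',) + args).decode()

    def all_up_in(live_osds):
        # Mirror the patch: restore full weight and primary-affinity on every
        # live osd, so no PGs are left remapped (remapped PGs won't get scrubbed).
        for osd in live_osds:
            raw_cluster_cmd('osd', 'reweight', str(osd), str(1))
            raw_cluster_cmd('osd', 'primary-affinity', str(osd), str(1))

    all_up_in([0, 1, 2])  # hypothetical osd ids

The two raw_cluster_cmd() calls correspond to `ceph osd reweight <id> 1` and
`ceph osd primary-affinity <id> 1`, which undo any weight changes made while thrashing.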