From: Sage Weil Date: Tue, 20 Jun 2017 16:07:25 +0000 (-0400) Subject: qa/tasks/ceph_manager: get osds all in after thrashing X-Git-Tag: v12.1.0~22^2 X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=refs%2Fpull%2F15784%2Fhead;p=ceph.git qa/tasks/ceph_manager: get osds all in after thrashing Otherwise we might end up with some PGs remapped, which means they won't get scrubbed. Signed-off-by: Sage Weil --- diff --git a/qa/tasks/ceph_manager.py b/qa/tasks/ceph_manager.py index d976795ec453..465da73d2358 100644 --- a/qa/tasks/ceph_manager.py +++ b/qa/tasks/ceph_manager.py @@ -607,6 +607,17 @@ class Thrasher: self.log("inning osd") self.in_osd() + def all_up_in(self): + """ + Make sure all osds are up and fully in. + """ + self.all_up() + for osd in self.live_osds: + self.ceph_manager.raw_cluster_cmd('osd', 'reweight', + str(osd), str(1)) + self.ceph_manager.raw_cluster_cmd('osd', 'primary-affinity', + str(osd), str(1)) + def do_join(self): """ Break out of this Ceph loop @@ -978,7 +989,7 @@ class Thrasher: for service, opt, saved_value in self.saved_options: self._set_config(service, '*', opt, saved_value) self.saved_options = [] - self.all_up() + self.all_up_in() class ObjectStoreTool: