git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
qa/tasks/ceph_manager: fix post-osd-kill pg peered check 32737/head
author: Sage Weil <sage@redhat.com>
Mon, 20 Jan 2020 15:45:00 +0000 (09:45 -0600)
committer: Sage Weil <sage@redhat.com>
Mon, 20 Jan 2020 15:47:36 +0000 (09:47 -0600)
This was asserting that all PGs are active or peered, but that assertion
could fail if the concurrent workload created a new pool.

Switch to a loop that checks several times for the condition to be true.

Fixes: https://tracker.ceph.com/issues/43656
Signed-off-by: Sage Weil <sage@redhat.com>
qa/tasks/ceph_manager.py

index f83b5d58a796b00e28a9d885447fe7278344207a..d7257c80b4f81623612507214f194d3afa8acc92 100644 (file)
@@ -842,9 +842,16 @@ class OSDThrasher(Thrasher):
                     self.log("chose to kill {n} OSDs".format(n=most_killable))
                     for i in range(1, most_killable):
                         self.kill_osd(mark_out=True)
-                    time.sleep(15)
-                    assert self.ceph_manager.all_active_or_peered(), \
-                            'not all PGs are active or peered 15 seconds after marking out OSDs'
+                    time.sleep(10)
+                    # try a few times since there might be a concurrent pool
+                    # creation or deletion
+                    with safe_while(
+                            sleep=5, tries=5,
+                            action='check for active or peered') as proceed:
+                        while proceed():
+                            if self.ceph_manager.all_active_or_peered():
+                                break
+                            self.log('not all PGs are active or peered')
                 else: # chose to revive OSDs, bring up a random fraction of the dead ones
                     self.log("chose to revive osds")
                     for i in range(1, int(rand_val * len(self.dead_osds))):