qa/tasks/ceph_manager.py: Increase timeout 47445/head
author Kamoltat <ksirivad@redhat.com>
Tue, 12 Jul 2022 19:36:26 +0000 (19:36 +0000)
committer Kamoltat <ksirivad@redhat.com>
Wed, 3 Aug 2022 18:47:32 +0000 (18:47 +0000)
In test_pool_min_size():

1. Added buffer time before checking for recovery with
   ceph_manager.wait_for_recovery().

2. Increased the timeout in ceph_manager.wait_for_clean().

3. Increased the sleep time for
   ceph_manager.all_active_or_peered().

Fixes:
https://tracker.ceph.com/issues/49777
https://tracker.ceph.com/issues/54511
https://tracker.ceph.com/issues/51904

Signed-off-by: Kamoltat <ksirivad@redhat.com>
(cherry picked from commit ed73288102f4aa67da3485c70b2409b9eca1873b)

qa/tasks/ceph_manager.py

index b837cddeb696a9cf86ecee14dbc79a559768255e..652d7d9ae3152b30dd3836f815aa1eef06618a8b 100644
@@ -899,15 +899,15 @@ class OSDThrasher(Thrasher):
         """
         self.log("test_pool_min_size")
         self.all_up()
+        time.sleep(60) # buffer time for recovery to start.
         self.ceph_manager.wait_for_recovery(
             timeout=self.config.get('timeout')
             )
-
         minout = int(self.config.get("min_out", 1))
         minlive = int(self.config.get("min_live", 2))
         mindead = int(self.config.get("min_dead", 1))
         self.log("doing min_size thrashing")
-        self.ceph_manager.wait_for_clean(timeout=60)
+        self.ceph_manager.wait_for_clean(timeout=180)
         assert self.ceph_manager.is_clean(), \
             'not clean before minsize thrashing starts'
         while not self.stopping:
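
For context, wait_for_recovery() and wait_for_clean() are polling helpers in
ceph_manager.py: they repeatedly check cluster state and give up once the
timeout elapses, which is why the clean-check budget above was raised from 60
to 180 seconds on top of the new 60-second sleep. A minimal sketch of that
polling pattern (the helper name, poll interval and error text below are
illustrative, not the actual CephManager code):

    import time

    def wait_until(predicate, timeout, poll_interval=3):
        """Poll predicate() until it returns True or `timeout` seconds pass."""
        start = time.time()
        while not predicate():
            if timeout is not None and time.time() - start >= timeout:
                raise AssertionError(
                    'condition not met within %s seconds' % timeout)
            time.sleep(poll_interval)

    # The bumped call above is then roughly:
    # wait_until(manager.is_clean, timeout=180)
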
@@ -981,7 +981,7 @@ class OSDThrasher(Thrasher):
                     # try a few times since there might be a concurrent pool
                     # creation or deletion
                     with safe_while(
-                            sleep=5, tries=5,
+                            sleep=25, tries=5,
                             action='check for active or peered') as proceed:
                         while proceed():
                             if self.ceph_manager.all_active_or_peered():
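
safe_while here comes from teuthology's contextutil module: it yields a
proceed() callable that keeps allowing attempts, sleeping between them, until
the try budget runs out. With tries=5, raising sleep from 5 to 25 stretches
the worst-case wait for PGs to go active or peered from roughly 25 seconds to
roughly two minutes. A rough stand-in for that pattern (a hypothetical helper,
not teuthology's implementation; the exact placement of the sleep may differ
in the real one):

    import time
    from contextlib import contextmanager

    class MaxTriesExceeded(Exception):
        pass

    @contextmanager
    def retry_loop(sleep=5, tries=5, action=None):
        # proceed() permits up to `tries` attempts, sleeping `sleep` seconds
        # between attempts, and raises once the budget is exhausted.
        state = {'attempt': 0}

        def proceed():
            state['attempt'] += 1
            if state['attempt'] > tries:
                raise MaxTriesExceeded(
                    '%r did not succeed after %d tries' % (action, tries))
            if state['attempt'] > 1:
                time.sleep(sleep)
            return True

        yield proceed

    # Usage mirroring the diff (`manager` is a stand-in object):
    #
    # with retry_loop(sleep=25, tries=5,
    #                 action='check for active or peered') as proceed:
    #     while proceed():
    #         if manager.all_active_or_peered():
    #             break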