git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
tasks: fix non-existent sleep function
author: Josh Durgin <jdurgin@redhat.com>
Thu, 2 Jun 2016 22:24:56 +0000 (15:24 -0700)
committer: Josh Durgin <jdurgin@redhat.com>
Fri, 3 Jun 2016 18:15:36 +0000 (11:15 -0700)
CephManager has no sleep function. Use time.sleep() instead.

Ran into this while testing a branch. Apparently it doesn't happen
much since this hasn't changed in years, but the error was copied
into several tasks.

Signed-off-by: Josh Durgin <jdurgin@redhat.com>
tasks/object_source_down.py
tasks/osd_backfill.py
tasks/osd_recovery.py
tasks/peer.py
tasks/recovery_bench.py
tasks/rep_lost_unfound_delete.py
tasks/scrub.py

index 17b94490668849db9bf53a2f3815f6a32b0c7a38..bea3d18c8d17e05b9fc9abff41af516177e2f484 100644 (file)
@@ -3,6 +3,7 @@ Test Object locations going down
 """
 import logging
 import ceph_manager
+import time
 from teuthology import misc as teuthology
 from util.rados import rados
 
@@ -26,7 +27,7 @@ def task(ctx, config):
         )
 
     while len(manager.get_osd_status()['up']) < 3:
-        manager.sleep(10)
+        time.sleep(10)
     manager.wait_for_clean()
 
     # something that is always there
index f3b59e398cb0ac64bcc4bb576e980628814017f1..f0bba7963eed98763ca8e1891a3083661ee8ee74 100644 (file)
@@ -51,7 +51,7 @@ def task(ctx, config):
         )
 
     while len(manager.get_osd_status()['up']) < 3:
-        manager.sleep(10)
+        time.sleep(10)
     manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
     manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
     manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
index a22fdb9423e9c89385f14ecde2fd62e11a3d4cd3..6252a95a6997c2a54e919d176d1bd58b717f1bfb 100644 (file)
@@ -51,7 +51,7 @@ def task(ctx, config):
         )
 
     while len(manager.get_osd_status()['up']) < 3:
-        manager.sleep(10)
+        time.sleep(10)
     manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
     manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
     manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
index f1789cf12d6eaeb872474dba6de5a124172532b3..a189ae06a7792a3e67cd9aa75919cf870acaf6d6 100644 (file)
@@ -3,6 +3,7 @@ Peer test (Single test, not much configurable here)
 """
 import logging
 import json
+import time
 
 import ceph_manager
 from teuthology import misc as teuthology
@@ -28,7 +29,7 @@ def task(ctx, config):
         )
 
     while len(manager.get_osd_status()['up']) < 3:
-        manager.sleep(10)
+        time.sleep(10)
     manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
     manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
     manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
index 1984b97d31effc699846f2e2f44fa2bc7d6a306e..5eb9fd21d46caf4dc9fdcae0281d8d3c3b12b84f 100644 (file)
@@ -58,7 +58,7 @@ def task(ctx, config):
 
     num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
     while len(manager.get_osd_status()['up']) < num_osds:
-        manager.sleep(10)
+        time.sleep(10)
 
     bench_proc = RecoveryBencher(
         manager,
index be3bc74ed6012f17f24e54f98e1202938e1fd14a..b0ba3dc0ed02d1436f6c8216c5f1f43e20103bcb 100644 (file)
@@ -31,7 +31,7 @@ def task(ctx, config):
         )
 
     while len(manager.get_osd_status()['up']) < 3:
-        manager.sleep(10)
+        time.sleep(10)
     manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
     manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
     manager.raw_cluster_cmd('tell', 'osd.2', 'flush_pg_stats')
index 7a25300a677dc8e38f53e7d577a26a2aacb4e80b..9800d1e98a55d1dd032ef33208b6bec4e5fab3cb 100644 (file)
@@ -49,7 +49,7 @@ def task(ctx, config):
 
     num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
     while len(manager.get_osd_status()['up']) < num_osds:
-        manager.sleep(10)
+        time.sleep(10)
 
     scrub_proc = Scrubber(
         manager,