git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
qa: fix broken ceph.restart marking of OSDs down
author: Patrick Donnelly <pdonnell@redhat.com>
Thu, 22 Aug 2019 04:13:37 +0000 (21:13 -0700)
committer: Patrick Donnelly <pdonnell@redhat.com>
Thu, 22 Aug 2019 15:55:52 +0000 (08:55 -0700)
Sage noticed `osd down` was not being performed. Bug was that the role
format had changed so splitting no longer worked correctly.

Fixes: https://tracker.ceph.com/issues/40773
Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
qa/tasks/ceph.py

index 65383282f89114f2c85a933a7f3501a968501ba4..0feee57cc3eccd527d0ac979f7c3ca4b1386b709 100644 (file)
@@ -1634,14 +1634,10 @@ def restart(ctx, config):
             ctx.daemons.get_daemon(type_, id_, cluster).restart()
             clusters.add(cluster)
     
-    for cluster in clusters:
-        manager = ctx.managers[cluster]
-        for dmon in daemons:
-            if '.' in dmon:
-                dm_parts = dmon.split('.')
-                if dm_parts[1].isdigit():
-                    if dm_parts[0] == 'osd':
-                        manager.mark_down_osd(int(dm_parts[1]))
+    for role in daemons:
+        cluster, type_, id_ = teuthology.split_role(role)
+        if type_ == 'osd':
+            ctx.managers[cluster].mark_down_osd(id_)
 
     if config.get('wait-for-healthy', True):
         for cluster in clusters: