git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
tasks/ceph.restart osd fix
authorWarren Usui <wusui@redhat.com>
Wed, 25 May 2016 23:59:05 +0000 (16:59 -0700)
committerWarren Usui <wusui@redhat.com>
Wed, 25 May 2016 23:59:05 +0000 (16:59 -0700)
ceph.restart should mark restarted osds down, to avoid a
race condition with ceph_manager.wait_for_clean

Fixes: http://tracker.ceph.com/issues/15778
Signed-off-by: Warren Usui <wusui@redhat.com>
tasks/ceph.py

index 114f25b33cb401455319b4a600e0fd91d1aa8707..43735bea620c4557d843a55afd1326a35d88fb57 100644 (file)
@@ -1214,6 +1214,13 @@ def restart(ctx, config):
     if config.get('wait-for-osds-up', False):
         for cluster in clusters:
             wait_for_osds_up(ctx=ctx, config=dict(cluster=cluster))
+    manager = ctx.managers['ceph']
+    for dmon in daemons:
+        if '.' in dmon:
+            dm_parts = dmon.split('.')
+            if dm_parts[1].isdigit():
+                if dm_parts[0] == 'osd':
+                    manager.mark_down_osd(int(dm_parts[1]))
     yield