git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
tasks/ceph.restart osd fix
author     Warren Usui <wusui@redhat.com>
           Wed, 25 May 2016 23:59:05 +0000 (16:59 -0700)
committer  Nathan Cutler <ncutler@suse.com>
           Wed, 21 Feb 2018 13:02:29 +0000 (14:02 +0100)
ceph.restart should mark restarted osds down in order to avoid a
race condition with ceph_manager.wait_for_clean

Fixes: http://tracker.ceph.com/issues/15778
Signed-off-by: Warren Usui <wusui@redhat.com>
(manual cherry pick of 1b7552c9cb331978cb0bfd4d7dc4dcde4186c176)

Conflicts:
    qa/tasks/ceph.py (original commit was in ceph/ceph-qa-suite.git)
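The race described above presumably arises because ceph_manager.wait_for_clean can
sample cluster state before the monitors have registered that the restarted OSDs
went down, so it may return against stale pre-restart status. The commit closes that
window by marking each restarted OSD down explicitly. A minimal sketch of that
ordering, assuming a manager object exposing the mark_down_osd() and wait_for_clean()
calls referenced above; restart_fn and the daemon name list are hypothetical
placeholders, not teuthology API:

    def restart_and_settle(manager, daemons, restart_fn):
        """Restart daemons, then mark restarted OSDs down before waiting for clean."""
        for name in daemons:
            restart_fn(name)  # restart the daemon (placeholder call)
        for name in daemons:
            kind, _, num = name.partition('.')
            if kind == 'osd' and num.isdigit():
                # Ensure the cluster registers the restart before health is polled.
                manager.mark_down_osd(int(num))
        # wait_for_clean now observes post-restart state rather than a stale map.
        manager.wait_for_clean()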

qa/tasks/ceph.py

index ec86ac81319517ab000ada4f93f07b1b86b4e7f7..efed134bc7db649ec050408b0bdc943b1ff0e9b7 100644 (file)
@@ -1248,6 +1248,13 @@ def restart(ctx, config):
     if config.get('wait-for-osds-up', False):
         for cluster in clusters:
             wait_for_osds_up(ctx=ctx, config=dict(cluster=cluster))
+    manager = ctx.managers['ceph']
+    for dmon in daemons:
+        if '.' in dmon:
+            dm_parts = dmon.split('.')
+            if dm_parts[1].isdigit():
+                if dm_parts[0] == 'osd':
+                    manager.mark_down_osd(int(dm_parts[1]))
     yield
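The added loop keys off daemon names of the form 'osd.<id>': only entries whose first
component is 'osd' and whose second component is numeric are marked down, so names
like 'mon.a' or 'mds.b' fall through untouched. A small standalone sketch of that
filtering, with a hypothetical parse_osd_id helper that roughly mirrors the check in
the hunk above:

    def parse_osd_id(daemon_name):
        # Return the numeric OSD id for names like 'osd.3', else None.
        parts = daemon_name.split('.')
        if len(parts) == 2 and parts[0] == 'osd' and parts[1].isdigit():
            return int(parts[1])
        return None

    assert parse_osd_id('osd.3') == 3       # would be marked down
    assert parse_osd_id('mon.a') is None    # not an OSD, ignored
    assert parse_osd_id('osd.xyz') is None  # non-numeric id, ignored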