From: Patrick Donnelly Date: Thu, 22 Aug 2019 04:13:37 +0000 (-0700) Subject: qa: fix broken ceph.restart marking of OSDs down X-Git-Tag: v15.1.0~1788^2 X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=73c7d14eab03b60d425822fe04180f4002e8182e;p=ceph.git qa: fix broken ceph.restart marking of OSDs down Sage noticed `osd down` was not being performed. The bug was that the role format had changed, so splitting no longer worked correctly. Fixes: https://tracker.ceph.com/issues/40773 Signed-off-by: Patrick Donnelly --- diff --git a/qa/tasks/ceph.py b/qa/tasks/ceph.py index 65383282f891..0feee57cc3ec 100644 --- a/qa/tasks/ceph.py +++ b/qa/tasks/ceph.py @@ -1634,14 +1634,10 @@ def restart(ctx, config): ctx.daemons.get_daemon(type_, id_, cluster).restart() clusters.add(cluster) - for cluster in clusters: - manager = ctx.managers[cluster] - for dmon in daemons: - if '.' in dmon: - dm_parts = dmon.split('.') - if dm_parts[1].isdigit(): - if dm_parts[0] == 'osd': - manager.mark_down_osd(int(dm_parts[1])) + for role in daemons: + cluster, type_, id_ = teuthology.split_role(role) + if type_ == 'osd': + ctx.managers[cluster].mark_down_osd(id_) if config.get('wait-for-healthy', True): for cluster in clusters: