If we are trying to deploy new or newly-found osds, we can skip the ones
that already have cephadm daemons deployed.
Fixes: https://tracker.ceph.com/issues/53491
Signed-off-by: Sage Weil <sage@newdream.net>
(cherry picked from commit dc3d45bbe8c3bfedee57da619616c0be489cd233)
Conflicts:
src/pybind/mgr/cephadm/services/osd.py
raise orchestrator.OrchestratorError(f'Unable to find {daemon_name} daemon(s)')
- def has_daemon(self, daemon_name: str) -> bool:
+ def has_daemon(self, daemon_name: str, host: Optional[str] = None) -> bool:
try:
- self.get_daemon(daemon_name)
+ self.get_daemon(daemon_name, host)
except orchestrator.OrchestratorError:
return False
return True
if osd_id in before_osd_uuid_map and osd_id not in replace_osd_ids:
# if it exists but is part of the replacement operation, don't skip
continue
+ if self.mgr.cache.has_daemon(f'osd.{osd_id}', host):
+ # cephadm daemon instance already exists
+ logger.debug(f'osd id {osd_id} daemon already exists')
+ continue
if osd_id not in osd_uuid_map:
logger.debug('osd id {} does not exist in cluster'.format(osd_id))
continue