From: Sebastian Wagner
Date: Fri, 26 Feb 2021 10:14:52 +0000 (+0100)
Subject: cephadm: Make path to cephadm binary unique
X-Git-Tag: v17.1.0~2665^2~4
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=8f05520d03c9fb9d57a0d7ed674d4d9dba8d699c;p=ceph.git

cephadm: Make path to cephadm binary unique

Right now, an upgrade might overwrite the existing binary, which would
force us to keep the CLI stable.

Signed-off-by: Sebastian Wagner
---

diff --git a/src/pybind/mgr/cephadm/module.py b/src/pybind/mgr/cephadm/module.py
index abb160173cde..51559e4495eb 100644
--- a/src/pybind/mgr/cephadm/module.py
+++ b/src/pybind/mgr/cephadm/module.py
@@ -301,7 +301,7 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
 
     def __init__(self, *args: Any, **kwargs: Any):
         super(CephadmOrchestrator, self).__init__(*args, **kwargs)
-        self._cluster_fsid = self.get('mon_map')['fsid']
+        self._cluster_fsid: str = self.get('mon_map')['fsid']
         self.last_monmap: Optional[datetime.datetime] = None
 
         # for serve()
@@ -357,6 +357,8 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
             raise RuntimeError("unable to read cephadm at '%s': %s" % (
                 path, str(e)))
 
+        self.cephadm_binary_path = self._get_cephadm_binary_path()
+
         self._worker_pool = multiprocessing.pool.ThreadPool(10)
 
         self._reconfig_ssh()
@@ -443,6 +445,12 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
         assert service_type in ServiceSpec.KNOWN_SERVICE_TYPES
         return self.cephadm_services[service_type]
 
+    def _get_cephadm_binary_path(self) -> str:
+        import hashlib
+        m = hashlib.sha256()
+        m.update(self._cephadm.encode())
+        return f'/var/lib/ceph/{self._cluster_fsid}/cephadm.{m.hexdigest()}'
+
     def _kick_serve_loop(self) -> None:
         self.log.debug('_kick_serve_loop')
         self.event.set()
diff --git a/src/pybind/mgr/cephadm/serve.py b/src/pybind/mgr/cephadm/serve.py
index 7a90a1d25b5b..af3c91a29950 100644
--- a/src/pybind/mgr/cephadm/serve.py
+++ b/src/pybind/mgr/cephadm/serve.py
@@ -794,11 +794,7 @@ class CephadmServe:
         if daemon_spec.daemon_type == 'cephadm-exporter':
             if not reconfig:
                 assert daemon_spec.host
-                deploy_ok = self._deploy_cephadm_binary(daemon_spec.host)
-                if not deploy_ok:
-                    msg = f"Unable to deploy the cephadm binary to {daemon_spec.host}"
-                    self.log.warning(msg)
-                    return msg
+                self._deploy_cephadm_binary(daemon_spec.host)
 
         if daemon_spec.daemon_type == 'haproxy':
             haspec = cast(HA_RGWSpec, self.mgr.spec_store[daemon_spec.service_name].spec)
@@ -1068,16 +1064,19 @@ class CephadmServe:
                 return f"Host {host} failed to login to {url} as {username} with given password"
         return None
 
-    def _deploy_cephadm_binary(self, host: str) -> bool:
+    def _deploy_cephadm_binary(self, host: str) -> None:
         # Use tee (from coreutils) to create a copy of cephadm on the target machine
         self.log.info(f"Deploying cephadm binary to {host}")
         with self._remote_connection(host) as tpl:
             conn, _connr = tpl
             _out, _err, code = remoto.process.check(
                 conn,
                 ['tee', '-', self.mgr.cephadm_binary_path],
-                ['tee', '-', '/var/lib/ceph/{}/cephadm'.format(self.mgr._cluster_fsid)],
+                ['tee', '-', self.mgr.cephadm_binary_path],
                 stdin=self.mgr._cephadm.encode('utf-8'))
-        return code == 0
+        if code:
+            msg = f"Unable to deploy the cephadm binary to {host}: {_err}"
+            self.log.warning(msg)
+            raise OrchestratorError(msg)
 
     @contextmanager
     def _remote_connection(self,
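
For reference, the path derivation added in _get_cephadm_binary_path()
boils down to the following standalone sketch. The helper name
unique_cephadm_path and the sample arguments are illustrative only, not
part of the patch:

    import hashlib

    def unique_cephadm_path(cephadm_source: str, cluster_fsid: str) -> str:
        # Hash the full cephadm script so every build gets its own path;
        # an upgrade then writes a new file instead of overwriting the
        # binary that an already-deployed daemon may still reference.
        digest = hashlib.sha256(cephadm_source.encode()).hexdigest()
        return f'/var/lib/ceph/{cluster_fsid}/cephadm.{digest}'

    # Two different builds of the script hash to two different paths:
    print(unique_cephadm_path('#!/usr/bin/python3 ...', 'sample-fsid'))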
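
The second serve.py hunk also changes _deploy_cephadm_binary() from
returning a bool to raising OrchestratorError, which is why the caller
no longer needs to check a return value and hand back an error string.
Below is a minimal local sketch of that pattern, using subprocess in
place of remoto over SSH; deploy_binary is a hypothetical stand-in and
OrchestratorError here is a local substitute for the real exception
type:

    import subprocess

    class OrchestratorError(Exception):
        """Local stand-in for the orchestrator's error type."""

    def deploy_binary(payload: bytes, dest_path: str) -> None:
        # Pipe the payload into tee, mirroring the patched
        # _deploy_cephadm_binary(); on failure, raise instead of
        # returning False so callers can simply let the error propagate.
        proc = subprocess.run(['tee', dest_path], input=payload,
                              stdout=subprocess.DEVNULL,
                              stderr=subprocess.PIPE)
        if proc.returncode:
            raise OrchestratorError(
                f"Unable to deploy the cephadm binary to {dest_path}: "
                f"{proc.stderr.decode(errors='replace')}")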