git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
cephadm: Make path to cephadm binary unique
author Sebastian Wagner <sebastian.wagner@suse.com>
Fri, 26 Feb 2021 10:14:52 +0000 (11:14 +0100)
committer Sebastian Wagner <sebastian.wagner@suse.com>
Thu, 4 Mar 2021 17:27:04 +0000 (18:27 +0100)
Right now, an upgrade might overwrite the existing
binary, which would force us to keep the CLI stable.

Signed-off-by: Sebastian Wagner <sebastian.wagner@suse.com>
src/pybind/mgr/cephadm/module.py
src/pybind/mgr/cephadm/serve.py

index abb160173cdecf0947e31fcfdafd7da4ce5773b4..51559e4495eb72f79202aa57c7bdd107b0c2f628 100644 (file)
@@ -301,7 +301,7 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
 
     def __init__(self, *args: Any, **kwargs: Any):
         super(CephadmOrchestrator, self).__init__(*args, **kwargs)
-        self._cluster_fsid = self.get('mon_map')['fsid']
+        self._cluster_fsid: str = self.get('mon_map')['fsid']
         self.last_monmap: Optional[datetime.datetime] = None
 
         # for serve()
@@ -357,6 +357,8 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
             raise RuntimeError("unable to read cephadm at '%s': %s" % (
                 path, str(e)))
 
+        self.cephadm_binary_path = self._get_cephadm_binary_path()
+
         self._worker_pool = multiprocessing.pool.ThreadPool(10)
 
         self._reconfig_ssh()
@@ -443,6 +445,12 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
         assert service_type in ServiceSpec.KNOWN_SERVICE_TYPES
         return self.cephadm_services[service_type]
 
+    def _get_cephadm_binary_path(self) -> str:
+        import hashlib
+        m = hashlib.sha256()
+        m.update(self._cephadm.encode())
+        return f'/var/lib/ceph/{self._cluster_fsid}/cephadm.{m.hexdigest()}'
+
     def _kick_serve_loop(self) -> None:
         self.log.debug('_kick_serve_loop')
         self.event.set()
index 7a90a1d25b5b69eba6c5a31b92fa1a1aef4e1476..af3c91a29950874795adc335c788d8e525abb77c 100644 (file)
@@ -794,11 +794,7 @@ class CephadmServe:
                 if daemon_spec.daemon_type == 'cephadm-exporter':
                     if not reconfig:
                         assert daemon_spec.host
-                        deploy_ok = self._deploy_cephadm_binary(daemon_spec.host)
-                        if not deploy_ok:
-                            msg = f"Unable to deploy the cephadm binary to {daemon_spec.host}"
-                            self.log.warning(msg)
-                            return msg
+                        self._deploy_cephadm_binary(daemon_spec.host)
 
                 if daemon_spec.daemon_type == 'haproxy':
                     haspec = cast(HA_RGWSpec, self.mgr.spec_store[daemon_spec.service_name].spec)
@@ -1068,16 +1064,19 @@ class CephadmServe:
             return f"Host {host} failed to login to {url} as {username} with given password"
         return None
 
-    def _deploy_cephadm_binary(self, host: str) -> bool:
+    def _deploy_cephadm_binary(self, host: str) -> None:
         # Use tee (from coreutils) to create a copy of cephadm on the target machine
         self.log.info(f"Deploying cephadm binary to {host}")
         with self._remote_connection(host) as tpl:
             conn, _connr = tpl
             _out, _err, code = remoto.process.check(
                 conn,
-                ['tee', '-', '/var/lib/ceph/{}/cephadm'.format(self.mgr._cluster_fsid)],
+                ['tee', '-', self.mgr.cephadm_binary_path],
                 stdin=self.mgr._cephadm.encode('utf-8'))
-        return code == 0
+        if code:
+            msg = f"Unable to deploy the cephadm binary to {host}: {_err}"
+            self.log.warning(msg)
+            raise OrchestratorError(msg)
 
     @contextmanager
     def _remote_connection(self,