]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
cephadm: Make path to cephadm binary unique
author: Sebastian Wagner <sebastian.wagner@suse.com>
Fri, 26 Feb 2021 10:14:52 +0000 (11:14 +0100)
committer: Sage Weil <sage@newdream.net>
Tue, 16 Mar 2021 12:56:18 +0000 (07:56 -0500)
Right now, an upgrade might overwrite the existing cephadm
binary, which would force us to keep its CLI stable.

Signed-off-by: Sebastian Wagner <sebastian.wagner@suse.com>
(cherry picked from commit 8f05520d03c9fb9d57a0d7ed674d4d9dba8d699c)

src/pybind/mgr/cephadm/module.py
src/pybind/mgr/cephadm/serve.py

index 119fd12943d7a56ebaf2d9e544689a47bafd5d4b..fa3c858e12d8bff90c369c1590b50af5fc63bd20 100644 (file)
@@ -317,7 +317,7 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
 
     def __init__(self, *args: Any, **kwargs: Any):
         super(CephadmOrchestrator, self).__init__(*args, **kwargs)
-        self._cluster_fsid = self.get('mon_map')['fsid']
+        self._cluster_fsid: str = self.get('mon_map')['fsid']
         self.last_monmap: Optional[datetime.datetime] = None
 
         # for serve()
@@ -373,6 +373,8 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
             raise RuntimeError("unable to read cephadm at '%s': %s" % (
                 path, str(e)))
 
+        self.cephadm_binary_path = self._get_cephadm_binary_path()
+
         self._worker_pool = multiprocessing.pool.ThreadPool(10)
 
         self._reconfig_ssh()
@@ -461,6 +463,12 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
         assert service_type in ServiceSpec.KNOWN_SERVICE_TYPES
         return self.cephadm_services[service_type]
 
+    def _get_cephadm_binary_path(self) -> str:
+        import hashlib
+        m = hashlib.sha256()
+        m.update(self._cephadm.encode())
+        return f'/var/lib/ceph/{self._cluster_fsid}/cephadm.{m.hexdigest()}'
+
     def _kick_serve_loop(self) -> None:
         self.log.debug('_kick_serve_loop')
         self.event.set()
index 4a9a4eac63f0ca9b51aa5cbc4edeb572c151cf3a..05f905a646c7a56014b1d3aa0b1072e6b74ef575 100644 (file)
@@ -810,11 +810,7 @@ class CephadmServe:
                 if daemon_spec.daemon_type == 'cephadm-exporter':
                     if not reconfig:
                         assert daemon_spec.host
-                        deploy_ok = self._deploy_cephadm_binary(daemon_spec.host)
-                        if not deploy_ok:
-                            msg = f"Unable to deploy the cephadm binary to {daemon_spec.host}"
-                            self.log.warning(msg)
-                            return msg
+                        self._deploy_cephadm_binary(daemon_spec.host)
 
                 if daemon_spec.daemon_type == 'haproxy':
                     haspec = cast(HA_RGWSpec, self.mgr.spec_store[daemon_spec.service_name].spec)
@@ -1090,16 +1086,19 @@ class CephadmServe:
             return f"Host {host} failed to login to {url} as {username} with given password"
         return None
 
-    def _deploy_cephadm_binary(self, host: str) -> bool:
+    def _deploy_cephadm_binary(self, host: str) -> None:
         # Use tee (from coreutils) to create a copy of cephadm on the target machine
         self.log.info(f"Deploying cephadm binary to {host}")
         with self._remote_connection(host) as tpl:
             conn, _connr = tpl
             _out, _err, code = remoto.process.check(
                 conn,
-                ['tee', '-', '/var/lib/ceph/{}/cephadm'.format(self.mgr._cluster_fsid)],
+                ['tee', '-', self.mgr.cephadm_binary_path],
                 stdin=self.mgr._cephadm.encode('utf-8'))
-        return code == 0
+        if code:
+            msg = f"Unable to deploy the cephadm binary to {host}: {_err}"
+            self.log.warning(msg)
+            raise OrchestratorError(msg)
 
     @contextmanager
     def _remote_connection(self,