mgr/cephadm: fix placement of new daemons (mds,rgw,rbd-m)
author    Sebastian Wagner <sebastian.wagner@suse.com>
          Tue, 11 Feb 2020 11:59:47 +0000 (12:59 +0100)
committer Sebastian Wagner <sebastian.wagner@suse.com>
          Tue, 11 Feb 2020 16:55:08 +0000 (17:55 +0100)
* Don't modify spec.count: mutating the caller's spec is surprising.
* Place new daemons on hosts that don't already run a daemon of the service.
* Reduce code duplication.

Fixes: https://tracker.ceph.com/issues/44019
Signed-off-by: Sebastian Wagner <sebastian.wagner@suse.com>
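
The core of the change is the new shared helper _add_new_daemon(), shown in the diff below: it only considers placement hosts that do not already carry a daemon of the service, and stops adding once spec.count is reached. A minimal standalone sketch of that selection logic, using hypothetical stand-in types (HostPlacement, DaemonStub, and pick_placements are illustrative names, not the real orchestrator API):

from typing import List, NamedTuple, Tuple

# Hypothetical stand-ins for orchestrator.HostPlacementSpec and
# orchestrator.ServiceDescription; the real classes carry more fields.
class HostPlacement(NamedTuple):
    hostname: str
    network: str
    name: str

class DaemonStub(NamedTuple):
    nodename: str
    service_instance: str

def pick_placements(placements, existing, count):
    # type: (List[HostPlacement], List[DaemonStub], int) -> List[Tuple[str, str]]
    """Choose (daemon_id, host) pairs, skipping hosts that already run one."""
    hosts_with_daemons = {d.nodename for d in existing}
    candidates = [p for p in placements if p.hostname not in hosts_with_daemons]
    placed = []  # type: List[Tuple[str, str]]
    for host, _, name in candidates:
        # stop once existing daemons plus new placements reach the count
        if len(existing) + len(placed) >= count:
            break
        # crude stand-in for CephadmOrchestrator.get_unique_name()
        daemon_id = name or '%s.%d' % (host, len(placed))
        placed.append((daemon_id, host))
    return placed

# One rgw already on host1; count=2 places exactly one new daemon, on host2.
existing = [DaemonStub('host1', 'realm.zone.host1.abcdef')]
hosts = [HostPlacement('host1', '', ''), HostPlacement('host2', '', '')]
print(pick_placements(hosts, existing, count=2))  # [('host2.0', 'host2')]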
src/pybind/mgr/cephadm/module.py
src/pybind/mgr/cephadm/tests/test_cephadm.py

index 99f2010b98a6941fd7056f291e8e1b59c2262ff2..9c32b2fe4ff4bf863b5c065cf1f378735633865c 100644 (file)
@@ -1747,12 +1747,38 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
                     )
                 return self._remove_daemon(args)
             elif len(daemons) < spec.count:
-                # add some
-                spec.count -= len(daemons)
                 return add_func(spec)
             return []
         return self._get_services(daemon_type, service_name=spec.name).then(___update_service)
 
+    def _add_new_daemon(self, svc_type: str, daemons: List[orchestrator.ServiceDescription],
+                        spec: orchestrator.ServiceSpec,
+                        create_func: Callable):
+        args = []
+        num_added = 0
+        assert spec.count is not None
+        prefix = f'{svc_type}.{spec.name}'
+        our_daemons = [d for d in daemons if d.name().startswith(prefix)]
+        hosts_with_daemons = {d.nodename for d in daemons}
+        hosts_without_daemons = {p for p in spec.placement.hosts if p.hostname not in hosts_with_daemons}
+
+        for host, _, name in hosts_without_daemons:
+            if (len(our_daemons) + num_added) >= spec.count:
+                break
+            svc_id = self.get_unique_name(host, daemons, spec.name, name)
+            self.log.debug('placing %s.%s on host %s' % (svc_type, svc_id, host))
+            args.append((svc_id, host))
+            # add to daemon list so next name(s) will also be unique
+            sd = orchestrator.ServiceDescription(
+                nodename=host,
+                service_type=svc_type,
+                service_instance=svc_id,
+            )
+            daemons.append(sd)
+            num_added += 1
+        return create_func(args)
+
+
     @async_map_completion
     def _create_mon(self, host, network, name):
         """
@@ -1943,26 +1969,12 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
             'name': 'mds_join_fs',
             'value': spec.name,
         })
-        return self._get_services('mds').then(lambda ds: self._add_mds(ds, spec))
 
-    def _add_mds(self, daemons, spec):
-        # type: (List[orchestrator.ServiceDescription], orchestrator.ServiceSpec) -> AsyncCompletion
-        args = []
-        num_added = 0
-        for host, _, name in spec.placement.hosts:
-            if num_added >= spec.count:
-                break
-            mds_id = self.get_unique_name(host, daemons, spec.name, name)
-            self.log.debug('placing mds.%s on host %s' % (mds_id, host))
-            args.append((mds_id, host))
-            # add to daemon list so next name(s) will also be unique
-            sd = orchestrator.ServiceDescription()
-            sd.service_instance = mds_id
-            sd.service_type = 'mds'
-            sd.nodename = host
-            daemons.append(sd)
-            num_added += 1
-        return self._create_mds(args)
+        def _add_mds(daemons):
+            # type: (List[orchestrator.ServiceDescription]) -> AsyncCompletion
+            return self._add_new_daemon('mds', daemons, spec, self._create_mds)
+
+        return self._get_services('mds').then(_add_mds)
 
     def update_mds(self, spec):
         # type: (orchestrator.ServiceSpec) -> AsyncCompletion
@@ -2015,22 +2027,7 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
         })
 
         def _add_rgw(daemons):
-            args = []
-            num_added = 0
-            for host, _, name in spec.placement.hosts:
-                if num_added >= spec.count:
-                    break
-                rgw_id = self.get_unique_name(host, daemons, spec.name, name)
-                self.log.debug('placing rgw.%s on host %s' % (rgw_id, host))
-                args.append((rgw_id, host))
-                # add to daemon list so next name(s) will also be unique
-                sd = orchestrator.ServiceDescription()
-                sd.service_instance = rgw_id
-                sd.service_type = 'rgw'
-                sd.nodename = host
-                daemons.append(sd)
-                num_added += 1
-            return self._create_rgw(args)
+            return self._add_new_daemon('rgw', daemons, spec, self._create_rgw)
 
         return self._get_services('rgw').then(_add_rgw)
 
@@ -2069,24 +2066,7 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
         self.log.debug('nodes %s' % spec.placement.hosts)
 
         def _add_rbd_mirror(daemons):
-            args = []
-            num_added = 0
-            for host, _, name in spec.placement.hosts:
-                if num_added >= spec.count:
-                    break
-                daemon_id = self.get_unique_name(host, daemons, None, name)
-                self.log.debug('placing rbd-mirror.%s on host %s' % (daemon_id,
-                                                                     host))
-                args.append((daemon_id, host))
-
-                # add to daemon list so next name(s) will also be unique
-                sd = orchestrator.ServiceDescription()
-                sd.service_instance = daemon_id
-                sd.service_type = 'rbd-mirror'
-                sd.nodename = host
-                daemons.append(sd)
-                num_added += 1
-            return self._create_rbd_mirror(args)
+            return self._add_new_daemon('rbd-mirror', daemons, spec, self._create_rbd_mirror)
 
         return self._get_services('rbd-mirror').then(_add_rbd_mirror)
 
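All three add paths now share the same shape: asynchronously fetch the current daemons of the type, then chain the shared helper via .then(). A rough sketch of that callback-chaining pattern, with a toy Completion class standing in for the real orchestrator completion API (which defers work rather than running it eagerly as this sketch does):

from typing import Any, Callable

class Completion:
    """Toy stand-in for the orchestrator's async completion objects."""
    def __init__(self, result):
        # type: (Any) -> None
        self.result = result

    def then(self, fn):
        # type: (Callable[[Any], Any]) -> Completion
        # The real API defers fn until the result is ready; this sketch
        # applies it immediately just to show the chaining shape.
        out = fn(self.result)
        return out if isinstance(out, Completion) else Completion(out)

def get_services(daemon_type):
    # type: (str) -> Completion
    return Completion([])  # pretend no daemons of this type exist yet

def add_rgw(spec_name):
    # type: (str) -> Completion
    def _add_rgw(daemons):
        # the module would call self._add_new_daemon('rgw', daemons, spec, ...)
        return Completion(["Deployed rgw.%s.* on some host" % spec_name])
    return get_services('rgw').then(_add_rgw)

print(add_rgw('realm.zone').result)
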
index 4dac6de8b67eddd0e54494924bde1a9968607582..d7399667c88d4f13622f7b5e627d88ace3bfe950 100644 (file)
@@ -174,8 +174,28 @@ class TestCephadm(object):
             ps = PlacementSpec(hosts=['test'], count=1)
             c = cephadm_module.add_rgw(RGWSpec('realm', 'zone', placement=ps))
             [out] = wait(cephadm_module, c)
-            assert "Deployed rgw.realm.zone." in out
-            assert " on host 'test'" in out
+            match_glob(out, "Deployed rgw.realm.zone.* on host 'test'")
+
+
+    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
+    @mock.patch("cephadm.module.CephadmOrchestrator.send_command")
+    @mock.patch("cephadm.module.CephadmOrchestrator.mon_command", mon_command)
+    @mock.patch("cephadm.module.CephadmOrchestrator._get_connection")
+    def test_rgw_update(self, _send_command, _get_connection, cephadm_module):
+
+        with self._with_host(cephadm_module, 'host1'):
+            with self._with_host(cephadm_module, 'host2'):
+                ps = PlacementSpec(hosts=['host1'], count=1)
+                c = cephadm_module.add_rgw(RGWSpec('realm', 'zone', placement=ps))
+                [out] = wait(cephadm_module, c)
+                match_glob(out, "Deployed rgw.realm.zone.host1.* on host 'host1'")
+
+
+                ps = PlacementSpec(hosts=['host1', 'host2'], count=2)
+                c = cephadm_module.update_rgw(RGWSpec('realm', 'zone', placement=ps))
+                [out] = wait(cephadm_module, c)
+                match_glob(out, "Deployed rgw.realm.zone.host2.* on host 'host2'")
+
 
     @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
         json.dumps([
@@ -217,4 +237,3 @@ class TestCephadm(object):
         with self._with_host(cephadm_module, 'test'):
             c = cephadm_module.blink_device_light('ident', True, [('test', '', '')])
             assert wait(cephadm_module, c) == ['Set ident light for test: on']
-
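
The tests replace the two substring asserts with a single match_glob() check, so the random suffix that get_unique_name() appends to the daemon id stays out of the assertion. A plausible fnmatch-based sketch of such a helper (an assumption about the test utility, not its actual implementation):

from fnmatch import fnmatch

def match_glob(actual, pattern):
    # type: (str, str) -> None
    """Assert that `actual` matches the shell-style glob `pattern`."""
    assert fnmatch(actual, pattern), \
        '%r does not match glob %r' % (actual, pattern)

# '*' swallows the random daemon-id suffix:
match_glob("Deployed rgw.realm.zone.host1.ozfmrl on host 'host1'",
           "Deployed rgw.realm.zone.host1.* on host 'host1'")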