git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
mgr/cephadm/schedule: handle multiple ports per daemon
author: Sage Weil <sage@newdream.net>
Tue, 23 Mar 2021 20:37:20 +0000 (16:37 -0400)
committer: Sage Weil <sage@newdream.net>
Fri, 23 Apr 2021 12:24:13 +0000 (07:24 -0500)
Signed-off-by: Sage Weil <sage@newdream.net>
(cherry picked from commit 9256f1c374ab1e1e6d45f3a912048ab486357606)

src/pybind/mgr/cephadm/module.py
src/pybind/mgr/cephadm/schedule.py
src/pybind/mgr/cephadm/serve.py
src/pybind/mgr/cephadm/tests/test_scheduling.py
src/python-common/ceph/deployment/service_spec.py

index 7c5008ec3a8a0e745cbe9940ef80a6eb0d8d7675..ecd7118f406afa1faab667ab03559f911dcbfa21 100644 (file)
@@ -2017,11 +2017,10 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
                 self.cephadm_services[service_type].config(spec, daemon_id)
                 did_config = True
 
-            port = spec.get_port_start()
             daemon_spec = self.cephadm_services[service_type].make_daemon_spec(
                 host, daemon_id, network, spec,
                 # NOTE: this does not consider port conflicts!
-                ports=[port] if port else None)
+                ports=spec.get_port_start())
             self.log.debug('Placing %s.%s on host %s' % (
                 daemon_type, daemon_id, host))
             args.append(daemon_spec)
index c7774167068fa0d715e1119c613ed65c3aa2d495..ae14dc7f3e153536c3b0b1d9c46addb6a3f28f5e 100644 (file)
@@ -16,7 +16,7 @@ class DaemonPlacement(NamedTuple):
     network: str = ''   # for mons only
     name: str = ''
     ip: Optional[str] = None
-    port: Optional[int] = None
+    ports: List[int] = []
 
     def __str__(self) -> str:
         res = self.hostname
@@ -25,19 +25,19 @@ class DaemonPlacement(NamedTuple):
             other.append(f'network={self.network}')
         if self.name:
             other.append(f'name={self.name}')
-        if self.port:
-            other.append(f'{self.ip or "*"}:{self.port}')
+        if self.ports:
+            other.append(f'{self.ip or "*"}:{self.ports[0] if len(self.ports) == 1 else ",".join(map(str, self.ports))}')
         if other:
             res += '(' + ' '.join(other) + ')'
         return res
 
-    def renumber_port(self, n: int) -> 'DaemonPlacement':
+    def renumber_ports(self, n: int) -> 'DaemonPlacement':
         return DaemonPlacement(
             self.hostname,
             self.network,
             self.name,
             self.ip,
-            (self.port + n) if self.port is not None else None
+            [p + n for p in self.ports],
         )
 
     def matches_daemon(self, dd: DaemonDescription) -> bool:
@@ -46,8 +46,8 @@ class DaemonPlacement(NamedTuple):
         # fixme: how to match against network?
         if self.name and self.name != dd.daemon_id:
             return False
-        if self.port:
-            if [self.port] != dd.ports:
+        if self.ports:
+            if self.ports != dd.ports:
                 return False
             if self.ip != dd.ip:
                 return False
@@ -72,7 +72,7 @@ class HostAssignment(object):
         self.daemons = daemons
         self.networks = networks
         self.allow_colo = allow_colo
-        self.port_start = spec.get_port_start()
+        self.ports_start = spec.get_port_start()
 
     def hosts_by_label(self, label: str) -> List[orchestrator.HostSpec]:
         return [h for h in self.hosts if label in h.labels]
@@ -137,7 +137,7 @@ class HostAssignment(object):
         def expand_candidates(ls: List[DaemonPlacement], num: int) -> List[DaemonPlacement]:
             r = []
             for offset in range(num):
-                r.extend([dp.renumber_port(offset) for dp in ls])
+                r.extend([dp.renumber_ports(offset) for dp in ls])
             return r
 
         # consider enough slots to fulfill target count-per-host or count
@@ -213,17 +213,17 @@ class HostAssignment(object):
         if self.spec.placement.hosts:
             ls = [
                 DaemonPlacement(hostname=h.hostname, network=h.network, name=h.name,
-                                port=self.port_start)
+                                ports=self.ports_start)
                 for h in self.spec.placement.hosts
             ]
         elif self.spec.placement.label:
             ls = [
-                DaemonPlacement(hostname=x.hostname, port=self.port_start)
+                DaemonPlacement(hostname=x.hostname, ports=self.ports_start)
                 for x in self.hosts_by_label(self.spec.placement.label)
             ]
         elif self.spec.placement.host_pattern:
             ls = [
-                DaemonPlacement(hostname=x, port=self.port_start)
+                DaemonPlacement(hostname=x, ports=self.ports_start)
                 for x in self.spec.placement.filter_matching_hostspecs(self.hosts)
             ]
         elif (
@@ -231,7 +231,7 @@ class HostAssignment(object):
                 or self.spec.placement.count_per_host is not None
         ):
             ls = [
-                DaemonPlacement(hostname=x.hostname, port=self.port_start)
+                DaemonPlacement(hostname=x.hostname, ports=self.ports_start)
                 for x in self.hosts
             ]
         else:
@@ -246,7 +246,7 @@ class HostAssignment(object):
                 ip = self.find_ip_on_host(p.hostname, self.spec.networks)
                 if ip:
                     ls.append(DaemonPlacement(hostname=p.hostname, network=p.network,
-                                              name=p.name, port=p.port, ip=ip))
+                                              name=p.name, ports=p.ports, ip=ip))
                 else:
                     logger.debug(
                         f'Skipping {p.hostname} with no IP in network(s) {self.spec.networks}'
@@ -254,10 +254,14 @@ class HostAssignment(object):
 
         if self.filter_new_host:
             old = ls.copy()
-            ls = [h for h in ls if self.filter_new_host(h.hostname)]
-            for h in list(set(old) - set(ls)):
-                logger.info(
-                    f"Filtered out host {h.hostname}: could not verify host allowed virtual ips")
+            ls = []
+            for h in old:
+                if self.filter_new_host(h.hostname):
+                    ls.append(h)
+                else:
+                    logger.info(
+                        f"Filtered out host {h.hostname}: could not verify host allowed virtual ips")
+            if len(old) > len(ls):
                 logger.debug('Filtered %s down to %s' % (old, ls))
 
         # shuffle for pseudo random selection
index 1344fdd4e2ba9dd58a39baae292dede8b54e5afa..aaa753f25c935398c02a7ede89efc9da2eee7048 100644 (file)
@@ -593,9 +593,11 @@ class CephadmServe:
         for slot in slots_to_add:
             for daemon_type in service_to_daemon_types(service_type):
                 # first remove daemon on conflicting port?
-                if slot.port:
+                if slot.ports:
                     for d in daemons_to_remove:
-                        if d.hostname != slot.hostname or d.ports != [slot.port]:
+                        if d.hostname != slot.hostname:
+                            continue
+                        if not (set(d.ports or []) & set(slot.ports)):
                             continue
                         if d.ip and slot.ip and d.ip != slot.ip:
                             continue
@@ -622,7 +624,7 @@ class CephadmServe:
 
                 daemon_spec = svc.make_daemon_spec(
                     slot.hostname, daemon_id, slot.network, spec, daemon_type=daemon_type,
-                    ports=[slot.port] if slot.port else None,
+                    ports=slot.ports,
                     ip=slot.ip,
                 )
                 self.log.debug('Placing %s.%s on host %s' % (
index 2a2c6a1d79ff71c6166af57db5ab4855944b3e6d..49cc2ff7102fd8f1afc4ce6d44f607bffb6f02b7 100644 (file)
@@ -187,6 +187,28 @@ test_explicit_scheduler_results = [
 ]
 
 
+@pytest.mark.parametrize("dp,n,result",
+    [   # noqa: E128
+        (
+            DaemonPlacement(daemon_type='mgr', hostname='host1', ports=[80]),
+            0,
+            DaemonPlacement(daemon_type='mgr', hostname='host1', ports=[80]),
+        ),
+        (
+            DaemonPlacement(daemon_type='mgr', hostname='host1', ports=[80]),
+            2,
+            DaemonPlacement(daemon_type='mgr', hostname='host1', ports=[82]),
+        ),
+        (
+            DaemonPlacement(daemon_type='mgr', hostname='host1', ports=[80, 90]),
+            2,
+            DaemonPlacement(daemon_type='mgr', hostname='host1', ports=[82, 92]),
+        ),
+    ])
+def test_daemon_placement_renumber(dp, n, result):
+    assert dp.renumber_ports(n) == result
+
+
 @pytest.mark.parametrize(
     'dp,dd,result',
     [
index 08d12523dbcf8c34437d663b6fbe8a98d6d0593a..c6e607f171eed01c6e1007958a3433b17437a6d8 100644 (file)
@@ -572,10 +572,10 @@ class ServiceSpec(object):
             n += '.' + self.service_id
         return n
 
-    def get_port_start(self) -> Optional[int]:
+    def get_port_start(self) -> List[int]:
         # If defined, we will allocate and number ports starting at this
         # point.
-        return None
+        return []
 
     def to_json(self):
         # type: () -> OrderedDict[str, Any]
@@ -745,8 +745,8 @@ class RGWSpec(ServiceSpec):
         self.rgw_frontend_type = rgw_frontend_type
         self.ssl = ssl
 
-    def get_port_start(self) -> Optional[int]:
-        return self.get_port()
+    def get_port_start(self) -> List[int]:
+        return [self.get_port()]
 
     def get_port(self) -> int:
         if self.rgw_frontend_port: