From 0c85654fb690d5d64a1808a40e85fe49cfbaf153 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 23 Mar 2021 16:37:20 -0400 Subject: [PATCH] mgr/cephadm/schedule: handle multiple ports per daemon Signed-off-by: Sage Weil (cherry picked from commit 9256f1c374ab1e1e6d45f3a912048ab486357606) --- src/pybind/mgr/cephadm/module.py | 3 +- src/pybind/mgr/cephadm/schedule.py | 40 ++++++++++--------- src/pybind/mgr/cephadm/serve.py | 8 ++-- .../mgr/cephadm/tests/test_scheduling.py | 22 ++++++++++ .../ceph/deployment/service_spec.py | 8 ++-- 5 files changed, 54 insertions(+), 27 deletions(-) diff --git a/src/pybind/mgr/cephadm/module.py b/src/pybind/mgr/cephadm/module.py index 7c5008ec3a8a0..ecd7118f406af 100644 --- a/src/pybind/mgr/cephadm/module.py +++ b/src/pybind/mgr/cephadm/module.py @@ -2017,11 +2017,10 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule, self.cephadm_services[service_type].config(spec, daemon_id) did_config = True - port = spec.get_port_start() daemon_spec = self.cephadm_services[service_type].make_daemon_spec( host, daemon_id, network, spec, # NOTE: this does not consider port conflicts! - ports=[port] if port else None) + ports=spec.get_port_start()) self.log.debug('Placing %s.%s on host %s' % ( daemon_type, daemon_id, host)) args.append(daemon_spec) diff --git a/src/pybind/mgr/cephadm/schedule.py b/src/pybind/mgr/cephadm/schedule.py index c7774167068fa..ae14dc7f3e153 100644 --- a/src/pybind/mgr/cephadm/schedule.py +++ b/src/pybind/mgr/cephadm/schedule.py @@ -16,7 +16,7 @@ class DaemonPlacement(NamedTuple): network: str = '' # for mons only name: str = '' ip: Optional[str] = None - port: Optional[int] = None + ports: List[int] = [] def __str__(self) -> str: res = self.hostname @@ -25,19 +25,19 @@ class DaemonPlacement(NamedTuple): other.append(f'network={self.network}') if self.name: other.append(f'name={self.name}') - if self.port: - other.append(f'{self.ip or "*"}:{self.port}') + if self.ports: + other.append(f'{self.ip or "*"}:{self.ports[0] if len(self.ports) == 1 else ",".join(map(str, self.ports))}') if other: res += '(' + ' '.join(other) + ')' return res - def renumber_port(self, n: int) -> 'DaemonPlacement': + def renumber_ports(self, n: int) -> 'DaemonPlacement': return DaemonPlacement( self.hostname, self.network, self.name, self.ip, - (self.port + n) if self.port is not None else None + [p + n for p in self.ports], ) def matches_daemon(self, dd: DaemonDescription) -> bool: @@ -46,8 +46,8 @@ class DaemonPlacement(NamedTuple): # fixme: how to match against network? 
if self.name and self.name != dd.daemon_id: return False - if self.port: - if [self.port] != dd.ports: + if self.ports: + if self.ports != dd.ports: return False if self.ip != dd.ip: return False @@ -72,7 +72,7 @@ class HostAssignment(object): self.daemons = daemons self.networks = networks self.allow_colo = allow_colo - self.port_start = spec.get_port_start() + self.ports_start = spec.get_port_start() def hosts_by_label(self, label: str) -> List[orchestrator.HostSpec]: return [h for h in self.hosts if label in h.labels] @@ -137,7 +137,7 @@ class HostAssignment(object): def expand_candidates(ls: List[DaemonPlacement], num: int) -> List[DaemonPlacement]: r = [] for offset in range(num): - r.extend([dp.renumber_port(offset) for dp in ls]) + r.extend([dp.renumber_ports(offset) for dp in ls]) return r # consider enough slots to fulfill target count-per-host or count @@ -213,17 +213,17 @@ class HostAssignment(object): if self.spec.placement.hosts: ls = [ DaemonPlacement(hostname=h.hostname, network=h.network, name=h.name, - port=self.port_start) + ports=self.ports_start) for h in self.spec.placement.hosts ] elif self.spec.placement.label: ls = [ - DaemonPlacement(hostname=x.hostname, port=self.port_start) + DaemonPlacement(hostname=x.hostname, ports=self.ports_start) for x in self.hosts_by_label(self.spec.placement.label) ] elif self.spec.placement.host_pattern: ls = [ - DaemonPlacement(hostname=x, port=self.port_start) + DaemonPlacement(hostname=x, ports=self.ports_start) for x in self.spec.placement.filter_matching_hostspecs(self.hosts) ] elif ( @@ -231,7 +231,7 @@ class HostAssignment(object): or self.spec.placement.count_per_host is not None ): ls = [ - DaemonPlacement(hostname=x.hostname, port=self.port_start) + DaemonPlacement(hostname=x.hostname, ports=self.ports_start) for x in self.hosts ] else: @@ -246,7 +246,7 @@ class HostAssignment(object): ip = self.find_ip_on_host(p.hostname, self.spec.networks) if ip: ls.append(DaemonPlacement(hostname=p.hostname, network=p.network, - name=p.name, port=p.port, ip=ip)) + name=p.name, ports=p.ports, ip=ip)) else: logger.debug( f'Skipping {p.hostname} with no IP in network(s) {self.spec.networks}' @@ -254,10 +254,14 @@ class HostAssignment(object): if self.filter_new_host: old = ls.copy() - ls = [h for h in ls if self.filter_new_host(h.hostname)] - for h in list(set(old) - set(ls)): - logger.info( - f"Filtered out host {h.hostname}: could not verify host allowed virtual ips") + ls = [] + for h in old: + if self.filter_new_host(h.hostname): + ls.append(h) + else: + logger.info( + f"Filtered out host {h.hostname}: could not verify host allowed virtual ips") + if len(old) > len(ls): logger.debug('Filtered %s down to %s' % (old, ls)) # shuffle for pseudo random selection diff --git a/src/pybind/mgr/cephadm/serve.py b/src/pybind/mgr/cephadm/serve.py index 1344fdd4e2ba9..aaa753f25c935 100644 --- a/src/pybind/mgr/cephadm/serve.py +++ b/src/pybind/mgr/cephadm/serve.py @@ -593,9 +593,11 @@ class CephadmServe: for slot in slots_to_add: for daemon_type in service_to_daemon_types(service_type): # first remove daemon on conflicting port? 
- if slot.port: + if slot.ports: for d in daemons_to_remove: - if d.hostname != slot.hostname or d.ports != [slot.port]: + if d.hostname != slot.hostname: + continue + if not (set(d.ports or []) & set(slot.ports)): continue if d.ip and slot.ip and d.ip != slot.ip: continue @@ -622,7 +624,7 @@ class CephadmServe: daemon_spec = svc.make_daemon_spec( slot.hostname, daemon_id, slot.network, spec, daemon_type=daemon_type, - ports=[slot.port] if slot.port else None, + ports=slot.ports, ip=slot.ip, ) self.log.debug('Placing %s.%s on host %s' % ( diff --git a/src/pybind/mgr/cephadm/tests/test_scheduling.py b/src/pybind/mgr/cephadm/tests/test_scheduling.py index 2a2c6a1d79ff7..49cc2ff7102fd 100644 --- a/src/pybind/mgr/cephadm/tests/test_scheduling.py +++ b/src/pybind/mgr/cephadm/tests/test_scheduling.py @@ -187,6 +187,28 @@ test_explicit_scheduler_results = [ ] +@pytest.mark.parametrize("dp,n,result", + [ # noqa: E128 + ( + DaemonPlacement(daemon_type='mgr', hostname='host1', ports=[80]), + 0, + DaemonPlacement(daemon_type='mgr', hostname='host1', ports=[80]), + ), + ( + DaemonPlacement(daemon_type='mgr', hostname='host1', ports=[80]), + 2, + DaemonPlacement(daemon_type='mgr', hostname='host1', ports=[82]), + ), + ( + DaemonPlacement(daemon_type='mgr', hostname='host1', ports=[80, 90]), + 2, + DaemonPlacement(daemon_type='mgr', hostname='host1', ports=[82, 92]), + ), + ]) +def test_daemon_placement_renumber(dp, n, result): + assert dp.renumber_ports(n) == result + + @pytest.mark.parametrize( 'dp,dd,result', [ diff --git a/src/python-common/ceph/deployment/service_spec.py b/src/python-common/ceph/deployment/service_spec.py index 08d12523dbcf8..c6e607f171eed 100644 --- a/src/python-common/ceph/deployment/service_spec.py +++ b/src/python-common/ceph/deployment/service_spec.py @@ -572,10 +572,10 @@ class ServiceSpec(object): n += '.' + self.service_id return n - def get_port_start(self) -> Optional[int]: + def get_port_start(self) -> List[int]: # If defined, we will allocate and number ports starting at this # point. - return None + return [] def to_json(self): # type: () -> OrderedDict[str, Any] @@ -745,8 +745,8 @@ class RGWSpec(ServiceSpec): self.rgw_frontend_type = rgw_frontend_type self.ssl = ssl - def get_port_start(self) -> Optional[int]: - return self.get_port() + def get_port_start(self) -> List[int]: + return [self.get_port()] def get_port(self) -> int: if self.rgw_frontend_port: -- 2.39.5
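
Note for reviewers skimming the backport: the heart of the schedule.py change is that a placement slot now carries a list of ports (ports: List[int]) instead of a single Optional[int] port, and renumbering shifts every port in the list by the same offset, so a count-per-host greater than one yields non-overlapping port sets on the same host. The following is a minimal, standalone Python sketch of that behaviour, not the cephadm module itself (DaemonPlacementSketch and its fields are simplified for illustration); the demo values mirror the added test_daemon_placement_renumber cases:

    from typing import List, NamedTuple, Optional

    class DaemonPlacementSketch(NamedTuple):
        # Simplified stand-in for cephadm's DaemonPlacement: a placement slot
        # carries a list of ports instead of a single optional port.
        hostname: str
        ip: Optional[str] = None
        ports: List[int] = []

        def renumber_ports(self, n: int) -> 'DaemonPlacementSketch':
            # Shift every port by the same offset; each extra slot expanded on a
            # host gets the next offset, so its port set does not collide.
            return self._replace(ports=[p + n for p in self.ports])

    if __name__ == '__main__':
        slot = DaemonPlacementSketch(hostname='host1', ports=[80, 90])
        assert slot.renumber_ports(0).ports == [80, 90]   # offset 0 is the identity
        assert slot.renumber_ports(2).ports == [82, 92]   # both ports shift together
        print(slot.renumber_ports(2))

get_port_start() changes in the same spirit: it now returns a (possibly empty) list of starting ports rather than Optional[int], e.g. RGWSpec returns [self.get_port()].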
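
The serve.py hunk also relaxes the pre-placement conflict check: a daemon already running on the target host is scheduled for removal if it shares any port with the new slot, rather than only when its port list matches the slot exactly. A small sketch of that predicate under assumed names (conflicts_with_slot is a hypothetical helper; the real check is inlined in CephadmServe and additionally skips daemons bound to a different IP):

    from typing import List, Optional

    def conflicts_with_slot(daemon_host: str,
                            daemon_ports: Optional[List[int]],
                            slot_host: str,
                            slot_ports: List[int]) -> bool:
        # True when an existing daemon should be removed before placing the slot:
        # same host and at least one overlapping port.
        if daemon_host != slot_host:
            return False
        return bool(set(daemon_ports or []) & set(slot_ports))

    if __name__ == '__main__':
        assert conflicts_with_slot('host1', [8080], 'host1', [8080, 8443])       # overlap -> conflict
        assert not conflicts_with_slot('host2', [8080], 'host1', [8080, 8443])   # other host -> keep
        assert not conflicts_with_slot('host1', [9090], 'host1', [8080, 8443])   # disjoint ports -> keep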