mgr/cephadm/schedule: dynamically assign ports for rgw
author     Sage Weil <sage@newdream.net>
           Wed, 10 Mar 2021 19:25:23 +0000 (14:25 -0500)
committer  Sage Weil <sage@newdream.net>
           Mon, 15 Mar 2021 22:55:16 +0000 (18:55 -0400)
Dynamically number ports for RGW instances, with the start port being
the one configured on the service (or the default of 80 or 443).

Signed-off-by: Sage Weil <sage@newdream.net>
src/pybind/mgr/cephadm/module.py
src/pybind/mgr/cephadm/schedule.py
src/pybind/mgr/cephadm/services/cephadmservice.py
src/pybind/mgr/cephadm/tests/test_scheduling.py
src/python-common/ceph/deployment/service_spec.py
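
A minimal, self-contained sketch (not the cephadm code itself) of the numbering
scheme this commit introduces: each additional daemon slot on a host takes the
next port after the service's configured start port. The helper name and the
8000 value are illustrative only.

    from typing import List, Optional

    def number_ports(start: Optional[int], per_host: int) -> List[Optional[int]]:
        # No configured start port means the slots carry no port at all.
        if start is None:
            return [None] * per_host
        return [start + offset for offset in range(per_host)]

    # e.g. an RGW service with a start port of 8000 and three daemons per host
    # would be numbered 8000, 8001, 8002 on each candidate host.
    assert number_ports(8000, 3) == [8000, 8001, 8002]
    assert number_ports(None, 3) == [None, None, None]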

index 9702e7d52eb64d419167111b86f11b930aab0535..d2618b40d07440bf4d620ec19499d11335361d42 100644 (file)
@@ -2034,8 +2034,11 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
                 self.cephadm_services[service_type].config(spec, daemon_id)
                 did_config = True
 
+            port = spec.get_port_start()
             daemon_spec = self.cephadm_services[service_type].make_daemon_spec(
-                host, daemon_id, network, spec)
+                host, daemon_id, network, spec,
+                # NOTE: this does not consider port conflicts!
+                ports=[port] if port else None)
             self.log.debug('Placing %s.%s on host %s' % (
                 daemon_type, daemon_id, host))
             args.append(daemon_spec)
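
For illustration, the ports argument built in the hunk above is just the spec's
start port wrapped in a single-element list, or None when the spec defines no
port. The 8000 value below is hypothetical, and, as the NOTE says, nothing at
this point checks whether another daemon on the host already claims that port.

    # Illustration only: shape of the ports value passed to make_daemon_spec().
    port = 8000                        # e.g. what spec.get_port_start() might return
    ports = [port] if port else None   # -> [8000]; stays None for portless specs
    print(ports)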
index b2dad5b0e7cee967fb09a8b0c828d61d92b439da..3a4c3cad29528a806851cd3e9bf16129bdceb470 100644 (file)
@@ -33,6 +33,15 @@ class DaemonPlacement(NamedTuple):
             res += '(' + ' '.join(other) + ')'
         return res
 
+    def renumber_port(self, n: int) -> 'DaemonPlacement':
+        return DaemonPlacement(
+            self.hostname,
+            self.network,
+            self.name,
+            self.ip,
+            (self.port + n) if self.port is not None else None
+        )
+
     def matches_daemon(self, dd: DaemonDescription) -> bool:
         if self.hostname != dd.hostname:
             return False
@@ -62,6 +71,7 @@ class HostAssignment(object):
         self.service_name = spec.service_name()
         self.daemons = daemons
         self.allow_colo = allow_colo
+        self.port_start = spec.get_port_start()
 
     def hosts_by_label(self, label: str) -> List[orchestrator.HostSpec]:
         return [h for h in self.hosts if label in h.labels]
@@ -123,16 +133,22 @@ class HostAssignment(object):
         # get candidate hosts based on [hosts, label, host_pattern]
         candidates = self.get_candidates()  # type: List[DaemonPlacement]
 
+        def expand_candidates(ls: List[DaemonPlacement], num: int) -> List[DaemonPlacement]:
+            r = []
+            for offset in range(num):
+                r.extend([dp.renumber_port(offset) for dp in ls])
+            return r
+
         # consider enough slots to fulfill target count-per-host or count
         if count is None:
             if self.spec.placement.count_per_host:
                 per_host = self.spec.placement.count_per_host
             else:
                 per_host = 1
-            candidates = candidates * per_host
+            candidates = expand_candidates(candidates, per_host)
         elif self.allow_colo and candidates:
             per_host = 1 + ((count - 1) // len(candidates))
-            candidates = candidates * per_host
+            candidates = expand_candidates(candidates, per_host)
 
         # consider active daemons first
         daemons = [
@@ -188,17 +204,18 @@ class HostAssignment(object):
     def get_candidates(self) -> List[DaemonPlacement]:
         if self.spec.placement.hosts:
             ls = [
-                DaemonPlacement(hostname=h.hostname, network=h.network, name=h.name)
+                DaemonPlacement(hostname=h.hostname, network=h.network, name=h.name,
+                                port=self.port_start)
                 for h in self.spec.placement.hosts
             ]
         elif self.spec.placement.label:
             ls = [
-                DaemonPlacement(hostname=x.hostname)
+                DaemonPlacement(hostname=x.hostname, port=self.port_start)
                 for x in self.hosts_by_label(self.spec.placement.label)
             ]
         elif self.spec.placement.host_pattern:
             ls = [
-                DaemonPlacement(hostname=x)
+                DaemonPlacement(hostname=x, port=self.port_start)
                 for x in self.spec.placement.filter_matching_hostspecs(self.hosts)
             ]
         elif (
@@ -206,7 +223,7 @@ class HostAssignment(object):
                 or self.spec.placement.count_per_host is not None
         ):
             ls = [
-                DaemonPlacement(hostname=x.hostname)
+                DaemonPlacement(hostname=x.hostname, port=self.port_start)
                 for x in self.hosts
             ]
         else:
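
A self-contained sketch, condensed from the DaemonPlacement/HostAssignment
changes above, of how expand_candidates fans the candidate list out and bumps
the port by each copy's offset. The Slot tuple stands in for DaemonPlacement;
the host names and port 80 match the test cases added below.

    from typing import List, NamedTuple, Optional

    class Slot(NamedTuple):            # stand-in for DaemonPlacement
        hostname: str
        port: Optional[int] = None

        def renumber_port(self, n: int) -> 'Slot':
            # Same idea as DaemonPlacement.renumber_port: shift the port, keep the rest.
            return Slot(self.hostname, (self.port + n) if self.port is not None else None)

    def expand_candidates(ls: List[Slot], num: int) -> List[Slot]:
        r: List[Slot] = []
        for offset in range(num):
            r.extend([s.renumber_port(offset) for s in ls])
        return r

    candidates = [Slot('host1', 80), Slot('host2', 80), Slot('host3', 80)]
    per_host = 1 + ((6 - 1) // len(candidates))   # count=6 over 3 hosts -> 2 slots per host
    for slot in expand_candidates(candidates, per_host):
        print(slot)
    # host1:80, host2:80, host3:80, then host1:81, host2:81, host3:81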
index a4d8d3eeb210ba5a904d707ff314ea6c15620256..6513cbf7a4971e01d885b19ec4f1d2a3a6d80995 100644 (file)
@@ -681,6 +681,9 @@ class MdsService(CephService):
 class RgwService(CephService):
     TYPE = 'rgw'
 
+    def allow_colo(self) -> bool:
+        return True
+
     def config(self, spec: RGWSpec, rgw_id: str) -> None:  # type: ignore
         assert self.TYPE == spec.service_type
 
@@ -745,11 +748,11 @@ class RgwService(CephService):
         # configure frontend
         args = []
         if spec.ssl:
-            args.append(f"ssl_port={spec.get_port()}")
+            args.append(f"ssl_port={daemon_spec.ports[0]}")
             args.append(f"ssl_certificate=config://rgw/cert/{spec.rgw_realm}/{spec.rgw_zone}.crt")
             args.append(f"ssl_key=config://rgw/cert/{spec.rgw_realm}/{spec.rgw_zone}.key")
         else:
-            args.append(f"port={spec.get_port()}")
+            args.append(f"port={daemon_spec.ports[0]}")
         frontend = f'beast {" ".join(args)}'
 
         ret, out, err = self.mgr.check_mon_command({
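
A small sketch of the frontend string assembled above, now using the per-daemon
port rather than the shared spec port. The port, realm, and zone values are
made up for illustration.

    # Illustration only: the beast frontend args for a second colocated RGW daemon.
    ports = [8001]                     # e.g. daemon_spec.ports after renumbering
    ssl = False
    args = []
    if ssl:
        args.append(f"ssl_port={ports[0]}")
        args.append("ssl_certificate=config://rgw/cert/myrealm/myzone.crt")
        args.append("ssl_key=config://rgw/cert/myrealm/myzone.key")
    else:
        args.append(f"port={ports[0]}")
    frontend = f'beast {" ".join(args)}'
    print(frontend)                    # beast port=8001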
index f98a593bdf7e91ae4765130a9f807b4f45e41638..5e10bc729facff538ee9c77583780464ef325a72 100644 (file)
@@ -361,7 +361,7 @@ class NodeAssignmentTest(NamedTuple):
         # just hosts
         NodeAssignmentTest(
             'mgr',
-            PlacementSpec(hosts=['smithi060:[v2:172.21.15.60:3301,v1:172.21.15.60:6790]=c']),
+            PlacementSpec(hosts=['smithi060']),
             ['smithi060'],
             [],
             ['smithi060'], ['smithi060'], []
@@ -528,6 +528,34 @@ class NodeAssignmentTest(NamedTuple):
             ['mdshost1', 'mdshost2', 'mdshost1', 'mdshost2', 'mdshost1', 'mdshost2'],
             []
         ),
+        # label + count (colocated) + ports
+        NodeAssignmentTest(
+            'rgw',
+            PlacementSpec(count=6, label='foo'),
+            'host1 host2 host3'.split(),
+            [],
+            ['host1(port=80)', 'host2(port=80)', 'host3(port=80)',
+             'host1(port=81)', 'host2(port=81)', 'host3(port=81)'],
+            ['host1(port=80)', 'host2(port=80)', 'host3(port=80)',
+             'host1(port=81)', 'host2(port=81)', 'host3(port=81)'],
+            []
+        ),
+        # label + count (colocated) + ports (+ existing)
+        NodeAssignmentTest(
+            'rgw',
+            PlacementSpec(count=6, label='foo'),
+            'host1 host2 host3'.split(),
+            [
+                DaemonDescription('rgw', 'a', 'host1', ports=[81]),
+                DaemonDescription('rgw', 'b', 'host2', ports=[80]),
+                DaemonDescription('rgw', 'c', 'host1', ports=[82]),
+            ],
+            ['host1(port=80)', 'host2(port=80)', 'host3(port=80)',
+             'host1(port=81)', 'host2(port=81)', 'host3(port=81)'],
+            ['host1(port=80)', 'host3(port=80)',
+             'host2(port=81)', 'host3(port=81)'],
+            ['rgw.c']
+        ),
     ])
 def test_node_assignment(service_type, placement, hosts, daemons,
                          expected, expected_add, expected_remove):
@@ -535,6 +563,7 @@ def test_node_assignment(service_type, placement, hosts, daemons,
     allow_colo = False
     if service_type == 'rgw':
         service_id = 'realm.zone'
+        allow_colo = True
     elif service_type == 'mds':
         service_id = 'myfs'
         allow_colo = True
@@ -547,24 +576,26 @@ def test_node_assignment(service_type, placement, hosts, daemons,
         spec=spec,
         hosts=[HostSpec(h, labels=['foo']) for h in hosts],
         daemons=daemons,
-        allow_colo=allow_colo
+        allow_colo=allow_colo,
     ).place()
 
-    got = [hs.hostname for hs in all_slots]
+    got = [str(p) for p in all_slots]
     num_wildcard = 0
     for i in expected:
         if i == '*':
             num_wildcard += 1
         else:
+            assert i in got
             got.remove(i)
     assert num_wildcard == len(got)
 
-    got = [hs.hostname for hs in to_add]
+    got = [str(p) for p in to_add]
     num_wildcard = 0
     for i in expected_add:
         if i == '*':
             num_wildcard += 1
         else:
+            assert i in got
             got.remove(i)
     assert num_wildcard == len(got)
 
index af07d254c330d8750c1ef19cf4944c9c77cc6698..339dbe0a48340662e690904cec2ab96c8431348c 100644 (file)
@@ -573,6 +573,11 @@ class ServiceSpec(object):
             n += '.' + self.service_id
         return n
 
+    def get_port_start(self) -> Optional[int]:
+        # If defined, we will allocate and number ports starting at this
+        # point.
+        return None
+
     def to_json(self):
         # type: () -> OrderedDict[str, Any]
         ret: OrderedDict[str, Any] = OrderedDict()
@@ -727,6 +732,9 @@ class RGWSpec(ServiceSpec):
         self.rgw_frontend_ssl_key = rgw_frontend_ssl_key
         self.ssl = ssl
 
+    def get_port_start(self) -> Optional[int]:
+        return self.get_port()
+
     def get_port(self) -> int:
         if self.rgw_frontend_port:
             return self.rgw_frontend_port
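
A condensed sketch of the start-port lookup introduced above: the base
ServiceSpec opts out by returning None, while RGWSpec reuses its get_port()
value. The diff truncates get_port(), so the 80/443 fallback below is assumed
from the "default of 80 or 443" noted in the commit message.

    from typing import Optional

    class ServiceSpecSketch:
        def get_port_start(self) -> Optional[int]:
            return None                # most services allocate no ports

    class RGWSpecSketch(ServiceSpecSketch):
        def __init__(self, rgw_frontend_port: Optional[int] = None, ssl: bool = False):
            self.rgw_frontend_port = rgw_frontend_port
            self.ssl = ssl

        def get_port_start(self) -> Optional[int]:
            return self.get_port()

        def get_port(self) -> int:
            if self.rgw_frontend_port:
                return self.rgw_frontend_port
            return 443 if self.ssl else 80   # assumed defaults per the commit message

    assert RGWSpecSketch().get_port_start() == 80
    assert RGWSpecSketch(ssl=True).get_port_start() == 443
    assert RGWSpecSketch(rgw_frontend_port=8080).get_port_start() == 8080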