mgr/orch: adapt update_mgr/mon funcs to new parsing 31672/head
author Joshua Schmid <jschmid@suse.de>
Fri, 15 Nov 2019 11:51:17 +0000 (12:51 +0100)
committer Joshua Schmid <jschmid@suse.de>
Mon, 18 Nov 2019 17:11:55 +0000 (18:11 +0100)
update_mgr and update_mon (and their corresponding orch-backend
implementations) are adapted to the new parsing method
introduced in ff9093873.

Signed-off-by: Joshua Schmid <jschmid@suse.de>
src/pybind/mgr/orchestrator.py
src/pybind/mgr/orchestrator_cli/module.py
src/pybind/mgr/ssh/module.py
src/pybind/mgr/tests/test_orchestrator.py
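
For orientation before the diff: the commit replaces the old split_host /
split_host_with_network helpers with a single parse_host_specs entry point.
A minimal usage sketch (assuming orchestrator.py is importable, e.g. with
src/pybind/mgr on the path; the accepted formats come from the docstring and
tests below):

    from orchestrator import parse_host_specs

    # name only; no network part required (the 'mgr update' path)
    spec = parse_host_specs("myhost=sname", require_network=False)
    print(spec.hostname, spec.name)   # myhost sname

    # network variants required by 'mon update': plain IP, CIDR, or addrvec
    parse_host_specs("myhost:10.1.1.10")
    parse_host_specs("myhost:10.1.1.0/24=sname")
    parse_host_specs("myhost:[v2:10.1.1.10:3300,v1:10.1.1.10:6789]=sname")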

index 46a517eb398fe3fbd220c091cc44783057272592..b23b914ef11790c65f6a81edf38b25b5444c1079 100644 (file)
@@ -29,20 +29,20 @@ try:
 except ImportError:
     T, G = object, object
 
+
 def parse_host_specs(host, require_network=True):
     """
     Split host into host, network, and (optional) daemon name parts.  The network
-    part can be an IP, CIDR, or ceph addrvec like
-    '[v2:1.2.3.4:3300,v1:1.2.3.4:6789]'.
+    part can be an IP, CIDR, or ceph addrvec like '[v2:1.2.3.4:3300,v1:1.2.3.4:6789]'.
     e.g.,
       "myhost"
       "myhost=name"
       "myhost:1.2.3.4"
       "myhost:1.2.3.4=name"
       "myhost:1.2.3.0/24"
-      "myhost:1.2.3.4/24=name"
-      "myhost:v2:1.2.3.4:3000/24=name"
-      "myhost:v2:1.2.3.4:3000,v1:1.2.3.4:6789/24=name"
+      "myhost:1.2.3.0/24=name"
+      "myhost:[v2:1.2.3.4:3000]=name"
+      "myhost:[v2:1.2.3.4:3000,v1:1.2.3.4:6789]=name"
     """
     # Matches from start to : or = or until end of string
     host_re = r'^(.*?)(:|=|$)'
@@ -60,42 +60,42 @@ def parse_host_specs(host, require_network=True):
     if match_host:
         host_spec = host_spec._replace(hostname=match_host.group(1))
 
-    ip_match = re.search(ip_re, host)
-    if ip_match:
-        host_spec = host_spec._replace(network=ip_match.group(1))
-
     name_match = re.search(name_re, host)
     if name_match:
         host_spec = host_spec._replace(name=name_match.group(1))
 
+    ip_match = re.search(ip_re, host)
+    if ip_match:
+        host_spec = host_spec._replace(network=ip_match.group(1))
+
     if not require_network:
         return host_spec
 
     from ipaddress import ip_network, ip_address
-    try:
-        networks = list()
-        network = host_spec.network
-        # in case we have v2:1.2.3.4:3000,v1:1.2.3.4:6478
-        if ',' in network:
-            net1, net2 = network.split(',')
-            networks.extend([net1, net2])
-        # if we only have one network string
-        else:
-            networks.append(network)
-        for network in networks:
-            # only if we have versioned network configs
-            if network.startswith('v'):
-                network = network.split(':')[1]
+    networks = list()
+    network = host_spec.network
+        # in case we have [v2:1.2.3.4:3000,v1:1.2.3.4:6789]
+    if ',' in network:
+        networks = network.split(',')
+    else:
+        networks.append(network)
+    for network in networks:
+        # only if we have versioned network configs
+        if network.startswith('v') or network.startswith('[v'):
+            network = network.split(':')[1]
+        try:
             # if subnets are defined, also verify the validity
             if '/' in network:
                 ip_network(network)
             else:
                 ip_address(network)
-    except ValueError:
-        raise Exception("No valid IPv4/IPv6 address provided")
+        except ValueError:
+            # surface the invalid address/network to the caller unchanged
+            raise
 
     return host_spec
 
+
 class OrchestratorError(Exception):
     """
     General orchestrator specific error.
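
The network check above now runs per element rather than under one blanket
try/except: each part of a (possibly bracketed, comma-separated) addrvec is
stripped of its v1:/v2: prefix and validated individually. A self-contained
sketch of just that step (the helper name is hypothetical, not part of the
commit; the logic mirrors the hunk above):

    from ipaddress import ip_address, ip_network

    def _validate_network(network):
        # Hypothetical helper mirroring the validation loop in
        # parse_host_specs above.
        for part in (network.split(',') if ',' in network else [network]):
            if part.startswith('v') or part.startswith('[v'):
                part = part.split(':')[1]   # keep only the address
            if '/' in part:
                ip_network(part)            # CIDR: validates the subnet too
            else:
                ip_address(part)            # plain IPv4/IPv6 address

    _validate_network("10.1.1.0/24")                          # ok
    _validate_network("[v2:1.2.3.4:3300,v1:1.2.3.4:6789]")    # ok
    # _validate_network("1.1.1.1/24")  # ValueError: host bits set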
@@ -669,7 +669,7 @@ class PlacementSpec(object):
     def __init__(self, label=None, nodes=[]):
         self.label = label
 
-        self.nodes = list(map(split_host, nodes))
+        self.nodes = [parse_host_specs(x, require_network=False) for x in nodes]
 
 def handle_type_error(method):
     @wraps(method)
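
PlacementSpec.nodes therefore changes shape: each entry is now a parsed spec
whose network field is empty (require_network=False), which is why the ssh
backend loops further down unpack three values instead of two. A sketch:

    from orchestrator import PlacementSpec

    spec = PlacementSpec(nodes=["host1=a", "host2"])
    for hostname, _network, name in spec.nodes:   # _network is '' here
        print(hostname, name or "<auto-generated>")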
index 0b06cfc6859917b9c9f651356257e8edfed7cbb4..f1d0a9d551b1206b88cfc65b77e6cf9c1ad8d108 100644 (file)
@@ -553,11 +553,12 @@ Usage:
 
         if num <= 0:
             return HandleCommandResult(-errno.EINVAL,
-                    stderr="Invalid number of mgrs: require {} > 0".format(num))
+                                       stderr="Invalid number of mgrs: require {} > 0".format(num))
 
         if hosts:
             try:
-                hosts = list(map(orchestrator.split_host, hosts))
+                hosts = [orchestrator.parse_host_specs(host_spec, require_network=False)
+                         for host_spec in hosts]
             except Exception as e:
                 msg = "Failed to parse host list: '{}': {}".format(hosts, e)
                 return HandleCommandResult(-errno.EINVAL, stderr=msg)
@@ -576,11 +577,12 @@ Usage:
 
         if num <= 0:
             return HandleCommandResult(-errno.EINVAL,
-                    stderr="Invalid number of mons: require {} > 0".format(num))
+                                       stderr="Invalid number of mons: require {} > 0".format(num))
 
         if hosts:
             try:
-                hosts = list(map(orchestrator.split_host_with_network, hosts))
+                hosts = [orchestrator.parse_host_specs(host_spec)
+                         for host_spec in hosts]
             except Exception as e:
                 msg = "Failed to parse host list: '{}': {}".format(hosts, e)
                 return HandleCommandResult(-errno.EINVAL, stderr=msg)
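
Both handlers now funnel hosts through parse_host_specs; the only difference
is that 'mgr update' passes require_network=False while 'mon update' keeps
the default and therefore rejects bare hostnames. A sketch of the two paths
(assumes `import orchestrator`, as module.py does):

    import orchestrator

    # mgr update: a daemon name is allowed, no network part needed
    mgr_hosts = [orchestrator.parse_host_specs(h, require_network=False)
                 for h in ["host1", "host2=x"]]

    # mon update: the network part is required, so a bare hostname raises
    # and the CLI returns EINVAL with the message below
    try:
        orchestrator.parse_host_specs("host1")
    except Exception as e:
        print("Failed to parse host list: '{}': {}".format(["host1"], e))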
index 4b9d83f8fd6a69271688fe6f213e3885460cc04e..f359ec37be0bf0384993dbd610688adc2fefae08 100644 (file)
@@ -894,7 +894,7 @@ class SSHOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
         return self._create_daemon('mon', name or host, host, keyring,
                                    extra_args=extra_args)
 
-    def update_mons(self, num, hosts):
+    def update_mons(self, num, host_specs):
         """
         Adjust the number of cluster monitors.
         """
@@ -906,34 +906,36 @@ class SSHOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
         if num < num_mons:
             raise NotImplementedError("Removing monitors is not supported.")
 
-        # check that all the hostnames are registered
-        self._require_hosts(map(lambda h: h[0], hosts))
+        self.log.debug("Trying to update monitors on: {}".format(host_specs))
+        # check that all the hosts are registered
+        for host_spec in host_specs:
+            self._require_hosts(host_spec.hostname)
 
         # current support requires a network to be specified
-        for host, network, name in hosts:
+        for host, network, _ in host_specs:
             if not network:
-                raise RuntimeError("Host '{}' missing network part".format(host))
+                raise RuntimeError("Host '{}' is missing a network spec".format(host))
 
         daemons = self._get_services('mon')
-        for _, _, name in hosts:
+        for _, _, name in host_specs:
             if name and len([d for d in daemons if d.service_instance == name]):
                raise RuntimeError('name %s already exists', name)
 
         # explicit placement: enough hosts provided?
         num_new_mons = num - num_mons
-        if len(hosts) < num_new_mons:
+        if len(host_specs) < num_new_mons:
             raise RuntimeError("Error: {} hosts provided, expected {}".format(
-                len(hosts), num_new_mons))
+                len(host_specs), num_new_mons))
 
         self.log.info("creating {} monitors on hosts: '{}'".format(
-            num_new_mons, ",".join(map(lambda h: ":".join(h), hosts))))
+            num_new_mons, ",".join(map(lambda h: ":".join(h), host_specs))))
 
         # TODO: we may want to chain the creation of the monitors so they join
         # the quorum one at a time.
         results = []
-        for host, network, name in hosts:
+        for host, network, name in host_specs:
             result = self._worker_pool.apply_async(self._create_mon,
                                                    (host, network, name))
+
             results.append(result)
 
         return SSHWriteCompletion(results)
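
Note that update_mons both unpacks the specs positionally (host, network, _)
and reads host.hostname: that works because the parsed spec is tuple-like
(the _replace calls earlier point to a namedtuple, though its class
definition sits outside this diff). A small sketch of the two access styles:

    import orchestrator

    spec = orchestrator.parse_host_specs("mon1:10.0.0.0/24=a")
    host, network, name = spec   # positional unpacking, as in update_mons
    assert (host, network, name) == (spec.hostname, spec.network, spec.name)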
@@ -955,7 +957,7 @@ class SSHOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
 
         return self._create_daemon('mgr', name, host, keyring)
 
-    def update_mgrs(self, num, hosts):
+    def update_mgrs(self, num, host_specs):
         """
         Adjust the number of cluster managers.
         """
@@ -964,8 +966,9 @@ class SSHOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
         if num == num_mgrs:
             return SSHWriteCompletionReady("The requested number of managers exist.")
 
+        self.log.debug("Trying to update managers on: {}".format(host_specs))
         # check that all the hosts are registered
-        self._require_hosts(map(lambda h: h[0], hosts))
+        for host_spec in host_specs:
+            self._require_hosts(host_spec.hostname)
 
         results = []
         if num < num_mgrs:
@@ -975,10 +978,10 @@ class SSHOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
             # cluster doesn't see
             connected = []
             mgr_map = self.get("mgr_map")
-            if mgr_map["active_name"]:
-                connected.append(mgr_map['active_name'])
-            for standby in mgr_map['standbys']:
-                connected.append(standby['name'])
+            if mgr_map.get("active_name", {}):
+                connected.append(mgr_map.get('active_name', ''))
+            for standby in mgr_map.get('standbys', []):
+                connected.append(standby.get('name', ''))
             for d in daemons:
                 if d.service_instance not in connected:
                     result = self._worker_pool.apply_async(
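
The mgr_map reads above were switched to .get() with defaults, so a map that
omits a key no longer raises KeyError. A tiny standalone illustration (the
sample map values are made up; only the shape mirrors the fields read above):

    mgr_map = {"active_name": "mgr-a", "standbys": [{"name": "mgr-b"}]}

    connected = []
    if mgr_map.get("active_name", ""):
        connected.append(mgr_map.get("active_name", ""))
    for standby in mgr_map.get("standbys", []):
        connected.append(standby.get("name", ""))

    assert connected == ["mgr-a", "mgr-b"]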
@@ -1006,24 +1009,26 @@ class SSHOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
             # we assume explicit placement by which there are the same number of
             # hosts specified as the size of increase in number of daemons.
             num_new_mgrs = num - num_mgrs
-            if len(hosts) < num_new_mgrs:
+            if len(host_specs) < num_new_mgrs:
                 raise RuntimeError(
                     "Error: {} hosts provided, expected {}".format(
-                        len(hosts), num_new_mgrs))
+                        len(host_specs), num_new_mgrs))
+
 
-            for _, name in hosts:
-                if name and len([d for d in daemons if d.service_instance == name]):
-                    raise RuntimeError('name %s alrady exists', name)
+            for host_spec in host_specs:
+                if host_spec.name and len([d for d in daemons if d.service_instance == host_spec.name]):
+                    raise RuntimeError('name %s already exists', host_spec.name)
 
             self.log.info("creating {} managers on hosts: '{}'".format(
-                num_new_mgrs, ",".join(map(lambda h: h[0], hosts))))
+                num_new_mgrs, ",".join([spec.hostname for spec in host_specs])))
 
-            for i in range(num_new_mgrs):
-                host, name = hosts[i]
-                if not name:
-                    name = self.get_unique_name(daemons)
+            for host_spec in host_specs:
+                name = host_spec.name or self.get_unique_name(daemons)
                 result = self._worker_pool.apply_async(self._create_mgr,
-                                                       (host, name))
+                                                       (host_spec.hostname, name))
                 results.append(result)
 
         return SSHWriteCompletion(results)
@@ -1034,7 +1039,7 @@ class SSHOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
         daemons = self._get_services('mds')
         results = []
         num_added = 0
-        for host, name in spec.placement.nodes:
+        for host, _, name in spec.placement.nodes:
             if num_added >= spec.count:
                 break
             mds_id = self.get_unique_name(daemons, spec.name, name)
@@ -1091,7 +1096,7 @@ class SSHOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
         daemons = self._get_services('rgw')
         results = []
         num_added = 0
-        for host, name in spec.placement.nodes:
+        for host, _, name in spec.placement.nodes:
             if num_added >= spec.count:
                 break
             rgw_id = self.get_unique_name(daemons, spec.name, name)
@@ -1141,7 +1146,7 @@ class SSHOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
         daemons = self._get_services('rbd-mirror')
         results = []
         num_added = 0
-        for host, name in spec.placement.nodes:
+        for host, _, name in spec.placement.nodes:
             if num_added >= spec.count:
                 break
             daemon_id = self.get_unique_name(daemons, None, name)
index de50acb0722ebb290f14235a490a503a2ef0bc68..ad1493c5ddc2937a58eeb2453802f73e201bf590 100644 (file)
@@ -7,6 +7,36 @@ from ceph.deployment import inventory
 from orchestrator import ReadCompletion, raise_if_exception, RGWSpec
 from orchestrator import InventoryNode, ServiceDescription
 from orchestrator import OrchestratorValidationError
+from orchestrator import parse_host_specs
+
+
+@pytest.mark.parametrize("test_input,expected, require_network",
+                         [("myhost", ('myhost', '', ''), False),
+                          ("myhost=sname", ('myhost', '', 'sname'), False),
+                          ("myhost:10.1.1.10", ('myhost', '10.1.1.10', ''), True),
+                          ("myhost:10.1.1.10=sname", ('myhost', '10.1.1.10', 'sname'), True),
+                          ("myhost:10.1.1.0/32", ('myhost', '10.1.1.0/32', ''), True),
+                          ("myhost:10.1.1.0/32=sname", ('myhost', '10.1.1.0/32', 'sname'), True),
+                          ("myhost:[v1:10.1.1.10:6789]", ('myhost', '[v1:10.1.1.10:6789]', ''), True),
+                          ("myhost:[v1:10.1.1.10:6789]=sname", ('myhost', '[v1:10.1.1.10:6789]', 'sname'), True),
+                          ("myhost:[v1:10.1.1.10:6789,v2:10.1.1.11:3000]", ('myhost', '[v1:10.1.1.10:6789,v2:10.1.1.11:3000]', ''), True),
+                          ("myhost:[v1:10.1.1.10:6789,v2:10.1.1.11:3000]=sname", ('myhost', '[v1:10.1.1.10:6789,v2:10.1.1.11:3000]', 'sname'), True),
+                          ])
+def test_parse_host_specs(test_input, expected, require_network):
+    ret = parse_host_specs(test_input, require_network=require_network)
+    assert ret == expected
+
+@pytest.mark.parametrize("test_input",
+                         # subnet has host bits set
+                         [("myhost:1.1.1.1/24"),
+                          # malformed ip address
+                          ("myhost:1"),
+                          # network part is empty
+                          ("myhost=1"),
+                          ])
+def test_parse_host_specs_raises(test_input):
+    with pytest.raises(ValueError):
+        parse_host_specs(test_input)
 
 
 def _test_resource(data, resource_class, extra=None):
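
The new parametrized cases can be run on their own, e.g. (a sketch; the path
assumes a checkout with src/pybind/mgr as the working directory):

    import pytest

    # select only the parser tests added by this commit
    pytest.main(["tests/test_orchestrator.py", "-k", "parse_host_specs"])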