mgr/orchestrator: Substitute `node` with `host`, globally
author     Sebastian Wagner <sebastian.wagner@suse.com>
           Fri, 21 Feb 2020 17:03:48 +0000 (18:03 +0100)
committer  Sage Weil <sage@redhat.com>
           Sun, 23 Feb 2020 18:07:24 +0000 (12:07 -0600)
Right now, there is a mix of `node` and `host`. Unify this to `host`.

* mgr/rook is special, as Kubernetes natively uses "node"

Signed-off-by: Sebastian Wagner <sebastian.wagner@suse.com>
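
For anyone consuming the orchestrator API from another module, here is a minimal usage sketch (not part of the commit) of the renamed surface. The class and keyword names are taken from the diff below; `orch` stands for any object implementing the `Orchestrator` interface and is an assumption here, as is the fact that the returned completion has already been waited on.

```python
# Minimal sketch, assuming `orch` implements orchestrator.Orchestrator and the
# returned Completion has already been waited on elsewhere.
# Before this commit the same calls read InventoryFilter(nodes=...),
# get_inventory(node_filter=...), and DaemonDescription(nodename=...).
from orchestrator import DaemonDescription, InventoryFilter


def devices_by_host(orch, hostname):
    host_filter = InventoryFilter(hosts=[hostname])
    completion = orch.get_inventory(host_filter=host_filter)
    # completion.result is a list of InventoryHost; each has .name and .devices
    return {h.name: h.devices for h in completion.result}


# DaemonDescription now takes and exposes `hostname` instead of `nodename`
d = DaemonDescription(hostname='host0', daemon_type='osd', daemon_id='1')
assert d.hostname == 'host0'
```
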
21 files changed:
src/pybind/mgr/cephadm/module.py
src/pybind/mgr/cephadm/tests/test_cephadm.py
src/pybind/mgr/dashboard/controllers/orchestrator.py
src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/inventory/inventory-host.model.ts [new file with mode: 0644]
src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/inventory/inventory-node.model.ts [deleted file]
src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/services.component.spec.ts
src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/services.component.ts
src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/services.model.ts
src/pybind/mgr/dashboard/frontend/src/app/shared/api/orchestrator.service.ts
src/pybind/mgr/dashboard/services/ganesha.py
src/pybind/mgr/dashboard/services/iscsi_config.py
src/pybind/mgr/dashboard/services/orchestrator.py
src/pybind/mgr/dashboard/tests/test_orchestrator.py
src/pybind/mgr/orchestrator/__init__.py
src/pybind/mgr/orchestrator/_interface.py
src/pybind/mgr/orchestrator/module.py
src/pybind/mgr/rook/module.py
src/pybind/mgr/rook/rook_cluster.py
src/pybind/mgr/test_orchestrator/dummy_data.json
src/pybind/mgr/test_orchestrator/module.py
src/pybind/mgr/tests/test_orchestrator.py

diff --git a/src/pybind/mgr/cephadm/module.py b/src/pybind/mgr/cephadm/module.py
index 7ee8e4f619dd033de48ecf9464c4a01c8b887464..12986a2a52dc55207646f73b25c20c68ec2d3102 100644 (file)
@@ -678,14 +678,14 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
 
                 # make sure host has latest container image
                 out, err, code = self._run_cephadm(
-                    d.nodename, None, 'inspect-image', [],
+                    d.hostname, None, 'inspect-image', [],
                     image=target_name, no_fsid=True, error_ok=True)
                 self.log.debug('out %s code %s' % (out, code))
                 if code or json.loads(''.join(out)).get('image_id') != target_id:
                     self.log.info('Upgrade: Pulling %s on %s' % (target_name,
-                                                                 d.nodename))
+                                                                 d.hostname))
                     out, err, code = self._run_cephadm(
-                        d.nodename, None, 'pull', [],
+                        d.hostname, None, 'pull', [],
                         image=target_name, no_fsid=True, error_ok=True)
                     if code:
                         self._fail_upgrade('UPGRADE_FAILED_PULL', {
@@ -694,12 +694,12 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
                             'count': 1,
                             'detail': [
                                 'failed to pull %s on host %s' % (target_name,
-                                                                  d.nodename)],
+                                                                  d.hostname)],
                         })
                         return None
                     r = json.loads(''.join(out))
                     if r.get('image_id') != target_id:
-                        self.log.info('Upgrade: image %s pull on %s got new image %s (not %s), restarting' % (target_name, d.nodename, r['image_id'], target_id))
+                        self.log.info('Upgrade: image %s pull on %s got new image %s (not %s), restarting' % (target_name, d.hostname, r['image_id'], target_id))
                         self.upgrade_state['image_id'] = r['image_id']
                         self._save_upgrade_state()
                         return None
@@ -719,7 +719,7 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
                 return self._daemon_action([(
                     d.daemon_type,
                     d.daemon_id,
-                    d.nodename,
+                    d.hostname,
                     'redeploy'
                 )])
 
@@ -1472,7 +1472,7 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
             sd.last_refresh = datetime.datetime.utcnow()
             sd.daemon_type = d['name'].split('.')[0]
             sd.daemon_id = '.'.join(d['name'].split('.')[1:])
-            sd.nodename = host
+            sd.hostname = host
             sd.container_id = d.get('container_id')
             sd.container_image_name = d.get('container_image_name')
             sd.container_image_id = d.get('container_image_id')
@@ -1568,7 +1568,7 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
             for name, d in dm.items():
                 if d.matches_service(service_name):
                     args.append((d.daemon_type, d.daemon_id,
-                                 d.nodename, action))
+                                 d.hostname, action))
         if not args:
             raise orchestrator.OrchestratorError(
                 'Unable to find %s.%s.* daemon(s)' % (service_name))
@@ -1607,7 +1607,7 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
             for name, d in dm.items():
                 if d.daemon_type == daemon_type and d.daemon_id == daemon_id:
                     args.append((d.daemon_type, d.daemon_id,
-                                 d.nodename, action))
+                                 d.hostname, action))
         if not args:
             raise orchestrator.OrchestratorError(
                 'Unable to find %s.%s daemon(s)' % (
@@ -1631,26 +1631,26 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
             for name, d in dm.items():
                 if d.matches_service(service_name):
                     args.append(
-                        ('%s.%s' % (d.daemon_type, d.daemon_id), d.nodename)
+                        ('%s.%s' % (d.daemon_type, d.daemon_id), d.hostname)
                     )
         if not args:
             raise OrchestratorError('Unable to find daemons in %s service' % (
                 service_name))
         return self._remove_daemon(args)
 
-    def get_inventory(self, node_filter=None, refresh=False):
+    def get_inventory(self, host_filter=None, refresh=False):
         """
-        Return the storage inventory of nodes matching the given filter.
+        Return the storage inventory of hosts matching the given filter.
 
-        :param node_filter: node filter
+        :param host_filter: host filter
 
         TODO:
           - add filtering by label
         """
         if refresh:
             # ugly sync path, FIXME someday perhaps?
-            if node_filter:
-                for host in node_filter.nodes:
+            if host_filter:
+                for host in host_filter.hosts:
                     self._refresh_host_devices(host)
             else:
                 for host, hi in self.inventory.items():
@@ -1658,9 +1658,9 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
 
         result = []
         for host, dls in self.cache.devices.items():
-            if node_filter and host not in node_filter.nodes:
+            if host_filter and host not in host_filter.hosts:
                 continue
-            result.append(orchestrator.InventoryNode(host,
+            result.append(orchestrator.InventoryHost(host,
                                                      inventory.Devices(dls)))
         return trivial_result(result)
 
@@ -1717,7 +1717,7 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
     def _prepare_deployment(self,
                             all_hosts,  # type: List[orchestrator.HostSpec]
                             drive_groups,  # type: List[DriveGroupSpec]
-                            inventory_list  # type: List[orchestrator.InventoryNode]
+                            inventory_list  # type: List[orchestrator.InventoryHost]
                             ):
         # type: (...) -> orchestrator.Completion
 
@@ -1725,8 +1725,8 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
             self.log.info("Processing DriveGroup {}".format(drive_group))
             # 1) use fn_filter to determine matching_hosts
             matching_hosts = drive_group.hosts([x.hostname for x in all_hosts])
-            # 2) Map the inventory to the InventoryNode object
-            # FIXME: lazy-load the inventory from a InventoryNode object;
+            # 2) Map the inventory to the InventoryHost object
+            # FIXME: lazy-load the inventory from a InventoryHost object;
             #        this would save one call to the inventory(at least externally)
 
             def _find_inv_for_host(hostname, inventory_list):
@@ -1894,7 +1894,7 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
             sd = orchestrator.DaemonDescription()
             sd.daemon_type = daemon_type
             sd.daemon_id = daemon_id
-            sd.nodename = host
+            sd.hostname = host
             sd.status = 1
             sd.status_desc = 'starting'
             self.cache.add_daemon(host, sd)
@@ -1928,7 +1928,7 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
             args = []
             for d in daemons[0:to_remove]:
                 args.append(
-                    ('%s.%s' % (d.daemon_type, d.daemon_id), d.nodename)
+                    ('%s.%s' % (d.daemon_type, d.daemon_id), d.hostname)
                 )
             return self._remove_daemon(args)
         elif len(daemons) < spec.count:
@@ -1945,7 +1945,7 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
         assert spec.count is not None
         prefix = f'{daemon_type}.{spec.name}'
         our_daemons = [d for d in daemons if d.name().startswith(prefix)]
-        hosts_with_daemons = {d.nodename for d in daemons}
+        hosts_with_daemons = {d.hostname for d in daemons}
         hosts_without_daemons = {p for p in spec.placement.hosts if p.hostname not in hosts_with_daemons}
 
         for host, _, name in hosts_without_daemons:
@@ -1957,7 +1957,7 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
             args.append((daemon_id, host))
             # add to daemon list so next name(s) will also be unique
             sd = orchestrator.DaemonDescription(
-                nodename=host,
+                hostname=host,
                 daemon_type=daemon_type,
                 daemon_id=daemon_id,
             )
@@ -2035,7 +2035,7 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
             # Improve Error message. Point to parse_host_spec examples
             raise orchestrator.OrchestratorValidationError("Mons need a host spec. (host, network, name(opt))")
 
-        spec = NodeAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='mon').load()
+        spec = HostAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='mon').load()
         return self._update_mons(spec)
 
     def _update_mons(self, spec):
@@ -2094,7 +2094,7 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
 
     def add_mgr(self, spec):
         # type: (orchestrator.ServiceSpec) -> orchestrator.Completion
-        spec = NodeAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='mgr').load()
+        spec = HostAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='mgr').load()
         return self._add_new_daemon('mgr', spec, self._create_mgr)
 
     def apply_mgr(self, spec):
@@ -2102,7 +2102,7 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
         """
         Adjust the number of cluster managers.
         """
-        spec = NodeAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='mgr').load()
+        spec = HostAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='mgr').load()
 
         daemons = self.cache.get_daemons_by_type('mgr')
         num_mgrs = len(daemons)
@@ -2128,7 +2128,7 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
             for d in daemons:
                 if d.daemon_id not in connected:
                     to_remove_damons.append(('%s.%s' % (d.daemon_type, d.daemon_id),
-                                             d.nodename))
+                                             d.hostname))
                     num_to_remove -= 1
                     if num_to_remove == 0:
                         break
@@ -2136,7 +2136,7 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
             # otherwise, remove *any* mgr
             if num_to_remove > 0:
                 for d in daemons:
-                    to_remove_damons.append(('%s.%s' % (d.daemon_type, d.daemon_id), d.nodename))
+                    to_remove_damons.append(('%s.%s' % (d.daemon_type, d.daemon_id), d.hostname))
                     num_to_remove -= 1
                     if num_to_remove == 0:
                         break
@@ -2193,7 +2193,7 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
 
     def apply_mds(self, spec):
         # type: (orchestrator.ServiceSpec) -> AsyncCompletion
-        spec = NodeAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='mds').load()
+        spec = HostAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='mds').load()
 
         return self._update_service('mds', self.add_mds, spec)
 
@@ -2240,13 +2240,13 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
         return self._create_daemon('rgw', rgw_id, host, keyring=keyring)
 
     def apply_rgw(self, spec):
-        spec = NodeAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='rgw').load()
+        spec = HostAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='rgw').load()
         return self._update_service('rgw', self.add_rgw, spec)
 
     def add_rbd_mirror(self, spec):
         if not spec.placement.hosts or len(spec.placement.hosts) < spec.count:
             raise RuntimeError("must specify at least %d hosts" % spec.count)
-        self.log.debug('nodes %s' % spec.placement.hosts)
+        self.log.debug('hosts %s' % spec.placement.hosts)
 
         return self._add_new_daemon('rbd-mirror', spec, self._create_rbd_mirror)
 
@@ -2262,7 +2262,7 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
                                    keyring=keyring)
 
     def apply_rbd_mirror(self, spec):
-        spec = NodeAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='rbd-mirror').load()
+        spec = HostAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='rbd-mirror').load()
         return self._update_service('rbd-mirror', self.add_rbd_mirror, spec)
 
     def _generate_prometheus_config(self):
@@ -2281,17 +2281,17 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
             for dd in self.cache.get_daemons_by_type('mgr'):
                 if dd.daemon_id == self.get_mgr_id():
                     continue
-                hi = self.inventory.get(dd.nodename, None)
+                hi = self.inventory.get(dd.hostname, None)
                 if hi:
-                    addr = hi.get('addr', dd.nodename)
+                    addr = hi.get('addr', dd.hostname)
                 mgr_scrape_list.append(addr.split(':')[0] + ':' + port)
 
         # scrape node exporters
         node_configs = ''
         for dd in self.cache.get_daemons_by_type('node-exporter'):
-            hi = self.inventory.get(dd.nodename, None)
+            hi = self.inventory.get(dd.hostname, None)
             if hi:
-                addr = hi.get('addr', dd.nodename)
+                addr = hi.get('addr', dd.hostname)
                 if not node_configs:
                     node_configs = """
   - job_name: 'node'
@@ -2301,7 +2301,7 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
       labels:
         instance: '{}'
 """.format([addr.split(':')[0] + ':9100'],
-           dd.nodename)
+           dd.hostname)
         j = json.dumps({
             'files': {
                 'prometheus.yml': """# generated by cephadm
@@ -2326,7 +2326,7 @@ scrape_configs:
         return j
 
     def add_prometheus(self, spec):
-        spec = NodeAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='prometheus').load()
+        spec = HostAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='prometheus').load()
         return self._add_new_daemon('prometheus', spec, self._create_prometheus)
 
     @async_map_completion
@@ -2334,7 +2334,7 @@ scrape_configs:
         return self._create_daemon('prometheus', daemon_id, host)
 
     def apply_prometheus(self, spec):
-        spec = NodeAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='prometheus').load()
+        spec = HostAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='prometheus').load()
         return self._update_service('prometheus', self.add_prometheus, spec)
 
     def add_node_exporter(self, spec):
@@ -2345,7 +2345,7 @@ scrape_configs:
         return self._add_new_daemon('node-exporter', spec, self._create_node_exporter)
 
     def apply_node_exporter(self, spec):
-        spec = NodeAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='node-exporter').load()
+        spec = HostAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='node-exporter').load()
         return self._update_service('node-exporter', self.add_node_exporter, spec)
 
     @async_map_completion
@@ -2520,11 +2520,11 @@ class SimpleScheduler(BaseScheduler):
         return host_pool[:count]
 
 
-class NodeAssignment(object):
+class HostAssignment(object):
     """
-    A class to detect if nodes are being passed imperative or declarative
-    If the spec is populated via the `nodes/hosts` field it will not load
-    any nodes into the list.
+    A class to detect if hosts are being passed imperatively or declaratively.
+    If the spec is populated via the `hosts` field it will not load
+    any hosts into the list.
     If the spec isn't populated, i.e. when only num or label is present (declarative)
     it will use the provided `get_host_func` to load it from the inventory.
 
@@ -2546,54 +2546,54 @@ class NodeAssignment(object):
     def load(self):
         # type: () -> orchestrator.ServiceSpec
         """
-        Load nodes into the spec.placement.nodes container.
+        Load hosts into the spec.placement.hosts container.
         """
-        self.load_labeled_nodes()
-        self.assign_nodes()
+        self.load_labeled_hosts()
+        self.assign_hosts()
         return self.spec
 
-    def load_labeled_nodes(self):
+    def load_labeled_hosts(self):
         # type: () -> None
         """
-        Assign nodes based on their label
+        Assign hosts based on their label
         """
-        # Querying for labeled nodes doesn't work currently.
+        # Querying for labeled hosts doesn't work currently.
         # Leaving this open for the next iteration
         # NOTE: This currently queries for all hosts without label restriction
         if self.spec.placement.label:
-            logger.info("Found labels. Assigning nodes that match the label")
+            logger.info("Found labels. Assigning hosts that match the label")
             candidates = [HostPlacementSpec(x, '', '') for x in self.get_hosts_func()]  # TODO: query for labels
-            logger.info('Assigning nodes to spec: {}'.format(candidates))
+            logger.info('Assigning hosts to spec: {}'.format(candidates))
             self.spec.placement.set_hosts(candidates)
 
-    def assign_nodes(self):
+    def assign_hosts(self):
         # type: () -> None
         """
-        Use the assigned scheduler to load nodes into the spec.placement.nodes container
+        Use the assigned scheduler to load hosts into the spec.placement.hosts container
         """
         # If no imperative or declarative host assignments, use the scheduler to pick from the
         # host pool (assuming `count` is set)
         if not self.spec.placement.label and not self.spec.placement.hosts and self.spec.placement.count:
-            logger.info("Found num spec. Looking for labeled nodes.")
+            logger.info("Found num spec. Looking for labeled hosts.")
             # TODO: actually query for labels (self.daemon_type)
             candidates = self.scheduler.place([x[0] for x in self.get_hosts_func()],
                                               count=self.spec.placement.count)
-            # Not enough nodes to deploy on
+            # Not enough hosts to deploy on
             if len(candidates) != self.spec.placement.count:
-                logger.warning("Did not find enough labeled nodes to \
-                               scale to <{}> services. Falling back to unlabeled nodes.".
+                logger.warning("Did not find enough labeled hosts to \
+                               scale to <{}> services. Falling back to unlabeled hosts.".
                                format(self.spec.placement.count))
             else:
-                logger.info('Assigning nodes to spec: {}'.format(candidates))
+                logger.info('Assigning hosts to spec: {}'.format(candidates))
                 self.spec.placement.set_hosts(candidates)
                 return None
 
             candidates = self.scheduler.place([x[0] for x in self.get_hosts_func()], count=self.spec.placement.count)
-            # Not enough nodes to deploy on
+            # Not enough hosts to deploy on
             if len(candidates) != self.spec.placement.count:
                 raise OrchestratorValidationError("Cannot place {} daemons on {} hosts.".
                                                   format(self.spec.placement.count, len(candidates)))
 
-            logger.info('Assigning nodes to spec: {}'.format(candidates))
+            logger.info('Assigning hosts to spec: {}'.format(candidates))
             self.spec.placement.set_hosts(candidates)
             return None
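
The `HostAssignment` docstring above distinguishes imperative placement (hosts named explicitly) from declarative placement (only a count or label). A minimal sketch of the two styles follows, using the `PlacementSpec` and `HostPlacementSpec` types that appear in this diff; that `PlacementSpec` stores these tuples directly is an assumption based on how the module itself builds candidates.

```python
# Minimal sketch of the two placement styles HostAssignment distinguishes.
# Assumes PlacementSpec keeps the given HostPlacementSpec tuples as-is, which
# matches how load_labeled_hosts()/assign_hosts() build their candidates.
from orchestrator import HostPlacementSpec, PlacementSpec

# imperative: hosts are named explicitly, HostAssignment leaves them untouched
explicit = PlacementSpec(hosts=[HostPlacementSpec('host0', '', ''),
                                HostPlacementSpec('host1', '', '')])

# declarative: only a count is given; assign_hosts() asks the scheduler to
# pick that many hosts from get_hosts_func()
counted = PlacementSpec(count=3)

# hosts and label remain mutually exclusive after the rename
try:
    PlacementSpec(label='mon', hosts=[HostPlacementSpec('host2', '', '')]).validate()
except Exception as exc:
    print(exc)  # "Host and label are mutually exclusive"
```
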
diff --git a/src/pybind/mgr/cephadm/tests/test_cephadm.py b/src/pybind/mgr/cephadm/tests/test_cephadm.py
index 1ab4469ff6550b2669148faf27ed5d06e1015508..42e66f518903ef9eacd5a129cd2abd761275e7e8 100644 (file)
@@ -11,7 +11,7 @@ try:
 except ImportError:
     pass
 
-from orchestrator import ServiceDescription, DaemonDescription, InventoryNode, \
+from orchestrator import ServiceDescription, DaemonDescription, InventoryHost, \
     ServiceSpec, PlacementSpec, RGWSpec, HostSpec, OrchestratorError
 from tests import mock
 from .fixtures import cephadm_module, wait
@@ -96,7 +96,7 @@ class TestCephadm(object):
     def test_device_ls(self, _save_host, _rm_host, cephadm_module):
         with self._with_host(cephadm_module, 'test'):
             c = cephadm_module.get_inventory()
-            assert wait(cephadm_module, c) == [InventoryNode('test')]
+            assert wait(cephadm_module, c) == [InventoryHost('test')]
 
     @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
         json.dumps([
diff --git a/src/pybind/mgr/dashboard/controllers/orchestrator.py b/src/pybind/mgr/dashboard/controllers/orchestrator.py
index 0dfed8c62abaac79be36e89770f83cbe56cd57ec..6e3f11d7a5c01dd618ec6352c29a15bd59ba4827 100644 (file)
@@ -109,17 +109,17 @@ class OrchestratorInventory(RESTController):
     def list(self, hostname=None):
         orch = OrchClient.instance()
         hosts = [hostname] if hostname else None
-        inventory_nodes = [node.to_json() for node in orch.inventory.list(hosts)]
+        inventory_hosts = [host.to_json() for host in orch.inventory.list(hosts)]
         device_osd_map = get_device_osd_map()
-        for inventory_node in inventory_nodes:
-            node_osds = device_osd_map.get(inventory_node['name'])
-            for device in inventory_node['devices']:
-                if node_osds:
+        for inventory_host in inventory_hosts:
+            host_osds = device_osd_map.get(inventory_host['name'])
+            for device in inventory_host['devices']:
+                if host_osds:
                     dev_name = os.path.basename(device['path'])
-                    device['osd_ids'] = sorted(node_osds.get(dev_name, []))
+                    device['osd_ids'] = sorted(host_osds.get(dev_name, []))
                 else:
                     device['osd_ids'] = []
-        return inventory_nodes
+        return inventory_hosts
 
 
 @ApiController('/orchestrator/service', Scope.HOSTS)
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/inventory/inventory-host.model.ts b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/inventory/inventory-host.model.ts
new file mode 100644 (file)
index 0000000..2240011
--- /dev/null
@@ -0,0 +1,6 @@
+import { InventoryDevice } from './inventory-devices/inventory-device.model';
+
+export class InventoryHost {
+  name: string;
+  devices: InventoryDevice[];
+}
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/inventory/inventory-node.model.ts b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/inventory/inventory-node.model.ts
deleted file mode 100644 (file)
index 41c38b6..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-import { InventoryDevice } from './inventory-devices/inventory-device.model';
-
-export class InventoryNode {
-  name: string;
-  devices: InventoryDevice[];
-}
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/services.component.spec.ts b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/services.component.spec.ts
index d74b8a41caaceea1c3df456bf64f1a623b270127..adfe93f5c25c5281984abf798189719892d528da 100644 (file)
@@ -16,25 +16,25 @@ describe('ServicesComponent', () => {
 
   const services = [
     {
-      nodename: 'host0',
+      hostname: 'host0',
       service: '',
       service_instance: 'x',
       service_type: 'mon'
     },
     {
-      nodename: 'host0',
+      hostname: 'host0',
       service: '',
       service_instance: '0',
       service_type: 'osd'
     },
     {
-      nodename: 'host1',
+      hostname: 'host1',
       service: '',
       service_instance: 'y',
       service_type: 'mon'
     },
     {
-      nodename: 'host1',
+      hostname: 'host1',
       service: '',
       service_instance: '1',
       service_type: 'osd'
@@ -42,7 +42,7 @@ describe('ServicesComponent', () => {
   ];
 
   const getServiceList = (hostname: String) => {
-    return hostname ? services.filter((service) => service.nodename === hostname) : services;
+    return hostname ? services.filter((service) => service.hostname === hostname) : services;
   };
 
   configureTestBed({
@@ -78,7 +78,7 @@ describe('ServicesComponent', () => {
     reqHostname = 'host0';
     component.getServices(new CdTableFetchDataContext(() => {}));
     expect(component.services.length).toBe(2);
-    expect(component.services[0].nodename).toBe(reqHostname);
-    expect(component.services[1].nodename).toBe(reqHostname);
+    expect(component.services[0].hostname).toBe(reqHostname);
+    expect(component.services[1].hostname).toBe(reqHostname);
   });
 });
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/services.component.ts b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/services.component.ts
index 9c26d8066c0428127c91dd287070d972a677fce6..6241f3b026ea3eb57ba521807458b3acc81c2088 100644 (file)
@@ -42,7 +42,7 @@ export class ServicesComponent implements OnChanges, OnInit {
     const columns = [
       {
         name: this.i18n('Hostname'),
-        prop: 'nodename',
+        prop: 'hostname',
         flexGrow: 2
       },
       {
@@ -132,7 +132,7 @@ export class ServicesComponent implements OnChanges, OnInit {
       (data: Service[]) => {
         const services: Service[] = [];
         data.forEach((service: Service) => {
-          service.uid = `${service.nodename}-${service.service_type}-${service.service}-${service.service_instance}`;
+          service.uid = `${service.hostname}-${service.service_type}-${service.service}-${service.service_instance}`;
           services.push(service);
         });
         this.services = services;
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/services.model.ts b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cluster/services/services.model.ts
index 61c1b0d11a2a62826b145c7ca71e08098afadd63..4c8077e397f0eed22622d2b70c34b50d5402c873 100644 (file)
@@ -1,7 +1,7 @@
 export class Service {
   uid: string;
 
-  nodename: string;
+  hostname: string;
   container_id: string;
   service: string;
   service_instance: string;
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/shared/api/orchestrator.service.ts b/src/pybind/mgr/dashboard/frontend/src/app/shared/api/orchestrator.service.ts
index 0b0cec2bf083df2b13826fc9b430489b2a29784a..f26de6d3264f7d0abe9fe0ef6c36ae1870e33500 100644 (file)
@@ -6,7 +6,7 @@ import { Observable, of as observableOf } from 'rxjs';
 import { mergeMap } from 'rxjs/operators';
 
 import { InventoryDevice } from '../../ceph/cluster/inventory/inventory-devices/inventory-device.model';
-import { InventoryNode } from '../../ceph/cluster/inventory/inventory-node.model';
+import { InventoryHost } from '../../ceph/cluster/inventory/inventory-host.model';
 import { ApiModule } from './api.module';
 
 @Injectable({
@@ -29,17 +29,17 @@ export class OrchestratorService {
     });
   }
 
-  inventoryList(hostname?: string): Observable<InventoryNode[]> {
+  inventoryList(hostname?: string): Observable<InventoryHost[]> {
     const options = hostname ? { params: new HttpParams().set('hostname', hostname) } : {};
-    return this.http.get<InventoryNode[]>(`${this.url}/inventory`, options);
+    return this.http.get<InventoryHost[]>(`${this.url}/inventory`, options);
   }
 
   inventoryDeviceList(hostname?: string): Observable<InventoryDevice[]> {
     return this.inventoryList(hostname).pipe(
-      mergeMap((nodes: InventoryNode[]) => {
-        const devices = _.flatMap(nodes, (node) => {
-          return node.devices.map((device) => {
-            device.hostname = node.name;
+      mergeMap((hosts: InventoryHost[]) => {
+        const devices = _.flatMap(hosts, (host) => {
+          return host.devices.map((device) => {
+            device.hostname = host.name;
             device.uid = device.device_id ? device.device_id : `${device.hostname}-${device.path}`;
             return device;
           });
diff --git a/src/pybind/mgr/dashboard/services/ganesha.py b/src/pybind/mgr/dashboard/services/ganesha.py
index 658c0915b67d751d3ab7144581c9273c4f95b98f..16b6fa5c409fef4f7f8ae1b71c2a13fe9f80cba5 100644 (file)
@@ -89,7 +89,7 @@ class Ganesha(object):
                 instance.service = "_default_"
             if instance.service not in result:
                 result[instance.service] = {}
-            result[instance.service][instance.nodename] = {
+            result[instance.service][instance.hostname] = {
                 'status': instance.status,
                 'desc': instance.status_desc,
             }
diff --git a/src/pybind/mgr/dashboard/services/iscsi_config.py b/src/pybind/mgr/dashboard/services/iscsi_config.py
index 600e3a9d2d9b1880d70fb58c26d881fe07b2267e..fa1ec5a21d1741d5ce45579eb307ffa80da669a3 100644 (file)
@@ -82,7 +82,7 @@ class IscsiGatewaysConfig(object):
         try:
             instances = OrchClient.instance().services.list("iscsi")
             for instance in instances:
-                config['gateways'][instance.nodename] = {
+                config['gateways'][instance.hostname] = {
                     'service_url': instance.service_url
                 }
         except (RuntimeError, OrchestratorError, ImportError):
diff --git a/src/pybind/mgr/dashboard/services/orchestrator.py b/src/pybind/mgr/dashboard/services/orchestrator.py
index 6973632eb22d7f1b00657141b693e8abb943a474..355ab9f652e573633f3e727d1bd71678ea20476a 100644 (file)
@@ -69,15 +69,15 @@ class InventoryManager(ResourceManager):
 
     @wait_api_result
     def list(self, hosts=None, refresh=False):
-        node_filter = InventoryFilter(nodes=hosts) if hosts else None
-        return self.api.get_inventory(node_filter=node_filter, refresh=refresh)
+        host_filter = InventoryFilter(hosts=hosts) if hosts else None
+        return self.api.get_inventory(host_filter=host_filter, refresh=refresh)
 
 
 class ServiceManager(ResourceManager):
 
     @wait_api_result
-    def list(self, service_type=None, service_id=None, node_name=None):
-        return self.api.list_daemons(service_type, service_id, node_name)
+    def list(self, service_type=None, service_id=None, host_name=None):
+        return self.api.list_daemons(service_type, service_id, host_name)
 
     def reload(self, service_type, service_ids):
         if not isinstance(service_ids, list):
diff --git a/src/pybind/mgr/dashboard/tests/test_orchestrator.py b/src/pybind/mgr/dashboard/tests/test_orchestrator.py
index cb90e2cdef6f15f89dcab078ce961a24de5a21f2..da1232f8653a5dbf53ea052212d2acb3b2035c97 100644 (file)
@@ -4,7 +4,7 @@ try:
 except ImportError:
     from unittest import mock
 
-from orchestrator import InventoryNode
+from orchestrator import InventoryHost
 
 from . import ControllerTestCase
 from .. import mgr
@@ -48,11 +48,11 @@ class OrchestratorControllerTest(ControllerTestCase):
     def _set_inventory(self, mock_instance, inventory):
         # pylint: disable=unused-argument
         def _list_inventory(hosts=None, refresh=False):
-            nodes = []
-            for node in inventory:
-                if hosts is None or node['name'] in hosts:
-                    nodes.append(InventoryNode.from_json(node))
-            return nodes
+            inv_hosts = []
+            for inv_host in inventory:
+                if hosts is None or inv_host['name'] in hosts:
+                    inv_hosts.append(InventoryHost.from_json(inv_host))
+            return inv_hosts
         mock_instance.inventory.list.side_effect = _list_inventory
 
     @mock.patch('dashboard.controllers.orchestrator.get_device_osd_map')
@@ -174,8 +174,8 @@ class TestOrchestrator(unittest.TestCase):
         mgr.get.assert_called_with('osd_metadata')
         # sort OSD IDs to make assertDictEqual work
         for devices in device_osd_map.values():
-            for node in devices.keys():
-                devices[node] = sorted(devices[node])
+            for host in devices.keys():
+                devices[host] = sorted(devices[host])
         self.assertDictEqual(device_osd_map, {
             'node0': {
                 'nvme0n1': [0, 1],
diff --git a/src/pybind/mgr/orchestrator/__init__.py b/src/pybind/mgr/orchestrator/__init__.py
index 61cf50e0ff6bf8a41341bd7b12abee4f4ef8b5f4..e0843299f3af051cd6bb04d6067da1aee0114494 100644 (file)
@@ -12,6 +12,6 @@ from ._interface import \
     servicespec_validate_add, servicespec_validate_hosts_have_network_spec, \
     ServiceDescription, InventoryFilter, PlacementSpec,  HostSpec, \
     DaemonDescription, \
-    InventoryNode, DeviceLightLoc, \
+    InventoryHost, DeviceLightLoc, \
     OutdatableData, OutdatablePersistentDict, \
     UpgradeStatusSpec
diff --git a/src/pybind/mgr/orchestrator/_interface.py b/src/pybind/mgr/orchestrator/_interface.py
index 4e18f567d599a6db4b6309f210d717d50656bf05..7c03d159d3f438c2164455e3a759bebd94414848 100644 (file)
@@ -879,12 +879,12 @@ class Orchestrator(object):
         """
         raise NotImplementedError()
 
-    def get_inventory(self, node_filter=None, refresh=False):
+    def get_inventory(self, host_filter=None, refresh=False):
         # type: (Optional[InventoryFilter], bool) -> Completion
         """
         Returns something that was created by `ceph-volume inventory`.
 
-        :return: list of InventoryNode
+        :return: list of InventoryHost
         """
         raise NotImplementedError()
 
@@ -1171,7 +1171,7 @@ class UpgradeStatusSpec(object):
 
 class PlacementSpec(object):
     """
-    For APIs that need to specify a node subset
+    For APIs that need to specify a host subset
     """
     def __init__(self, label=None, hosts=None, count=None):
         # type: (Optional[str], Optional[List], Optional[int]) -> None
@@ -1200,7 +1200,7 @@ class PlacementSpec(object):
     def validate(self):
         if self.hosts and self.label:
             # TODO: a less generic Exception
-            raise Exception('Node and label are mutually exclusive')
+            raise Exception('Host and label are mutually exclusive')
         if self.count is not None and self.count <= 0:
             raise Exception("num/count must be > 1")
 
@@ -1265,7 +1265,7 @@ class DaemonDescription(object):
     This is not about health or performance monitoring of daemons: it's
     about letting the orchestrator tell Ceph whether and where a
     daemon is scheduled in the cluster.  When an orchestrator tells
-    Ceph "it's running on node123", that's not a promise that the process
+    Ceph "it's running on host123", that's not a promise that the process
     is literally up this second, it's a description of where the orchestrator
     has decided the daemon should run.
     """
@@ -1273,7 +1273,7 @@ class DaemonDescription(object):
     def __init__(self,
                  daemon_type=None,
                  daemon_id=None,
-                 nodename=None,
+                 hostname=None,
                  container_id=None,
                  container_image_id=None,
                  container_image_name=None,
@@ -1281,8 +1281,8 @@ class DaemonDescription(object):
                  status=None,
                  status_desc=None,
                  last_refresh=None):
-        # Node is at the same granularity as InventoryNode
-        self.nodename = nodename
+        # Host is at the same granularity as InventoryHost
+        self.hostname = hostname
 
         # Not everyone runs in containers, but enough people do to
         # justify having the container_id (runtime id) and container_image
@@ -1335,7 +1335,7 @@ class DaemonDescription(object):
 
     def to_json(self):
         out = {
-            'nodename': self.nodename,
+            'hostname': self.hostname,
             'container_id': self.container_id,
             'container_image_id': self.container_image_id,
             'container_image_name': self.container_image_name,
@@ -1366,7 +1366,7 @@ class ServiceDescription(object):
     This is not about health or performance monitoring of services: it's
     about letting the orchestrator tell Ceph whether and where a
     service is scheduled in the cluster.  When an orchestrator tells
-    Ceph "it's running on node123", that's not a promise that the process
+    Ceph "it's running on host123", that's not a promise that the process
     is literally up this second, it's a description of where the orchestrator
     has decided the service should run.
     """
@@ -1593,7 +1593,7 @@ class InventoryFilter(object):
     When fetching inventory, use this filter to avoid unnecessarily
     scanning the whole estate.
 
-    Typical use: filter by node when presenting UI workflow for configuring
+    Typical use: filter by host when presenting UI workflow for configuring
                  a particular server.
                  filter by label when not all of estate is Ceph servers,
                  and we want to only learn about the Ceph servers.
@@ -1601,20 +1601,20 @@ class InventoryFilter(object):
                  in e.g. OSD servers.
 
     """
-    def __init__(self, labels=None, nodes=None):
+    def __init__(self, labels=None, hosts=None):
         # type: (Optional[List[str]], Optional[List[str]]) -> None
 
-        #: Optional: get info about nodes matching labels
+        #: Optional: get info about hosts matching labels
         self.labels = labels
 
-        #: Optional: get info about certain named nodes only
-        self.nodes = nodes
+        #: Optional: get info about certain named hosts only
+        self.hosts = hosts
 
 
-class InventoryNode(object):
+class InventoryHost(object):
     """
     When fetching inventory, all Devices are grouped inside an
-    InventoryNode.
+    InventoryHost.
     """
     def __init__(self, name, devices=None, labels=None, addr=None):
         # type: (str, Optional[inventory.Devices], Optional[List[str]], Optional[str]) -> None
@@ -1662,12 +1662,12 @@ class InventoryNode(object):
         return [cls(item[0], devs(item[1].data)) for item in hosts]
 
     def __repr__(self):
-        return "<InventoryNode>({name})".format(name=self.name)
+        return "<InventoryHost>({name})".format(name=self.name)
 
     @staticmethod
-    def get_host_names(nodes):
-        # type: (List[InventoryNode]) -> List[str]
-        return [node.name for node in nodes]
+    def get_host_names(hosts):
+        # type: (List[InventoryHost]) -> List[str]
+        return [host.name for host in hosts]
 
     def __eq__(self, other):
         return self.name == other.name and self.devices == other.devices
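
The rename also shows up in the serialized form. Below is a hedged round-trip sketch using the `from_json`/`to_json` methods touched in this file; the input dict mirrors the updated `test_daemon_description` at the bottom of this commit.

```python
# Minimal sketch: DaemonDescription now round-trips a 'hostname' key where it
# used to carry 'nodename'. The dict below mirrors the updated unit test.
from orchestrator import DaemonDescription

dd = DaemonDescription.from_json({
    'hostname': 'host0',   # was 'nodename' before this commit
    'daemon_type': 'osd',
    'daemon_id': '1',
})
assert dd.hostname == 'host0'
assert dd.to_json()['hostname'] == 'host0'
```
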
diff --git a/src/pybind/mgr/orchestrator/module.py b/src/pybind/mgr/orchestrator/module.py
index 4add508efd3a78e76b428ad82c89c74d69136e73..8407718626da438404e965c11354ab547d51db87 100644 (file)
@@ -23,7 +23,7 @@ from mgr_module import MgrModule, HandleCommandResult
 from ._interface import OrchestratorClientMixin, DeviceLightLoc, _cli_read_command, \
     raise_if_exception, _cli_write_command, TrivialReadCompletion, OrchestratorError, \
     NoOrchestrator, ServiceSpec, PlacementSpec, OrchestratorValidationError, NFSServiceSpec, \
-    RGWSpec, InventoryFilter, InventoryNode, HostPlacementSpec, HostSpec, CLICommandMeta
+    RGWSpec, InventoryFilter, InventoryHost, HostPlacementSpec, HostSpec, CLICommandMeta
 
 
 @six.add_metaclass(CLICommandMeta)
@@ -203,8 +203,8 @@ class OrchestratorCli(OrchestratorClientMixin, MgrModule):
         self._orchestrator_wait([completion])
         raise_if_exception(completion)
         if format == 'json':
-            hosts = [node.to_json()
-                     for node in completion.result]
+            hosts = [host.to_json()
+                     for host in completion.result]
             output = json.dumps(hosts, sort_keys=True)
         else:
             table = PrettyTable(
@@ -213,8 +213,8 @@ class OrchestratorCli(OrchestratorClientMixin, MgrModule):
             table.align = 'l'
             table.left_padding_width = 0
             table.right_padding_width = 1
-            for node in sorted(completion.result, key=lambda h: h.hostname):
-                table.add_row((node.hostname, node.addr, ' '.join(node.labels), node.status))
+            for host in sorted(completion.result, key=lambda h: h.hostname):
+                table.add_row((host.hostname, host.addr, ' '.join(host.labels), host.status))
             output = table.get_string()
         return HandleCommandResult(stdout=output)
 
@@ -245,7 +245,7 @@ class OrchestratorCli(OrchestratorClientMixin, MgrModule):
         "name=host,type=CephString,n=N,req=false "
         "name=format,type=CephChoices,strings=json|plain,req=false "
         "name=refresh,type=CephBool,req=false",
-        'List devices on a node')
+        'List devices on a host')
     def _list_devices(self, host=None, format='plain', refresh=False):
         # type: (Optional[List[str]], str, bool) -> HandleCommandResult
         """
@@ -255,9 +255,9 @@ class OrchestratorCli(OrchestratorClientMixin, MgrModule):
         date hardware inventory is fine as long as hardware ultimately appears
         in the output of this command.
         """
-        nf = InventoryFilter(nodes=host) if host else None
+        nf = InventoryFilter(hosts=host) if host else None
 
-        completion = self.get_inventory(node_filter=nf, refresh=refresh)
+        completion = self.get_inventory(host_filter=nf, refresh=refresh)
 
         self._orchestrator_wait([completion])
         raise_if_exception(completion)
@@ -276,7 +276,7 @@ class OrchestratorCli(OrchestratorClientMixin, MgrModule):
             table._align['SIZE'] = 'r'
             table.left_padding_width = 0
             table.right_padding_width = 1
-            for host_ in completion.result: # type: InventoryNode
+            for host_ in completion.result: # type: InventoryHost
                 for d in host_.devices.devices:  # type: Device
                     table.add_row(
                         (
@@ -378,7 +378,7 @@ class OrchestratorCli(OrchestratorClientMixin, MgrModule):
         def ukn(s):
             return '<unknown>' if s is None else s
         # Sort the list for display
-        daemons.sort(key=lambda s: (ukn(s.daemon_type), ukn(s.nodename), ukn(s.daemon_id)))
+        daemons.sort(key=lambda s: (ukn(s.daemon_type), ukn(s.hostname), ukn(s.daemon_id)))
 
         if len(daemons) == 0:
             return HandleCommandResult(stdout="No daemons reported")
@@ -408,7 +408,7 @@ class OrchestratorCli(OrchestratorClientMixin, MgrModule):
                     age = '-'
                 table.add_row((
                     s.name(),
-                    ukn(s.nodename),
+                    ukn(s.hostname),
                     status,
                     age,
                     ukn(s.version),
@@ -442,14 +442,14 @@ Usage:
 
         elif svc_arg:
             try:
-                node_name, block_device = svc_arg.split(":")
+                host_name, block_device = svc_arg.split(":")
                 block_devices = block_device.split(',')
             except (TypeError, KeyError, ValueError):
                 msg = "Invalid host:device spec: '{}'".format(svc_arg) + usage
                 return HandleCommandResult(-errno.EINVAL, stderr=msg)
 
             devs = DeviceSelection(paths=block_devices)
-            drive_groups = [DriveGroupSpec(node_name, data_devices=devs)]
+            drive_groups = [DriveGroupSpec(host_name, data_devices=devs)]
         else:
             return HandleCommandResult(-errno.EINVAL, stderr=usage)
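
For reference, a small sketch (not part of the commit) of the host:device spec parsing in the hunk above, with the renamed `host_name` variable. The `ceph.deployment.drive_group` import path is an assumption here, since the import statement is not shown in this hunk; the `DriveGroupSpec`/`DeviceSelection` usage itself is taken from the diff.

```python
# Minimal sketch of the `ceph orch` host:device spec parsing shown above.
# Import path assumed; only the DriveGroupSpec/DeviceSelection calls are
# taken from the diff itself.
from ceph.deployment.drive_group import DeviceSelection, DriveGroupSpec

svc_arg = 'host0:/dev/sdb,/dev/sdc'
host_name, block_device = svc_arg.split(':')
devs = DeviceSelection(paths=block_device.split(','))
drive_groups = [DriveGroupSpec(host_name, data_devices=devs)]
```
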
 
diff --git a/src/pybind/mgr/rook/module.py b/src/pybind/mgr/rook/module.py
index e6b2034f2cad787b6da201b010ee75dab30e5128..9b603d7b62dadcd7c735bd0ae548eae3109a373e 100644 (file)
@@ -211,22 +211,22 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
         self.all_progress_references.clear()
 
     @deferred_read
-    def get_inventory(self, node_filter=None, refresh=False):
-        node_list = None
-        if node_filter and node_filter.nodes:
-            # Explicit node list
-            node_list = node_filter.nodes
-        elif node_filter and node_filter.labels:
-            # TODO: query k8s API to resolve to node list, and pass
+    def get_inventory(self, host_filter=None, refresh=False):
+        host_list = None
+        if host_filter and host_filter.hosts:
+            # Explicit host list
+            host_list = host_filter.hosts
+        elif host_filter and host_filter.labels:
+            # TODO: query k8s API to resolve to host list, and pass
             # it into RookCluster.get_discovered_devices
             raise NotImplementedError()
 
-        devs = self.rook_cluster.get_discovered_devices(node_list)
+        devs = self.rook_cluster.get_discovered_devices(host_list)
 
         result = []
-        for node_name, node_devs in devs.items():
+        for host_name, host_devs in devs.items():
             devs = []
-            for d in node_devs:
+            for d in host_devs:
                 dev = inventory.Device(
                     path='/dev/' + d['name'],
                     sys_api=dict(
@@ -238,7 +238,7 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
                 )
                 devs.append(dev)
 
-            result.append(orchestrator.InventoryNode(node_name, inventory.Devices(devs)))
+            result.append(orchestrator.InventoryHost(host_name, inventory.Devices(devs)))
 
         return result
 
@@ -248,14 +248,14 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
         return [orchestrator.HostSpec(n) for n in self.rook_cluster.get_node_names()]
 
     @deferred_read
-    def list_daemons(self, daemon_type=None, daemon_id=None, node_name=None, refresh=False):
+    def list_daemons(self, daemon_type=None, daemon_id=None, host_name=None, refresh=False):
 
-        pods = self.rook_cluster.describe_pods(daemon_type, daemon_id, node_name)
+        pods = self.rook_cluster.describe_pods(daemon_type, daemon_id, host_name)
 
         result = []
         for p in pods:
             sd = orchestrator.DaemonDescription()
-            sd.nodename = p['nodename']
+            sd.hostname = p['hostname']
             sd.container_id = p['name']
             sd.daemon_type = p['labels']['app'].replace('rook-ceph-', '')
             status = {
diff --git a/src/pybind/mgr/rook/rook_cluster.py b/src/pybind/mgr/rook/rook_cluster.py
index 3e31fc0cb2cfac8ef539e65c4c1f629bf0c76df0..59f66b06c652944312e15d0e04f6037b063e4a69 100644 (file)
@@ -321,7 +321,7 @@ class RookCluster(object):
             # p['metadata']['creationTimestamp']
             pods_summary.append({
                 "name": d['metadata']['name'],
-                "nodename": d['spec']['node_name'],
+                "hostname": d['spec']['node_name'],
                 "labels": d['metadata']['labels'],
                 'phase': d['status']['phase']
             })
diff --git a/src/pybind/mgr/test_orchestrator/dummy_data.json b/src/pybind/mgr/test_orchestrator/dummy_data.json
index 4d7866ea1a06ddc7d0ba2317338278b3b7ee2a96..477655a365d65ee7092826b7039c1e1a4fd62b9d 100644 (file)
   ],
   "daemons": [
     {
-      "nodename": "host0",
+      "hostname": "host0",
       "daemon_type": "osd",
       "daemon_id": "1"
     }
diff --git a/src/pybind/mgr/test_orchestrator/module.py b/src/pybind/mgr/test_orchestrator/module.py
index c68e1a4b521252083c7384fc97fcec9baa5d6ac0..087b0d332c5cf698ce479cf150c100bf848312f4 100644 (file)
@@ -115,22 +115,22 @@ class TestOrchestrator(MgrModule, orchestrator.Orchestrator):
             self._shutdown.wait(5)
 
     def _init_data(self, data=None):
-        self._inventory = [orchestrator.InventoryNode.from_json(inventory_node)
-                           for inventory_node in data.get('inventory', [])]
+        self._inventory = [orchestrator.InventoryHost.from_json(inventory_host)
+                           for inventory_host in data.get('inventory', [])]
         self._daemons = [orchestrator.DaemonDescription.from_json(service)
                           for service in data.get('daemons', [])]
 
     @deferred_read
-    def get_inventory(self, node_filter=None, refresh=False):
+    def get_inventory(self, host_filter=None, refresh=False):
         """
         There is no guarantee which devices are returned by get_inventory.
         """
-        if node_filter and node_filter.nodes is not None:
-            assert isinstance(node_filter.nodes, list)
+        if host_filter and host_filter.hosts is not None:
+            assert isinstance(host_filter.hosts, list)
 
         if self._inventory:
-            if node_filter:
-                return list(filter(lambda node: node.name in node_filter.nodes,
+            if host_filter:
+                return list(filter(lambda host: host.name in host_filter.hosts,
                                    self._inventory))
             return self._inventory
 
@@ -149,12 +149,12 @@ class TestOrchestrator(MgrModule, orchestrator.Orchestrator):
         for out in c_v_out.splitlines():
             self.log.error(out)
             devs = inventory.Devices.from_json(json.loads(out))
-            return [orchestrator.InventoryNode('localhost', devs)]
+            return [orchestrator.InventoryHost('localhost', devs)]
         self.log.error('c-v failed: ' + str(c_v_out))
         raise Exception('c-v failed')
 
     @deferred_read
-    def list_daemons(self, daemon_type=None, daemon_id=None, node_name=None, refresh=False):
+    def list_daemons(self, daemon_type=None, daemon_id=None, host_name=None, refresh=False):
         """
         There is no guarantee which daemons are returned by describe_service, except that
         it returns the mgr we're running in.
@@ -164,8 +164,8 @@ class TestOrchestrator(MgrModule, orchestrator.Orchestrator):
             assert daemon_type in daemon_types, daemon_type + " unsupported"
 
         if self._daemons:
-            if node_name:
-                return list(filter(lambda svc: svc.nodename == node_name, self._daemons))
+            if host_name:
+                return list(filter(lambda svc: svc.hostname == host_name, self._daemons))
             return self._daemons
 
         out = map(str, check_output(['ps', 'aux']).splitlines())
@@ -176,7 +176,7 @@ class TestOrchestrator(MgrModule, orchestrator.Orchestrator):
         result = []
         for p in processes:
             sd = orchestrator.DaemonDescription()
-            sd.nodename = 'localhost'
+            sd.hostname = 'localhost'
             res = re.search('ceph-[^ ]+', p)
             assert res
             sd.daemon_id = res.group()
diff --git a/src/pybind/mgr/tests/test_orchestrator.py b/src/pybind/mgr/tests/test_orchestrator.py
index 5461c0f3f433415f7b3f7c9538595472ed9450af..1fc62e04fe9be2c76f105c8d915d4340b955fb86 100644 (file)
@@ -8,7 +8,7 @@ import pytest
 from ceph.deployment import inventory
 from orchestrator import raise_if_exception, RGWSpec, Completion, ProgressReference, \
     servicespec_validate_add
-from orchestrator import InventoryNode, ServiceDescription, DaemonDescription
+from orchestrator import InventoryHost, ServiceDescription, DaemonDescription
 from orchestrator import OrchestratorValidationError
 from orchestrator import HostPlacementSpec
 
@@ -72,19 +72,19 @@ def test_inventory():
             }
         ]
     }
-    _test_resource(json_data, InventoryNode, {'abc': False})
+    _test_resource(json_data, InventoryHost, {'abc': False})
     for devices in json_data['devices']:
         _test_resource(devices, inventory.Device)
 
     json_data = [{}, {'name': 'host0', 'addr': '1.2.3.4'}, {'devices': []}]
     for data in json_data:
         with pytest.raises(OrchestratorValidationError):
-            InventoryNode.from_json(data)
+            InventoryHost.from_json(data)
 
 
 def test_daemon_description():
     json_data = {
-        'nodename': 'test',
+        'hostname': 'test',
         'daemon_type': 'mon',
         'daemon_id': 'a'
     }