# make sure host has latest container image
out, err, code = self._run_cephadm(
- d.nodename, None, 'inspect-image', [],
+ d.hostname, None, 'inspect-image', [],
image=target_name, no_fsid=True, error_ok=True)
self.log.debug('out %s code %s' % (out, code))
if code or json.loads(''.join(out)).get('image_id') != target_id:
self.log.info('Upgrade: Pulling %s on %s' % (target_name,
- d.nodename))
+ d.hostname))
out, err, code = self._run_cephadm(
- d.nodename, None, 'pull', [],
+ d.hostname, None, 'pull', [],
image=target_name, no_fsid=True, error_ok=True)
if code:
self._fail_upgrade('UPGRADE_FAILED_PULL', {
'count': 1,
'detail': [
'failed to pull %s on host %s' % (target_name,
- d.nodename)],
+ d.hostname)],
})
return None
r = json.loads(''.join(out))
if r.get('image_id') != target_id:
- self.log.info('Upgrade: image %s pull on %s got new image %s (not %s), restarting' % (target_name, d.nodename, r['image_id'], target_id))
+ self.log.info('Upgrade: image %s pull on %s got new image %s (not %s), restarting' % (target_name, d.hostname, r['image_id'], target_id))
self.upgrade_state['image_id'] = r['image_id']
self._save_upgrade_state()
return None
return self._daemon_action([(
d.daemon_type,
d.daemon_id,
- d.nodename,
+ d.hostname,
'redeploy'
)])
sd.last_refresh = datetime.datetime.utcnow()
sd.daemon_type = d['name'].split('.')[0]
sd.daemon_id = '.'.join(d['name'].split('.')[1:])
- sd.nodename = host
+ sd.hostname = host
sd.container_id = d.get('container_id')
sd.container_image_name = d.get('container_image_name')
sd.container_image_id = d.get('container_image_id')
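The daemon "name" parsed here follows a type-dot-id convention where the id itself may contain dots, which is why the tail is re-joined; an illustrative split (the name is hypothetical):

name = 'mds.myfs.a'
daemon_type = name.split('.')[0]             # 'mds'
daemon_id = '.'.join(name.split('.')[1:])    # 'myfs.a'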
for name, d in dm.items():
if d.matches_service(service_name):
args.append((d.daemon_type, d.daemon_id,
- d.nodename, action))
+ d.hostname, action))
if not args:
raise orchestrator.OrchestratorError(
'Unable to find %s.* daemon(s)' % service_name)
for name, d in dm.items():
if d.daemon_type == daemon_type and d.daemon_id == daemon_id:
args.append((d.daemon_type, d.daemon_id,
- d.nodename, action))
+ d.hostname, action))
if not args:
raise orchestrator.OrchestratorError(
'Unable to find %s.%s daemon(s)' % (
for name, d in dm.items():
if d.matches_service(service_name):
args.append(
- ('%s.%s' % (d.daemon_type, d.daemon_id), d.nodename)
+ ('%s.%s' % (d.daemon_type, d.daemon_id), d.hostname)
)
if not args:
raise OrchestratorError('Unable to find daemons in %s service' % (
service_name))
return self._remove_daemon(args)
- def get_inventory(self, node_filter=None, refresh=False):
+ def get_inventory(self, host_filter=None, refresh=False):
"""
- Return the storage inventory of nodes matching the given filter.
+ Return the storage inventory of hosts matching the given filter.
- :param node_filter: node filter
+ :param host_filter: host filter
TODO:
- add filtering by label
"""
if refresh:
# ugly sync path, FIXME someday perhaps?
- if node_filter:
- for host in node_filter.nodes:
+ if host_filter:
+ for host in host_filter.hosts:
self._refresh_host_devices(host)
else:
for host, hi in self.inventory.items():
result = []
for host, dls in self.cache.devices.items():
- if node_filter and host not in node_filter.nodes:
+ if host_filter and host not in host_filter.hosts:
continue
- result.append(orchestrator.InventoryNode(host,
+ result.append(orchestrator.InventoryHost(host,
inventory.Devices(dls)))
return trivial_result(result)
def _prepare_deployment(self,
all_hosts, # type: List[orchestrator.HostSpec]
drive_groups, # type: List[DriveGroupSpec]
- inventory_list # type: List[orchestrator.InventoryNode]
+ inventory_list # type: List[orchestrator.InventoryHost]
):
# type: (...) -> orchestrator.Completion
self.log.info("Processing DriveGroup {}".format(drive_group))
# 1) use fn_filter to determine matching_hosts
matching_hosts = drive_group.hosts([x.hostname for x in all_hosts])
- # 2) Map the inventory to the InventoryNode object
- # FIXME: lazy-load the inventory from a InventoryNode object;
+ # 2) Map the inventory to the InventoryHost object
+ # FIXME: lazy-load the inventory from a InventoryHost object;
# this would save one call to the inventory(at least externally)
def _find_inv_for_host(hostname, inventory_list):
sd = orchestrator.DaemonDescription()
sd.daemon_type = daemon_type
sd.daemon_id = daemon_id
- sd.nodename = host
+ sd.hostname = host
sd.status = 1
sd.status_desc = 'starting'
self.cache.add_daemon(host, sd)
args = []
for d in daemons[0:to_remove]:
args.append(
- ('%s.%s' % (d.daemon_type, d.daemon_id), d.nodename)
+ ('%s.%s' % (d.daemon_type, d.daemon_id), d.hostname)
)
return self._remove_daemon(args)
elif len(daemons) < spec.count:
assert spec.count is not None
prefix = f'{daemon_type}.{spec.name}'
our_daemons = [d for d in daemons if d.name().startswith(prefix)]
- hosts_with_daemons = {d.nodename for d in daemons}
+ hosts_with_daemons = {d.hostname for d in daemons}
hosts_without_daemons = {p for p in spec.placement.hosts if p.hostname not in hosts_with_daemons}
for host, _, name in hosts_without_daemons:
args.append((daemon_id, host))
# add to daemon list so next name(s) will also be unique
sd = orchestrator.DaemonDescription(
- nodename=host,
+ hostname=host,
daemon_type=daemon_type,
daemon_id=daemon_id,
)
# Improve Error message. Point to parse_host_spec examples
raise orchestrator.OrchestratorValidationError("Mons need a host spec. (host, network, name(opt))")
- spec = NodeAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='mon').load()
+ spec = HostAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='mon').load()
return self._update_mons(spec)
def _update_mons(self, spec):
def add_mgr(self, spec):
# type: (orchestrator.ServiceSpec) -> orchestrator.Completion
- spec = NodeAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='mgr').load()
+ spec = HostAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='mgr').load()
return self._add_new_daemon('mgr', spec, self._create_mgr)
def apply_mgr(self, spec):
"""
Adjust the number of cluster managers.
"""
- spec = NodeAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='mgr').load()
+ spec = HostAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='mgr').load()
daemons = self.cache.get_daemons_by_type('mgr')
num_mgrs = len(daemons)
for d in daemons:
if d.daemon_id not in connected:
to_remove_damons.append(('%s.%s' % (d.daemon_type, d.daemon_id),
- d.nodename))
+ d.hostname))
num_to_remove -= 1
if num_to_remove == 0:
break
# otherwise, remove *any* mgr
if num_to_remove > 0:
for d in daemons:
- to_remove_damons.append(('%s.%s' % (d.daemon_type, d.daemon_id), d.nodename))
+ to_remove_damons.append(('%s.%s' % (d.daemon_type, d.daemon_id), d.hostname))
num_to_remove -= 1
if num_to_remove == 0:
break
def apply_mds(self, spec):
# type: (orchestrator.ServiceSpec) -> AsyncCompletion
- spec = NodeAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='mds').load()
+ spec = HostAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='mds').load()
return self._update_service('mds', self.add_mds, spec)
return self._create_daemon('rgw', rgw_id, host, keyring=keyring)
def apply_rgw(self, spec):
- spec = NodeAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='rgw').load()
+ spec = HostAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='rgw').load()
return self._update_service('rgw', self.add_rgw, spec)
def add_rbd_mirror(self, spec):
if not spec.placement.hosts or len(spec.placement.hosts) < spec.count:
raise RuntimeError("must specify at least %d hosts" % spec.count)
- self.log.debug('nodes %s' % spec.placement.hosts)
+ self.log.debug('hosts %s' % spec.placement.hosts)
return self._add_new_daemon('rbd-mirror', spec, self._create_rbd_mirror)
keyring=keyring)
def apply_rbd_mirror(self, spec):
- spec = NodeAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='rbd-mirror').load()
+ spec = HostAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='rbd-mirror').load()
return self._update_service('rbd-mirror', self.add_rbd_mirror, spec)
def _generate_prometheus_config(self):
for dd in self.cache.get_daemons_by_type('mgr'):
if dd.daemon_id == self.get_mgr_id():
continue
- hi = self.inventory.get(dd.nodename, None)
+ hi = self.inventory.get(dd.hostname, None)
if hi:
- addr = hi.get('addr', dd.nodename)
+ addr = hi.get('addr', dd.hostname)
mgr_scrape_list.append(addr.split(':')[0] + ':' + port)
# scrape node exporters
node_configs = ''
for dd in self.cache.get_daemons_by_type('node-exporter'):
- hi = self.inventory.get(dd.nodename, None)
+ hi = self.inventory.get(dd.hostname, None)
if hi:
- addr = hi.get('addr', dd.nodename)
+ addr = hi.get('addr', dd.hostname)
if not node_configs:
node_configs = """
- job_name: 'node'
labels:
instance: '{}'
""".format([addr.split(':')[0] + ':9100'],
- dd.nodename)
+ dd.hostname)
j = json.dumps({
'files': {
'prometheus.yml': """# generated by cephadm
return j
def add_prometheus(self, spec):
- spec = NodeAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='prometheus').load()
+ spec = HostAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='prometheus').load()
return self._add_new_daemon('prometheus', spec, self._create_prometheus)
@async_map_completion
return self._create_daemon('prometheus', daemon_id, host)
def apply_prometheus(self, spec):
- spec = NodeAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='prometheus').load()
+ spec = HostAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='prometheus').load()
return self._update_service('prometheus', self.add_prometheus, spec)
def add_node_exporter(self, spec):
return self._add_new_daemon('node-exporter', spec, self._create_node_exporter)
def apply_node_exporter(self, spec):
- spec = NodeAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='node-exporter').load()
+ spec = HostAssignment(spec=spec, get_hosts_func=self._get_hosts, service_type='node-exporter').load()
return self._update_service('node-exporter', self.add_node_exporter, spec)
@async_map_completion
return host_pool[:count]
-class NodeAssignment(object):
+class HostAssignment(object):
"""
- A class to detect if nodes are being passed imperative or declarative
- If the spec is populated via the `nodes/hosts` field it will not load
- any nodes into the list.
+ A class to detect whether hosts are being passed imperatively or declaratively.
+ If the spec is populated via the `hosts` field it will not load
+ any hosts into the list.
If the spec isn't populated, i.e. when only num or label is present (declarative)
it will use the provided `get_hosts_func` to load it from the inventory.
def load(self):
# type: () -> orchestrator.ServiceSpec
"""
- Load nodes into the spec.placement.nodes container.
+ Load hosts into the spec.placement.hosts container.
"""
- self.load_labeled_nodes()
- self.assign_nodes()
+ self.load_labeled_hosts()
+ self.assign_hosts()
return self.spec
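A hedged sketch of how callers drive the renamed class, mirroring the call sites earlier in this change (spec and self._get_hosts come from the surrounding module; this is not a new API):

spec = HostAssignment(spec=spec,
                      get_hosts_func=self._get_hosts,
                      service_type='mgr').load()
# spec.placement.hosts already set  -> imperative: left untouched
# only spec.placement.count set     -> declarative: the scheduler fills
#                                      spec.placement.hosts from get_hosts_func
# spec.placement.label set          -> load_labeled_hosts() assigns candidates
#                                      (the actual label query is still a TODO)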
- def load_labeled_nodes(self):
+ def load_labeled_hosts(self):
# type: () -> None
"""
- Assign nodes based on their label
+ Assign hosts based on their label
"""
- # Querying for labeled nodes doesn't work currently.
+ # Querying for labeled hosts doesn't work currently.
# Leaving this open for the next iteration
# NOTE: This currently queries for all hosts without label restriction
if self.spec.placement.label:
- logger.info("Found labels. Assigning nodes that match the label")
+ logger.info("Found labels. Assigning hosts that match the label")
candidates = [HostPlacementSpec(x, '', '') for x in self.get_hosts_func()] # TODO: query for labels
- logger.info('Assigning nodes to spec: {}'.format(candidates))
+ logger.info('Assigning hosts to spec: {}'.format(candidates))
self.spec.placement.set_hosts(candidates)
- def assign_nodes(self):
+ def assign_hosts(self):
# type: () -> None
"""
- Use the assigned scheduler to load nodes into the spec.placement.nodes container
+ Use the assigned scheduler to load hosts into the spec.placement.hosts container
"""
# If no imperative or declarative host assignments, use the scheduler to pick from the
# host pool (assuming `count` is set)
if not self.spec.placement.label and not self.spec.placement.hosts and self.spec.placement.count:
- logger.info("Found num spec. Looking for labeled nodes.")
+ logger.info("Found num spec. Looking for labeled hosts.")
# TODO: actually query for labels (self.daemon_type)
candidates = self.scheduler.place([x[0] for x in self.get_hosts_func()],
count=self.spec.placement.count)
- # Not enough nodes to deploy on
+ # Not enough hosts to deploy on
if len(candidates) != self.spec.placement.count:
- logger.warning("Did not find enough labeled nodes to \
- scale to <{}> services. Falling back to unlabeled nodes.".
+ logger.warning("Did not find enough labeled hosts to \
+ scale to <{}> services. Falling back to unlabeled hosts.".
format(self.spec.placement.count))
else:
- logger.info('Assigning nodes to spec: {}'.format(candidates))
+ logger.info('Assigning hosts to spec: {}'.format(candidates))
self.spec.placement.set_hosts(candidates)
return None
candidates = self.scheduler.place([x[0] for x in self.get_hosts_func()], count=self.spec.placement.count)
- # Not enough nodes to deploy on
+ # Not enough hosts to deploy on
if len(candidates) != self.spec.placement.count:
raise OrchestratorValidationError("Cannot place {} daemons on {} hosts.".
format(self.spec.placement.count, len(candidates)))
- logger.info('Assigning nodes to spec: {}'.format(candidates))
+ logger.info('Assigning hosts to spec: {}'.format(candidates))
self.spec.placement.set_hosts(candidates)
return None
except ImportError:
pass
-from orchestrator import ServiceDescription, DaemonDescription, InventoryNode, \
+from orchestrator import ServiceDescription, DaemonDescription, InventoryHost, \
ServiceSpec, PlacementSpec, RGWSpec, HostSpec, OrchestratorError
from tests import mock
from .fixtures import cephadm_module, wait
def test_device_ls(self, _save_host, _rm_host, cephadm_module):
with self._with_host(cephadm_module, 'test'):
c = cephadm_module.get_inventory()
- assert wait(cephadm_module, c) == [InventoryNode('test')]
+ assert wait(cephadm_module, c) == [InventoryHost('test')]
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
json.dumps([
def list(self, hostname=None):
orch = OrchClient.instance()
hosts = [hostname] if hostname else None
- inventory_nodes = [node.to_json() for node in orch.inventory.list(hosts)]
+ inventory_hosts = [host.to_json() for host in orch.inventory.list(hosts)]
device_osd_map = get_device_osd_map()
- for inventory_node in inventory_nodes:
- node_osds = device_osd_map.get(inventory_node['name'])
- for device in inventory_node['devices']:
- if node_osds:
+ for inventory_host in inventory_hosts:
+ host_osds = device_osd_map.get(inventory_host['name'])
+ for device in inventory_host['devices']:
+ if host_osds:
dev_name = os.path.basename(device['path'])
- device['osd_ids'] = sorted(node_osds.get(dev_name, []))
+ device['osd_ids'] = sorted(host_osds.get(dev_name, []))
else:
device['osd_ids'] = []
- return inventory_nodes
+ return inventory_hosts
@ApiController('/orchestrator/service', Scope.HOSTS)
--- /dev/null
+import { InventoryDevice } from './inventory-devices/inventory-device.model';
+
+export class InventoryHost {
+ name: string;
+ devices: InventoryDevice[];
+}
+++ /dev/null
-import { InventoryDevice } from './inventory-devices/inventory-device.model';
-
-export class InventoryNode {
- name: string;
- devices: InventoryDevice[];
-}
const services = [
{
- nodename: 'host0',
+ hostname: 'host0',
service: '',
service_instance: 'x',
service_type: 'mon'
},
{
- nodename: 'host0',
+ hostname: 'host0',
service: '',
service_instance: '0',
service_type: 'osd'
},
{
- nodename: 'host1',
+ hostname: 'host1',
service: '',
service_instance: 'y',
service_type: 'mon'
},
{
- nodename: 'host1',
+ hostname: 'host1',
service: '',
service_instance: '1',
service_type: 'osd'
];
const getServiceList = (hostname: String) => {
- return hostname ? services.filter((service) => service.nodename === hostname) : services;
+ return hostname ? services.filter((service) => service.hostname === hostname) : services;
};
configureTestBed({
reqHostname = 'host0';
component.getServices(new CdTableFetchDataContext(() => {}));
expect(component.services.length).toBe(2);
- expect(component.services[0].nodename).toBe(reqHostname);
- expect(component.services[1].nodename).toBe(reqHostname);
+ expect(component.services[0].hostname).toBe(reqHostname);
+ expect(component.services[1].hostname).toBe(reqHostname);
});
});
const columns = [
{
name: this.i18n('Hostname'),
- prop: 'nodename',
+ prop: 'hostname',
flexGrow: 2
},
{
(data: Service[]) => {
const services: Service[] = [];
data.forEach((service: Service) => {
- service.uid = `${service.nodename}-${service.service_type}-${service.service}-${service.service_instance}`;
+ service.uid = `${service.hostname}-${service.service_type}-${service.service}-${service.service_instance}`;
services.push(service);
});
this.services = services;
export class Service {
uid: string;
- nodename: string;
+ hostname: string;
container_id: string;
service: string;
service_instance: string;
import { mergeMap } from 'rxjs/operators';
import { InventoryDevice } from '../../ceph/cluster/inventory/inventory-devices/inventory-device.model';
-import { InventoryNode } from '../../ceph/cluster/inventory/inventory-node.model';
+import { InventoryHost } from '../../ceph/cluster/inventory/inventory-host.model';
import { ApiModule } from './api.module';
@Injectable({
});
}
- inventoryList(hostname?: string): Observable<InventoryNode[]> {
+ inventoryList(hostname?: string): Observable<InventoryHost[]> {
const options = hostname ? { params: new HttpParams().set('hostname', hostname) } : {};
- return this.http.get<InventoryNode[]>(`${this.url}/inventory`, options);
+ return this.http.get<InventoryHost[]>(`${this.url}/inventory`, options);
}
inventoryDeviceList(hostname?: string): Observable<InventoryDevice[]> {
return this.inventoryList(hostname).pipe(
- mergeMap((nodes: InventoryNode[]) => {
- const devices = _.flatMap(nodes, (node) => {
- return node.devices.map((device) => {
- device.hostname = node.name;
+ mergeMap((hosts: InventoryHost[]) => {
+ const devices = _.flatMap(hosts, (host) => {
+ return host.devices.map((device) => {
+ device.hostname = host.name;
device.uid = device.device_id ? device.device_id : `${device.hostname}-${device.path}`;
return device;
});
instance.service = "_default_"
if instance.service not in result:
result[instance.service] = {}
- result[instance.service][instance.nodename] = {
+ result[instance.service][instance.hostname] = {
'status': instance.status,
'desc': instance.status_desc,
}
try:
instances = OrchClient.instance().services.list("iscsi")
for instance in instances:
- config['gateways'][instance.nodename] = {
+ config['gateways'][instance.hostname] = {
'service_url': instance.service_url
}
except (RuntimeError, OrchestratorError, ImportError):
@wait_api_result
def list(self, hosts=None, refresh=False):
- node_filter = InventoryFilter(nodes=hosts) if hosts else None
- return self.api.get_inventory(node_filter=node_filter, refresh=refresh)
+ host_filter = InventoryFilter(hosts=hosts) if hosts else None
+ return self.api.get_inventory(host_filter=host_filter, refresh=refresh)
class ServiceManager(ResourceManager):
@wait_api_result
- def list(self, service_type=None, service_id=None, node_name=None):
- return self.api.list_daemons(service_type, service_id, node_name)
+ def list(self, service_type=None, service_id=None, host_name=None):
+ return self.api.list_daemons(service_type, service_id, host_name)
def reload(self, service_type, service_ids):
if not isinstance(service_ids, list):
except ImportError:
from unittest import mock
-from orchestrator import InventoryNode
+from orchestrator import InventoryHost
from . import ControllerTestCase
from .. import mgr
def _set_inventory(self, mock_instance, inventory):
# pylint: disable=unused-argument
def _list_inventory(hosts=None, refresh=False):
- nodes = []
- for node in inventory:
- if hosts is None or node['name'] in hosts:
- nodes.append(InventoryNode.from_json(node))
- return nodes
+ inv_hosts = []
+ for inv_host in inventory:
+ if hosts is None or inv_host['name'] in hosts:
+ inv_hosts.append(InventoryHost.from_json(inv_host))
+ return inv_hosts
mock_instance.inventory.list.side_effect = _list_inventory
@mock.patch('dashboard.controllers.orchestrator.get_device_osd_map')
mgr.get.assert_called_with('osd_metadata')
# sort OSD IDs to make assertDictEqual work
for devices in device_osd_map.values():
- for node in devices.keys():
- devices[node] = sorted(devices[node])
+ for host in devices.keys():
+ devices[host] = sorted(devices[host])
self.assertDictEqual(device_osd_map, {
'node0': {
'nvme0n1': [0, 1],
servicespec_validate_add, servicespec_validate_hosts_have_network_spec, \
ServiceDescription, InventoryFilter, PlacementSpec, HostSpec, \
DaemonDescription, \
- InventoryNode, DeviceLightLoc, \
+ InventoryHost, DeviceLightLoc, \
OutdatableData, OutdatablePersistentDict, \
UpgradeStatusSpec
"""
raise NotImplementedError()
- def get_inventory(self, node_filter=None, refresh=False):
+ def get_inventory(self, host_filter=None, refresh=False):
# type: (Optional[InventoryFilter], bool) -> Completion
"""
Returns something that was created by `ceph-volume inventory`.
- :return: list of InventoryNode
+ :return: list of InventoryHost
"""
raise NotImplementedError()
class PlacementSpec(object):
"""
- For APIs that need to specify a node subset
+ For APIs that need to specify a host subset
"""
def __init__(self, label=None, hosts=None, count=None):
# type: (Optional[str], Optional[List], Optional[int]) -> None
def validate(self):
if self.hosts and self.label:
# TODO: a less generic Exception
- raise Exception('Node and label are mutually exclusive')
+ raise Exception('Host and label are mutually exclusive')
if self.count is not None and self.count <= 0:
raise Exception("num/count must be > 1")
This is not about health or performance monitoring of daemons: it's
about letting the orchestrator tell Ceph whether and where a
daemon is scheduled in the cluster. When an orchestrator tells
- Ceph "it's running on node123", that's not a promise that the process
+ Ceph "it's running on host123", that's not a promise that the process
is literally up this second, it's a description of where the orchestrator
has decided the daemon should run.
"""
def __init__(self,
daemon_type=None,
daemon_id=None,
- nodename=None,
+ hostname=None,
container_id=None,
container_image_id=None,
container_image_name=None,
status=None,
status_desc=None,
last_refresh=None):
- # Node is at the same granularity as InventoryNode
- self.nodename = nodename
+ # Host is at the same granularity as InventoryHost
+ self.hostname = hostname
# Not everyone runs in containers, but enough people do to
# justify having the container_id (runtime id) and container_image
def to_json(self):
out = {
- 'nodename': self.nodename,
+ 'hostname': self.hostname,
'container_id': self.container_id,
'container_image_id': self.container_image_id,
'container_image_name': self.container_image_name,
This is not about health or performance monitoring of services: it's
about letting the orchestrator tell Ceph whether and where a
service is scheduled in the cluster. When an orchestrator tells
- Ceph "it's running on node123", that's not a promise that the process
+ Ceph "it's running on host123", that's not a promise that the process
is literally up this second, it's a description of where the orchestrator
has decided the service should run.
"""
When fetching inventory, use this filter to avoid unnecessarily
scanning the whole estate.
- Typical use: filter by node when presenting UI workflow for configuring
+ Typical use: filter by host when presenting UI workflow for configuring
a particular server.
filter by label when not all of estate is Ceph servers,
and we want to only learn about the Ceph servers.
in e.g. OSD servers.
"""
- def __init__(self, labels=None, nodes=None):
+ def __init__(self, labels=None, hosts=None):
# type: (Optional[List[str]], Optional[List[str]]) -> None
- #: Optional: get info about nodes matching labels
+ #: Optional: get info about hosts matching labels
self.labels = labels
- #: Optional: get info about certain named nodes only
- self.nodes = nodes
+ #: Optional: get info about certain named hosts only
+ self.hosts = hosts
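A minimal sketch of constructing the renamed filter (host names and label are illustrative; the object is then passed to get_inventory(host_filter=...) as elsewhere in this change):

from orchestrator import InventoryFilter

by_name = InventoryFilter(hosts=['host0', 'host1'])   # only these named hosts
by_label = InventoryFilter(labels=['osd'])            # hosts carrying a label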
-class InventoryNode(object):
+class InventoryHost(object):
"""
When fetching inventory, all Devices are grouped inside an
- InventoryNode.
+ InventoryHost.
"""
def __init__(self, name, devices=None, labels=None, addr=None):
# type: (str, Optional[inventory.Devices], Optional[List[str]], Optional[str]) -> None
return [cls(item[0], devs(item[1].data)) for item in hosts]
def __repr__(self):
- return "<InventoryNode>({name})".format(name=self.name)
+ return "<InventoryHost>({name})".format(name=self.name)
@staticmethod
- def get_host_names(nodes):
- # type: (List[InventoryNode]) -> List[str]
- return [node.name for node in nodes]
+ def get_host_names(hosts):
+ # type: (List[InventoryHost]) -> List[str]
+ return [host.name for host in hosts]
def __eq__(self, other):
return self.name == other.name and self.devices == other.devices
from ._interface import OrchestratorClientMixin, DeviceLightLoc, _cli_read_command, \
raise_if_exception, _cli_write_command, TrivialReadCompletion, OrchestratorError, \
NoOrchestrator, ServiceSpec, PlacementSpec, OrchestratorValidationError, NFSServiceSpec, \
- RGWSpec, InventoryFilter, InventoryNode, HostPlacementSpec, HostSpec, CLICommandMeta
+ RGWSpec, InventoryFilter, InventoryHost, HostPlacementSpec, HostSpec, CLICommandMeta
@six.add_metaclass(CLICommandMeta)
self._orchestrator_wait([completion])
raise_if_exception(completion)
if format == 'json':
- hosts = [node.to_json()
- for node in completion.result]
+ hosts = [host.to_json()
+ for host in completion.result]
output = json.dumps(hosts, sort_keys=True)
else:
table = PrettyTable(
table.align = 'l'
table.left_padding_width = 0
table.right_padding_width = 1
- for node in sorted(completion.result, key=lambda h: h.hostname):
- table.add_row((node.hostname, node.addr, ' '.join(node.labels), node.status))
+ for host in sorted(completion.result, key=lambda h: h.hostname):
+ table.add_row((host.hostname, host.addr, ' '.join(host.labels), host.status))
output = table.get_string()
return HandleCommandResult(stdout=output)
"name=host,type=CephString,n=N,req=false "
"name=format,type=CephChoices,strings=json|plain,req=false "
"name=refresh,type=CephBool,req=false",
- 'List devices on a node')
+ 'List devices on a host')
def _list_devices(self, host=None, format='plain', refresh=False):
# type: (Optional[List[str]], str, bool) -> HandleCommandResult
"""
date hardware inventory is fine as long as hardware ultimately appears
in the output of this command.
"""
- nf = InventoryFilter(nodes=host) if host else None
+ nf = InventoryFilter(hosts=host) if host else None
- completion = self.get_inventory(node_filter=nf, refresh=refresh)
+ completion = self.get_inventory(host_filter=nf, refresh=refresh)
self._orchestrator_wait([completion])
raise_if_exception(completion)
table._align['SIZE'] = 'r'
table.left_padding_width = 0
table.right_padding_width = 1
- for host_ in completion.result: # type: InventoryNode
+ for host_ in completion.result: # type: InventoryHost
for d in host_.devices.devices: # type: Device
table.add_row(
(
def ukn(s):
return '<unknown>' if s is None else s
# Sort the list for display
- daemons.sort(key=lambda s: (ukn(s.daemon_type), ukn(s.nodename), ukn(s.daemon_id)))
+ daemons.sort(key=lambda s: (ukn(s.daemon_type), ukn(s.hostname), ukn(s.daemon_id)))
if len(daemons) == 0:
return HandleCommandResult(stdout="No daemons reported")
age = '-'
table.add_row((
s.name(),
- ukn(s.nodename),
+ ukn(s.hostname),
status,
age,
ukn(s.version),
elif svc_arg:
try:
- node_name, block_device = svc_arg.split(":")
+ host_name, block_device = svc_arg.split(":")
block_devices = block_device.split(',')
except (TypeError, KeyError, ValueError):
msg = "Invalid host:device spec: '{}'".format(svc_arg) + usage
return HandleCommandResult(-errno.EINVAL, stderr=msg)
devs = DeviceSelection(paths=block_devices)
- drive_groups = [DriveGroupSpec(node_name, data_devices=devs)]
+ drive_groups = [DriveGroupSpec(host_name, data_devices=devs)]
else:
return HandleCommandResult(-errno.EINVAL, stderr=usage)
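An illustrative walk-through of the host:device shorthand handled above; the device paths are examples and the names match the handler code:

svc_arg = 'host0:/dev/sdb,/dev/sdc'
host_name, block_device = svc_arg.split(':')
devs = DeviceSelection(paths=block_device.split(','))
drive_groups = [DriveGroupSpec(host_name, data_devices=devs)]  # one group on host0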
self.all_progress_references.clear()
@deferred_read
- def get_inventory(self, node_filter=None, refresh=False):
- node_list = None
- if node_filter and node_filter.nodes:
- # Explicit node list
- node_list = node_filter.nodes
- elif node_filter and node_filter.labels:
- # TODO: query k8s API to resolve to node list, and pass
+ def get_inventory(self, host_filter=None, refresh=False):
+ host_list = None
+ if host_filter and host_filter.hosts:
+ # Explicit host list
+ host_list = host_filter.hosts
+ elif host_filter and host_filter.labels:
+ # TODO: query k8s API to resolve to host list, and pass
# it into RookCluster.get_discovered_devices
raise NotImplementedError()
- devs = self.rook_cluster.get_discovered_devices(node_list)
+ devs = self.rook_cluster.get_discovered_devices(host_list)
result = []
- for node_name, node_devs in devs.items():
+ for host_name, host_devs in devs.items():
devs = []
- for d in node_devs:
+ for d in host_devs:
dev = inventory.Device(
path='/dev/' + d['name'],
sys_api=dict(
)
devs.append(dev)
- result.append(orchestrator.InventoryNode(node_name, inventory.Devices(devs)))
+ result.append(orchestrator.InventoryHost(host_name, inventory.Devices(devs)))
return result
return [orchestrator.HostSpec(n) for n in self.rook_cluster.get_node_names()]
@deferred_read
- def list_daemons(self, daemon_type=None, daemon_id=None, node_name=None, refresh=False):
+ def list_daemons(self, daemon_type=None, daemon_id=None, host_name=None, refresh=False):
- pods = self.rook_cluster.describe_pods(daemon_type, daemon_id, node_name)
+ pods = self.rook_cluster.describe_pods(daemon_type, daemon_id, host_name)
result = []
for p in pods:
sd = orchestrator.DaemonDescription()
- sd.nodename = p['nodename']
+ sd.hostname = p['hostname']
sd.container_id = p['name']
sd.daemon_type = p['labels']['app'].replace('rook-ceph-', '')
status = {
# p['metadata']['creationTimestamp']
pods_summary.append({
"name": d['metadata']['name'],
- "nodename": d['spec']['node_name'],
+ "hostname": d['spec']['node_name'],
"labels": d['metadata']['labels'],
'phase': d['status']['phase']
})
],
"daemons": [
{
- "nodename": "host0",
+ "hostname": "host0",
"daemon_type": "osd",
"daemon_id": "1"
}
self._shutdown.wait(5)
def _init_data(self, data=None):
- self._inventory = [orchestrator.InventoryNode.from_json(inventory_node)
- for inventory_node in data.get('inventory', [])]
+ self._inventory = [orchestrator.InventoryHost.from_json(inventory_host)
+ for inventory_host in data.get('inventory', [])]
self._daemons = [orchestrator.DaemonDescription.from_json(service)
for service in data.get('daemons', [])]
@deferred_read
- def get_inventory(self, node_filter=None, refresh=False):
+ def get_inventory(self, host_filter=None, refresh=False):
"""
There is no guarantee which devices are returned by get_inventory.
"""
- if node_filter and node_filter.nodes is not None:
- assert isinstance(node_filter.nodes, list)
+ if host_filter and host_filter.hosts is not None:
+ assert isinstance(host_filter.hosts, list)
if self._inventory:
- if node_filter:
- return list(filter(lambda node: node.name in node_filter.nodes,
+ if host_filter:
+ return list(filter(lambda host: host.name in host_filter.hosts,
self._inventory))
return self._inventory
for out in c_v_out.splitlines():
self.log.error(out)
devs = inventory.Devices.from_json(json.loads(out))
- return [orchestrator.InventoryNode('localhost', devs)]
+ return [orchestrator.InventoryHost('localhost', devs)]
self.log.error('c-v failed: ' + str(c_v_out))
raise Exception('c-v failed')
@deferred_read
- def list_daemons(self, daemon_type=None, daemon_id=None, node_name=None, refresh=False):
+ def list_daemons(self, daemon_type=None, daemon_id=None, host_name=None, refresh=False):
"""
There is no guarantee which daemons are returned by describe_service, except that
it returns the mgr we're running in.
assert daemon_type in daemon_types, daemon_type + " unsupported"
if self._daemons:
- if node_name:
- return list(filter(lambda svc: svc.nodename == node_name, self._daemons))
+ if host_name:
+ return list(filter(lambda svc: svc.hostname == host_name, self._daemons))
return self._daemons
out = map(str, check_output(['ps', 'aux']).splitlines())
result = []
for p in processes:
sd = orchestrator.DaemonDescription()
- sd.nodename = 'localhost'
+ sd.hostname = 'localhost'
res = re.search('ceph-[^ ]+', p)
assert res
sd.daemon_id = res.group()
from ceph.deployment import inventory
from orchestrator import raise_if_exception, RGWSpec, Completion, ProgressReference, \
servicespec_validate_add
-from orchestrator import InventoryNode, ServiceDescription, DaemonDescription
+from orchestrator import InventoryHost, ServiceDescription, DaemonDescription
from orchestrator import OrchestratorValidationError
from orchestrator import HostPlacementSpec
}
]
}
- _test_resource(json_data, InventoryNode, {'abc': False})
+ _test_resource(json_data, InventoryHost, {'abc': False})
for devices in json_data['devices']:
_test_resource(devices, inventory.Device)
json_data = [{}, {'name': 'host0', 'addr': '1.2.3.4'}, {'devices': []}]
for data in json_data:
with pytest.raises(OrchestratorValidationError):
- InventoryNode.from_json(data)
+ InventoryHost.from_json(data)
def test_daemon_description():
json_data = {
- 'nodename': 'test',
+ 'hostname': 'test',
'daemon_type': 'mon',
'daemon_id': 'a'
}
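The fixture above pins down the renamed key; a hedged sketch of the equivalent construction from the Python side, assuming from_json/to_json mirror each other for these fields (values are illustrative):

dd = DaemonDescription(hostname='test', daemon_type='mon', daemon_id='a')
assert dd.to_json()['hostname'] == 'test'               # key was 'nodename'
assert DaemonDescription.from_json(json_data).hostname == 'test'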