.. automethod:: Orchestrator.get_inventory
.. autoclass:: InventoryFilter
-.. autoclass:: InventoryNode
-.. autoclass:: InventoryDevice
+.. py:currentmodule:: ceph.deployment.inventory
+
+.. autoclass:: Devices
+ :members:
+
+.. autoclass:: Device
:members:
+.. py:currentmodule:: orchestrator
+
+
+
.. automethod:: Orchestrator.describe_service
.. autoclass:: ServiceDescription
'name': 'test-host0',
'devices': [
{
- 'type': 'hdd',
- 'id': '/dev/sda',
- 'size': 1024**4 * 4,
- 'rotates': True
+ 'path': '/dev/sda',
}
]
},
'name': 'test-host1',
'devices': [
{
- 'type': 'hdd',
- 'id': '/dev/sda',
- 'size': 1024**4 * 4,
- 'rotates': True
+ 'path': '/dev/sdb',
}
]
}
if not data['devices']:
return
- test_devices = sorted(data['devices'], key=lambda d: d['id'])
- resp_devices = sorted(resp_data['devices'], key=lambda d: d['id'])
+ test_devices = sorted(data['devices'], key=lambda d: d['path'])
+ resp_devices = sorted(resp_data['devices'], key=lambda d: d['path'])
for test, resp in zip(test_devices, resp_devices):
self._validate_device(test, resp)
import json
-
-from orchestrator import InventoryDevice, InventoryNode
+from ceph.deployment import inventory
+from orchestrator import InventoryNode
from .ansible_runner_svc import EVENT_DATA_URL
host = event_data["host"]
devices = json.loads(event_data["res"]["stdout"])
- devs = []
- for storage_device in devices:
- dev = InventoryDevice.from_ceph_volume_inventory(storage_device)
- devs.append(dev)
-
+ devs = inventory.Devices.from_json(devices)
inventory_nodes.append(InventoryNode(host, devs))
json_resp = json.loads(host_ls_json)
for host in json_resp["data"]["hosts"]:
- inventory_nodes.append(InventoryNode(host, []))
+ inventory_nodes.append(InventoryNode(host, inventory.Devices([])))
except ValueError:
self.log.exception("Malformed json response")
pytest
mock
requests-mock
+-e ../../../python-common
self.assertEqual(nodes_list[0].name, "192.168.121.144")
# Devices
- self.assertTrue(len(nodes_list[0].devices), 4)
+ self.assertEqual(len(nodes_list[0].devices.devices), 4)
expected_device_ids = ["/dev/sdc", "/dev/sda", "/dev/sdb", "/dev/vda"]
- device_ids = [dev.id for dev in nodes_list[0].devices]
+ device_ids = [dev.path for dev in nodes_list[0].devices.devices]
self.assertEqual(expected_device_ids, device_ids)
this.columns = [
{
name: this.i18n('Device path'),
- prop: 'id',
+ prop: 'path',
flexGrow: 1
},
{
name: this.i18n('Type'),
- prop: 'type',
+ prop: 'human_readable_type',
flexGrow: 1
},
{
name: this.i18n('Size'),
- prop: 'size',
+ prop: 'sys_api.size',
flexGrow: 1,
pipe: this.dimlessBinary
},
{
name: this.i18n('Rotates'),
- prop: 'rotates',
+ prop: 'sys_api.rotational',
flexGrow: 1
},
{
},
{
name: this.i18n('Model'),
- prop: 'model',
+ prop: 'sys_api.model',
flexGrow: 1
},
{
data.forEach((node: InventoryNode) => {
node.devices.forEach((device: Device) => {
device.hostname = node.name;
- device.uid = `${node.name}-${device.id}`;
+ device.uid = `${node.name}-${device.device_id}`;
devices.push(device);
});
});
+export class SysAPI {
+ vendor: string;
+ model: string;
+ size: number;
+ rotational: string;
+ human_readable_size: string;
+}
+
export class Device {
hostname: string;
uid: string;
osd_ids: number[];
- blank: boolean;
- type: string;
- id: string;
- size: number;
- rotates: boolean;
+ path: string;
+ sys_api: SysAPI;
available: boolean;
- dev_id: string;
- extended: any;
+ rejected_reasons: string[];
+ device_id: string;
+ human_readable_type: string;
}
export class InventoryNode {
requests
Routes
six
+../../../python-common
fake_client = mock.Mock()
fake_client.available.return_value = True
fake_client.hosts.list.return_value = [
- InventoryNode('node1', []), InventoryNode('node2', [])]
+ InventoryNode('node1'), InventoryNode('node2')]
instance.return_value = fake_client
hosts = get_hosts()
except ImportError:
from unittest import mock
+from ceph.deployment.inventory import Devices
+
from orchestrator import InventoryNode, ServiceDescription
from . import ControllerTestCase
from threading import Event, Thread, Lock
+from ceph.deployment import inventory
from mgr_module import MgrModule
import orchestrator
# nodes, the cache will never be populated, and you'll always have
# the full round trip to DeepSea.
self.inventory_cache[node_name] = orchestrator.OutdatableData(node_devs)
- devs = orchestrator.InventoryDevice.from_ceph_volume_inventory_list(node_devs)
+ devs = inventory.Devices.from_json(node_devs)
result.append(orchestrator.InventoryNode(node_name, devs))
else:
self.log.error(event_data['return'])
import datetime
import copy
+from ceph.deployment import inventory
+
from mgr_module import MgrModule, PersistentStoreDict
from mgr_util import format_bytes
self.nodes = nodes # Optional: get info about certain named nodes only
-class InventoryDevice(object):
- """
- When fetching inventory, block devices are reported in this format.
-
- Note on device identifiers: the format of this is up to the orchestrator,
- but the same identifier must also work when passed into StatefulServiceSpec.
- The identifier should be something meaningful like a device WWID or
- stable device node path -- not something made up by the orchestrator.
-
- "Extended" is for reporting any special configuration that may have
- already been done out of band on the block device. For example, if
- the device has already been configured for encryption, report that
- here so that it can be indicated to the user. The set of
- extended properties may differ between orchestrators. An orchestrator
- is permitted to support no extended properties (only normal block
- devices)
- """
- def __init__(self, blank=False, type=None, id=None, size=None,
- rotates=False, available=False, dev_id=None, extended=None,
- metadata_space_free=None):
- # type: (bool, str, str, int, bool, bool, str, dict, bool) -> None
-
- self.blank = blank
-
- #: 'ssd', 'hdd', 'nvme'
- self.type = type
-
- #: unique within a node (or globally if you like).
- self.id = id
-
- #: byte integer.
- self.size = size
-
- #: indicates if it is a spinning disk
- self.rotates = rotates
-
- #: can be used to create a new OSD?
- self.available = available
-
- #: vendor/model
- self.dev_id = dev_id
-
- #: arbitrary JSON-serializable object
- self.extended = extended if extended is not None else extended
-
- # If this drive is not empty, but is suitable for appending
- # additional journals, wals, or bluestore dbs, then report
- # how much space is available.
- self.metadata_space_free = metadata_space_free
-
- def to_json(self):
- return dict(type=self.type, blank=self.blank, id=self.id,
- size=self.size, rotates=self.rotates,
- available=self.available, dev_id=self.dev_id,
- extended=self.extended)
-
- @classmethod
- @handle_type_error
- def from_json(cls, data):
- return cls(**data)
-
- @classmethod
- def from_ceph_volume_inventory(cls, data):
- # TODO: change InventoryDevice itself to mirror c-v inventory closely!
-
- dev = InventoryDevice()
- dev.id = data["path"]
- dev.type = 'hdd' if data["sys_api"]["rotational"] == "1" else 'ssd/nvme'
- dev.size = data["sys_api"]["size"]
- dev.rotates = data["sys_api"]["rotational"] == "1"
- dev.available = data["available"]
- dev.dev_id = "%s/%s" % (data["sys_api"]["vendor"],
- data["sys_api"]["model"])
- dev.extended = data
- return dev
-
- @classmethod
- def from_ceph_volume_inventory_list(cls, datas):
- return [cls.from_ceph_volume_inventory(d) for d in datas]
-
- def pretty_print(self, only_header=False):
- """Print a human friendly line with the information of the device
-
- :param only_header: Print only the name of the device attributes
-
- Ex::
-
- Device Path Type Size Rotates Available Model
- /dev/sdc hdd 50.00 GB True True ATA/QEMU
-
- """
- row_format = " {0:<15} {1:>10} {2:>10} {3:>10} {4:>10} {5:<15}\n"
- if only_header:
- return row_format.format("Device Path", "Type", "Size", "Rotates",
- "Available", "Model")
- else:
- return row_format.format(str(self.id), self.type if self.type is not None else "",
- format_bytes(self.size if self.size is not None else 0, 5,
- colored=False),
- str(self.rotates), str(self.available),
- self.dev_id if self.dev_id is not None else "")
class InventoryNode(object):
When fetching inventory, all Devices are groups inside of an
InventoryNode.
"""
- def __init__(self, name, devices):
- # type: (str, List[InventoryDevice]) -> None
- assert isinstance(devices, list)
+ def __init__(self, name, devices=None):
+ # type: (str, inventory.Devices) -> None
+ if devices is None:
+ devices = inventory.Devices([])
+ assert isinstance(devices, inventory.Devices)
+
self.name = name # unique within cluster. For example a hostname.
self.devices = devices
def to_json(self):
- return {'name': self.name, 'devices': [d.to_json() for d in self.devices]}
+ return {'name': self.name, 'devices': self.devices.to_json()}
@classmethod
def from_json(cls, data):
try:
_data = copy.deepcopy(data)
name = _data.pop('name')
- devices = [InventoryDevice.from_json(device)
- for device in _data.pop('devices')]
+ devices = inventory.Devices.from_json(_data.pop('devices'))
if _data:
error_msg = 'Unknown key(s) in Inventory: {}'.format(','.join(_data.keys()))
raise OrchestratorValidationError(error_msg)
except KeyError as e:
error_msg = '{} is required for {}'.format(e, cls.__name__)
raise OrchestratorValidationError(error_msg)
+ except TypeError as e:
+ raise OrchestratorValidationError('Failed to read inventory: {}'.format(e))
+
@classmethod
def from_nested_items(cls, hosts):
- devs = InventoryDevice.from_ceph_volume_inventory_list
+ devs = inventory.Devices.from_json
return [cls(item[0], devs(item[1].data)) for item in hosts]
import json
from functools import wraps
+from ceph.deployment.inventory import Device
from prettytable import PrettyTable
+from mgr_util import format_bytes
+
try:
from typing import List, Set, Optional
except ImportError:
data = [n.to_json() for n in completion.result]
return HandleCommandResult(stdout=json.dumps(data))
else:
- # Return a human readable version
- result = ""
-
- for inventory_node in completion.result:
- result += "Host {0}:\n".format(inventory_node.name)
-
- if inventory_node.devices:
- result += inventory_node.devices[0].pretty_print(only_header=True)
- else:
- result += "No storage devices found"
-
- for d in inventory_node.devices:
- result += d.pretty_print()
- result += "\n"
-
- return HandleCommandResult(stdout=result)
+ out = []
+
+ for host in completion.result: # type: orchestrator.InventoryNode
+ out.append('Host {}:'.format(host.name))
+ table = PrettyTable(
+ ['Path', 'Type', 'Size', 'Available', 'Ceph Device ID', 'Reject Reasons'],
+ border=False)
+ table._align['Path'] = 'l'
+ for d in host.devices.devices: # type: Device
+ table.add_row(
+ (
+ d.path,
+ d.human_readable_type,
+ format_bytes(d.sys_api.get('size', 0), 5, colored=False),
+ d.available,
+ d.device_id,
+ ', '.join(d.rejected_reasons)
+ )
+ )
+ out.append(table.get_string())
+ return HandleCommandResult(stdout='\n'.join(out))
@_read_cli('orchestrator service ls',
"name=host,type=CephString,req=false "
tox==2.9.1
--e ../../../python-common
\ No newline at end of file
+../../../python-common
+pytest
+mock
+requests-mock
import pytest
+from ceph.deployment import inventory
from orchestrator import ReadCompletion, raise_if_exception, RGWSpec
-from orchestrator import InventoryNode, InventoryDevice, ServiceDescription
+from orchestrator import InventoryNode, ServiceDescription
from orchestrator import OrchestratorValidationError
-def _test_resource(data, resource_class, extra):
- # create the instance with normal way
- rsc = resource_class(**data)
- if hasattr(rsc, 'pretty_print'):
- assert rsc.pretty_print()
-
+def _test_resource(data, resource_class, extra=None):
# ensure we can deserialize and serialize
rsc = resource_class.from_json(data)
rsc.to_json()
- # if there is an unexpected data provided
- data.update(extra)
- with pytest.raises(OrchestratorValidationError):
- resource_class.from_json(data)
+ if extra:
+ # if there is an unexpected data provided
+ data.update(extra)
+ with pytest.raises(OrchestratorValidationError):
+ resource_class.from_json(data)
def test_inventory():
'name': 'host0',
'devices': [
{
- 'type': 'hdd',
- 'id': '/dev/sda',
- 'size': 1024,
- 'rotates': True
+ 'sys_api': {
+ 'rotational': '1',
+ 'size': 1024,
+ },
+ 'path': '/dev/sda',
+ 'available': False,
+ 'rejected_reasons': [],
+ 'lvs': []
}
]
}
_test_resource(json_data, InventoryNode, {'abc': False})
for devices in json_data['devices']:
- _test_resource(devices, InventoryDevice, {'abc': False})
+ _test_resource(devices, inventory.Device)
json_data = [{}, {'name': 'host0'}, {'devices': []}]
for data in json_data:
minversion = 2.5
[testenv]
-deps =
- pytest
- mock
- requests-mock
+deps = -rrequirements.txt
setenv=
UNITTEST = true
py27: PYTHONPATH = {toxinidir}/../../../../build/lib/cython_modules/lib.2
import os
import uuid
+from ceph.deployment import inventory
+
try:
from typing import List, Dict
from ceph.deployment.drive_group import DriveGroupSpec
for node_name, node_devs in devs.items():
devs = []
for d in node_devs:
- dev = orchestrator.InventoryDevice()
-
- # XXX CAUTION! https://github.com/rook/rook/issues/1716
- # Passing this through for the sake of completeness but it
- # is not trustworthy!
- dev.blank = d['empty']
- dev.type = 'hdd' if d['rotational'] else 'ssd'
- dev.id = d['name']
- dev.size = d['size']
-
- if d['filesystem'] == "" and not d['rotational']:
- # Empty or partitioned SSD
- partitioned_space = sum(
- [p['size'] for p in d['Partitions']])
- dev.metadata_space_free = max(0, d[
- 'size'] - partitioned_space)
-
+ dev = inventory.Device(
+ path=d['name'],
+ sys_api=dict(
+ rotational='1' if d['rotational'] else '0',
+ size=d['size']
+ ),
+ available=d['empty'],
+ rejected_reasons=[] if d['empty'] else ['not empty'],
+ )
devs.append(dev)
- result.append(orchestrator.InventoryNode(node_name, devs))
+ result.append(orchestrator.InventoryNode(node_name, inventory.Devices(devs)))
return result
@deferred_read
def get_hosts(self):
- return [orchestrator.InventoryNode(n, []) for n in self.rook_cluster.get_node_names()]
+ return [orchestrator.InventoryNode(n, inventory.Devices([])) for n in self.rook_cluster.get_node_names()]
@deferred_read
def describe_service(self, service_type=None, service_id=None, node_name=None, refresh=False):
import tempfile
import multiprocessing.pool
+from ceph.deployment import inventory
from mgr_module import MgrModule
import orchestrator
TODO:
- InventoryNode probably needs to be able to report labels
"""
- nodes = [orchestrator.InventoryNode(host_name, []) for host_name in self.inventory_cache]
+ nodes = [orchestrator.InventoryNode(host_name, inventory.Devices([])) for host_name in self.inventory_cache]
return orchestrator.TrivialReadCompletion(nodes)
def _refresh_host_services(self, host):
else:
self.log.debug("reading cached inventory for '{}'".format(host))
- devices = orchestrator.InventoryDevice.from_ceph_volume_inventory_list(host_info.data)
+ devices = inventory.Devices.from_json(host_info.data)
return orchestrator.InventoryNode(host, devices)
results = []
import six
+from ceph.deployment import inventory
from mgr_module import CLICommand, HandleCommandResult
from mgr_module import MgrModule, PersistentStoreDict
for out in c_v_out.splitlines():
self.log.error(out)
- devs = []
- for device in json.loads(out):
- dev = orchestrator.InventoryDevice.from_ceph_volume_inventory(device)
- devs.append(dev)
+ devs = inventory.Devices.from_json(json.loads(out))
return [orchestrator.InventoryNode('localhost', devs)]
self.log.error('c-v failed: ' + str(c_v_out))
raise Exception('c-v failed')
def get_hosts(self):
if self._inventory:
return self._inventory
- return [orchestrator.InventoryNode('localhost', [])]
+ return [orchestrator.InventoryNode('localhost', inventory.Devices([]))]
@deferred_write("add_host")
def add_host(self, host):
--- /dev/null
+import json
+
+from ceph.deployment.inventory import Devices
+
+
+def test_from_json():
+ data = json.loads("""
+ [
+ {
+ "available": false,
+ "rejected_reasons": [
+ "locked"
+ ],
+ "sys_api": {
+ "scheduler_mode": "",
+ "rotational": "0",
+ "vendor": "",
+ "human_readable_size": "50.00 GB",
+ "sectors": 0,
+ "sas_device_handle": "",
+ "partitions": {},
+ "rev": "",
+ "sas_address": "",
+ "locked": 1,
+ "sectorsize": "512",
+ "removable": "0",
+ "path": "/dev/dm-0",
+ "support_discard": "",
+ "model": "",
+ "ro": "0",
+ "nr_requests": "128",
+ "size": 53687091200
+ },
+ "lvs": [],
+ "path": "/dev/dm-0"
+ },
+ {
+ "available": false,
+ "rejected_reasons": [
+ "locked"
+ ],
+ "sys_api": {
+ "scheduler_mode": "",
+ "rotational": "0",
+ "vendor": "",
+ "human_readable_size": "31.47 GB",
+ "sectors": 0,
+ "sas_device_handle": "",
+ "partitions": {},
+ "rev": "",
+ "sas_address": "",
+ "locked": 1,
+ "sectorsize": "512",
+ "removable": "0",
+ "path": "/dev/dm-1",
+ "support_discard": "",
+ "model": "",
+ "ro": "0",
+ "nr_requests": "128",
+ "size": 33789313024
+ },
+ "lvs": [],
+ "path": "/dev/dm-1"
+ },
+ {
+ "available": false,
+ "rejected_reasons": [
+ "locked"
+ ],
+ "sys_api": {
+ "scheduler_mode": "",
+ "rotational": "0",
+ "vendor": "",
+ "human_readable_size": "394.27 GB",
+ "sectors": 0,
+ "sas_device_handle": "",
+ "partitions": {},
+ "rev": "",
+ "sas_address": "",
+ "locked": 1,
+ "sectorsize": "512",
+ "removable": "0",
+ "path": "/dev/dm-2",
+ "support_discard": "",
+ "model": "",
+ "ro": "0",
+ "nr_requests": "128",
+ "size": 423347879936
+ },
+ "lvs": [],
+ "path": "/dev/dm-2"
+ },
+ {
+ "available": false,
+ "rejected_reasons": [
+ "locked"
+ ],
+ "sys_api": {
+ "scheduler_mode": "cfq",
+ "rotational": "0",
+ "vendor": "ATA",
+ "human_readable_size": "476.94 GB",
+ "sectors": 0,
+ "sas_device_handle": "",
+ "partitions": {
+ "sda2": {
+ "start": "411648",
+ "holders": [],
+ "sectorsize": 512,
+ "sectors": "2097152",
+ "size": "1024.00 MB"
+ },
+ "sda3": {
+ "start": "2508800",
+ "holders": [
+ "dm-1",
+ "dm-2",
+ "dm-0"
+ ],
+ "sectorsize": 512,
+ "sectors": "997705728",
+ "size": "475.74 GB"
+ },
+ "sda1": {
+ "start": "2048",
+ "holders": [],
+ "sectorsize": 512,
+ "sectors": "409600",
+ "size": "200.00 MB"
+ }
+ },
+ "rev": "0000",
+ "sas_address": "",
+ "locked": 1,
+ "sectorsize": "512",
+ "removable": "0",
+ "path": "/dev/sda",
+ "support_discard": "",
+ "model": "SanDisk SD8SN8U5",
+ "ro": "0",
+ "nr_requests": "128",
+ "size": 512110190592
+ },
+ "lvs": [
+ {
+ "comment": "not used by ceph",
+ "name": "swap"
+ },
+ {
+ "comment": "not used by ceph",
+ "name": "home"
+ },
+ {
+ "comment": "not used by ceph",
+ "name": "root"
+ }
+ ],
+ "path": "/dev/sda"
+ }
+]""".strip())
+ ds = Devices.from_json(data)
+ assert len(ds.devices) == 4
+ assert Devices.from_json(ds.to_json()) == ds