From 38f6a6a70890dc5ae7d9f82e89e60da478cfc772 Mon Sep 17 00:00:00 2001
From: Joshua Schmid
Date: Tue, 10 Dec 2019 11:02:19 +0100
Subject: [PATCH] mgr/cephadm: move utils in test/utils.py

Signed-off-by: Joshua Schmid
---
 src/pybind/mgr/cephadm/module.py              | 15 +++--
 src/pybind/mgr/cephadm/tests/test_cephadm.py  |  6 +-
 src/pybind/mgr/orchestrator_cli/module.py     |  3 +-
 .../ceph/deployment/drive_group.py            | 23 ++++---
 .../deployment/drive_selection/selector.py    | 12 ++--
 .../ceph/deployment/translate.py              |  2 +-
 .../ceph/tests/test_disk_selector.py          |  2 -
 .../ceph/tests/test_drive_group.py            | 61 ++++++++++++-------
 src/python-common/ceph/tests/utils.py         | 38 ++++++++++++
 9 files changed, 112 insertions(+), 50 deletions(-)
 create mode 100644 src/python-common/ceph/tests/utils.py

diff --git a/src/pybind/mgr/cephadm/module.py b/src/pybind/mgr/cephadm/module.py
index 2888ee73681b1..3a58ca0fd9d1c 100644
--- a/src/pybind/mgr/cephadm/module.py
+++ b/src/pybind/mgr/cephadm/module.py
@@ -22,6 +22,7 @@ import shutil
 import subprocess

 from ceph.deployment import inventory, translate
+from ceph.deployment.drive_group import DriveGroupSpecs
 from ceph.deployment.drive_selection import selector

 from mgr_module import MgrModule
@@ -1298,9 +1299,12 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
     def create_osds(self, drive_groups):
         return self.get_hosts().then(lambda hosts: self.call_inventory(hosts, drive_groups))

-
-    def _prepare_deployment(self, all_hosts, drive_groups, inventory_list):
-        # type: (List[orchestrator.InventoryNode], List[orchestrator.DriveGroupSpecs], List[orchestrator.InventoryNode] -> orchestrator.Completion
+    def _prepare_deployment(self,
+                            all_hosts,  # type: List[orchestrator.InventoryNode]
+                            drive_groups,  # type: List[DriveGroupSpecs]
+                            inventory_list  # type: List[orchestrator.InventoryNode]
+                            ):
+        # type: (...) -> orchestrator.Completion

         for drive_group in drive_groups:
             self.log.info("Processing DriveGroup {}".format(drive_group))
@@ -1315,13 +1319,14 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
             for _inventory in inventory_list:
                 if _inventory.name == hostname:
                     return _inventory
+            raise OrchestratorError("No inventory found for host: {}".format(hostname))

         cmds = []
         # 3) iterate over matching_host and call DriveSelection and to_ceph_volume
         for host in matching_hosts:
             inventory_for_host = _find_inv_for_host(host, inventory_list)
             drive_selection = selector.DriveSelection(drive_group, inventory_for_host.devices)
-            cmd = translate.ToCephVolume(drive_group, drive_selection).run()
+            cmd = translate.to_ceph_volume(drive_group, drive_selection).run()
             if not cmd:
                 self.log.info("No data_devices, skipping DriveGroup: {}".format(drive_group.name))
                 continue
@@ -1353,7 +1358,7 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
             split_cmd = cmd.split(' ')
             _cmd = ['--config-and-keyring', '-', '--']
             _cmd.extend(split_cmd)
-            out, code = self._run_ceph_daemon(
+            out, err, code = self._run_cephadm(
                 host, 'osd', 'ceph-volume',
                 _cmd,
                 stdin=j)

diff --git a/src/pybind/mgr/cephadm/tests/test_cephadm.py b/src/pybind/mgr/cephadm/tests/test_cephadm.py
index d9630fb767250..d45c7addd2c69 100644
--- a/src/pybind/mgr/cephadm/tests/test_cephadm.py
+++ b/src/pybind/mgr/cephadm/tests/test_cephadm.py
@@ -124,9 +124,9 @@ class TestCephadm(object):
     @mock.patch("cephadm.module.CephadmOrchestrator._get_connection")
     def test_create_osds(self, _send_command, _get_connection, cephadm_module):
         with self._with_host(cephadm_module, 'test'):
-            dg = DriveGroupSpec('test', DeviceSelection(paths=['']))
-            c = cephadm_module.create_osds(dg)
-            assert wait(cephadm_module, c) == "Created osd(s) on host 'test'"
+            dg = DriveGroupSpec('test', data_devices=DeviceSelection(paths=['']))
+            c = cephadm_module.create_osds([dg])
+            assert wait(cephadm_module, c) == ["Created osd(s) on host 'test'"]

     @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
         json.dumps([

diff --git a/src/pybind/mgr/orchestrator_cli/module.py b/src/pybind/mgr/orchestrator_cli/module.py
index 458a3537b0dbd..37249d5aa6ff6 100644
--- a/src/pybind/mgr/orchestrator_cli/module.py
+++ b/src/pybind/mgr/orchestrator_cli/module.py
@@ -347,10 +347,9 @@ Usage:
   ceph orchestrator osd create host:device1,device2,...
 """

-        # TODO: try if inbuf file is yaml of json
         if inbuf:
             try:
-                dgs = DriveGroupSpecs(json.loads(inbuf))
+                dgs = DriveGroupSpecs(yaml.load(inbuf))
                 drive_groups = dgs.drive_groups
             except ValueError as e:
                 msg = 'Failed to read JSON input: {}'.format(str(e)) + usage

diff --git a/src/python-common/ceph/deployment/drive_group.py b/src/python-common/ceph/deployment/drive_group.py
index eeebdb1f77d1d..138f7d49e3870 100644
--- a/src/python-common/ceph/deployment/drive_group.py
+++ b/src/python-common/ceph/deployment/drive_group.py
@@ -1,4 +1,5 @@
 import fnmatch
+from ceph.deployment.inventory import Device
 try:
     from typing import Optional, List, Dict, Any
 except ImportError:
@@ -31,8 +32,8 @@ class DeviceSelection(object):
         """
         ephemeral drive group device specification
         """
-        #: List of absolute paths to the devices.
-        self.paths = [] if paths is None else paths  # type: List[str]
+        #: List of Device objects for devices paths.
+        self.paths = [] if paths is None else [Device(path) for path in paths]  # type: List[Device]

         #: A wildcard string. e.g: "SDD*" or "SanDisk SD8SN8U5"
         self.model = model
@@ -107,18 +108,20 @@ class DriveGroupValidationError(Exception):


 class DriveGroupSpecs(object):
+    """ Container class to parse drivegroups """

-    def __init__(self, drive_group_json: dict):
-        self.drive_group_json: dict = drive_group_json
-        self.drive_groups: list = list()
+    def __init__(self, drive_group_json):
+        # type: (dict) -> None
+        self.drive_group_json = drive_group_json
+        self.drive_groups = list()  # type: list
         self.build_drive_groups()

-    def build_drive_groups(self) -> list:
+    def build_drive_groups(self):
         for drive_group_name, drive_group_spec in self.drive_group_json.items():
             self.drive_groups.append(DriveGroupSpec.from_json
                                      (drive_group_spec, name=drive_group_name))

-    def __repr__(self) -> str:
+    def __repr__(self):
         return ", ".join([repr(x) for x in self.drive_groups])


@@ -212,7 +215,7 @@ class DriveGroupSpec(object):

     @classmethod
     def from_json(cls, json_drive_group, name=None):
-        # type: (dict) -> DriveGroupSpec
+        # type: (dict, Optional[str]) -> DriveGroupSpec
         """
         Initialize 'Drive group' structure

@@ -235,13 +238,13 @@ class DriveGroupSpec(object):
                     json_drive_group.items()}
             if not args:
                 raise DriveGroupValidationError("Didn't find Drivegroup specs")
-            return DriveGroupSpec(**args, name=name)  # noqa, that's no syntax error
+            return DriveGroupSpec(name=name, **args)
         except (KeyError, TypeError) as e:
             raise DriveGroupValidationError(str(e))

     def hosts(self, all_hosts):
         # type: (List[str]) -> List[str]
-        return fnmatch.filter(all_hosts, self.host_pattern)
+        return fnmatch.filter(all_hosts, self.host_pattern)  # type: ignore

     def validate(self, all_hosts):
         # type: (List[str]) -> None

diff --git a/src/python-common/ceph/deployment/drive_selection/selector.py b/src/python-common/ceph/deployment/drive_selection/selector.py
index 9b719fa0e23ac..5b3252d166952 100644
--- a/src/python-common/ceph/deployment/drive_selection/selector.py
+++ b/src/python-common/ceph/deployment/drive_selection/selector.py
@@ -23,11 +23,13 @@ class DriveSelection(object):
         self.disks = disks.copy()
         self.spec = spec

-        if self.spec.data_devices.paths:
-            self._data = self.spec.data_devices.paths
-            self._db = []
-            self._wal = []
-            self._journal = []
+        if self.spec.data_devices.paths:  # type: ignore
+            # re: type: ignore there is *always* a path attribute assigned to DeviceSelection
+            # it's just None if actual drivegroups are used
+            self._data = self.spec.data_devices.paths  # type: ignore
+            self._db = []  # type: List
+            self._wal = []  # type: List
+            self._journal = []  # type: List
         else:
             self._data = self.assign_devices(self.spec.data_devices)
             self._wal = self.assign_devices(self.spec.wal_devices)

diff --git a/src/python-common/ceph/deployment/translate.py b/src/python-common/ceph/deployment/translate.py
index d28155208ad2b..b7b0473746924 100644
--- a/src/python-common/ceph/deployment/translate.py
+++ b/src/python-common/ceph/deployment/translate.py
@@ -5,7 +5,7 @@ from ceph.deployment.drive_selection.selector import DriveSelection
 logger = logging.getLogger(__name__)


-class ToCephVolume(object):
+class to_ceph_volume(object):

     def __init__(self,
                  spec,  # type: DriveGroupSpec

diff --git a/src/python-common/ceph/tests/test_disk_selector.py b/src/python-common/ceph/tests/test_disk_selector.py
index 7aa73945acf93..0e2ad2de37a6e 100644
--- a/src/python-common/ceph/tests/test_disk_selector.py
+++ b/src/python-common/ceph/tests/test_disk_selector.py
@@ -673,8 +673,6 @@ class TestFilter(object):
         assert ret.is_matchable is False


-
-
 class TestDriveSelection(object):

     testdata = [

diff --git a/src/python-common/ceph/tests/test_drive_group.py b/src/python-common/ceph/tests/test_drive_group.py
index 81212032c0cf3..11fbfbd9a5dfd 100644
--- a/src/python-common/ceph/tests/test_drive_group.py
+++ b/src/python-common/ceph/tests/test_drive_group.py
@@ -1,20 +1,25 @@
 import pytest
-import mock

 from ceph.deployment import drive_selection, translate
+from ceph.deployment.inventory import Device
 from ceph.tests.utils import _mk_inventory, _mk_device
-from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection, DriveGroupValidationError
+from ceph.deployment.drive_group import DriveGroupSpec, DriveGroupSpecs, \
+    DeviceSelection, DriveGroupValidationError


 def test_DriveGroup():
-    dg_json = {
-        'host_pattern': 'hostname',
-        'data_devices': {'paths': ['/dev/sda']}
-    }
+    dg_json = {'testing_drivegroup':
+               {'host_pattern': 'hostname',
+                'data_devices': {'paths': ['/dev/sda']}
+                }
+               }

-    dg = DriveGroupSpec.from_json(dg_json)
-    assert dg.hosts(['hostname']) == ['hostname']
-    assert dg.data_devices.paths == ['/dev/sda']
+    dgs = DriveGroupSpecs(dg_json)
+    for dg in dgs.drive_groups:
+        assert dg.hosts(['hostname']) == ['hostname']
+        assert dg.name == 'testing_drivegroup'
+        assert all([isinstance(x, Device) for x in dg.data_devices.paths])
+        assert dg.data_devices.paths[0].path == '/dev/sda'


 def test_DriveGroup_fail():
@@ -30,7 +35,8 @@ def test_drivegroup_pattern():
 def test_drive_selection():
     devs = DeviceSelection(paths=['/dev/sda'])
     spec = DriveGroupSpec('node_name', data_devices=devs)
-    assert spec.data_devices.paths == ['/dev/sda']
+    assert all([isinstance(x, Device) for x in spec.data_devices.paths])
+    assert spec.data_devices.paths[0].path == '/dev/sda'

     with pytest.raises(DriveGroupValidationError, match='exclusive'):
         DeviceSelection(paths=['/dev/sda'], rotational=False)
@@ -42,7 +48,7 @@ def test_ceph_volume_command_0():
     )
     inventory = _mk_inventory(_mk_device()*2)
     sel = drive_selection.DriveSelection(spec, inventory)
-    cmd = translate.ToCephVolume(spec, sel).run()
+    cmd = translate.to_ceph_volume(spec, sel).run()
     assert cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd'


@@ -53,8 +59,9 @@ def test_ceph_volume_command_1():
     )
     inventory = _mk_inventory(_mk_device(rotational=True)*2 + _mk_device(rotational=False)*2)
     sel = drive_selection.DriveSelection(spec, inventory)
-    cmd = translate.ToCephVolume(spec, sel).run()
-    assert cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --db-devices /dev/sdc /dev/sdd --yes --no-systemd'
+    cmd = translate.to_ceph_volume(spec, sel).run()
+    assert cmd == ('lvm batch --no-auto /dev/sda /dev/sdb '
+                   '--db-devices /dev/sdc /dev/sdd --yes --no-systemd')


 def test_ceph_volume_command_2():
@@ -68,8 +75,10 @@ def test_ceph_volume_command_2():
         _mk_device(size="10.0 GB", rotational=False)*2
     )
     sel = drive_selection.DriveSelection(spec, inventory)
-    cmd = translate.ToCephVolume(spec, sel).run()
-    assert cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --db-devices /dev/sdc /dev/sdd --wal-devices /dev/sde /dev/sdf --yes --no-systemd'
+    cmd = translate.to_ceph_volume(spec, sel).run()
+    assert cmd == ('lvm batch --no-auto /dev/sda /dev/sdb '
+                   '--db-devices /dev/sdc /dev/sdd --wal-devices /dev/sde /dev/sdf '
+                   '--yes --no-systemd')


 def test_ceph_volume_command_3():
@@ -84,8 +93,11 @@ def test_ceph_volume_command_3():
         _mk_device(size="10.0 GB", rotational=False)*2
     )
     sel = drive_selection.DriveSelection(spec, inventory)
-    cmd = translate.ToCephVolume(spec, sel).run()
-    assert cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --db-devices /dev/sdc /dev/sdd --wal-devices /dev/sde /dev/sdf --dmcrypt --yes --no-systemd'
+    cmd = translate.to_ceph_volume(spec, sel).run()
+    assert cmd == ('lvm batch --no-auto /dev/sda /dev/sdb '
+                   '--db-devices /dev/sdc /dev/sdd '
+                   '--wal-devices /dev/sde /dev/sdf --dmcrypt '
+                   '--yes --no-systemd')


 def test_ceph_volume_command_4():
@@ -103,8 +115,11 @@ def test_ceph_volume_command_4():
         _mk_device(size="10.0 GB", rotational=False)*2
     )
     sel = drive_selection.DriveSelection(spec, inventory)
-    cmd = translate.ToCephVolume(spec, sel).run()
-    assert cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --db-devices /dev/sdc /dev/sdd --wal-devices /dev/sde /dev/sdf --block-wal-size 500M --block-db-size 500M --dmcrypt --osds-per-device 3 --yes --no-systemd'
+    cmd = translate.to_ceph_volume(spec, sel).run()
+    assert cmd == ('lvm batch --no-auto /dev/sda /dev/sdb '
+                   '--db-devices /dev/sdc /dev/sdd --wal-devices /dev/sde /dev/sdf '
+                   '--block-wal-size 500M --block-db-size 500M --dmcrypt '
+                   '--osds-per-device 3 --yes --no-systemd')


 def test_ceph_volume_command_5():
@@ -114,7 +129,7 @@ def test_ceph_volume_command_5():
     )
     inventory = _mk_inventory(_mk_device(rotational=True)*2)
     sel = drive_selection.DriveSelection(spec, inventory)
-    cmd = translate.ToCephVolume(spec, sel).run()
+    cmd = translate.to_ceph_volume(spec, sel).run()
     assert cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --filestore --yes --no-systemd'


@@ -127,5 +142,7 @@ def test_ceph_volume_command_6():
     )
     inventory = _mk_inventory(_mk_device(rotational=True)*2 + _mk_device(rotational=False)*2)
     sel = drive_selection.DriveSelection(spec, inventory)
-    cmd = translate.ToCephVolume(spec, sel).run()
-    assert cmd == 'lvm batch --no-auto /dev/sdc /dev/sdd --journal-size 500M --journal-devices /dev/sda /dev/sdb --filestore --yes --no-systemd'
+    cmd = translate.to_ceph_volume(spec, sel).run()
+    assert cmd == ('lvm batch --no-auto /dev/sdc /dev/sdd '
+                   '--journal-size 500M --journal-devices /dev/sda /dev/sdb '
+                   '--filestore --yes --no-systemd')

diff --git a/src/python-common/ceph/tests/utils.py b/src/python-common/ceph/tests/utils.py
new file mode 100644
index 0000000000000..8e04ec3bdeb73
--- /dev/null
+++ b/src/python-common/ceph/tests/utils.py
@@ -0,0 +1,38 @@
+from ceph.deployment.inventory import Devices, Device
+
+
+def _mk_device(rotational=True,
+               locked=False,
+               size="394.27 GB"):
+    return [Device(
+        path='??',
+        sys_api={
+            "rotational": '1' if rotational else '0',
+            "vendor": "Vendor",
+            "human_readable_size": size,
+            "partitions": {},
+            "locked": int(locked),
+            "sectorsize": "512",
+            "removable": "0",
+            "path": "??",
+            "support_discard": "",
+            "model": "Model",
+            "ro": "0",
+            "nr_requests": "128",
+            "size": 423347879936  # ignore conversion from human_readable_size
+        },
+        available=not locked,
+        rejected_reasons=['locked'] if locked else [],
+        lvs=[],
+        device_id="Model-Vendor-foobar"
+    )]
+
+
+def _mk_inventory(devices):
+    devs = []
+    for dev_, name in zip(devices, map(chr, range(ord('a'), ord('z')))):
+        dev = Device.from_json(dev_.to_json())
+        dev.path = '/dev/sd' + name
+        dev.sys_api = dict(dev_.sys_api, path='/dev/sd' + name)
+        devs.append(dev)
+    return Devices(devices=devs)
-- 
2.39.5
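
Usage sketch (editor's addition, not part of the patch): the snippet below mirrors test_DriveGroup and the new orchestrator_cli code path. It shows the YAML form of the test's dg_json dict being parsed by DriveGroupSpecs, and that DeviceSelection.paths now carries Device objects rather than bare strings. It assumes PyYAML is installed and src/python-common is on the import path; yaml.safe_load stands in here for the yaml.load call in orchestrator_cli.

import yaml

from ceph.deployment.drive_group import DriveGroupSpecs
from ceph.deployment.inventory import Device

# YAML equivalent of the dg_json dict in test_DriveGroup; this is the
# mapping of {name: spec} that DriveGroupSpecs expects.
dg_yaml = """
testing_drivegroup:
  host_pattern: hostname
  data_devices:
    paths:
      - /dev/sda
"""

dgs = DriveGroupSpecs(yaml.safe_load(dg_yaml))
for dg in dgs.drive_groups:
    assert dg.name == 'testing_drivegroup'
    assert dg.hosts(['hostname']) == ['hostname']
    # DeviceSelection.__init__ now wraps each path string in a Device
    assert isinstance(dg.data_devices.paths[0], Device)
    assert dg.data_devices.paths[0].path == '/dev/sda'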
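
A second sketch (also an editor's addition, not part of the patch) for the renamed translate.to_ceph_volume. It mirrors test_ceph_volume_command_1, using the new helpers from ceph/tests/utils.py to fake an inventory of two rotational and two non-rotational disks; the 'node_name' host pattern is borrowed from test_drive_selection.

from ceph.deployment import drive_selection, translate
from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection
from ceph.tests.utils import _mk_inventory, _mk_device

# Two spinning disks become data devices, two SSDs become db devices.
spec = DriveGroupSpec('node_name',
                      data_devices=DeviceSelection(rotational=True),
                      db_devices=DeviceSelection(rotational=False))
inventory = _mk_inventory(_mk_device(rotational=True)*2 +
                          _mk_device(rotational=False)*2)
sel = drive_selection.DriveSelection(spec, inventory)

# to_ceph_volume (formerly ToCephVolume) renders the ceph-volume invocation:
print(translate.to_ceph_volume(spec, sel).run())
# lvm batch --no-auto /dev/sda /dev/sdb --db-devices /dev/sdc /dev/sdd --yes --no-systemd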