git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
mgr/cephadm: move utils in test/utils.py
author Joshua Schmid <jschmid@suse.de>
Tue, 10 Dec 2019 10:02:19 +0000 (11:02 +0100)
committer Sage Weil <sage@redhat.com>
Tue, 28 Jan 2020 16:09:38 +0000 (10:09 -0600)
Signed-off-by: Joshua Schmid <jschmid@suse.de>
src/pybind/mgr/cephadm/module.py
src/pybind/mgr/cephadm/tests/test_cephadm.py
src/pybind/mgr/orchestrator_cli/module.py
src/python-common/ceph/deployment/drive_group.py
src/python-common/ceph/deployment/drive_selection/selector.py
src/python-common/ceph/deployment/translate.py
src/python-common/ceph/tests/test_disk_selector.py
src/python-common/ceph/tests/test_drive_group.py
src/python-common/ceph/tests/utils.py [new file with mode: 0644]

diff --git a/src/pybind/mgr/cephadm/module.py b/src/pybind/mgr/cephadm/module.py
index 2888ee73681b1f682917d1f3acd00c4f7e9793f7..3a58ca0fd9d1cfe37756b6e7d1f80772f2fa3b59 100644
@@ -22,6 +22,7 @@ import shutil
 import subprocess
 
 from ceph.deployment import inventory, translate
+from ceph.deployment.drive_group import DriveGroupSpecs
 from ceph.deployment.drive_selection import selector
 
 from mgr_module import MgrModule
@@ -1298,9 +1299,12 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
     def create_osds(self, drive_groups):
         return self.get_hosts().then(lambda hosts: self.call_inventory(hosts, drive_groups))
 
-
-    def _prepare_deployment(self, all_hosts, drive_groups, inventory_list):
-        # type: (List[orchestrator.InventoryNode], List[orchestrator.DriveGroupSpecs], List[orchestrator.InventoryNode] -> orchestrator.Completion
+    def _prepare_deployment(self,
+                            all_hosts,  # type: List[orchestrator.InventoryNode]
+                            drive_groups,  # type: List[DriveGroupSpecs]
+                            inventory_list  # type: List[orchestrator.InventoryNode]
+                            ):
+        # type: (...) -> orchestrator.Completion
 
         for drive_group in drive_groups:
             self.log.info("Processing DriveGroup {}".format(drive_group))
@@ -1315,13 +1319,14 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
                 for _inventory in inventory_list:
                     if _inventory.name == hostname:
                         return _inventory
+                raise OrchestratorError("No inventory found for host: {}".format(hostname))
 
             cmds = []
             # 3) iterate over matching_host and call DriveSelection and to_ceph_volume
             for host in matching_hosts:
                 inventory_for_host = _find_inv_for_host(host, inventory_list)
                 drive_selection = selector.DriveSelection(drive_group, inventory_for_host.devices)
-                cmd = translate.ToCephVolume(drive_group, drive_selection).run()
+                cmd = translate.to_ceph_volume(drive_group, drive_selection).run()
                 if not cmd:
                     self.log.info("No data_devices, skipping DriveGroup: {}".format(drive_group.name))
                     continue
@@ -1353,7 +1358,7 @@ class CephadmOrchestrator(MgrModule, orchestrator.OrchestratorClientMixin):
         split_cmd = cmd.split(' ')
         _cmd = ['--config-and-keyring', '-', '--']
         _cmd.extend(split_cmd)
-        out, code = self._run_ceph_daemon(
+        out, err, code = self._run_cephadm(
             host, 'osd', 'ceph-volume',
             _cmd,
             stdin=j)
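
For orientation: create_osds now takes a list of drive groups and resolves to one status message per group (see the updated test below). A minimal sketch of the calling convention, assuming cephadm_module is a configured CephadmOrchestrator with host 'test' already added:

    from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection

    # Sketch only; mirrors the updated test_create_osds below.
    dg = DriveGroupSpec('test', data_devices=DeviceSelection(paths=['']))
    completion = cephadm_module.create_osds([dg])  # note: a list now
    # resolving the completion yields one message per drive group:
    #   ["Created osd(s) on host 'test'"]
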
diff --git a/src/pybind/mgr/cephadm/tests/test_cephadm.py b/src/pybind/mgr/cephadm/tests/test_cephadm.py
index d9630fb767250036d5830cf83748f06317711bd1..d45c7addd2c69d31e6093e223203865b01007a2b 100644
@@ -124,9 +124,9 @@ class TestCephadm(object):
     @mock.patch("cephadm.module.CephadmOrchestrator._get_connection")
     def test_create_osds(self, _send_command, _get_connection, cephadm_module):
         with self._with_host(cephadm_module, 'test'):
-            dg = DriveGroupSpec('test', DeviceSelection(paths=['']))
-            c = cephadm_module.create_osds(dg)
-            assert wait(cephadm_module, c) == "Created osd(s) on host 'test'"
+            dg = DriveGroupSpec('test', data_devices=DeviceSelection(paths=['']))
+            c = cephadm_module.create_osds([dg])
+            assert wait(cephadm_module, c) == ["Created osd(s) on host 'test'"]
 
     @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
         json.dumps([
diff --git a/src/pybind/mgr/orchestrator_cli/module.py b/src/pybind/mgr/orchestrator_cli/module.py
index 458a3537b0dbd3204f5da57db5b651b5f607dad9..37249d5aa6ff66d1cb9a3004db588706604ce570 100644
@@ -347,10 +347,9 @@ Usage:
   ceph orchestrator osd create host:device1,device2,...
 """
 
-        # TODO: try if inbuf file is yaml of json
         if inbuf:
             try:
-                dgs = DriveGroupSpecs(json.loads(inbuf))
+                dgs = DriveGroupSpecs(yaml.load(inbuf))
                 drive_groups = dgs.drive_groups
             except ValueError as e:
                 msg = 'Failed to read JSON input: {}'.format(str(e)) + usage
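
Since every JSON document is also valid YAML, yaml.load covers both input formats here, which is why the TODO could simply be dropped. A sketch of an inbuf the new code path accepts (the group name is made up; the structure matches test_DriveGroup below):

    import yaml
    from ceph.deployment.drive_group import DriveGroupSpecs

    inbuf = """
    example_drive_group:
      host_pattern: 'hostname'
      data_devices:
        paths: ['/dev/sda']
    """
    dgs = DriveGroupSpecs(yaml.load(inbuf))
    [dg.name for dg in dgs.drive_groups]  # -> ['example_drive_group']
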
diff --git a/src/python-common/ceph/deployment/drive_group.py b/src/python-common/ceph/deployment/drive_group.py
index eeebdb1f77d1df045a88fb0fcc699acc22dafee3..138f7d49e387004d899f07f60f2d2011b5a77951 100644
@@ -1,4 +1,5 @@
 import fnmatch
+from ceph.deployment.inventory import Device
 try:
     from typing import Optional, List, Dict, Any
 except ImportError:
@@ -31,8 +32,8 @@ class DeviceSelection(object):
         """
         ephemeral drive group device specification
         """
-        #: List of absolute paths to the devices.
-        self.paths = [] if paths is None else paths  # type: List[str]
+        #: List of Device objects for device paths.
+        self.paths = [] if paths is None else [Device(path) for path in paths]  # type: List[Device]
 
         #: A wildcard string. e.g: "SDD*" or "SanDisk SD8SN8U5"
         self.model = model
@@ -107,18 +108,20 @@ class DriveGroupValidationError(Exception):
 
 
 class DriveGroupSpecs(object):
+    """ Container class to parse drivegroups """
 
-    def __init__(self, drive_group_json: dict):
-        self.drive_group_json: dict = drive_group_json
-        self.drive_groups: list = list()
+    def __init__(self, drive_group_json):
+        # type: (dict) -> None
+        self.drive_group_json = drive_group_json
+        self.drive_groups = list()  # type: list
         self.build_drive_groups()
 
-    def build_drive_groups(self) -> list:
+    def build_drive_groups(self):
         for drive_group_name, drive_group_spec in self.drive_group_json.items():
             self.drive_groups.append(DriveGroupSpec.from_json
                                      (drive_group_spec, name=drive_group_name))
 
-    def __repr__(self) -> str:
+    def __repr__(self):
         return ", ".join([repr(x) for x in self.drive_groups])
 
 
@@ -212,7 +215,7 @@ class DriveGroupSpec(object):
 
     @classmethod
     def from_json(cls, json_drive_group, name=None):
-        # type: (dict) -> DriveGroupSpec
+        # type: (dict, Optional[str]) -> DriveGroupSpec
         """
         Initialize 'Drive group' structure
 
@@ -235,13 +238,13 @@ class DriveGroupSpec(object):
                     json_drive_group.items()}
             if not args:
                 raise DriveGroupValidationError("Didn't find Drivegroup specs")
-            return DriveGroupSpec(**args, name=name) # noqa, that's no syntax error
+            return DriveGroupSpec(name=name, **args)
         except (KeyError, TypeError) as e:
             raise DriveGroupValidationError(str(e))
 
     def hosts(self, all_hosts):
         # type: (List[str]) -> List[str]
-        return fnmatch.filter(all_hosts, self.host_pattern)
+        return fnmatch.filter(all_hosts, self.host_pattern)  # type: ignore
 
     def validate(self, all_hosts):
         # type: (List[str]) -> None
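
Net effect of the DeviceSelection change: explicit paths are now wrapped in Device objects rather than kept as bare strings. A short sketch matching the assertions in test_drive_selection below:

    from ceph.deployment.drive_group import DeviceSelection
    from ceph.deployment.inventory import Device

    sel = DeviceSelection(paths=['/dev/sda', '/dev/sdb'])
    all(isinstance(p, Device) for p in sel.paths)  # -> True
    sel.paths[0].path                              # -> '/dev/sda'
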
diff --git a/src/python-common/ceph/deployment/drive_selection/selector.py b/src/python-common/ceph/deployment/drive_selection/selector.py
index 9b719fa0e23ac59d01aec99d74573743c1b769f8..5b3252d166952bc3adc968f28b0222271b27cf59 100644
@@ -23,11 +23,13 @@ class DriveSelection(object):
         self.disks = disks.copy()
         self.spec = spec
 
-        if self.spec.data_devices.paths:
-            self._data = self.spec.data_devices.paths
-            self._db = []
-            self._wal = []
-            self._journal = []
+        if self.spec.data_devices.paths:  # type: ignore
+            # re the type: ignore: a `paths` attribute is *always* assigned on
+            # DeviceSelection; it is just None when actual drivegroup filters are used
+            self._data = self.spec.data_devices.paths  # type: ignore
+            self._db = []  # type: List
+            self._wal = []  # type: List
+            self._journal = []  # type: List
         else:
             self._data = self.assign_devices(self.spec.data_devices)
             self._wal = self.assign_devices(self.spec.wal_devices)
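
In other words, explicitly listed paths short-circuit device matching: db/wal/journal stay empty and the inventory is not consulted for data devices. A sketch under that assumption, peeking at the private fields assigned above:

    from ceph.deployment import drive_selection
    from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection

    spec = DriveGroupSpec('node_name',
                          data_devices=DeviceSelection(paths=['/dev/sda']))
    sel = drive_selection.DriveSelection(spec, [])  # empty inventory is fine here
    [d.path for d in sel._data]        # -> ['/dev/sda']
    (sel._db, sel._wal, sel._journal)  # -> ([], [], [])
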
diff --git a/src/python-common/ceph/deployment/translate.py b/src/python-common/ceph/deployment/translate.py
index d28155208ad2b5e17f581403fa8a16d74d3e0acd..b7b04737469244a319b1691785346d97ebe22db4 100644
@@ -5,7 +5,7 @@ from ceph.deployment.drive_selection.selector import DriveSelection
 logger = logging.getLogger(__name__)
 
 
-class ToCephVolume(object):
+class to_ceph_volume(object):
 
     def __init__(self,
                  spec,  # type: DriveGroupSpec
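
The rename makes the translator read like a function at its call sites while keeping the construct-then-run shape; usage is otherwise unchanged, as in the tests below:

    cmd = translate.to_ceph_volume(spec, sel).run()
    # e.g. 'lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd'
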
diff --git a/src/python-common/ceph/tests/test_disk_selector.py b/src/python-common/ceph/tests/test_disk_selector.py
index 7aa73945acf93e71075cd4414f7b62b2ed10cd03..0e2ad2de37a6e4fe67278df9608a0b9faf8002f7 100644
@@ -673,8 +673,6 @@ class TestFilter(object):
         assert ret.is_matchable is False
 
 
-
-
 class TestDriveSelection(object):
 
     testdata = [
diff --git a/src/python-common/ceph/tests/test_drive_group.py b/src/python-common/ceph/tests/test_drive_group.py
index 81212032c0cf301457f0e05624832e6f632e6d86..11fbfbd9a5dfdc847fef060596e51116a5cb1605 100644
@@ -1,20 +1,25 @@
 import pytest
-import mock
 
 from ceph.deployment import drive_selection, translate
+from ceph.deployment.inventory import Device
 from ceph.tests.utils import _mk_inventory, _mk_device
-from ceph.deployment.drive_group import DriveGroupSpec, DeviceSelection, DriveGroupValidationError
+from ceph.deployment.drive_group import DriveGroupSpec, DriveGroupSpecs, \
+                                        DeviceSelection, DriveGroupValidationError
 
 
 def test_DriveGroup():
-    dg_json = {
-        'host_pattern': 'hostname',
-        'data_devices': {'paths': ['/dev/sda']}
-    }
+    dg_json = {'testing_drivegroup':
+               {'host_pattern': 'hostname',
+                'data_devices': {'paths': ['/dev/sda']}
+                }
+               }
 
-    dg = DriveGroupSpec.from_json(dg_json)
-    assert dg.hosts(['hostname']) == ['hostname']
-    assert dg.data_devices.paths == ['/dev/sda']
+    dgs = DriveGroupSpecs(dg_json)
+    for dg in dgs.drive_groups:
+        assert dg.hosts(['hostname']) == ['hostname']
+        assert dg.name == 'testing_drivegroup'
+        assert all([isinstance(x, Device) for x in dg.data_devices.paths])
+        assert dg.data_devices.paths[0].path == '/dev/sda'
 
 
 def test_DriveGroup_fail():
@@ -30,7 +35,8 @@ def test_drivegroup_pattern():
 def test_drive_selection():
     devs = DeviceSelection(paths=['/dev/sda'])
     spec = DriveGroupSpec('node_name', data_devices=devs)
-    assert spec.data_devices.paths == ['/dev/sda']
+    assert all([isinstance(x, Device) for x in spec.data_devices.paths])
+    assert spec.data_devices.paths[0].path == '/dev/sda'
 
     with pytest.raises(DriveGroupValidationError, match='exclusive'):
         DeviceSelection(paths=['/dev/sda'], rotational=False)
@@ -42,7 +48,7 @@ def test_ceph_volume_command_0():
                           )
     inventory = _mk_inventory(_mk_device()*2)
     sel = drive_selection.DriveSelection(spec, inventory)
-    cmd = translate.ToCephVolume(spec, sel).run()
+    cmd = translate.to_ceph_volume(spec, sel).run()
     assert cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd'
 
 
@@ -53,8 +59,9 @@ def test_ceph_volume_command_1():
                           )
     inventory = _mk_inventory(_mk_device(rotational=True)*2 + _mk_device(rotational=False)*2)
     sel = drive_selection.DriveSelection(spec, inventory)
-    cmd = translate.ToCephVolume(spec, sel).run()
-    assert cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --db-devices /dev/sdc /dev/sdd --yes --no-systemd'
+    cmd = translate.to_ceph_volume(spec, sel).run()
+    assert cmd == ('lvm batch --no-auto /dev/sda /dev/sdb '
+                   '--db-devices /dev/sdc /dev/sdd --yes --no-systemd')
 
 
 def test_ceph_volume_command_2():
@@ -68,8 +75,10 @@ def test_ceph_volume_command_2():
                               _mk_device(size="10.0 GB", rotational=False)*2
                               )
     sel = drive_selection.DriveSelection(spec, inventory)
-    cmd = translate.ToCephVolume(spec, sel).run()
-    assert cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --db-devices /dev/sdc /dev/sdd --wal-devices /dev/sde /dev/sdf --yes --no-systemd'
+    cmd = translate.to_ceph_volume(spec, sel).run()
+    assert cmd == ('lvm batch --no-auto /dev/sda /dev/sdb '
+                   '--db-devices /dev/sdc /dev/sdd --wal-devices /dev/sde /dev/sdf '
+                   '--yes --no-systemd')
 
 
 def test_ceph_volume_command_3():
@@ -84,8 +93,11 @@ def test_ceph_volume_command_3():
                               _mk_device(size="10.0 GB", rotational=False)*2
                               )
     sel = drive_selection.DriveSelection(spec, inventory)
-    cmd = translate.ToCephVolume(spec, sel).run()
-    assert cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --db-devices /dev/sdc /dev/sdd --wal-devices /dev/sde /dev/sdf --dmcrypt --yes --no-systemd'
+    cmd = translate.to_ceph_volume(spec, sel).run()
+    assert cmd == ('lvm batch --no-auto /dev/sda /dev/sdb '
+                   '--db-devices /dev/sdc /dev/sdd '
+                   '--wal-devices /dev/sde /dev/sdf --dmcrypt '
+                   '--yes --no-systemd')
 
 
 def test_ceph_volume_command_4():
@@ -103,8 +115,11 @@ def test_ceph_volume_command_4():
                               _mk_device(size="10.0 GB", rotational=False)*2
                               )
     sel = drive_selection.DriveSelection(spec, inventory)
-    cmd = translate.ToCephVolume(spec, sel).run()
-    assert cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --db-devices /dev/sdc /dev/sdd --wal-devices /dev/sde /dev/sdf --block-wal-size 500M --block-db-size 500M --dmcrypt --osds-per-device 3 --yes --no-systemd'
+    cmd = translate.to_ceph_volume(spec, sel).run()
+    assert cmd == ('lvm batch --no-auto /dev/sda /dev/sdb '
+                   '--db-devices /dev/sdc /dev/sdd --wal-devices /dev/sde /dev/sdf '
+                   '--block-wal-size 500M --block-db-size 500M --dmcrypt '
+                   '--osds-per-device 3 --yes --no-systemd')
 
 
 def test_ceph_volume_command_5():
@@ -114,7 +129,7 @@ def test_ceph_volume_command_5():
                           )
     inventory = _mk_inventory(_mk_device(rotational=True)*2)
     sel = drive_selection.DriveSelection(spec, inventory)
-    cmd = translate.ToCephVolume(spec, sel).run()
+    cmd = translate.to_ceph_volume(spec, sel).run()
     assert cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --filestore --yes --no-systemd'
 
 
@@ -127,5 +142,7 @@ def test_ceph_volume_command_6():
                           )
     inventory = _mk_inventory(_mk_device(rotational=True)*2 + _mk_device(rotational=False)*2)
     sel = drive_selection.DriveSelection(spec, inventory)
-    cmd = translate.ToCephVolume(spec, sel).run()
-    assert cmd == 'lvm batch --no-auto /dev/sdc /dev/sdd --journal-size 500M --journal-devices /dev/sda /dev/sdb --filestore --yes --no-systemd'
+    cmd = translate.to_ceph_volume(spec, sel).run()
+    assert cmd == ('lvm batch --no-auto /dev/sdc /dev/sdd '
+                   '--journal-size 500M --journal-devices /dev/sda /dev/sdb '
+                   '--filestore --yes --no-systemd')
diff --git a/src/python-common/ceph/tests/utils.py b/src/python-common/ceph/tests/utils.py
new file mode 100644
index 0000000..8e04ec3
--- /dev/null
+++ b/src/python-common/ceph/tests/utils.py
@@ -0,0 +1,38 @@
+from ceph.deployment.inventory import Devices, Device
+
+
+def _mk_device(rotational=True,
+               locked=False,
+               size="394.27 GB"):
+    return [Device(
+        path='??',
+        sys_api={
+            "rotational": '1' if rotational else '0',
+            "vendor": "Vendor",
+            "human_readable_size": size,
+            "partitions": {},
+            "locked": int(locked),
+            "sectorsize": "512",
+            "removable": "0",
+            "path": "??",
+            "support_discard": "",
+            "model": "Model",
+            "ro": "0",
+            "nr_requests": "128",
+            "size": 423347879936  # ignore coversion from human_readable_size
+        },
+        available=not locked,
+        rejected_reasons=['locked'] if locked else [],
+        lvs=[],
+        device_id="Model-Vendor-foobar"
+    )]
+
+
+def _mk_inventory(devices):
+    devs = []
+    for dev_, name in zip(devices, map(chr, range(ord('a'), ord('z') + 1))):
+        dev = Device.from_json(dev_.to_json())
+        dev.path = '/dev/sd' + name
+        dev.sys_api = dict(dev_.sys_api, path='/dev/sd' + name)
+        devs.append(dev)
+    return Devices(devices=devs)
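
A quick usage sketch of the new helpers: _mk_device fabricates a single inventory entry and _mk_inventory assigns paths /dev/sda, /dev/sdb, ... in order:

    from ceph.tests.utils import _mk_device, _mk_inventory

    inventory = _mk_inventory(_mk_device(rotational=True) * 2 +
                              _mk_device(rotational=False) * 2)
    [d.path for d in inventory.devices]
    # -> ['/dev/sda', '/dev/sdb', '/dev/sdc', '/dev/sdd']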