git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
ceph-volume batch: track rel_size in percent, more tests
author Jan Fajerski <jfajerski@suse.com>
Tue, 9 Jun 2020 14:40:46 +0000 (16:40 +0200)
committer Jan Fajerski <jfajerski@suse.com>
Fri, 2 Oct 2020 07:47:42 +0000 (09:47 +0200)
Signed-off-by: Jan Fajerski <jfajerski@suse.com>
(cherry picked from commit 2327e92abae74518d463a55ef4d42dbb816c9200)

src/ceph-volume/ceph_volume/devices/lvm/batch.py
src/ceph-volume/ceph_volume/tests/conftest.py
src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py
src/ceph-volume/ceph_volume/util/device.py
src/ceph-volume/ceph_volume/util/templates.py

index 09b232c6ed0b89408ee0964c609b0c9a0a686892..cb4c2d96731fe9ec0200d3d3f31c8e99506af4ef 100644 (file)
@@ -45,13 +45,13 @@ def get_physical_osds(devices, args):
     data_slots = args.osds_per_device
     if args.data_slots:
         data_slots = max(args.data_slots, args.osds_per_device)
-    rel_data_size = 100 / data_slots
+    rel_data_size = 1.0 / data_slots
     mlogger.debug('relative data size: {}'.format(rel_data_size))
     ret = []
     for dev in devices:
         if dev.available_lvm:
             dev_size = dev.vg_size[0]
-            abs_size = disk.Size(b=int(dev_size * rel_data_size / 100))
+            abs_size = disk.Size(b=int(dev_size * rel_data_size))
             free_size = dev.vg_free[0]
             if abs_size < 419430400:
                 mlogger.error('Data LV on {} would be too small (<400M)'.format(dev.path))
@@ -71,6 +71,52 @@ def get_physical_osds(devices, args):
     return ret
 
 
+def get_physical_fast_allocs(devices, type_, fast_slots_per_device, new_osds, args):
+    requested_slots = getattr(args, '{}_slots'.format(type_))
+    if not requested_slots or requested_slots < fast_slots_per_device:
+        if requested_slots:
+            mlogger.info('{}_slots argument is to small, ignoring'.format(type_))
+        requested_slots = fast_slots_per_device
+
+    requested_size = getattr(args, '{}_size'.format(type_), 0)
+    if requested_size == 0:
+        # no size argument was specified, check ceph.conf
+        get_size_fct = getattr(prepare, 'get_{}_size'.format(type_))
+        requested_size = get_size_fct(lv_format=False)
+
+    ret = []
+    for dev in devices:
+        if not dev.available_lvm:
+            continue
+        # any LV present is considered a taken slot
+        occupied_slots = len(dev.lvs)
+        # TODO this only looks at the first vg on device
+        dev_size = dev.vg_size[0]
+        abs_size = disk.Size(b=int(dev_size / requested_slots))
+        free_size = dev.vg_free[0]
+        # if abs_size < 419430400:
+        #     mlogger.error('{} LV on {} would be too small (<400M)'.format(
+        #         type_, dev.path))
+        #     continue
+        relative_size = int(abs_size) / dev_size
+        if requested_size:
+            if requested_size <= abs_size:
+                abs_size = requested_size
+            else:
+                mlogger.error(
+                    '{} was requested for {}, but only {} can be fulfilled'.format(
+                        requested_size,
+                        '{}_size'.format(type_),
+                        abs_size,
+                    ))
+                exit(1)
+        while abs_size <= free_size and len(ret) < new_osds and occupied_slots < fast_slots_per_device:
+            free_size -= abs_size.b
+            occupied_slots += 1
+            ret.append((dev.path, relative_size, abs_size, requested_slots))
+    return ret
+
+
 class Batch(object):
 
     help = 'Automatically size devices for multi-OSD provisioning with minimal interaction'
@@ -384,7 +430,9 @@ class Batch(object):
         requested_osds = args.osds_per_device * len(phys_devs) + len(lvm_devs)
 
         fast_type = 'block_db' if args.bluestore else 'journal'
-        fast_allocations = self.fast_allocations(fast_devices, requested_osds,
+        fast_allocations = self.fast_allocations(fast_devices,
+                                                 requested_osds,
+                                                 num_osds,
                                                  fast_type)
         if fast_devices and not fast_allocations:
             mlogger.info('{} fast devices were passed, but none are available'.format(len(fast_devices)))
@@ -395,7 +443,9 @@ class Batch(object):
             exit(1)
 
         very_fast_allocations = self.fast_allocations(very_fast_devices,
-                                                      requested_osds, 'block_wal')
+                                                      requested_osds,
+                                                      num_osds,
+                                                      'block_wal')
         if very_fast_devices and not very_fast_allocations:
             mlogger.info('{} very fast devices were passed, but none are available'.format(len(very_fast_devices)))
             exit(0)
@@ -413,56 +463,12 @@ class Batch(object):
                                         type_='block.wal')
         return plan
 
-    def get_physical_fast_allocs(self, devices, type_, used_slots, args):
-        requested_slots = getattr(args, '{}_slots'.format(type_))
-        if not requested_slots or requested_slots < used_slots:
-            if requested_slots:
-                mlogger.info('{}_slots argument is to small, ignoring'.format(type_))
-            requested_slots = used_slots
-        requested_size = getattr(args, '{}_size'.format(type_), 0)
-
-        if requested_size == 0:
-            # no size argument was specified, check ceph.conf
-            get_size_fct = getattr(prepare, 'get_{}_size'.format(type_))
-            requested_size = get_size_fct(lv_format=False)
-
-        ret = []
-        for dev in devices:
-            if not dev.available_lvm:
-                continue
-            # TODO this only looks at the first vg on device
-            dev_size = dev.vg_size[0]
-            abs_size = disk.Size(b=int(dev_size / requested_slots))
-            free_size = dev.vg_free[0]
-            if abs_size < 419430400:
-                mlogger.error('{} LV on {} would be too small (<400M)'.format(
-                    type_, dev.path))
-                continue
-            relative_size = int(abs_size) / dev_size * 100
-            if requested_size:
-                if requested_size <= abs_size:
-                    abs_size = requested_size
-                else:
-                    mlogger.error(
-                        '{} was requested for {}, but only {} can be fulfilled'.format(
-                            requested_size,
-                            '{}_size'.format(type_),
-                            abs_size,
-                        ))
-                    exit(1)
-            for _ in range(used_slots):
-                if abs_size > free_size:
-                    break
-                free_size -= abs_size.b
-                ret.append((dev.path, relative_size, abs_size, requested_slots))
-        return ret
-
     def get_lvm_fast_allocs(self, lvs):
         return [("{}/{}".format(d.vg_name, d.lv_name), 100.0,
                  disk.Size(b=int(d.lv_size)), 1) for d in lvs if not
                 d.used_by_ceph]
 
-    def fast_allocations(self, devices, num_osds, type_):
+    def fast_allocations(self, devices, requested_osds, new_osds, type_):
         ret = []
         if not devices:
             return ret
@@ -472,16 +478,17 @@ class Batch(object):
 
         ret.extend(self.get_lvm_fast_allocs(lvm_devs))
 
-        if (num_osds - len(lvm_devs)) % len(phys_devs):
-            used_slots = int((num_osds - len(lvm_devs)) / len(phys_devs)) + 1
+        if (requested_osds - len(lvm_devs)) % len(phys_devs):
+            fast_slots_per_device = int((requested_osds - len(lvm_devs)) / len(phys_devs)) + 1
         else:
-            used_slots = int((num_osds - len(lvm_devs)) / len(phys_devs))
+            fast_slots_per_device = int((requested_osds - len(lvm_devs)) / len(phys_devs))
 
 
-        ret.extend(self.get_physical_fast_allocs(phys_devs,
-                                                 type_,
-                                                 used_slots,
-                                                 self.args))
+        ret.extend(get_physical_fast_allocs(phys_devs,
+                                            type_,
+                                            fast_slots_per_device,
+                                            new_osds,
+                                            self.args))
         return ret
 
     class OSD(object):
index 51791a497c57008565e38f4d51a64565bc4ce946..bf6235b755ce57ee6e9203cce2067504aa9378de 100644 (file)
@@ -50,6 +50,23 @@ def mock_devices_available():
     dev.vg_free = dev.vg_size
     return [dev]
 
+@pytest.fixture
+def mock_device_generator():
+    def mock_device():
+        dev = create_autospec(device.Device)
+        dev.path = '/dev/foo'
+        dev.available_lvm = True
+        dev.vg_size = [21474836480]
+        dev.vg_free = dev.vg_size
+        dev.lvs = []
+        return dev
+    return mock_device
+
+
+@pytest.fixture(params=range(1,11))
+def osds_per_device(request):
+    return request.param
+
 
 @pytest.fixture
 def fake_run(monkeypatch):
index c0ea489097ef04c60931044ad986f75b42a44108..cd1c4b7af4d9faabef55ac93c1599eef130c9455 100644 (file)
@@ -1,5 +1,6 @@
+from functools import reduce
 import pytest
-import json
+import random
 from ceph_volume.devices.lvm import batch
 
 
@@ -19,24 +20,65 @@ class TestBatch(object):
         assert 'Device lists are not disjoint' in str(disjoint_ex.value)
 
     def test_get_physical_osds_return_len(self, factory,
-                                          mock_devices_available):
-        osds_per_device = 1
+                                          mock_devices_available,
+                                          osds_per_device):
         args = factory(data_slots=1, osds_per_device=osds_per_device, osd_ids=[])
         osds = batch.get_physical_osds(mock_devices_available, args)
         assert len(osds) == len(mock_devices_available) * osds_per_device
 
     def test_get_physical_osds_rel_size(self, factory,
-                                          mock_devices_available):
-        osds_per_device = 1
+                                          mock_devices_available,
+                                          osds_per_device):
         args = factory(data_slots=1, osds_per_device=osds_per_device, osd_ids=[])
         osds = batch.get_physical_osds(mock_devices_available, args)
         for osd in osds:
-            assert osd.data[1] == 100 / osds_per_device
+            assert osd.data[1] == 1.0 / osds_per_device
 
     def test_get_physical_osds_abs_size(self, factory,
-                                          mock_devices_available):
-        osds_per_device = 1
+                                          mock_devices_available,
+                                          osds_per_device):
         args = factory(data_slots=1, osds_per_device=osds_per_device, osd_ids=[])
         osds = batch.get_physical_osds(mock_devices_available, args)
         for osd, dev in zip(osds, mock_devices_available):
-            assert osd.data[2] == dev.vg_size[0] / osds_per_device
+            assert osd.data[2] == int(dev.vg_size[0] / osds_per_device)
+
+    def test_get_physical_osds_osd_ids(self, factory,
+                                          mock_devices_available,
+                                          osds_per_device):
+        pass
+
+    def test_get_physical_fast_allocs_length(self, factory,
+                                             mock_devices_available):
+        args = factory(block_db_slots=None, get_block_db_size=None)
+        fast = batch.get_physical_fast_allocs(mock_devices_available,
+                                              'block_db', 2, 2, args)
+        assert len(fast) == 2
+
+    @pytest.mark.parametrize('occupied_prior', range(7))
+    @pytest.mark.parametrize('slots,num_devs',
+                             [l for sub in [list(zip([x]*x, range(1, x + 1))) for x in range(1,7)] for l in sub])
+    def test_get_physical_fast_allocs_length_existing(self,
+                                                      num_devs,
+                                                      slots,
+                                                      occupied_prior,
+                                                      factory,
+                                                      mock_device_generator):
+        occupied_prior = min(occupied_prior, slots)
+        devs = [mock_device_generator() for _ in range(num_devs)]
+        already_assigned = 0
+        while already_assigned < occupied_prior:
+            dev_i = random.randint(0, num_devs - 1)
+            dev = devs[dev_i]
+            if len(dev.lvs) < occupied_prior:
+                dev.lvs.append('foo')
+                dev.path = '/dev/bar'
+                already_assigned = sum([len(d.lvs) for d in devs])
+        args = factory(block_db_slots=None, get_block_db_size=None)
+        expected_num_osds = max(len(devs) * slots - occupied_prior, 0)
+        fast = batch.get_physical_fast_allocs(devs,
+                                              'block_db', slots,
+                                              expected_num_osds, args)
+        assert len(fast) == expected_num_osds
+        expected_assignment_on_used_devices = sum([slots - len(d.lvs) for d in devs if len(d.lvs) > 0])
+        assert len([f for f in fast if f[0] == '/dev/bar']) == expected_assignment_on_used_devices
+        assert len([f for f in fast if f[0] != '/dev/bar']) == expected_num_osds - expected_assignment_on_used_devices
index dc228f2465f365b4b6c4453d1bd140d7aed5ddf1..0fde00ccd2469cb152d1c82336be6984b32f8f20 100644 (file)
@@ -75,13 +75,17 @@ class Device(object):
         'vendor',
     ]
 
+    # define some class variables; mostly to enable the use of autospec in
+    # unittests
+    lvs = []
+
     def __init__(self, path):
         self.path = path
         # LVs can have a vg/lv path, while disks will have /dev/sda
         self.abspath = path
         self.lv_api = None
-        self.vgs = []
         self.lvs = []
+        self.vgs = []
         self.vg_name = None
         self.lv_name = None
         self.disk_api = {}
index a8b1eec4e61cd5df4bd5c890cd6d6a5932550717..97b15b73b5e6279eb66bd05602d47169730362e4 100644 (file)
@@ -12,7 +12,7 @@ osd_reused_id = """
 
 
 osd_component = """
-  {_type: <15} {path: <55} {size: <15} {percent:.2f}%"""
+  {_type: <15} {path: <55} {size: <15} {percent:.2%}%"""
 
 
 total_osds = """