git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
ceph-volume: batch - major refactor
author     Jan Fajerski <jfajerski@suse.com>
           Mon, 27 Apr 2020 09:47:04 +0000 (11:47 +0200)
committer  Jan Fajerski <jfajerski@suse.com>
           Fri, 25 Sep 2020 09:49:44 +0000 (11:49 +0200)
This completely refactors the batch code so that OSDs are created through
the existing create/prepare code path instead of a second, parallel code
path. This not only eases the maintenance burden but also adds features
and fixes bugs: the subcommand can now handle LVs, replace OSDs, reuse
VGs, and has a better notion of idempotency.

Signed-off-by: Jan Fajerski <jfajerski@suse.com>
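
The refactored flow is: build a plan of per-OSD allocations, optionally
report it, then feed each entry back into the existing prepare/create
path. A minimal sketch of that flow, simplified from the diff below
(get_default_args, get_plan, report, get_args, Prepare and Create are the
helpers this commit introduces or reuses; the surrounding batch instance
and args are illustrative):

    import argparse
    from ceph_volume.devices.lvm import common
    from ceph_volume.devices.lvm.create import Create

    # 1. Build the plan: one Batch.OSD object per OSD to be created,
    #    each knowing its data device, sizes and slot counts.
    plan = batch.get_plan(batch.args)

    # 2. --report only prints the plan (pretty, json or json-pretty);
    #    no disks are touched.
    batch.report(plan)

    # 3. Execution merges global defaults with each OSD's arguments and
    #    hands off to the single existing code path.
    defaults = common.get_default_args()
    for osd in plan:
        osd_args = osd.get_args(defaults)
        Create([]).create(argparse.Namespace(**osd_args))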
13 files changed:
src/ceph-volume/ceph_volume/devices/lvm/batch.py
src/ceph-volume/ceph_volume/devices/lvm/prepare.py
src/ceph-volume/ceph_volume/devices/lvm/strategies/__init__.py [deleted file]
src/ceph-volume/ceph_volume/devices/lvm/strategies/bluestore.py [deleted file]
src/ceph-volume/ceph_volume/devices/lvm/strategies/filestore.py [deleted file]
src/ceph-volume/ceph_volume/devices/lvm/strategies/strategies.py [deleted file]
src/ceph-volume/ceph_volume/devices/lvm/strategies/validators.py [deleted file]
src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/__init__.py [deleted file]
src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/test_bluestore.py [deleted file]
src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/test_filestore.py [deleted file]
src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/test_validate.py [deleted file]
src/ceph-volume/ceph_volume/util/prepare.py
src/ceph-volume/ceph_volume/util/templates.py

diff --git a/src/ceph-volume/ceph_volume/devices/lvm/batch.py b/src/ceph-volume/ceph_volume/devices/lvm/batch.py
index 4f9db9b996b9275d5b6bcde5361d901b47c86698..4f646f9adfce93d07571dd4b7f566728b3771462 100644 (file)
@@ -1,11 +1,15 @@
 import argparse
+import json
 import logging
 import json
 from textwrap import dedent
 from ceph_volume import terminal, decorators
-from ceph_volume.util import prompt_bool
-from ceph_volume.util import arg_validators
-from . import strategies
+from ceph_volume.api.lvm import Volume
+from ceph_volume.util import disk, prompt_bool, arg_validators, templates
+from ceph_volume.util import prepare
+from . import common
+from .create import Create
+from .prepare import Prepare
 
 mlogger = terminal.MultiLogger(__name__)
 logger = logging.getLogger(__name__)
@@ -26,100 +30,6 @@ def device_formatter(devices):
     return ''.join(lines)
 
 
-# Scenario filtering/detection
-def bluestore_single_type(device_facts):
-    """
-    Detect devices that are just HDDs or solid state so that a 1:1
-    device-to-osd provisioning can be done
-    """
-    types = [device.rotational for device in device_facts]
-    if len(set(types)) == 1:
-        return strategies.bluestore.SingleType
-
-
-def bluestore_mixed_type(device_facts):
-    """
-    Detect if devices are HDDs as well as solid state so that block.db can be
-    placed in solid devices while data is kept in the spinning drives.
-    """
-    types = [device.rotational for device in device_facts]
-    if len(set(types)) > 1:
-        return strategies.bluestore.MixedType
-
-
-def filestore_single_type(device_facts):
-    """
-    Detect devices that are just HDDs or solid state so that a 1:1
-    device-to-osd provisioning can be done, keeping the journal on the OSD
-    """
-    types = [device.rotational for device in device_facts]
-    if len(set(types)) == 1:
-        return strategies.filestore.SingleType
-
-
-def filestore_mixed_type(device_facts):
-    """
-    Detect if devices are HDDs as well as solid state so that the journal can be
-    placed in solid devices while data is kept in the spinning drives.
-    """
-    types = [device.rotational for device in device_facts]
-    if len(set(types)) > 1:
-        return strategies.filestore.MixedType
-
-
-def get_strategy(args, devices):
-    """
-    Given a set of devices as input, go through the different detection
-    mechanisms to narrow down on a strategy to use. The strategies are 4 in
-    total:
-
-    * Single device type on Bluestore
-    * Mixed device types on Bluestore
-    * Single device type on Filestore
-    * Mixed device types on Filestore
-
-    When the function matches to a scenario it returns the strategy class. This
-    allows for dynamic loading of the conditions needed for each scenario, with
-    normalized classes
-    """
-    bluestore_strategies = [bluestore_mixed_type, bluestore_single_type]
-    filestore_strategies = [filestore_mixed_type, filestore_single_type]
-    if args.bluestore:
-        strategies = bluestore_strategies
-    else:
-        strategies = filestore_strategies
-
-    for strategy in strategies:
-        backend = strategy(devices)
-        if backend:
-            return backend
-
-
-def filter_devices(args):
-    unused_devices = [device for device in args.devices if not device.used_by_ceph]
-    # only data devices, journals can be reused
-    used_devices = [device.abspath for device in args.devices if device.used_by_ceph]
-    filtered_devices = {}
-    if used_devices:
-        for device in used_devices:
-            filtered_devices[device] = {"reasons": ["Used by ceph as a data device already"]}
-        logger.info("Ignoring devices already used by ceph: %s" % ", ".join(used_devices))
-    if len(unused_devices) == 1:
-        last_device = unused_devices[0]
-        if not last_device.rotational and last_device.is_lvm_member:
-            if last_device.lvs:
-                reason = "Used by ceph as a %s already and there are no devices left for data/block" % (
-                    last_device.lvs[0].tags.get("ceph.type"),
-                )
-            else:
-                reason = "Disk is an LVM member already, skipping"
-            filtered_devices[last_device.abspath] = {"reasons": [reason]}
-            logger.info(reason + ": %s" % last_device.abspath)
-            unused_devices = []
-
-    return unused_devices, filtered_devices
-
-
 class Batch(object):
 
     help = 'Automatically size devices for multi-OSD provisioning with minimal interaction'
@@ -131,6 +41,7 @@ class Batch(object):
 
         ceph-volume lvm batch [DEVICE...]
 
+    Devices can be physical block devices or LVs.
     Optional reporting on possible outcomes is enabled with --report
 
         ceph-volume lvm batch --report [DEVICE...]
@@ -202,7 +113,7 @@ class Batch(object):
             '--format',
             help='output format, defaults to "pretty"',
             default='pretty',
-            choices=['json', 'pretty'],
+            choices=['json', 'json-pretty', 'pretty'],
         )
         parser.add_argument(
             '--dmcrypt',
@@ -226,21 +137,43 @@ class Batch(object):
             default=1,
             help='Provision more than 1 (the default) OSD per device',
         )
+        parser.add_argument(
+            '--data-slots',
+            type=int,
+            help=('Provision more than 1 (the default) OSD slot per device;'
+                  ' if more slots than osds-per-device are specified, slots'
+                  ' will stay unoccupied'),
+        )
         parser.add_argument(
             '--block-db-size',
             type=int,
             help='Set (or override) the "bluestore_block_db_size" value, in bytes'
         )
+        parser.add_argument(
+            '--block-db-slots',
+            type=int,
+            help='Provision slots on DB device, can remain unoccupied'
+        )
         parser.add_argument(
             '--block-wal-size',
             type=int,
             help='Set (or override) the "bluestore_block_wal_size" value, in bytes'
         )
+        parser.add_argument(
+            '--block-wal-slots',
+            type=int,
+            help='Provision slots on WAL device, can remain unoccupied'
+        )
         parser.add_argument(
             '--journal-size',
             type=int,
             help='Override the "osd_journal_size" value, in megabytes'
         )
+        parser.add_argument(
+            '--journal-slots',
+            type=int,
+            help='Provision slots on journal device, can remain unoccupied'
+        )
         parser.add_argument(
             '--prepare',
             action='store_true',
@@ -257,39 +190,33 @@ class Batch(object):
         for dev_list in ['', 'db_', 'wal_', 'journal_']:
             setattr(self, '{}usable'.format(dev_list), [])
 
-    def report(self):
-        if self.args.format == 'pretty':
-            self.strategy.report_pretty(self.filtered_devices)
-        elif self.args.format == 'json':
-            self.strategy.report_json(self.filtered_devices)
+    def report(self, plan):
+        if self.args.format == 'json':
+            print(json.dumps([osd.report_json() for osd in plan]))
+        elif self.args.format == 'json-pretty':
+            print(json.dumps([osd.report_json() for osd in plan], indent=4,
+                       sort_keys=True))
         else:
-            raise RuntimeError('report format must be "pretty" or "json"')
-
-    def execute(self):
-        if not self.args.yes:
-            self.strategy.report_pretty(self.filtered_devices)
-            terminal.info('The above OSDs would be created if the operation continues')
-            if not prompt_bool('do you want to proceed? (yes/no)'):
-                devices = ','.join([device.abspath for device in self.args.devices])
-                terminal.error('aborting OSD provisioning for %s' % devices)
-                raise SystemExit(0)
-
-        self.strategy.execute()
-
-    def _get_strategy(self):
-        strategy = get_strategy(self.args, self.args.devices)
-        unused_devices, self.filtered_devices = filter_devices(self.args)
-        if not unused_devices and not self.args.format == 'json':
-            # report nothing changed
-            mlogger.info("All devices are already used by ceph. No OSDs will be created.")
-            raise SystemExit(0)
-        else:
-            new_strategy = get_strategy(self.args, unused_devices)
-            if new_strategy and strategy != new_strategy:
-                mlogger.error("Aborting because strategy changed from %s to %s after filtering" % (strategy.type(), new_strategy.type()))
-                raise SystemExit(1)
-
-        self.strategy = strategy.with_auto_devices(self.args, unused_devices)
+            report = ''
+            report += templates.total_osds.format(total_osds=len(plan))
+
+            report += templates.osd_component_titles
+            for osd in plan:
+                report += templates.osd_header
+                report += osd.report()
+
+            print(report)
+
+    def _check_slot_args(self):
+        if self.args.data_slots and self.args.osds_per_device:
+            if self.args.data_slots < self.args.osds_per_device:
+                raise ValueError('data_slots is smaller than osds_per_device')
+        # TODO this needs more thought.
+        # for slot in ['block_db_slots', 'block_wal_slots', 'journal_slots']:
+        #     slot_value = getattr(self.args, slot, None)
+        #     if slot_value:
+        #         if slot_value < len(self.args.devices):
+        #             raise ValueError('{} is smaller than osds_per_device')
 
     @decorators.needs_root
     def main(self):
@@ -301,79 +228,266 @@ class Batch(object):
         if not self.args.bluestore and not self.args.filestore:
             self.args.bluestore = True
 
-        if (self.args.no_auto or self.args.db_devices or
-                                  self.args.journal_devices or
-                                  self.args.wal_devices):
-            self._get_explicit_strategy()
-        else:
-            self._get_strategy()
+        # TODO add device sorter for when the user wants legacy auto_detect
+        # behaviour
+
+        self._check_slot_args()
+
+        self._ensure_disjoint_device_lists(self.args.devices,
+                                           self.args.db_devices,
+                                           self.args.wal_devices,
+                                           self.args.journal_devices)
+
+        plan = self.get_plan(self.args)
 
         if self.args.report:
-            self.report()
-        else:
-            self.execute()
-
-    def _get_explicit_strategy(self):
-        self._filter_devices()
-        self._ensure_disjoint_device_lists()
-        if self.args.bluestore:
-            if self.db_usable or self.wal_usable:
-                self.strategy = strategies.bluestore.MixedType(
-                    self.args,
-                    self.usable,
-                    self.db_usable,
-                    self.wal_usable)
-            else:
-                self.strategy = strategies.bluestore.SingleType(
-                    self.args,
-                    self.usable)
-        else:
-            if self.journal_usable:
-                self.strategy = strategies.filestore.MixedType(
-                    self.args,
-                    self.usable,
-                    self.journal_usable)
+            self.report(plan)
+            return 0
+
+        if not self.args.yes:
+            self.report(plan)
+            terminal.info('The above OSDs would be created if the operation continues')
+            if not prompt_bool('do you want to proceed? (yes/no)'):
+                terminal.error('aborting OSD provisioning')
+                raise SystemExit(0)
+
+        self._execute(plan)
+
+    def _execute(self, plan):
+        defaults = common.get_default_args()
+        global_args = [
+            'bluestore',
+            'filestore',
+            'dmcrypt',
+            'crush_device_class',
+            'no_systemd',
+        ]
+        defaults.update({arg: getattr(self.args, arg) for arg in global_args})
+        for osd in plan:
+            args = osd.get_args(defaults)
+            if self.args.prepare:
+                p = Prepare([])
+                p.prepare(argparse.Namespace(**args))
             else:
-                self.strategy = strategies.filestore.SingleType(
-                    self.args,
-                    self.usable)
+                c = Create([])
+                c.create(argparse.Namespace(**args))
 
 
-    def _filter_devices(self):
-        # filter devices by their available property.
-        # TODO: Some devices are rejected in the argparser already. maybe it
-        # makes sense to unifiy this
-        used_reason = {"reasons": ["Used by ceph already"]}
-        self.filtered_devices = {}
-        for dev_list in ['', 'db_', 'wal_', 'journal_']:
-            dev_list_prop = '{}devices'.format(dev_list)
-            if hasattr(self.args, dev_list_prop):
-                usable_dev_list_prop = '{}usable'.format(dev_list)
-                devs = getattr(self.args, dev_list_prop)
-                usable = [d for d in devs if d.available]
-                setattr(self, usable_dev_list_prop, usable)
-                self.filtered_devices.update({d: used_reason for d in
-                                              getattr(self.args, dev_list_prop)
-                                              if d.used_by_ceph})
-                # only fail if non-interactive, this iteration concerns
-                # non-data devices, there are usable data devices (or not all
-                # data devices were filtered) and non-data devices were filtered
-                # so in short this branch is not taken if all data devices are
-                # filtered
-                if self.args.yes and dev_list and self.usable and devs != usable:
-                    err = '{} devices were filtered in non-interactive mode, bailing out'
-                    if self.args.format == "json" and self.args.report:
-                        # if a json report is requested, report unchanged so idempotency checks
-                        # in ceph-ansible will work
-                        print(json.dumps({"changed": False, "osds": [], "vgs": []}))
-                        raise SystemExit(0)
-                    raise RuntimeError(err.format(len(devs) - len(usable)))
-
-
-    def _ensure_disjoint_device_lists(self):
+
+    def _ensure_disjoint_device_lists(self, data, db=[], wal=[], journal=[]):
         # check that all device lists are disjoint with each other
-        if not(set(self.usable).isdisjoint(set(self.db_usable)) and
-               set(self.usable).isdisjoint(set(self.wal_usable)) and
-               set(self.usable).isdisjoint(set(self.journal_usable)) and
-               set(self.db_usable).isdisjoint(set(self.wal_usable))):
+        if not(set(data).isdisjoint(set(db)) and
+               set(data).isdisjoint(set(wal)) and
+               set(data).isdisjoint(set(journal)) and
+               set(db).isdisjoint(set(wal))):
             raise Exception('Device lists are not disjoint')
+
+    def get_plan(self, args):
+        if args.bluestore:
+            plan = self.get_deployment_layout(args, args.devices, args.db_devices,
+                                              args.wal_devices)
+        elif args.filestore:
+            plan = self.get_deployment_layout(args, args.devices, args.journal_devices)
+        return plan
+
+    def get_deployment_layout(self, args, devices, fast_devices=[],
+                              very_fast_devices=[]):
+        plan = []
+        phys_devs = [d for d in devices if d.is_device]
+        lvm_devs = [d.lvs[0] for d in list(set(devices) -
+                                           set(phys_devs))]
+        mlogger.debug(('passed data_devices: {} physical, {}'
+                       ' LVM').format(len(phys_devs), len(lvm_devs)))
+        data_slots = args.osds_per_device
+        if args.data_slots:
+            data_slots = max(args.data_slots, args.osds_per_device)
+        rel_data_size = 100 / data_slots
+        mlogger.debug('relative data size: {}'.format(rel_data_size))
+
+        for dev in phys_devs:
+            if dev.available_lvm:
+                dev_size = dev.vg_size[0]
+                abs_size = disk.Size(b=int(dev_size * rel_data_size / 100))
+                free_size = dev.vg_free[0]
+                if abs_size < 419430400:
+                    mlogger.error('Data LV on {} would be too small (<400M)'.format(dev.path))
+                    continue
+                for _ in range(args.osds_per_device):
+                    if abs_size > free_size:
+                        break
+                    free_size -= abs_size.b
+                    osd_id = None
+                    if args.osd_ids:
+                        osd_id = args.osd_ids.pop()
+                    osd = self.OSD(dev.path,
+                                   rel_data_size,
+                                   abs_size,
+                                   args.osds_per_device,
+                                   osd_id)
+                    plan.append(osd)
+        for dev in lvm_devs:
+            if dev.used_by_ceph:
+                continue
+            osd_id = None
+            if args.osd_ids:
+                osd_id = args.osd_ids.pop()
+            osd = self.OSD("{}/{}".format(dev.vg_name, dev.lv_name),
+                           100.0,
+                           disk.Size(b=int(dev.lv_size)),
+                           1,
+                           osd_id)
+            plan.append(osd)
+
+        num_osds = len(plan)
+        requested_osds = args.osds_per_device * len(phys_devs) + len(lvm_devs)
+
+        fast_type = 'block_db' if args.bluestore else 'journal'
+        fast_allocations = self.fast_allocations(fast_devices, requested_osds,
+                                                 fast_type)
+        very_fast_allocations = self.fast_allocations(very_fast_devices,
+                                                      requested_osds, 'block_wal')
+        if fast_devices:
+            if not fast_allocations:
+                mlogger.info('{} fast devices were passed, but none are available'.format(len(fast_devices)))
+                exit(0)
+            assert len(fast_allocations) == num_osds, '{} fast allocations != {} num_osds'.format(len(fast_allocations), num_osds)
+        if very_fast_devices:
+            if not very_fast_allocations:
+                mlogger.info('{} very fast devices were passed, but none are available'.format(len(very_fast_devices)))
+                exit(0)
+            assert len(very_fast_allocations) == num_osds, '{} very fast allocations != {} num_osds'.format(len(very_fast_allocations), num_osds)
+
+        for osd in plan:
+            if fast_devices:
+                type_ = 'block.db'
+                if args.filestore:
+                    type_ = 'journal'
+                osd.add_fast_device(*fast_allocations.pop(),
+                                    type_=type_)
+            if very_fast_devices:
+                assert args.bluestore and not args.filestore, 'filestore does not support wal devices'
+                osd.add_very_fast_device(*very_fast_allocations.pop(),
+                                        type_='block.wal')
+        return plan
+
+    def fast_allocations(self, devices, num_osds, type_):
+        ret = []
+        if not devices:
+            return ret
+        phys_devs = [d for d in devices if d.is_device]
+        lvm_devs = [d.lvs[0] for d in list(set(devices) -
+                                           set(phys_devs))]
+        ret.extend(
+            [("{}/{}".format(d.vg_name, d.lv_name), 100.0, disk.Size(b=int(d.lv_size)), 1) for d in
+                    lvm_devs if not d.used_by_ceph])
+        if not phys_devs:
+            # all fast devices were LVs; their allocations are already in ret
+            return ret
+        if (num_osds - len(lvm_devs)) % len(phys_devs):
+            used_slots = int((num_osds - len(lvm_devs)) / len(phys_devs)) + 1
+        else:
+            used_slots = int((num_osds - len(lvm_devs)) / len(phys_devs))
+
+        requested_slots = getattr(self.args, '{}_slots'.format(type_))
+        if not requested_slots or requested_slots < used_slots:
+            if requested_slots:
+                mlogger.info('{}_slots argument is too small, ignoring'.format(type_))
+            requested_slots = used_slots
+        requested_size = getattr(self.args, '{}_size'.format(type_), 0)
+        if requested_size == 0:
+            # no size argument was specified, check ceph.conf
+            get_size_fct = getattr(prepare, 'get_{}_size'.format(type_))
+            requested_size = get_size_fct(lv_format=False)
+
+        available = [d for d in phys_devs if d.available_lvm]
+        for dev in available:
+            # TODO this only looks at the first vg on device
+            dev_size = dev.vg_size[0]
+            abs_size = disk.Size(b=int(dev_size / requested_slots))
+            free_size = dev.vg_free[0]
+            if abs_size < 419430400:
+                mlogger.error('{} LV on {} would be too small (<400M)'.format(
+                    type_, dev.path))
+                continue
+            relative_size = int(abs_size) / dev_size * 100
+            if requested_size:
+                if requested_size <= abs_size:
+                    abs_size = requested_size
+                else:
+                    mlogger.error(
+                        '{} was requested for {}, but only {} can be fulfilled'.format(
+                            requested_size,
+                            '{}_size'.format(type_),
+                            abs_size,
+                        ))
+                    exit(1)
+            for _ in range(used_slots):
+                if abs_size > free_size:
+                    break
+                free_size -= abs_size.b
+                ret.append((dev.path, relative_size, abs_size, requested_slots))
+        return ret
+
+    class OSD(object):
+        def __init__(self, data_path, rel_size, abs_size, slots, id_):
+            self.id_ = id_
+            self.data = (data_path, rel_size, abs_size, slots)
+            self.fast = None
+            self.very_fast = None
+
+        def add_fast_device(self, path, rel_size, abs_size, slots, type_):
+            self.fast = (path, rel_size, abs_size, slots, type_)
+
+        def add_very_fast_device(self, path, rel_size, abs_size, slots, type_):
+            self.very_fast = (path, rel_size, abs_size, slots, type_)
+
+        def _get_osd_plan(self):
+            plan = {
+                'data': self.data[0],
+                'data_size': self.data[2]
+            }
+            if self.fast:
+                type_ = self.fast[4].replace('.', '_')
+                plan.update(
+                    {
+                        type_: self.fast[0],
+                        '{}_size'.format(type_): self.fast[2],
+                    })
+            if self.very_fast:
+                plan.update(
+                    {
+                        'block_wal': self.very_fast[0],
+                        'block_wal_size': self.very_fast[2],
+                    })
+            if self.id_:
+                plan.update({'osd_id': self.id_})
+            return plan
+
+        def get_args(self, defaults):
+            my_defaults = defaults.copy()
+            my_defaults.update(self._get_osd_plan())
+            return my_defaults
+
+        def report(self):
+            report = ''
+            if self.id_:
+                report += templates.osd_reused_id.format(
+                    id_=self.id_)
+            report += templates.osd_component.format(
+                _type='data',
+                path=self.data[0],
+                size=self.data[2],
+                percent=self.data[1])
+            if self.fast:
+                report += templates.osd_component.format(
+                    _type=self.fast[4],
+                    path=self.fast[0],
+                    size=self.fast[2],
+                    percent=self.fast[1])
+            if self.very_fast:
+                report += templates.osd_component.format(
+                    _type=self.very_fast[4],
+                    path=self.very_fast[0],
+                    size=self.very_fast[2],
+                    percent=self.very_fast[1])
+            return report
+
+        def report_json(self):
+            return self._get_osd_plan()
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/prepare.py b/src/ceph-volume/ceph_volume/devices/lvm/prepare.py
index fdda66646a8848ba163e9772c32e796cf8ad535b..a951546a879a7e8e7a8c13eab7251fe578407c32 100644 (file)
@@ -174,6 +174,8 @@ class Prepare(object):
                                  'block_{}_slots'.format(device_type),
                                  1),
             }
+            # TODO: use get_block_db_size and co. here to get the configured
+            # size from the conf file
             if size != 0:
                 kwargs['size'] = size
             lv = api.create_lv(
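
The TODO above refers to the size helpers that batch.py already resolves
dynamically via getattr (see get_size_fct in the batch.py diff). A hedged
illustration of that dispatch, assuming the prepare helpers keep their
current names:

    from ceph_volume.util import prepare

    type_ = 'block_db'   # or 'block_wal' / 'journal'
    # 'block_db' -> prepare.get_block_db_size, which falls back to the
    # ceph.conf value when no size was given on the command line
    get_size_fct = getattr(prepare, 'get_{}_size'.format(type_))
    requested_size = get_size_fct(lv_format=False)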
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/strategies/__init__.py b/src/ceph-volume/ceph_volume/devices/lvm/strategies/__init__.py
deleted file mode 100644 (file)
index cb16d11..0000000
+++ /dev/null
@@ -1 +0,0 @@
-from . import bluestore, filestore # noqa
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/strategies/bluestore.py b/src/ceph-volume/ceph_volume/devices/lvm/strategies/bluestore.py
deleted file mode 100644 (file)
index cdaabbe..0000000
+++ /dev/null
@@ -1,539 +0,0 @@
-from __future__ import print_function
-from ceph_volume.util import disk, prepare, str_to_int
-from ceph_volume.api import lvm
-from . import validators
-from .strategies import Strategy
-from .strategies import MixedStrategy
-from ceph_volume.devices.lvm.create import Create
-from ceph_volume.devices.lvm.prepare import Prepare
-from ceph_volume.util import templates, system
-from ceph_volume.exceptions import SizeAllocationError
-
-
-class SingleType(Strategy):
-    """
-    Support for all SSDs, or all HDDS
-    """
-
-    def __init__(self, args, data_devs):
-        super(SingleType, self).__init__(args, data_devs)
-        self.validate_compute()
-
-    @classmethod
-    def with_auto_devices(cls, args, devices):
-        #SingleType only deploys standalone OSDs
-        return cls(args, devices)
-
-    @staticmethod
-    def type():
-        return "bluestore.SingleType"
-
-    def report_pretty(self, filtered_devices):
-        string = ""
-        if filtered_devices:
-            string += templates.filtered_devices(filtered_devices)
-        string += templates.total_osds.format(
-            total_osds=self.total_osds,
-        )
-        string += templates.osd_component_titles
-
-        for osd in self.computed['osds']:
-            string += templates.osd_header
-            if 'osd_id' in osd:
-                string += templates.osd_reused_id.format(
-                    id_=osd['osd_id'])
-            string += templates.osd_component.format(
-                _type='[data]',
-                path=osd['data']['path'],
-                size=osd['data']['human_readable_size'],
-                percent=osd['data']['percentage'],
-            )
-
-        print(string)
-
-    def validate(self):
-        """
-        Ensure that the minimum requirements for this type of scenario is
-        met, raise an error if the provided devices would not work
-        """
-        # validate minimum size for all devices
-        validators.minimum_device_size(
-            self.data_devs, osds_per_device=self.osds_per_device
-        )
-
-        # make sure that data devices do not have any LVs
-        validators.no_lvm_membership(self.data_devs)
-
-        if self.osd_ids:
-            self._validate_osd_ids()
-
-    def compute(self):
-        """
-        Go through the rules needed to properly size the lvs, return
-        a dictionary with the result
-        """
-        osds = self.computed['osds']
-        for device in self.data_devs:
-            extents = lvm.sizing(device.lvm_size.b, parts=self.osds_per_device)
-            for _i in range(self.osds_per_device):
-                osd = {'data': {}, 'block.db': {}}
-                osd['data']['path'] = device.abspath
-                osd['data']['size'] = extents['sizes']
-                osd['data']['parts'] = extents['parts']
-                osd['data']['percentage'] = 100 / self.osds_per_device
-                osd['data']['human_readable_size'] = str(disk.Size(gb=extents['sizes']))
-                osds.append(osd)
-
-        self.computed['changed'] = len(osds) > 0
-
-    def execute(self):
-        """
-        Create vgs/lvs from the incoming set of devices, assign their roles
-        (block, block.db, block.wal, etc..) and offload the OSD creation to
-        ``lvm create``
-        """
-        osd_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])
-
-        # create the vgs first, mapping them to the device path
-        for osd in self.computed['osds']:
-            vg = osd_vgs.get(osd['data']['path'])
-            if not vg:
-                vg = lvm.create_vg(osd['data']['path'])
-                osd_vgs[osd['data']['path']] = {'vg': vg, 'parts': osd['data']['parts']}
-
-        # create the lvs from the vgs captured in the beginning
-        for create in osd_vgs.values():
-            lvs = lvm.create_lvs(create['vg'], parts=create['parts'], name_prefix='osd-data')
-            vg_name = create['vg'].name
-            for lv in lvs:
-                command = ['--bluestore', '--data']
-                command.append('%s/%s' % (vg_name, lv.name))
-                if self.args.dmcrypt:
-                    command.append('--dmcrypt')
-                if self.args.no_systemd:
-                    command.append('--no-systemd')
-                if self.args.crush_device_class:
-                    command.extend(['--crush-device-class', self.args.crush_device_class])
-
-                if self.osd_ids:
-                    command.extend(['--osd-id', self.osd_ids.pop(0)])
-
-                if self.args.prepare:
-                    Prepare(command).main()
-                else:
-                    Create(command).main()
-
-
-class MixedType(MixedStrategy):
-
-    def __init__(self, args, data_devs, db_devs, wal_devs=[]):
-        super(MixedType, self).__init__(args, data_devs, db_devs, wal_devs)
-        self.block_db_size = self.get_block_db_size()
-        self.block_wal_size = self.get_block_wal_size()
-        self.common_vg = None
-        self.common_wal_vg = None
-        self.dbs_needed = len(self.data_devs) * self.osds_per_device
-        self.wals_needed = self.dbs_needed
-        self.use_large_block_db = self.use_large_block_wal = False
-        self.validate_compute()
-
-    @classmethod
-    def with_auto_devices(cls, args, devices):
-        data_devs, db_devs = cls.split_devices_rotational(devices)
-        return cls(args, data_devs, db_devs)
-
-    @staticmethod
-    def type():
-        return "bluestore.MixedType"
-
-    def get_block_db_size(self):
-        if self.args.block_db_size:
-            return disk.Size(b=self.args.block_db_size)
-        else:
-            return prepare.get_block_db_size(lv_format=False) or disk.Size(b=0)
-
-    def get_block_wal_size(self):
-        if self.args.block_wal_size:
-            return disk.Size(b=self.args.block_wal_size)
-        else:
-            return prepare.get_block_wal_size(lv_format=False) or disk.Size(b=0)
-
-    def report_pretty(self, filtered_devices):
-        string = ""
-        if filtered_devices:
-            string += templates.filtered_devices(filtered_devices)
-        string += templates.total_osds.format(
-            total_osds=len(self.data_devs) * self.osds_per_device
-        )
-
-        if self.db_or_journal_devs:
-            vg_extents = lvm.sizing(self.total_available_db_space.b, parts=self.dbs_needed)
-            db_size = str(disk.Size(b=(vg_extents['sizes'])))
-
-            string += templates.ssd_volume_group.format(
-                target='block.db',
-                total_lv_size=str(self.total_available_db_space),
-                total_lvs=vg_extents['parts'] * self.osds_per_device,
-                block_lv_size=db_size,
-                block_db_devices=', '.join([ssd.abspath for ssd in
-                                            self.db_or_journal_devs]),
-                lv_size=self.block_db_size or str(disk.Size(b=(vg_extents['sizes']))),
-                total_osds=len(self.data_devs)
-            )
-
-        if self.wal_devs:
-            wal_vg_extents = lvm.sizing(self.total_available_wal_space.b,
-                                        parts=self.wals_needed)
-            wal_size = str(disk.Size(b=(wal_vg_extents['sizes'])))
-            string += templates.ssd_volume_group.format(
-                target='block.wal',
-                total_lv_size=str(self.total_available_wal_space),
-                total_lvs=wal_vg_extents['parts'] * self.osds_per_device,
-                block_lv_size=wal_size,
-                block_db_devices=', '.join([dev.abspath for dev in
-                                            self.wal_devs]),
-                lv_size=self.block_wal_size or str(disk.Size(b=(wal_vg_extents['sizes']))),
-                total_osds=len(self.data_devs)
-            )
-
-        string += templates.osd_component_titles
-        for osd in self.computed['osds']:
-            string += templates.osd_header
-            if 'osd_id' in osd:
-                string += templates.osd_reused_id.format(
-                    id_=osd['osd_id'])
-            string += templates.osd_component.format(
-                _type='[data]',
-                path=osd['data']['path'],
-                size=osd['data']['human_readable_size'],
-                percent=osd['data']['percentage'])
-
-            if 'block.db' in osd:
-                string += templates.osd_component.format(
-                    _type='[block.db]',
-                    path=osd['block.db']['path'],
-                    size=osd['block.db']['human_readable_size'],
-                    percent=osd['block.db']['percentage'])
-
-            if 'block.wal' in osd:
-                string += templates.osd_component.format(
-                    _type='[block.wal]',
-                    path=osd['block.wal']['path'],
-                    size=osd['block.wal']['human_readable_size'],
-                    percent=osd['block.wal']['percentage'])
-
-        print(string)
-
-    def compute(self):
-        osds = self.computed['osds']
-
-        if self.data_devs and self.db_or_journal_devs:
-            if not self.common_vg:
-                # there isn't a common vg, so a new one must be created with all
-                # the blank db devs
-                self.computed['vg'] = {
-                    'devices': ", ".join([ssd.abspath for ssd in self.blank_db_devs]),
-                    'parts': self.dbs_needed,
-                    'percentages': self.vg_extents['percentages'],
-                    'sizes': self.block_db_size.b.as_int(),
-                    'size': self.total_blank_db_dev_size.b.as_int(),
-                    'human_readable_sizes': str(self.block_db_size),
-                    'human_readable_size': str(self.total_available_db_space),
-                }
-                vg_name = 'vg/lv'
-            else:
-                vg_name = self.common_vg.name
-
-        if self.data_devs and self.wal_devs:
-            if not self.common_wal_vg:
-                # there isn't a common vg, so a new one must be created with all
-                # the blank wal devs
-                self.computed['wal_vg'] = {
-                    'devices': ", ".join([dev.abspath for dev in self.blank_wal_devs]),
-                    'parts': self.wals_needed,
-                    'percentages': self.wal_vg_extents['percentages'],
-                    'sizes': self.block_wal_size.b.as_int(),
-                    'size': self.total_blank_wal_dev_size.b.as_int(),
-                    'human_readable_sizes': str(self.block_wal_size),
-                    'human_readable_size': str(self.total_available_wal_space),
-                }
-                wal_vg_name = 'vg/lv'
-            else:
-                wal_vg_name = self.common_wal_vg.name
-
-        for device in self.data_devs:
-            for hdd in range(self.osds_per_device):
-                osd = {'data': {}}
-                osd['data']['path'] = device.abspath
-                osd['data']['size'] = device.lvm_size.b / self.osds_per_device
-                osd['data']['percentage'] = 100 / self.osds_per_device
-                osd['data']['human_readable_size'] = str(
-                    disk.Size(b=device.lvm_size.b) / self.osds_per_device
-                )
-
-                if self.db_or_journal_devs:
-                    osd['block.db'] = {}
-                    osd['block.db']['path'] = 'vg: %s' % vg_name
-                    osd['block.db']['size'] = int(self.block_db_size.b)
-                    osd['block.db']['human_readable_size'] = str(self.block_db_size)
-                    osd['block.db']['percentage'] = self.vg_extents['percentages']
-
-                if self.wal_devs:
-                    osd['block.wal'] = {}
-                    osd['block.wal']['path'] = 'vg: %s' % wal_vg_name
-                    osd['block.wal']['size'] = int(self.block_wal_size.b)
-                    osd['block.wal']['human_readable_size'] = str(self.block_wal_size)
-                    osd['block.wal']['percentage'] = self.wal_vg_extents['percentages']
-
-                if self.osd_ids:
-                    osd['osd_id'] = self.osd_ids.pop(0)
-
-                osds.append(osd)
-
-        self.computed['changed'] = len(osds) > 0
-
-    def execute(self):
-        """
-        Create vgs/lvs from the incoming set of devices, assign their roles
-        (block, block.db, block.wal, etc..) and offload the OSD creation to
-        ``lvm create``
-        """
-        data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])
-
-        # create 1 vg per data device first, mapping them to the device path,
-        # when the lv gets created later, it can create as many as needed (or
-        # even just 1)
-        for osd in self.computed['osds']:
-            vg = data_vgs.get(osd['data']['path'])
-            if not vg:
-                vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-block')
-                data_vgs[osd['data']['path']] = vg
-
-        if self.data_devs and self.db_or_journal_devs:
-            blank_db_dev_paths = [d.abspath for d in self.blank_db_devs]
-
-            # no common vg is found, create one with all the blank SSDs
-            if not self.common_vg:
-                db_vg = lvm.create_vg(blank_db_dev_paths, name_prefix='ceph-block-dbs')
-            elif self.common_vg and blank_db_dev_paths:
-                # if a common vg exists then extend it with any blank ssds
-                db_vg = lvm.extend_vg(self.common_vg, blank_db_dev_paths)
-            else:
-                # one common vg with nothing else to extend can be used directly,
-                # either this is one device with one vg, or multiple devices with the
-                # same vg
-                db_vg = self.common_vg
-
-            if self.use_large_block_db:
-                # make the block.db lvs as large as possible
-                vg_free_count = str_to_int(db_vg.vg_free_count)
-                db_lv_extents = int(vg_free_count / self.dbs_needed)
-            else:
-                db_lv_extents = db_vg.sizing(size=self.block_db_size.gb.as_int())['extents']
-
-        if self.data_devs and self.wal_devs:
-            blank_wal_dev_paths = [d.abspath for d in self.blank_wal_devs]
-
-            if not self.common_wal_vg:
-                wal_vg = lvm.create_vg(blank_wal_dev_paths,
-                                      name_prefix='ceph-block-wals')
-            elif self.common_wal_vg and blank_wal_dev_paths:
-                wal_vg = lvm.extend_vg(self.common_wal_vg, blank_wal_dev_paths)
-            else:
-                wal_vg = self.common_wal_vg
-
-            if self.use_large_block_wal:
-                # make the block.db lvs as large as possible
-                vg_free_count = str_to_int(wal_vg.vg_free_count)
-                wal_lv_extents = int(vg_free_count / self.wals_needed)
-            else:
-                wal_lv_extents = wal_vg.sizing(size=self.block_wal_size.gb.as_int())['extents']
-
-        # create the data lvs, and create the OSD with an lv from the common
-        # block.db vg from before
-        for osd in self.computed['osds']:
-            data_path = osd['data']['path']
-            data_vg = data_vgs[data_path]
-            data_lv_extents = data_vg.sizing(parts=self.osds_per_device)['extents']
-            data_uuid = system.generate_uuid()
-            data_lv = lvm.create_lv(
-                'osd-block', data_uuid, vg=data_vg, extents=data_lv_extents)
-            command = [
-                '--bluestore',
-                '--data', "%s/%s" % (data_lv.vg_name, data_lv.name),
-            ]
-            if 'block.db' in osd:
-                db_uuid = system.generate_uuid()
-                db_lv = lvm.create_lv(
-                    'osd-block-db', db_uuid, vg=db_vg, extents=db_lv_extents)
-                command.extend([ '--block.db',
-                                '{}/{}'.format(db_lv.vg_name, db_lv.name)])
-            if 'block.wal' in osd:
-                wal_uuid = system.generate_uuid()
-                wal_lv = lvm.create_lv(
-                    'osd-block-wal', wal_uuid, vg=wal_vg, extents=wal_lv_extents)
-                command.extend(
-                    ['--block.wal',
-                     '{}/{}'.format(wal_lv.vg_name, wal_lv.name)
-                    ])
-            if self.args.dmcrypt:
-                command.append('--dmcrypt')
-            if self.args.no_systemd:
-                command.append('--no-systemd')
-            if self.args.crush_device_class:
-                command.extend(['--crush-device-class', self.args.crush_device_class])
-            if 'osd_id' in osd:
-                command.extend(['--osd-id', osd['osd_id']])
-
-            if self.args.prepare:
-                Prepare(command).main()
-            else:
-                Create(command).main()
-
-    def validate(self):
-        """
-        HDDs represent data devices, and solid state devices are for block.db,
-        make sure that the number of data devices would have enough LVs and
-        those LVs would be large enough to accommodate a block.db
-        """
-        # validate minimum size for all devices
-        validators.minimum_device_size(self.data_devs + self.db_or_journal_devs,
-                                       osds_per_device=self.osds_per_device)
-        validators.minimum_device_size(self.wal_devs,
-                                      osds_per_device=self.osds_per_device,
-                                      min_size=1)
-
-        # make sure that data devices do not have any LVs
-        validators.no_lvm_membership(self.data_devs)
-
-        if self.data_devs and self.db_or_journal_devs:
-            self._validate_db_devs()
-
-        if self.data_devs and self.wal_devs:
-            self._validate_wal_devs()
-
-        if self.osd_ids:
-            self._validate_osd_ids()
-
-    def _validate_db_devs(self):
-        # do not allow non-common VG to continue
-        validators.has_common_vg(self.db_or_journal_devs)
-
-        # find the common VG to calculate how much is available
-        self.common_vg = self.get_common_vg(self.db_or_journal_devs)
-
-        # find how many block.db LVs are possible from the common VG
-        if self.common_vg:
-            common_vg_size = disk.Size(gb=self.common_vg.free)
-        else:
-            common_vg_size = disk.Size(gb=0)
-
-        # non-VG SSDs
-        vg_members = set([d for d in self.db_or_journal_devs if d.is_lvm_member])
-        self.blank_db_devs = set(self.db_or_journal_devs).difference(vg_members)
-        self.total_blank_db_dev_size = disk.Size(b=0)
-        for blank_db_dev in self.blank_db_devs:
-            self.total_blank_db_dev_size += disk.Size(b=blank_db_dev.lvm_size.b)
-
-        self.total_available_db_space = self.total_blank_db_dev_size + common_vg_size
-
-        # If not configured, we default to 0, which is really "use as much as
-        # possible" captured by the `else` condition
-        if self.block_db_size.gb > 0:
-            try:
-                self.vg_extents = lvm.sizing(
-                    self.total_available_db_space.b, size=self.block_db_size.b * self.osds_per_device
-                )
-            except SizeAllocationError:
-                msg = "Not enough space in fast devices (%s) to create %s x %s block.db LV"
-                raise RuntimeError(
-                    msg % (self.total_available_db_space, self.osds_per_device, self.block_db_size)
-                )
-        else:
-            self.vg_extents = lvm.sizing(
-                self.total_available_db_space.b, parts=self.dbs_needed
-            )
-
-        # validate that number of block.db LVs possible are enough for number of
-        # OSDs proposed
-        if self.total_available_db_space.b == 0:
-            msg = "No space left in fast devices to create block.db LVs"
-            raise RuntimeError(msg)
-
-        # bluestore_block_db_size was unset, so we must set this to whatever
-        # size we get by dividing the total available space for block.db LVs
-        # into the number of block.db LVs needed (i.e. "as large as possible")
-        if self.block_db_size.b == 0:
-            self.block_db_size = self.total_available_db_space / self.dbs_needed
-            self.use_large_block_db = True
-
-        total_dbs_possible = self.total_available_db_space / self.block_db_size
-
-        if self.dbs_needed > total_dbs_possible:
-            msg = "Not enough space (%s) to create %s x %s block.db LVs" % (
-                self.total_available_db_space, self.dbs_needed, self.block_db_size,
-            )
-            raise RuntimeError(msg)
-
-    def _validate_wal_devs(self):
-        # do not allow non-common VG to continue
-        validators.has_common_vg(self.wal_devs)
-
-        # find the common VG to calculate how much is available
-        self.common_wal_vg = self.get_common_vg(self.wal_devs)
-
-        # find how many block.wal LVs are possible from the common VG
-        if self.common_wal_vg:
-            common_vg_size = disk.Size(gb=self.common_wal_vg.free)
-        else:
-            common_vg_size = disk.Size(gb=0)
-
-        # non-VG SSDs
-        vg_members = set([d for d in self.wal_devs if d.is_lvm_member])
-        self.blank_wal_devs = set(self.wal_devs).difference(vg_members)
-        self.total_blank_wal_dev_size = disk.Size(b=0)
-        for blank_wal_dev in self.blank_wal_devs:
-            self.total_blank_wal_dev_size += disk.Size(b=blank_wal_dev.lvm_size.b)
-
-        self.total_available_wal_space = self.total_blank_wal_dev_size + common_vg_size
-
-        # If not configured, we default to 0, which is really "use as much as
-        # possible" captured by the `else` condition
-        if self.block_wal_size.gb > 0:
-            try:
-                self.vg_extents = lvm.sizing(
-                    self.total_available_wal_space.b, size=self.block_wal_size.b * self.osds_per_device
-                )
-            except SizeAllocationError:
-                msg = "Not enough space in fast devices (%s) to create %s x %s block.wal LV"
-                raise RuntimeError(
-                    msg % (self.total_available_wal_space,
-                           self.osds_per_device, self.block_wal_size)
-                )
-        else:
-            self.wal_vg_extents = lvm.sizing(
-                self.total_available_wal_space.b, parts=self.wals_needed
-            )
-
-        # validate that number of block.wal LVs possible are enough for number of
-        # OSDs proposed
-        if self.total_available_wal_space.b == 0:
-            msg = "No space left in fast devices to create block.wal LVs"
-            raise RuntimeError(msg)
-
-        # bluestore_block_wal_size was unset, so we must set this to whatever
-        # size we get by dividing the total available space for block.wal LVs
-        # into the number of block.wal LVs needed (i.e. "as large as possible")
-        if self.block_wal_size.b == 0:
-            self.block_wal_size = self.total_available_wal_space / self.wals_needed
-            self.use_large_block_wal = True
-
-        total_wals_possible = self.total_available_wal_space / self.block_wal_size
-
-        if self.wals_needed > total_wals_possible:
-            msg = "Not enough space (%s) to create %s x %s block.wal LVs" % (
-                self.total_available_wal_space, self.wals_needed,
-                self.block_wal_size,
-            )
-            raise RuntimeError(msg)
-
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/strategies/filestore.py b/src/ceph-volume/ceph_volume/devices/lvm/strategies/filestore.py
deleted file mode 100644 (file)
index bc4eaba..0000000
+++ /dev/null
@@ -1,389 +0,0 @@
-from __future__ import print_function
-from ceph_volume.util import disk, prepare
-from ceph_volume.api import lvm
-from . import validators
-from .strategies import Strategy
-from .strategies import MixedStrategy
-from ceph_volume.devices.lvm.create import Create
-from ceph_volume.devices.lvm.prepare import Prepare
-from ceph_volume.util import templates, system
-from ceph_volume.exceptions import SizeAllocationError
-
-
-def get_journal_size(args):
-    """
-    Helper for Filestore strategies, to prefer the --journal-size value from
-    the CLI over anything that might be in a ceph configuration file (if any).
-    """
-    if args.journal_size:
-        return disk.Size(mb=args.journal_size)
-    else:
-        return prepare.get_journal_size(lv_format=False)
-
-
-class SingleType(Strategy):
-    """
-    Support for all SSDs, or all HDDs, data and journal LVs will be colocated
-    in the same device
-    """
-
-
-    def __init__(self, args, data_devs):
-        super(SingleType, self).__init__(args, data_devs)
-        self.journal_size = get_journal_size(args)
-        self.validate_compute()
-
-    @classmethod
-    def with_auto_devices(cls, args, devices):
-        return cls(args, devices)
-
-    @staticmethod
-    def type():
-        return "filestore.SingleType"
-
-    def report_pretty(self, filtered_devices):
-        string = ""
-        if filtered_devices:
-            string += templates.filtered_devices(filtered_devices)
-        string += templates.total_osds.format(
-            total_osds=self.total_osds
-        )
-        string += templates.osd_component_titles
-
-        for osd in self.computed['osds']:
-            string += templates.osd_header
-            if 'osd_id' in osd:
-                string += templates.osd_reused_id.format(
-                    id_=osd['osd_id'])
-            string += templates.osd_component.format(
-                _type='[data]',
-                path=osd['data']['path'],
-                size=osd['data']['human_readable_size'],
-                percent=osd['data']['percentage'],
-            )
-            string += templates.osd_component.format(
-                _type='[journal]',
-                path=osd['journal']['path'],
-                size=osd['journal']['human_readable_size'],
-                percent=osd['journal']['percentage'],
-            )
-
-        print(string)
-
-    def validate(self):
-        """
-        Ensure that the minimum requirements for this type of scenario is
-        met, raise an error if the provided devices would not work
-        """
-        # validate minimum size for all devices
-        validators.minimum_device_size(self.data_devs, osds_per_device=self.osds_per_device)
-
-        # validate collocation
-        validators.minimum_device_collocated_size(
-            self.data_devs, self.journal_size, osds_per_device=self.osds_per_device
-        )
-
-        # make sure that data devices do not have any LVs
-        validators.no_lvm_membership(self.data_devs)
-
-        if self.osd_ids:
-            self._validate_osd_ids()
-
-    def compute(self):
-        """
-        Go through the rules needed to properly size the lvs, return
-        a dictionary with the result
-        """
-        # chose whichever is the one group we have to compute against
-        osds = self.computed['osds']
-        for device in self.data_devs:
-            for osd in range(self.osds_per_device):
-                device_size = disk.Size(b=device.lvm_size.b)
-                osd_size = device_size / self.osds_per_device
-                journal_size = self.journal_size
-                data_size = osd_size - journal_size
-                data_percentage = data_size * 100 / device_size
-                osd = {'data': {}, 'journal': {}}
-                osd['data']['path'] = device.abspath
-                osd['data']['size'] = data_size.b.as_int()
-                osd['data']['parts'] = self.osds_per_device
-                osd['data']['percentage'] = int(data_percentage)
-                osd['data']['human_readable_size'] = str(data_size)
-                osd['journal']['path'] = device.abspath
-                osd['journal']['size'] = journal_size.b.as_int()
-                osd['journal']['percentage'] = int(100 - data_percentage)
-                osd['journal']['human_readable_size'] = str(journal_size)
-
-                if self.osd_ids:
-                    osd['osd_id'] = self.osd_ids.pop()
-
-                osds.append(osd)
-
-        self.computed['changed'] = len(osds) > 0
-
-    def execute(self):
-        """
-        Create vgs/lvs from the incoming set of devices, assign their roles
-        (data, journal) and offload the OSD creation to ``lvm create``
-        """
-        device_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])
-
-        # create 1 vg per data device first, mapping them to the device path,
-        # when the lvs get created later, it can create as many as needed,
-        # including the journals since it is going to be collocated
-        for osd in self.computed['osds']:
-            vg = device_vgs.get(osd['data']['path'])
-            if not vg:
-                vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-filestore')
-                device_vgs[osd['data']['path']] = vg
-
-        # create the lvs from the per-device vg created in the beginning
-        for osd in self.computed['osds']:
-            data_path = osd['data']['path']
-            data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
-            device_vg = device_vgs[data_path]
-            data_lv_extents = device_vg.sizing(size=data_lv_size)['extents']
-            journal_lv_extents = device_vg.sizing(size=self.journal_size.gb.as_int())['extents']
-            data_uuid = system.generate_uuid()
-            data_lv = lvm.create_lv(
-                'osd-data', data_uuid, vg=device_vg, extents=data_lv_extents)
-            journal_uuid = system.generate_uuid()
-            journal_lv = lvm.create_lv(
-                'osd-journal', journal_uuid, vg=device_vg, extents=journal_lv_extents)
-
-            command = ['--filestore', '--data']
-            command.append('%s/%s' % (device_vg.name, data_lv.name))
-            command.extend(['--journal', '%s/%s' % (device_vg.name, journal_lv.name)])
-            if self.args.dmcrypt:
-                command.append('--dmcrypt')
-            if self.args.no_systemd:
-                command.append('--no-systemd')
-            if self.args.crush_device_class:
-                command.extend(['--crush-device-class', self.args.crush_device_class])
-            if 'osd_id' in osd:
-                command.extend(['--osd-id', osd['osd_id']])
-
-            if self.args.prepare:
-                Prepare(command).main()
-            else:
-                Create(command).main()
-
-
-class MixedType(MixedStrategy):
-    """
-    Supports mixing HDDs and SSDs: journals are placed on the SSDs, while
-    the HDDs are used entirely for data.
-
-    If an existing common VG is detected on the SSDs, it is extended when
-    blank SSDs are also passed, otherwise it is used directly.
-    """
-
-
-    def __init__(self, args, data_devs, journal_devs):
-        super(MixedType, self).__init__(args, data_devs, journal_devs)
-        self.blank_journal_devs = []
-        self.journals_needed = len(self.data_devs) * self.osds_per_device
-        self.journal_size = get_journal_size(args)
-        self.validate_compute()
-
-    @classmethod
-    def with_auto_devices(cls, args, devices):
-        data_devs, journal_devs = cls.split_devices_rotational(devices)
-        return cls(args, data_devs, journal_devs)
-
-    @staticmethod
-    def type():
-        return "filestore.MixedType"
-
-    def report_pretty(self, filtered_devices):
-        string = ""
-        if filtered_devices:
-            string += templates.filtered_devices(filtered_devices)
-        string += templates.total_osds.format(
-            total_osds=self.total_osds
-        )
-
-        string += templates.ssd_volume_group.format(
-            target='journal',
-            total_lv_size=str(self.total_available_journal_space),
-            total_lvs=self.journals_needed,
-            block_db_devices=', '.join([d.path for d in self.db_or_journal_devs]),
-            lv_size=str(self.journal_size),
-            total_osds=self.journals_needed
-        )
-
-        string += templates.osd_component_titles
-
-        for osd in self.computed['osds']:
-            string += templates.osd_header
-            if 'osd_id' in osd:
-                string += templates.osd_reused_id.format(
-                    id_=osd['osd_id'])
-            string += templates.osd_component.format(
-                _type='[data]',
-                path=osd['data']['path'],
-                size=osd['data']['human_readable_size'],
-                percent=osd['data']['percentage'],
-            )
-            string += templates.osd_component.format(
-                _type='[journal]',
-                path=osd['journal']['path'],
-                size=osd['journal']['human_readable_size'],
-                percent=osd['journal']['percentage'],
-            )
-
-        print(string)
-
-    def validate(self):
-        """
-        Ensure that the minimum requirements for this type of scenario are
-        met, raising an error if the provided devices would not work
-        """
-        # validate minimum size for all devices
-        validators.minimum_device_size(self.devices, osds_per_device=self.osds_per_device)
-
-        # make sure that data devices do not have any LVs
-        validators.no_lvm_membership(self.data_devs)
-
-        # do not allow non-common VG to continue
-        validators.has_common_vg(self.db_or_journal_devs)
-
-        # find the common VG to calculate how much is available
-        self.common_vg = self.get_common_vg(self.db_or_journal_devs)
-
-        # find how many journals are possible from the common VG
-        if self.common_vg:
-            common_vg_size = disk.Size(b=self.common_vg.free)
-        else:
-            common_vg_size = disk.Size(gb=0)
-
-        # non-VG SSDs
-        vg_ssds = set([d for d in self.db_or_journal_devs if d.is_lvm_member])
-        self.blank_journal_devs = set(self.db_or_journal_devs).difference(vg_ssds)
-        self.total_blank_journal_dev_size = disk.Size(b=0)
-        for blank_journal_dev in self.blank_journal_devs:
-            self.total_blank_journal_dev_size += disk.Size(b=blank_journal_dev.lvm_size.b)
-
-        self.total_available_journal_space = self.total_blank_journal_dev_size + common_vg_size
-
-        try:
-            self.vg_extents = lvm.sizing(
-                self.total_available_journal_space.b, size=self.journal_size.b * self.osds_per_device
-            )
-        except SizeAllocationError:
-            msg = "Not enough space in fast devices (%s) to create %s x %s journal LV"
-            raise RuntimeError(
-                msg % (self.total_available_journal_space, self.osds_per_device, self.journal_size)
-            )
-
-        # validate that the number of journals possible is enough for the
-        # number of OSDs proposed
-        total_journals_possible = self.total_available_journal_space / self.journal_size
-        if self.journals_needed > total_journals_possible:
-            msg = "Not enough space (%s) to create %s x %s journal LVs" % (
-                self.total_available_journal_space, self.journals_needed, self.journal_size
-            )
-            raise RuntimeError(msg)
-
-        if self.osd_ids:
-            self._validate_osd_ids()
-
-    def compute(self):
-        """
-        Go through the rules needed to properly size the lvs, return
-        a dictionary with the result
-        """
-        osds = self.computed['osds']
-
-        vg_free = int(self.total_available_journal_space.gb)
-        if not self.common_vg:
-            # there isn't a common vg, so a new one must be created with all
-            # the blank SSDs
-            self.computed['vg'] = {
-                'devices': ", ".join([ssd.abspath for ssd in self.blank_journal_devs]),
-                'parts': self.journals_needed,
-                'percentages': self.vg_extents['percentages'],
-                'sizes': self.journal_size.b.as_int(),
-                'size': self.total_blank_journal_dev_size.b.as_int(),
-                'human_readable_sizes': str(self.journal_size),
-                'human_readable_size': str(self.total_available_journal_space),
-            }
-            vg_name = 'vg/lv'
-        else:
-            vg_name = self.common_vg.name
-
-        for device in self.data_devs:
-            for _ in range(self.osds_per_device):
-                device_size = disk.Size(b=device.lvm_size.b)
-                data_size = device_size / self.osds_per_device
-                osd = {'data': {}, 'journal': {}}
-                osd['data']['path'] = device.path
-                osd['data']['size'] = data_size.b.as_int()
-                osd['data']['percentage'] = 100 / self.osds_per_device
-                osd['data']['human_readable_size'] = str(data_size)
-                osd['journal']['path'] = 'vg: %s' % vg_name
-                osd['journal']['size'] = self.journal_size.b.as_int()
-                osd['journal']['percentage'] = int(self.journal_size.gb * 100 / vg_free)
-                osd['journal']['human_readable_size'] = str(self.journal_size)
-
-                if self.osd_ids:
-                    osd['osd_id'] = self.osd_ids.pop(0)
-
-                osds.append(osd)
-
-        self.computed['changed'] = len(osds) > 0
-
-    def execute(self):
-        """
-        Create vgs/lvs from the incoming set of devices, assign their roles
-        (data, journal) and offload the OSD creation to ``lvm create``
-        """
-        blank_journal_dev_paths = [d.abspath for d in self.blank_journal_devs]
-        data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])
-
-        # no common vg is found, create one with all the blank SSDs
-        if not self.common_vg:
-            journal_vg = lvm.create_vg(blank_journal_dev_paths, name_prefix='ceph-journals')
-        # a vg exists that can be extended
-        elif self.common_vg and blank_journal_dev_paths:
-            journal_vg = lvm.extend_vg(self.common_vg, blank_journal_dev_paths)
-        # one common vg with nothing else to extend can be used directly
-        else:
-            journal_vg = self.common_vg
-
-        # create 1 vg per data device first, mapping them to the device path,
-        # when the lv gets created later, it can create as many as needed (or
-        # even just 1)
-        for osd in self.computed['osds']:
-            vg = data_vgs.get(osd['data']['path'])
-            if not vg:
-                vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-data')
-                data_vgs[osd['data']['path']] = vg
-
-        for osd in self.computed['osds']:
-            data_path = osd['data']['path']
-            data_vg = data_vgs[data_path]
-            data_lv_extents = data_vg.sizing(parts=1)['extents']
-            data_uuid = system.generate_uuid()
-            data_lv = lvm.create_lv(
-                'osd-data', data_uuid, vg=data_vg, extents=data_lv_extents)
-            journal_uuid = system.generate_uuid()
-            journal_lv = lvm.create_lv(
-                'osd-journal', journal_uuid, vg=journal_vg, size=self.journal_size)
-
-            command = ['--filestore', '--data']
-            command.append('%s/%s' % (data_vg.name, data_lv.name))
-            command.extend(['--journal', '%s/%s' % (journal_vg.name, journal_lv.name)])
-            if self.args.dmcrypt:
-                command.append('--dmcrypt')
-            if self.args.no_systemd:
-                command.append('--no-systemd')
-            if self.args.crush_device_class:
-                command.extend(['--crush-device-class', self.args.crush_device_class])
-            if 'osd_id' in osd:
-                command.extend(['--osd-id', osd['osd_id']])
-
-            if self.args.prepare:
-                Prepare(command).main()
-            else:
-                Create(command).main()
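
The capacity check in MixedType.validate() above reduces to a few lines of Size arithmetic. A minimal sketch with hypothetical device counts and sizes (dividing one disk.Size by another yields a plain ratio, as the deleted code relies on):

    from ceph_volume.util import disk

    journal_size = disk.Size(gb=5)
    data_devs = 4                  # hypothetical count of spinning data devices
    osds_per_device = 2
    available = disk.Size(gb=30)   # blank SSD space plus common-VG free space

    journals_needed = data_devs * osds_per_device    # 8
    journals_possible = available / journal_size     # Size / Size -> 6.0
    if journals_needed > journals_possible:
        raise RuntimeError('Not enough space (%s) to create %s x %s journal LVs'
                           % (available, journals_needed, journal_size))
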
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/strategies/strategies.py b/src/ceph-volume/ceph_volume/devices/lvm/strategies/strategies.py
deleted file mode 100644 (file)
index 75d748f..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-import json
-from ceph_volume.util.prepare import osd_id_available
-from ceph_volume.api.lvm import get_device_vgs
-
-class Strategy(object):
-
-    def __init__(self, args, data_devs, db_or_journal_devs=[], wal_devs=[]):
-        '''
-        Note that this ctor is used by both bluestore and filestore strategies
-        to reduce code duplication. A filestore strategy will always pass an
-        empty list for wal_devs.
-        '''
-        self.args = args
-        self.osd_ids = args.osd_ids
-        self.osds_per_device = args.osds_per_device
-        self.devices = data_devs + wal_devs + db_or_journal_devs
-        self.data_devs = data_devs
-        self.db_or_journal_devs = db_or_journal_devs
-        self.wal_devs = wal_devs
-        self.computed = {'osds': [], 'vgs': []}
-
-    @staticmethod
-    def split_devices_rotational(devices):
-        data_devs = [device for device in devices if device.rotational]
-        db_or_journal_devs = [device for device in devices if not device.rotational]
-        return data_devs, db_or_journal_devs
-
-
-    def validate_compute(self):
-        if self.devices:
-            self.validate()
-            self.compute()
-        else:
-            self.computed["changed"] = False
-
-    def report_json(self, filtered_devices):
-        # add filtered devices to report
-        report = self.computed.copy()
-        report['filtered_devices'] = filtered_devices
-        print(json.dumps(report, indent=4, sort_keys=True))
-
-    def _validate_osd_ids(self):
-        unavailable_ids = [id_ for id_ in self.osd_ids if
-                           not osd_id_available(id_)]
-        if unavailable_ids:
-            msg = ("Not all specfied OSD ids are available: {}"
-                   "unavailable").format(",".join(unavailable_ids))
-            raise RuntimeError(msg)
-
-    @property
-    def total_osds(self):
-        return len(self.data_devs) * self.osds_per_device
-
-    # protect against base class instantiation and incomplete implementations.
-    # We could also use the abc module and implement this as an
-    # AbstractBaseClass
-    def compute(self):
-        raise NotImplementedError('compute() must be implemented in a child class')
-
-    def execute(self):
-        raise NotImplementedError('execute() must be implemented in a child class')
-
-class MixedStrategy(Strategy):
-
-    def get_common_vg(self, devs):
-        # find all the vgs associated with the current device
-        for dev in devs:
-            vgs = get_device_vgs(dev.abspath)
-            if vgs:
-                return vgs[0]
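
The base class above mostly encodes a contract. A minimal, hypothetical subclass against this pre-refactor layout shows what validate_compute() expected from its children:

    from ceph_volume.devices.lvm.strategies.strategies import Strategy

    class NoopStrategy(Strategy):

        def validate(self):
            pass  # a real strategy runs the validators here

        def compute(self):
            # a real strategy appends the planned layout to self.computed['osds']
            self.computed['changed'] = False

        def execute(self):
            pass  # a real strategy creates VGs/LVs and defers to `lvm create`
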
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/strategies/validators.py b/src/ceph-volume/ceph_volume/devices/lvm/strategies/validators.py
deleted file mode 100644 (file)
index e631e10..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-from ceph_volume.util import disk
-from ceph_volume.api import lvm
-
-
-def minimum_device_size(devices, osds_per_device=1, min_size=5):
-    """
-    Ensure that the minimum requirements for this type of scenario is
-    met, raise an error if the provided devices would not work
-    """
-    msg = 'Unable to use device %s %s, LVs would be smaller than {}GB'.format(min_size)
-    for device in devices:
-        device_size = disk.Size(b=device.lvm_size.b)
-        lv_size = device_size / osds_per_device
-        if lv_size < disk.Size(gb=min_size):
-            raise RuntimeError(msg % (device_size, device.path))
-
-
-def minimum_device_collocated_size(devices, journal_size, osds_per_device=1):
-    """
-    Similar to ``minimum_device_size``, but takes into account that the
-    journal size reduces the space left on the device
-    """
-    msg = 'Unable to use device %s %s, LVs would be smaller than 5GB'
-    for device in devices:
-        device_size = disk.Size(b=device.lvm_size.b)
-        lv_size = (device_size / osds_per_device) - journal_size
-        if lv_size < disk.Size(gb=5):
-            raise RuntimeError(msg % (device_size, device.path))
-
-
-def no_lvm_membership(devices):
-    """
-    Do not allow devices that are part of LVM
-    """
-    msg = 'Unable to use device, already a member of LVM: %s'
-    for device in devices:
-        if device.is_lvm_member:
-            raise RuntimeError(msg % device.abspath)
-
-
-def has_common_vg(ssd_devices):
-    """
-    Ensure that devices have a common VG between them
-    """
-    msg = 'Could not find a common VG between devices: %s'
-    ssd_vgs = {}
-
-    for ssd_device in ssd_devices:
-        vgs = lvm.get_device_vgs(ssd_device.abspath)
-        if not vgs:
-            continue
-        for vg in vgs:
-            try:
-                ssd_vgs[vg.name].append(ssd_device.abspath)
-            except KeyError:
-                ssd_vgs[vg.name] = [ssd_device.abspath]
-    # len of 1 means they all have a common vg, and len of 0 means that these
-    # are blank
-    if len(ssd_vgs) <= 1:
-        return
-    raise RuntimeError(msg % ', '.join(ssd_vgs.keys()))
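
The 5GB floor in minimum_device_size() is easy to check by hand; a small illustration with a hypothetical device size:

    from ceph_volume.util import disk

    device_size = disk.Size(b=12073740000)   # ~11.24 GB
    lv_size = device_size / 4                # 4 OSDs per device -> ~2.81 GB each
    assert lv_size < disk.Size(gb=5)         # minimum_device_size() would raise
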
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/__init__.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/test_bluestore.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/test_bluestore.py
deleted file mode 100644 (file)
index ba54ea5..0000000
+++ /dev/null
@@ -1,203 +0,0 @@
-import pytest
-from ceph_volume.devices.lvm.strategies import bluestore
-from ceph_volume.api import lvm
-
-
-class TestSingleType(object):
-
-    def test_hdd_device_is_large_enough(self, fakedevice, factory):
-        args = factory(filtered_devices=[], osds_per_device=1,
-                       block_db_size=None, osd_ids=[])
-        devices = [
-            fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=True, sys_api=dict(size=6073740000))
-        ]
-        computed_osd = bluestore.SingleType.with_auto_devices(args, devices).computed['osds'][0]
-        assert computed_osd['data']['percentage'] == 100
-        assert computed_osd['data']['parts'] == 1
-        assert computed_osd['data']['human_readable_size'] == '5.00 GB'
-        assert computed_osd['data']['path'] == '/dev/sda'
-
-    def test_ssd_device_is_large_enough(self, fakedevice, factory):
-        args = factory(filtered_devices=[], osds_per_device=1,
-                       block_db_size=None, osd_ids=[])
-        devices = [
-            fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=False, sys_api=dict(size=6073740000))
-        ]
-        computed_osd = bluestore.SingleType.with_auto_devices(args, devices).computed['osds'][0]
-        assert computed_osd['data']['percentage'] == 100
-        assert computed_osd['data']['parts'] == 1
-        assert computed_osd['data']['human_readable_size'] == '5.00 GB'
-        assert computed_osd['data']['path'] == '/dev/sda'
-
-    def test_device_cannot_have_many_osds_per_device(self, fakedevice, factory):
-        args = factory(filtered_devices=[], osds_per_device=3,
-                       block_db_size=None, osd_ids=[])
-        devices = [
-            fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=True, sys_api=dict(size=6073740000))
-        ]
-        with pytest.raises(RuntimeError) as error:
-            bluestore.SingleType.with_auto_devices(args, devices)
-        assert 'Unable to use device 5.66 GB /dev/sda' in str(error.value)
-
-    def test_device_is_lvm_member_fails(self, fakedevice, factory):
-        args = factory(filtered_devices=[], osds_per_device=1,
-                       block_db_size=None, osd_ids=[])
-        devices = [
-            fakedevice(used_by_ceph=False, is_lvm_member=True, rotational=True, sys_api=dict(size=6073740000))
-        ]
-        with pytest.raises(RuntimeError) as error:
-            bluestore.SingleType.with_auto_devices(args, devices)
-        assert 'Unable to use device, already a member of LVM' in str(error.value)
-
-
-class TestMixedType(object):
-
-    def test_filter_all_data_devs(self, fakedevice, factory, monkeypatch):
-        # in this scenario the user passed an already-used device for data
-        # and an unused device to be used as the db device.
-        db_dev = fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=False, sys_api=dict(size=6073740000))
-        data_dev = fakedevice(used_by_ceph=True, is_lvm_member=False, rotational=True, sys_api=dict(size=6073740000))
-        args = factory(filtered_devices=[data_dev], osds_per_device=1,
-                       block_db_size=None, block_wal_size=None,
-                       osd_ids=[])
-        monkeypatch.setattr(lvm, 'VolumeGroup', lambda x, **kw: [])
-        bluestore.MixedType(args, [], [db_dev], [])
-
-
-class TestMixedTypeConfiguredSize(object):
-    # uses a block.db size that has been configured via ceph.conf, instead of
-    # defaulting to 'as large as possible'
-
-    def test_hdd_device_is_large_enough(self, fakedevice, factory, conf_ceph):
-        # 3GB block.db in ceph.conf
-        conf_ceph(get_safe=lambda *a: 3147483640)
-        args = factory(filtered_devices=[], osds_per_device=1,
-                       block_db_size=None, block_wal_size=None,
-                       osd_ids=[])
-        ssd = fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=False, sys_api=dict(size=6073740000))
-        hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=True, sys_api=dict(size=6073740000))
-        devices = [ssd, hdd]
-
-        osd = bluestore.MixedType.with_auto_devices(args, devices).computed['osds'][0]
-        assert osd['data']['percentage'] == 100
-        assert osd['data']['human_readable_size'] == '5.66 GB'
-        assert osd['data']['path'] == '/dev/sda'
-        # a new vg will be created
-        assert osd['block.db']['path'] == 'vg: vg/lv'
-        assert osd['block.db']['percentage'] == 100
-
-    def test_ssd_device_is_not_large_enough(self, fakedevice, factory, conf_ceph):
-        # 7GB block.db in ceph.conf
-        conf_ceph(get_safe=lambda *a: 7747483640)
-        args = factory(filtered_devices=[], osds_per_device=1,
-                       block_db_size=None, block_wal_size=None,
-                       osd_ids=[])
-        ssd = fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=False, sys_api=dict(size=6073740000))
-        hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=True, sys_api=dict(size=6073740000))
-        devices = [ssd, hdd]
-
-        with pytest.raises(RuntimeError) as error:
-            bluestore.MixedType.with_auto_devices(args, devices).computed['osds'][0]
-        expected = 'Not enough space in fast devices (5.66 GB) to create 1 x 7.22 GB block.db LV'
-        assert expected in str(error.value)
-
-    def test_multi_hdd_device_is_not_large_enough(self, fakedevice, factory, conf_ceph):
-        # 3GB block.db in ceph.conf
-        conf_ceph(get_safe=lambda *a: 3147483640)
-        args = factory(filtered_devices=[], osds_per_device=2,
-                       block_db_size=None, block_wal_size=None,
-                       osd_ids=[])
-        ssd = fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=False, sys_api=dict(size=60737400000))
-        hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=True, sys_api=dict(size=6073740000))
-        devices = [ssd, hdd]
-
-        with pytest.raises(RuntimeError) as error:
-            bluestore.MixedType.with_auto_devices(args, devices)
-        expected = 'Unable to use device 5.66 GB /dev/sda, LVs would be smaller than 5GB'
-        assert expected in str(error.value)
-
-
-class TestMixedTypeLargeAsPossible(object):
-
-    def test_hdd_device_is_large_enough(self, fakedevice, factory, conf_ceph):
-        conf_ceph(get_safe=lambda *a: None)
-        args = factory(filtered_devices=[], osds_per_device=1,
-                       block_db_size=None, block_wal_size=None,
-                       osd_ids=[])
-        ssd = fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=False, sys_api=dict(size=6073740000))
-        hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=True, sys_api=dict(size=6073740000))
-        devices = [ssd, hdd]
-
-        osd = bluestore.MixedType.with_auto_devices(args, devices).computed['osds'][0]
-        assert osd['data']['percentage'] == 100
-        assert osd['data']['human_readable_size'] == '5.66 GB'
-        assert osd['data']['path'] == '/dev/sda'
-        # a new vg will be created
-        assert osd['block.db']['path'] == 'vg: vg/lv'
-        # as large as possible
-        assert osd['block.db']['percentage'] == 100
-
-    def test_multi_hdd_device_is_large_enough(self, fakedevice, factory, conf_ceph):
-        conf_ceph(get_safe=lambda *a: None)
-        args = factory(filtered_devices=[], osds_per_device=2,
-                       block_db_size=None, block_wal_size=None,
-                       osd_ids=[])
-        ssd = fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=False, sys_api=dict(size=60073740000))
-        hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=True, sys_api=dict(size=60073740000))
-        devices = [ssd, hdd]
-
-        osd = bluestore.MixedType.with_auto_devices(args, devices).computed['osds'][0]
-        assert osd['data']['percentage'] == 50
-        assert osd['data']['human_readable_size'] == '27.97 GB'
-        assert osd['data']['path'] == '/dev/sda'
-        # a new vg will be created
-        assert osd['block.db']['path'] == 'vg: vg/lv'
-        # as large as possible
-        assert osd['block.db']['percentage'] == 50
-
-    def test_multi_hdd_device_is_not_large_enough(self, fakedevice, factory, conf_ceph):
-        conf_ceph(get_safe=lambda *a: None)
-        args = factory(filtered_devices=[], osds_per_device=2,
-                       block_db_size=None, block_wal_size=None,
-                       osd_ids=[])
-        ssd = fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=False, sys_api=dict(size=60737400000))
-        hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=True, sys_api=dict(size=6073740000))
-        devices = [ssd, hdd]
-
-        with pytest.raises(RuntimeError) as error:
-            bluestore.MixedType.with_auto_devices(args, devices)
-        expected = 'Unable to use device 5.66 GB /dev/sda, LVs would be smaller than 5GB'
-        assert expected in str(error.value)
-
-
-class TestMixedTypeWithExplicitDevices(object):
-
-    def test_multi_hdd_device_is_large_enough(self, fakedevice, factory, conf_ceph):
-        conf_ceph(get_safe=lambda *a: None)
-        args = factory(filtered_devices=[], osds_per_device=2,
-                       block_db_size=None, block_wal_size=None,
-                       osd_ids=[])
-        ssd = fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=False, sys_api=dict(size=60073740000))
-        hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=True, sys_api=dict(size=60073740000))
-
-        osd = bluestore.MixedType(args, [hdd], [], [ssd]).computed['osds'][0]
-        assert osd['data']['percentage'] == 50
-        assert osd['data']['human_readable_size'] == '27.97 GB'
-        assert osd['data']['path'] == '/dev/sda'
-        # a new vg will be created
-        assert osd['block.wal']['path'] == 'vg: vg/lv'
-        # as large as possible
-        assert osd['block.wal']['percentage'] == 50
-
-    def test_wal_device_is_not_large_enough(self, fakedevice, factory, conf_ceph):
-        conf_ceph(get_safe=lambda *a: None)
-        args = factory(filtered_devices=[], osds_per_device=2,
-                       block_db_size=None, block_wal_size=None,
-                       osd_ids=[])
-        ssd = fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=False, sys_api=dict(size=1610612736))
-        hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=True, sys_api=dict(size=60073740000))
-
-        with pytest.raises(RuntimeError) as error:
-            bluestore.MixedType(args, [hdd], [], [ssd]).computed['osds'][0]
-        expected = 'Unable to use device 1.50 GB /dev/sda, LVs would be smaller than 1GB'
-        assert expected in str(error.value), str(error)
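
The recurring '5.66 GB' string in these fixtures is just the binary-unit rendering of the fake 6073740000-byte device:

    # 6073740000 bytes in 1024-based gigabytes, as disk.Size renders them
    print('%.2f GB' % (6073740000 / (1024.0 ** 3)))   # -> 5.66 GB
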
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/test_filestore.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/test_filestore.py
deleted file mode 100644 (file)
index 4fd94f5..0000000
+++ /dev/null
@@ -1,218 +0,0 @@
-import pytest
-from mock.mock import patch
-from ceph_volume.devices.lvm.strategies import filestore
-from ceph_volume.api import lvm
-
-
-class TestSingleType(object):
-
-    def test_hdd_device_is_large_enough(self, fakedevice, factory, conf_ceph):
-        conf_ceph(get_safe=lambda *a: '5120')
-        args = factory(filtered_devices=[], osds_per_device=1,
-                       journal_size=None, osd_ids=[])
-        devices = [
-            fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=True, sys_api=dict(size=12073740000))
-        ]
-        computed_osd = filestore.SingleType.with_auto_devices(args, devices).computed['osds'][0]
-        assert computed_osd['data']['percentage'] == 55
-        assert computed_osd['data']['parts'] == 1
-        assert computed_osd['data']['human_readable_size'] == '6.24 GB'
-        assert computed_osd['data']['path'] == '/dev/sda'
-
-    def test_hdd_device_with_large_journal(self, fakedevice, factory, conf_ceph):
-        conf_ceph(get_safe=lambda *a: '5120')
-        args = factory(filtered_devices=[], osds_per_device=1,
-                       journal_size=None, osd_ids=[])
-        devices = [
-            fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=True, sys_api=dict(size=6073740000))
-        ]
-        with pytest.raises(RuntimeError) as error:
-            filestore.SingleType.with_auto_devices(args, devices)
-        msg = "Unable to use device 5.66 GB /dev/sda, LVs would be smaller than 5GB"
-        assert msg in str(error.value)
-
-    def test_ssd_device_is_large_enough(self, fakedevice, factory, conf_ceph):
-        conf_ceph(get_safe=lambda *a: '5120')
-        args = factory(filtered_devices=[], osds_per_device=1,
-                       journal_size=None, osd_ids=[])
-        devices = [
-            fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=False, sys_api=dict(size=12073740000))
-        ]
-        computed_osd = filestore.SingleType.with_auto_devices(args, devices).computed['osds'][0]
-        assert computed_osd['data']['percentage'] == 55
-        assert computed_osd['data']['parts'] == 1
-        assert computed_osd['data']['human_readable_size'] == '6.24 GB'
-        assert computed_osd['data']['path'] == '/dev/sda'
-
-    def test_ssd_device_with_large_journal(self, fakedevice, factory, conf_ceph):
-        conf_ceph(get_safe=lambda *a: '5120')
-        args = factory(filtered_devices=[], osds_per_device=1,
-                       journal_size=None, osd_ids=[])
-        devices = [
-            fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=False, sys_api=dict(size=6073740000))
-        ]
-        with pytest.raises(RuntimeError) as error:
-            filestore.SingleType.with_auto_devices(args, devices)
-        msg = "Unable to use device 5.66 GB /dev/sda, LVs would be smaller than 5GB"
-        assert msg in str(error.value)
-
-    def test_ssd_device_multi_osd(self, fakedevice, factory, conf_ceph):
-        conf_ceph(get_safe=lambda *a: '5120')
-        args = factory(filtered_devices=[], osds_per_device=4,
-                       journal_size=None, osd_ids=[])
-        devices = [
-            fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=False, sys_api=dict(size=16073740000))
-        ]
-        with pytest.raises(RuntimeError) as error:
-            filestore.SingleType.with_auto_devices(args, devices)
-        msg = "Unable to use device 14.97 GB /dev/sda, LVs would be smaller than 5GB"
-        assert msg in str(error.value)
-
-    def test_hdd_device_multi_osd(self, fakedevice, factory, conf_ceph):
-        conf_ceph(get_safe=lambda *a: '5120')
-        args = factory(filtered_devices=[], osds_per_device=4,
-                       journal_size=None, osd_ids=[])
-        devices = [
-            fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=True, sys_api=dict(size=16073740000))
-        ]
-        with pytest.raises(RuntimeError) as error:
-            filestore.SingleType.with_auto_devices(args, devices)
-        msg = "Unable to use device 14.97 GB /dev/sda, LVs would be smaller than 5GB"
-        assert msg in str(error.value)
-
-    def test_device_is_lvm_member_fails(self, fakedevice, factory, conf_ceph):
-        conf_ceph(get_safe=lambda *a: '5120')
-        args = factory(filtered_devices=[], osds_per_device=1,
-                       journal_size=None, osd_ids=[])
-        devices = [
-            fakedevice(used_by_ceph=False, is_lvm_member=True, rotational=True, sys_api=dict(size=12073740000))
-        ]
-        with pytest.raises(RuntimeError) as error:
-            filestore.SingleType.with_auto_devices(args, devices)
-        assert 'Unable to use device, already a member of LVM' in str(error.value)
-
-    def test_hdd_device_with_small_configured_journal(self, fakedevice, factory, conf_ceph):
-        conf_ceph(get_safe=lambda *a: '120')
-        args = factory(filtered_devices=[], osds_per_device=1,
-                       journal_size=None, osd_ids=[])
-        devices = [
-            fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=True, sys_api=dict(size=6073740000))
-        ]
-        with pytest.raises(RuntimeError) as error:
-            filestore.SingleType.with_auto_devices(args, devices)
-        msg = "journal sizes must be larger than 2GB, detected: 120.00 MB"
-        assert msg in str(error.value)
-
-    def test_ssd_device_with_small_configured_journal(self, fakedevice, factory, conf_ceph):
-        conf_ceph(get_safe=lambda *a: '120')
-        args = factory(filtered_devices=[], osds_per_device=1,
-                       journal_size=None, osd_ids=[])
-        devices = [
-            fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=False, sys_api=dict(size=6073740000))
-        ]
-        with pytest.raises(RuntimeError) as error:
-            filestore.SingleType.with_auto_devices(args, devices)
-        msg = "journal sizes must be larger than 2GB, detected: 120.00 MB"
-        assert msg in str(error.value)
-
-
-class TestMixedType(object):
-
-    def test_minimum_size_is_not_met(self, fakedevice, factory, conf_ceph):
-        conf_ceph(get_safe=lambda *a: '120')
-        args = factory(filtered_devices=[], osds_per_device=1,
-                       journal_size=None, osd_ids=[])
-        devices = [
-            fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=False, sys_api=dict(size=6073740000)),
-            fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=True, sys_api=dict(size=6073740000))
-        ]
-        with pytest.raises(RuntimeError) as error:
-            filestore.MixedType.with_auto_devices(args, devices)
-        msg = "journal sizes must be larger than 2GB, detected: 120.00 MB"
-        assert msg in str(error.value)
-
-    def test_ssd_device_is_not_large_enough(self, fakedevice, factory, conf_ceph):
-        conf_ceph(get_safe=lambda *a: '7120')
-        args = factory(filtered_devices=[], osds_per_device=1,
-                       journal_size=None, osd_ids=[])
-        devices = [
-            fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=False, sys_api=dict(size=6073740000)),
-            fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=True, sys_api=dict(size=6073740000))
-        ]
-        with pytest.raises(RuntimeError) as error:
-            filestore.MixedType.with_auto_devices(args, devices)
-        msg = "Not enough space in fast devices (5.66 GB) to create 1 x 6.95 GB journal LV"
-        assert msg in str(error.value)
-
-    def test_hdd_device_is_lvm_member_fails(self, fakedevice, factory, conf_ceph):
-        conf_ceph(get_safe=lambda *a: '5120')
-        args = factory(filtered_devices=[], osds_per_device=1,
-                       journal_size=None, osd_ids=[])
-        devices = [
-            fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=False, sys_api=dict(size=6073740000)),
-            fakedevice(used_by_ceph=False, is_lvm_member=True, rotational=True, sys_api=dict(size=6073740000))
-        ]
-        with pytest.raises(RuntimeError) as error:
-            filestore.MixedType.with_auto_devices(args, devices)
-        assert 'Unable to use device, already a member of LVM' in str(error.value)
-
-    @patch('ceph_volume.devices.lvm.strategies.strategies.MixedStrategy.get_common_vg')
-    def test_ssd_is_lvm_member_doesnt_fail(self,
-                                           patched_get_common_vg,
-                                           fakedevice,
-                                           factory,
-                                           conf_ceph):
-        ssd = fakedevice(
-            used_by_ceph=False, is_lvm_member=True, rotational=False, sys_api=dict(size=6073740000)
-        )
-        hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=True, sys_api=dict(size=6073740000))
-        vg = lvm.VolumeGroup(
-                vg_name='fast', lv_name='foo',
-                lv_path='/dev/vg/foo', lv_tags="ceph.type=data",
-                vg_extent_size=1024*1024*1024, vg_free_count=7)
-        patched_get_common_vg.return_value = vg
-
-
-        conf_ceph(get_safe=lambda *a: '5120')
-        args = factory(filtered_devices=[], osds_per_device=1,
-                       journal_size=None, osd_ids=[])
-        devices = [ssd, hdd]
-
-        result = filestore.MixedType.with_auto_devices(
-            args, devices).computed['osds'][0]
-        assert result['journal']['path'] == 'vg: fast'
-        assert result['journal']['percentage'] == 71
-        assert result['journal']['human_readable_size'] == '5.00 GB'
-
-    @patch('ceph_volume.api.lvm.get_device_vgs')
-    def test_no_common_vg(self, patched_get_device_vgs, fakedevice, factory, conf_ceph):
-        patched_get_device_vgs.side_effect = lambda x: [lvm.VolumeGroup(vg_name='{}'.format(x[-1]), vg_tags='')]
-        ssd1 = fakedevice(
-            used_by_ceph=False, is_lvm_member=True, rotational=False, sys_api=dict(size=6073740000)
-        )
-        ssd2 = fakedevice(
-            used_by_ceph=False, is_lvm_member=True, rotational=False, sys_api=dict(size=6073740000)
-        )
-        hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, rotational=True, sys_api=dict(size=6073740000))
-
-        conf_ceph(get_safe=lambda *a: '5120')
-        args = factory(filtered_devices=[], osds_per_device=1, osd_ids=[],
-                       journal_size=None)
-        devices = [ssd1, ssd2, hdd]
-
-        with pytest.raises(RuntimeError) as error:
-            filestore.MixedType.with_auto_devices(args, devices)
-        assert 'Could not find a common VG between devices' in str(error.value)
-
-    def test_ssd_device_fails_multiple_osds(self, fakedevice, factory, conf_ceph):
-        conf_ceph(get_safe=lambda *a: '15120')
-        args = factory(filtered_devices=[], osds_per_device=2,
-                       journal_size=None, osd_ids=[])
-        devices = [
-            fakedevice(is_lvm_member=False, rotational=False, sys_api=dict(size=16073740000)),
-            fakedevice(is_lvm_member=False, rotational=True, sys_api=dict(size=16073740000))
-        ]
-        with pytest.raises(RuntimeError) as error:
-            filestore.MixedType.with_auto_devices(args, devices)
-        msg = "Not enough space in fast devices (14.97 GB) to create 2 x 14.77 GB journal LV"
-        assert msg in str(error.value)
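
The percentage asserted in test_ssd_is_lvm_member_doesnt_fail follows from the patched VG geometry (7 free extents of 1GB) and the 5120MB journal configured via conf_ceph:

    vg_free_bytes = 7 * 1024 ** 3        # patched VG: 7 x 1GB free extents
    journal_bytes = 5120 * 1024 ** 2     # ceph.conf: 5120MB journal
    print(int(journal_bytes * 100.0 / vg_free_bytes))   # -> 71
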
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/test_validate.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/strategies/test_validate.py
deleted file mode 100644 (file)
index ebb9314..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-import pytest
-from ceph_volume.util import disk
-from ceph_volume.devices.lvm.strategies import validators
-
-
-class TestMinimumDeviceSize(object):
-
-    def test_size_is_larger_than_5gb(self, fakedevice):
-        devices = [fakedevice(sys_api=dict(size=6073740000))]
-        assert validators.minimum_device_size(devices) is None
-
-    def test_size_is_smaller_than_5gb(self, fakedevice):
-        devices = [fakedevice(sys_api=dict(size=1073740000))]
-        with pytest.raises(RuntimeError) as error:
-            validators.minimum_device_size(devices)
-        msg = "LVs would be smaller than 5GB"
-        assert msg in str(error.value)
-
-    def test_large_device_multiple_osds_fails(self, fakedevice):
-        devices = [fakedevice(sys_api=dict(size=6073740000))]
-        with pytest.raises(RuntimeError) as error:
-            validators.minimum_device_size(
-                devices, osds_per_device=4
-            )
-        msg = "LVs would be smaller than 5GB"
-        assert msg in str(error.value)
-
-
-class TestMinimumCollocatedDeviceSize(object):
-
-    def setup(self):
-        self.journal_size = disk.Size(gb=5)
-
-    def test_size_is_larger_than_5gb_large_journal(self, fakedevice):
-        devices = [fakedevice(sys_api=dict(size=6073740000))]
-        assert validators.minimum_device_collocated_size(devices, disk.Size(mb=1)) is None
-
-    def test_size_is_larger_than_5gb_large_journal_fails(self, fakedevice):
-        devices = [fakedevice(sys_api=dict(size=1073740000))]
-        with pytest.raises(RuntimeError) as error:
-            validators.minimum_device_collocated_size(devices, self.journal_size)
-        msg = "LVs would be smaller than 5GB"
-        assert msg in str(error.value)
-
-    def test_large_device_multiple_osds_fails(self, fakedevice):
-        devices = [fakedevice(sys_api=dict(size=16073740000))]
-        with pytest.raises(RuntimeError) as error:
-            validators.minimum_device_collocated_size(
-                devices, self.journal_size, osds_per_device=3
-            )
-        msg = "LVs would be smaller than 5GB"
-        assert msg in str(error.value)
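
The failing collocated case above can be verified by hand with the same hypothetical numbers the test uses:

    from ceph_volume.util import disk

    device_size = disk.Size(b=16073740000)     # ~14.97 GB
    osd_share = device_size / 3                # ~4.99 GB per OSD
    data_left = osd_share - disk.Size(gb=5)    # the journal consumes the share
    assert data_left < disk.Size(gb=5)         # -> the validator raises
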
index 884112ae7fe506124412d295b1d07786609757e9..85b7033c212a883af51bd71e8e1f06111d9f15ee 100644 (file)
@@ -95,6 +95,7 @@ def get_block_db_size(lv_format=True):
         logger.debug(
             'block.db has no size configuration, will fallback to using as much as possible'
         )
+        # TODO better to return disk.Size(b=0) here
         return None
     logger.debug('bluestore_block_db_size set to %s' % conf_db_size)
     db_size = disk.Size(b=str_to_int(conf_db_size))
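
A hypothetical sketch of what that TODO would buy callers: with disk.Size(b=0) as the sentinel, the None special case disappears and size arithmetic stays uniform:

    db_size = get_block_db_size()
    if db_size is None:                   # current contract
        db_size = disk.Size(b=0)          # what the TODO proposes returning
    use_all_available = db_size.b.as_int() == 0
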
index 6a140fbb2715275b9fec0ad1faf3bca7fa491f7d..a8b1eec4e61cd5df4bd5c890cd6d6a5932550717 100644 (file)
@@ -12,7 +12,7 @@ osd_reused_id = """
 
 
 osd_component = """
-  {_type: <15} {path: <55} {size: <15} {percent}%"""
+  {_type: <15} {path: <55} {size: <15} {percent:.2f}%"""
 
 
 total_osds = """