From 6f1592a1146529d352184c795aae8ce12f66e554 Mon Sep 17 00:00:00 2001
From: Jan Fajerski
Date: Fri, 11 Sep 2020 10:36:43 +0200
Subject: [PATCH] ceph-volume: address review comments

Signed-off-by: Jan Fajerski
---
 doc/ceph-volume/lvm/batch.rst                 | 91 ++++++++++++++-----
 .../ceph_volume/devices/lvm/batch.py          |  9 +-
 .../ceph_volume/devices/lvm/common.py         | 16 +---
 .../ceph_volume/devices/lvm/prepare.py        |  6 +-
 .../tests/devices/lvm/test_common.py          |  9 ++
 5 files changed, 91 insertions(+), 40 deletions(-)
 create mode 100644 src/ceph-volume/ceph_volume/tests/devices/lvm/test_common.py

diff --git a/doc/ceph-volume/lvm/batch.rst b/doc/ceph-volume/lvm/batch.rst
index 8fc3fd0665326..6033b5aaf1b5a 100644
--- a/doc/ceph-volume/lvm/batch.rst
+++ b/doc/ceph-volume/lvm/batch.rst
@@ -20,17 +20,14 @@ are supported.
 Automatic sorting of disks
 --------------------------
 
-If ``batch`` receives only a single list of data devices and the ``--no-auto`` option
- is *not* passed (currently the default), ``ceph-volume`` will auto-sort disks by its rotational
- property and use non-rotating disks for ``block.db`` or ``journal`` depending
- on the objectstore used.
-This default behavior is now DEPRECATED and will be removed in future releases. Instead
- an ``auto`` option is introduced to retain this behavior.
-It is recommended to make use of the explicit device lists for ``block.db``,
- ``block.wal`` and ``journal``.
+If ``batch`` receives only a single list of data devices and no other options
+are passed, ``ceph-volume`` will auto-sort disks by their rotational property
+and use non-rotating disks for ``block.db`` or ``journal`` depending
+on the objectstore used. If all devices are to be used for standalone OSDs,
+no matter if rotating or solid state, pass ``--no-auto``.
 
 For example assuming :term:`bluestore` is used and ``--no-auto`` is not passed,
-  the deprecated behavior would deploy the following, depending on the devices
-  passed:
+the deprecated behavior would deploy the following, depending on the devices
+passed:
 
 #. Devices are all spinning HDDs: 1 OSD is created per device
 #. Devices are all SSDs: 2 OSDs are created per device
@@ -40,6 +37,12 @@ For example assuming :term:`bluestore` is used and ``--no-auto`` is not passed,
 .. note:: Although operations in ``ceph-volume lvm create`` allow usage of
           ``block.wal`` it isn't supported with the ``auto`` behavior.
 
+This default auto-sorting behavior is now DEPRECATED and will be changed in future releases.
+Instead, devices will not be automatically sorted unless the ``--auto`` option is passed.
+
+It is recommended to make use of the explicit device lists for ``block.db``,
+``block.wal`` and ``journal``.
+
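+For example (a sketch using the devices from the Reporting section below), the
+following call would deploy every passed device as a standalone OSD, regardless
+of its rotational property::
+
+    $ ceph-volume lvm batch --no-auto /dev/sdb /dev/sdc /dev/sdd /dev/nvme0n1
+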
 
 .. _ceph-volume-lvm-batch_bluestore:
 
 Reporting
@@ -62,22 +65,64 @@
 The ``pretty`` report format (the default) would look like this::
 
     $ ceph-volume lvm batch --report /dev/sdb /dev/sdc /dev/sdd --db-devices /dev/nvme0n1
+    --> passed data devices: 3 physical, 0 LVM
+    --> relative data size: 1.0
+    --> passed block_db devices: 1 physical, 0 LVM
 
     Total OSDs: 3
 
+      Type            Path                      LV Size         % of device
+    ----------------------------------------------------------------------------------------------------
+      data            /dev/sdb                  300.00 GB       100.00%
+      block_db        /dev/nvme0n1              66.67 GB        33.33%
+    ----------------------------------------------------------------------------------------------------
+      data            /dev/sdc                  300.00 GB       100.00%
+      block_db        /dev/nvme0n1              66.67 GB        33.33%
+    ----------------------------------------------------------------------------------------------------
+      data            /dev/sdd                  300.00 GB       100.00%
+      block_db        /dev/nvme0n1              66.67 GB        33.33%
 
-**JSON reporting**
-Reporting can produce a structured output with ``--format json``::
-
-    $ ceph-volume lvm batch --report /dev/sdb /dev/sdc /dev/sdd --db-devices /dev/nvme0n1
-
+
+**JSON reporting**
+Reporting can produce a structured output with ``--format json`` or
+``--format json-pretty``::
+
+    $ ceph-volume lvm batch --report --format json-pretty /dev/sdb /dev/sdc /dev/sdd --db-devices /dev/nvme0n1
+    --> passed data devices: 3 physical, 0 LVM
+    --> relative data size: 1.0
+    --> passed block_db devices: 1 physical, 0 LVM
+    [
+        {
+            "block_db": "/dev/nvme0n1",
+            "block_db_size": "66.67 GB",
+            "data": "/dev/sdb",
+            "data_size": "300.00 GB",
+            "encryption": "None"
+        },
+        {
+            "block_db": "/dev/nvme0n1",
+            "block_db_size": "66.67 GB",
+            "data": "/dev/sdc",
+            "data_size": "300.00 GB",
+            "encryption": "None"
+        },
+        {
+            "block_db": "/dev/nvme0n1",
+            "block_db_size": "66.67 GB",
+            "data": "/dev/sdd",
+            "data_size": "300.00 GB",
+            "encryption": "None"
+        }
+    ]
 
 Sizing
 ======
 When no sizing arguments are passed, `ceph-volume` will derive the sizing from
 the passed device lists (or the sorted lists when using the automatic sorting).
-`ceph-volume batch` will attempt to fully utilize a devices available capacity.
+`ceph-volume batch` will attempt to fully utilize a device's available capacity.
 Relying on automatic sizing is recommended.
 
 If one requires a different sizing policy for wal, db or journal devices,
@@ -91,14 +136,14 @@ on `ceph-volume` automatic sizing.
 Users can provide hints to `ceph-volume` as to how many data devices should have
 their external volumes on a set of fast devices. These options are:
 
-* `--block-db-slots`
-* `--block-wal-slots`
-* `--journal-slots`
+* ``--block-db-slots``
+* ``--block-wal-slots``
+* ``--journal-slots``
 
-For example consider an OSD host that is supposed to contain 5 data devices and
-one device for wal/db volumes. However one data device is currently broken and
+For example, consider an OSD host that is supposed to contain 5 data devices and
+one device for wal/db volumes. However, one data device is currently broken and
 is being replaced. Instead of calculating the explicit sizes for the wal/db
-volume one can simply call::
+volume, one can simply call::
 
     $ ceph-volume lvm batch --report /dev/sdb /dev/sdc /dev/sdd /dev/sde --db-devices /dev/nvme0n1 --block-db-slots 5
 
@@ -106,9 +151,9 @@ Explicit sizing
 ---------------
 It is also possible to provide explicit sizes to `ceph-volume` via the arguments
 
-* `--block-db-size`
-* `--block-wal-size`
-* `--journal-size`
+* ``--block-db-size``
+* ``--block-wal-size``
+* ``--journal-size``
 
 `ceph-volume` will try to satisfy the requested sizes given the passed disks. If
 this is not possible, no OSDs will be deployed.
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/batch.py b/src/ceph-volume/ceph_volume/devices/lvm/batch.py
index 877fd58e67476..7b8b7b65595ea 100644
--- a/src/ceph-volume/ceph_volume/devices/lvm/batch.py
+++ b/src/ceph-volume/ceph_volume/devices/lvm/batch.py
@@ -117,7 +117,8 @@ def get_physical_fast_allocs(devices, type_, fast_slots_per_device, new_osds, ar
             continue
         # any LV present is considered a taken slot
         occupied_slots = len(dev.lvs)
-        # TODO this only looks at the first vg on device
+        # this only looks at the first vg on the device; unsure if there is
+        # a better way
         dev_size = dev.vg_size[0]
         abs_size = disk.Size(b=int(dev_size / requested_slots))
         free_size = dev.vg_free[0]
@@ -289,8 +290,12 @@ class Batch(object):
             help='Provision slots on WAL device, can remain unoccupied'
         )
         def journal_size_in_mb_hack(size):
-            # give user time to adjust, then remove here
+            # TODO give user time to adjust, then remove this
             if size and size[-1].isdigit():
+                mlogger.warning('DEPRECATION NOTICE')
+                mlogger.warning('--journal-size as integer is parsed as megabytes')
+                mlogger.warning('A future release will parse integers as bytes')
+                mlogger.warning('Add an "M" to explicitly pass a megabyte size')
                 size += 'M'
             return disk.Size.parse(size)
         parser.add_argument(
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/common.py b/src/ceph-volume/ceph_volume/devices/lvm/common.py
index 2ad8562ae61d7..06369e479d4c7 100644
--- a/src/ceph-volume/ceph_volume/devices/lvm/common.py
+++ b/src/ceph-volume/ceph_volume/devices/lvm/common.py
@@ -145,18 +145,10 @@ filestore_args = {
 
 def get_default_args():
     defaults = {}
-    defaults.update({name.strip('-').replace('-', '_').replace('.', '_'): val['default'] for name, val in common_args.items()
-                     if 'default' in val})
-    defaults.update({name.strip('-').replace('-', '_').replace('.', '_'): val['default'] for name, val in filestore_args.items()
-                     if 'default' in val})
-    defaults.update({name.strip('-').replace('-', '_').replace('.', '_'): val['default'] for name, val in bluestore_args.items()
-                     if 'default' in val})
-    defaults.update({name.strip('-').replace('-', '_').replace('.', '_'): None for name, val in common_args.items()
-                     if 'default' not in val})
-    defaults.update({name.strip('-').replace('-', '_').replace('.', '_'): None for name, val in filestore_args.items()
-                     if 'default' not in val})
-    defaults.update({name.strip('-').replace('-', '_').replace('.', '_'): None for name, val in bluestore_args.items()
-                     if 'default' not in val})
+    def format_name(name):
+        return name.strip('-').replace('-', '_').replace('.', '_')
+    for argset in (common_args, filestore_args, bluestore_args):
+        defaults.update({format_name(name): val.get('default', None) for name, val in argset.items()})
     return defaults
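For reference, a minimal standalone sketch (not part of the patch) of what the
refactored ``get_default_args()`` computes; the two-entry argset below is
invented for illustration, while the real ``common_args``, ``filestore_args``
and ``bluestore_args`` dicts live in ``common.py``::

    common_args = {
        '--osds-per-device': {'default': 1},  # illustrative entry with a default
        '--crush-device-class': {},           # no default, so it maps to None
    }

    def format_name(name):
        return name.strip('-').replace('-', '_').replace('.', '_')

    defaults = {}
    for argset in (common_args,):
        defaults.update({format_name(name): val.get('default', None)
                         for name, val in argset.items()})

    assert defaults == {'osds_per_device': 1, 'crush_device_class': None}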
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/prepare.py b/src/ceph-volume/ceph_volume/devices/lvm/prepare.py
index 4f43bdcb39d93..9cb306fa9f228 100644
--- a/src/ceph-volume/ceph_volume/devices/lvm/prepare.py
+++ b/src/ceph-volume/ceph_volume/devices/lvm/prepare.py
@@ -299,7 +299,7 @@ class Prepare(object):
         if self.args.filestore:
             if not self.args.journal:
                 logger.info(('no journal was specifed, creating journal lv '
-                    'on {}').format(self.args.data))
+                             'on {}').format(self.args.data))
                 self.args.journal = self.args.data
                 self.args.journal_size = disk.Size(g=5)
                 # need to adjust data size/slots for colocated journal
@@ -308,9 +308,9 @@
             if self.args.data_slots == 1:
                 self.args.data_slots = 0
             else:
-                raise RuntimeError(('Can\'t handle multiple filestore OSDs '
+                raise RuntimeError('Can\'t handle multiple filestore OSDs '
                                    'with colocated journals yet. Please '
-                                   'create journal LVs manually'))
+                                   'create journal LVs manually')
 
         tags['ceph.cephx_lockbox_secret'] = cephx_lockbox_secret
         tags['ceph.encrypted'] = encrypted
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_common.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_common.py
new file mode 100644
index 0000000000000..c6b8a3d945508
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_common.py
@@ -0,0 +1,9 @@
+import pytest
+from ceph_volume.devices.lvm import common
+
+
+class TestCommon(object):
+
+    def test_get_default_args_smoke(self):
+        default_args = common.get_default_args()
+        assert default_args
-- 
2.39.5
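For reference, the deprecation shim added to batch.py can be exercised on its
own; below is a minimal sketch (not part of the patch) where ``parse`` is a
stand-in for ``disk.Size.parse`` and the unit math is illustrative only::

    import warnings

    def parse(size_str):
        # stand-in for ceph_volume.util.disk.Size.parse
        units = {'M': 1024 ** 2, 'G': 1024 ** 3}
        return int(size_str[:-1]) * units[size_str[-1]]

    def journal_size_in_mb_hack(size):
        # bare integers keep their historical meaning of megabytes
        if size and size[-1].isdigit():
            warnings.warn('--journal-size as integer is parsed as megabytes; '
                          'a future release will parse integers as bytes')
            size += 'M'
        return parse(size)

    # '100' and '100M' resolve to the same size, but only the former warns
    assert journal_size_in_mb_hack('100') == journal_size_in_mb_hack('100M')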