From 6696abf61ff6ca3e5295ce8f6f201dd6e35225de Mon Sep 17 00:00:00 2001 From: Jan Fajerski Date: Tue, 23 Jun 2020 16:58:46 +0200 Subject: [PATCH] ceph-volume batch: add ceph.conf mocking to pass tests Signed-off-by: Jan Fajerski (cherry picked from commit eef9dc7a1da6d5dde0d1b02b71301c1d7b7926a9) --- .../ceph_volume/devices/lvm/batch.py | 74 +++++++++++-------- .../tests/devices/lvm/test_batch.py | 10 ++- 2 files changed, 54 insertions(+), 30 deletions(-) diff --git a/src/ceph-volume/ceph_volume/devices/lvm/batch.py b/src/ceph-volume/ceph_volume/devices/lvm/batch.py index b2f667d9d6538..e916142f14b18 100644 --- a/src/ceph-volume/ceph_volume/devices/lvm/batch.py +++ b/src/ceph-volume/ceph_volume/devices/lvm/batch.py @@ -1,7 +1,6 @@ import argparse import json import logging -import json from textwrap import dedent from ceph_volume import terminal, decorators from ceph_volume.util import disk, prompt_bool, arg_validators, templates @@ -71,6 +70,26 @@ def get_physical_osds(devices, args): return ret +def get_lvm_osds(lvs, args): + ''' + Goes through passed LVs and assigns planned osds + ''' + ret = [] + for lv in lvs: + if lv.used_by_ceph: + continue + osd_id = None + if args.osd_ids: + osd_id = args.osd_ids.pop() + osd = Batch.OSD("{}/{}".format(lv.vg_name, lv.lv_name), + 100.0, + disk.Size(b=int(lv.lv_size)), + 1, + osd_id) + ret.append(osd) + return ret + + def get_physical_fast_allocs(devices, type_, fast_slots_per_device, new_osds, args): requested_slots = getattr(args, '{}_slots'.format(type_)) if not requested_slots or requested_slots < fast_slots_per_device: @@ -117,6 +136,12 @@ def get_physical_fast_allocs(devices, type_, fast_slots_per_device, new_osds, ar return ret +def get_lvm_fast_allocs(lvs): + return [("{}/{}".format(d.vg_name, d.lv_name), 100.0, + disk.Size(b=int(d.lv_size)), 1) for d in lvs if not + d.used_by_ceph] + + class Batch(object): help = 'Automatically size devices for multi-OSD provisioning with minimal interaction' @@ -391,37 +416,24 @@ 
class Batch(object): plan = self.get_deployment_layout(args, args.devices, args.journal_devices) return plan - def get_lvm_osds(self, lvs, args): - ''' - Goes through passed LVs and assigns planned osds - ''' - ret = [] - for lv in lvs: - if lv.used_by_ceph: - continue - osd_id = None - if args.osd_ids: - osd_id = args.osd_ids.pop() - osd = self.OSD("{}/{}".format(lv.vg_name, lv.lv_name), - 100.0, - disk.Size(b=int(lv.lv_size)), - 1, - osd_id) - ret.append(osd) - return ret - def get_deployment_layout(self, args, devices, fast_devices=[], very_fast_devices=[]): + ''' + The methods here are mostly just organization, error reporting and + setting up of (default) args. The heavy lifting code for the deployment + layout can be found in the static get_*_osds and get_*_fast_allocs + functions. + ''' plan = [] phys_devs = [d for d in devices if d.is_device] lvm_devs = [d.lvs[0] for d in list(set(devices) - set(phys_devs))] - mlogger.debug(('passed data_devices: {} physical, {}' - ' LVM').format(len(phys_devs), len(lvm_devs))) + mlogger.debug(('passed data devices: {} physical,' + ' {} LVM').format(len(phys_devs), len(lvm_devs))) plan.extend(get_physical_osds(phys_devs, args)) - plan.extend(self.get_lvm_osds(lvm_devs, args)) + plan.extend(get_lvm_osds(lvm_devs, args)) num_osds = len(plan) if num_osds == 0: @@ -463,11 +475,6 @@ class Batch(object): type_='block.wal') return plan - def get_lvm_fast_allocs(self, lvs): - return [("{}/{}".format(d.vg_name, d.lv_name), 100.0, - disk.Size(b=int(d.lv_size)), 1) for d in lvs if not - d.used_by_ceph] - def fast_allocations(self, devices, requested_osds, new_osds, type_): ret = [] if not devices: @@ -475,9 +482,14 @@ class Batch(object): phys_devs = [d for d in devices if d.is_device] lvm_devs = [d.lvs[0] for d in list(set(devices) - set(phys_devs))] + mlogger.debug(('passed {} devices: {} physical,' + ' {} LVM').format(type_, len(phys_devs), len(lvm_devs))) - ret.extend(self.get_lvm_fast_allocs(lvm_devs)) + 
ret.extend(get_lvm_fast_allocs(lvm_devs)) + # fill up uneven distributions across fast devices: 5 osds and 2 fast + # devices? create 3 slots on each device rather than deploying + # heterogeneous osds if (requested_osds - len(lvm_devs)) % len(phys_devs): fast_slots_per_device = int((requested_osds - len(lvm_devs)) / len(phys_devs)) + 1 else: @@ -492,6 +504,10 @@ class Batch(object): return ret class OSD(object): + ''' + This class simply stores info about to-be-deployed OSDs and provides an + easy way to retrieve the necessary create arguments. + ''' def __init__(self, data_path, rel_size, abs_size, slots, id_): self.id_ = id_ self.data = (data_path, rel_size, abs_size, slots) diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py index 874acd6b70d47..1719bd6da4819 100644 --- a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py +++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py @@ -1,4 +1,3 @@ -from functools import reduce import pytest import random from ceph_volume.devices.lvm import batch @@ -42,13 +41,16 @@ class TestBatch(object): def test_get_physical_osds_return_len(self, factory, mock_devices_available, + conf_ceph_stub, osds_per_device): + conf_ceph_stub('[global]\nfsid=asdf-lkjh') args = factory(data_slots=1, osds_per_device=osds_per_device, osd_ids=[]) osds = batch.get_physical_osds(mock_devices_available, args) assert len(osds) == len(mock_devices_available) * osds_per_device def test_get_physical_osds_rel_size(self, factory, mock_devices_available, + conf_ceph_stub, osds_per_device): args = factory(data_slots=1, osds_per_device=osds_per_device, osd_ids=[]) osds = batch.get_physical_osds(mock_devices_available, args) @@ -57,7 +59,9 @@ class TestBatch(object): def test_get_physical_osds_abs_size(self, factory, mock_devices_available, + conf_ceph_stub, osds_per_device): + conf_ceph_stub('[global]\nfsid=asdf-lkjh') args = factory(data_slots=1, 
osds_per_device=osds_per_device, osd_ids=[]) osds = batch.get_physical_osds(mock_devices_available, args) for osd, dev in zip(osds, mock_devices_available): @@ -69,7 +73,9 @@ class TestBatch(object): pass def test_get_physical_fast_allocs_length(self, factory, + conf_ceph_stub, mock_devices_available): + conf_ceph_stub('[global]\nfsid=asdf-lkjh') args = factory(block_db_slots=None, get_block_db_size=None) fast = batch.get_physical_fast_allocs(mock_devices_available, 'block_db', 2, 2, args) @@ -83,7 +89,9 @@ class TestBatch(object): slots, occupied_prior, factory, + conf_ceph_stub, mock_device_generator): + conf_ceph_stub('[global]\nfsid=asdf-lkjh') occupied_prior = min(occupied_prior, slots) devs = [mock_device_generator() for _ in range(num_devs)] already_assigned = 0 -- 2.39.5