import argparse
import json
import logging
-import json
from textwrap import dedent
from ceph_volume import terminal, decorators
from ceph_volume.util import disk, prompt_bool, arg_validators, templates
return ret
+def get_lvm_osds(lvs, args):
+ '''
+ Goes through passed LVs and assigns planned osds
+ '''
+ ret = []
+ for lv in lvs:
+ if lv.used_by_ceph:
+ continue
+ osd_id = None
+ if args.osd_ids:
+ osd_id = args.osd_ids.pop()
+ osd = Batch.OSD("{}/{}".format(lv.vg_name, lv.lv_name),
+ 100.0,
+ disk.Size(b=int(lv.lv_size)),
+ 1,
+ osd_id)
+ ret.append(osd)
+ return ret
+
+
def get_physical_fast_allocs(devices, type_, fast_slots_per_device, new_osds, args):
requested_slots = getattr(args, '{}_slots'.format(type_))
if not requested_slots or requested_slots < fast_slots_per_device:
return ret
+def get_lvm_fast_allocs(lvs):
+ return [("{}/{}".format(d.vg_name, d.lv_name), 100.0,
+ disk.Size(b=int(d.lv_size)), 1) for d in lvs if not
+ d.used_by_ceph]
+
+
class Batch(object):
help = 'Automatically size devices for multi-OSD provisioning with minimal interaction'
plan = self.get_deployment_layout(args, args.devices, args.journal_devices)
return plan
- def get_lvm_osds(self, lvs, args):
- '''
- Goes through passed LVs and assigns planned osds
- '''
- ret = []
- for lv in lvs:
- if lv.used_by_ceph:
- continue
- osd_id = None
- if args.osd_ids:
- osd_id = args.osd_ids.pop()
- osd = self.OSD("{}/{}".format(lv.vg_name, lv.lv_name),
- 100.0,
- disk.Size(b=int(lv.lv_size)),
- 1,
- osd_id)
- ret.append(osd)
- return ret
-
def get_deployment_layout(self, args, devices, fast_devices=[],
very_fast_devices=[]):
+ '''
+ The methods here are mostly just organization, error reporting and
+ setting up of (default) args. The heavy lifting code for the deployment
+ layout can be found in the static get_*_osds and get_*_fast_allocs
+ functions.
+ '''
plan = []
phys_devs = [d for d in devices if d.is_device]
lvm_devs = [d.lvs[0] for d in list(set(devices) -
set(phys_devs))]
- mlogger.debug(('passed data_devices: {} physical, {}'
- ' LVM').format(len(phys_devs), len(lvm_devs)))
+ mlogger.debug(('passed data devices: {} physical,'
+ ' {} LVM').format(len(phys_devs), len(lvm_devs)))
plan.extend(get_physical_osds(phys_devs, args))
- plan.extend(self.get_lvm_osds(lvm_devs, args))
+ plan.extend(get_lvm_osds(lvm_devs, args))
num_osds = len(plan)
if num_osds == 0:
type_='block.wal')
return plan
- def get_lvm_fast_allocs(self, lvs):
- return [("{}/{}".format(d.vg_name, d.lv_name), 100.0,
- disk.Size(b=int(d.lv_size)), 1) for d in lvs if not
- d.used_by_ceph]
-
def fast_allocations(self, devices, requested_osds, new_osds, type_):
ret = []
if not devices:
phys_devs = [d for d in devices if d.is_device]
lvm_devs = [d.lvs[0] for d in list(set(devices) -
set(phys_devs))]
+ mlogger.debug(('passed {} devices: {} physical,'
+ ' {} LVM').format(type_, len(phys_devs), len(lvm_devs)))
- ret.extend(self.get_lvm_fast_allocs(lvm_devs))
+ ret.extend(get_lvm_fast_allocs(lvm_devs))
+ # fill up uneven distributions across fast devices: 5 osds and 2 fast
+ # devices? create 3 slots on each device rather than deploying
+ # heterogeneous osds
if (requested_osds - len(lvm_devs)) % len(phys_devs):
fast_slots_per_device = int((requested_osds - len(lvm_devs)) / len(phys_devs)) + 1
else:
return ret
class OSD(object):
+ '''
+ This class simply stores info about to-be-deployed OSDs and provides an
+ easy way to retrieve the necessary create arguments.
+ '''
def __init__(self, data_path, rel_size, abs_size, slots, id_):
self.id_ = id_
self.data = (data_path, rel_size, abs_size, slots)
-from functools import reduce
import pytest
import random
from ceph_volume.devices.lvm import batch
def test_get_physical_osds_return_len(self, factory,
mock_devices_available,
+ conf_ceph_stub,
osds_per_device):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
args = factory(data_slots=1, osds_per_device=osds_per_device, osd_ids=[])
osds = batch.get_physical_osds(mock_devices_available, args)
assert len(osds) == len(mock_devices_available) * osds_per_device
def test_get_physical_osds_rel_size(self, factory,
mock_devices_available,
+ conf_ceph_stub,
osds_per_device):
args = factory(data_slots=1, osds_per_device=osds_per_device, osd_ids=[])
osds = batch.get_physical_osds(mock_devices_available, args)
def test_get_physical_osds_abs_size(self, factory,
mock_devices_available,
+ conf_ceph_stub,
osds_per_device):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
args = factory(data_slots=1, osds_per_device=osds_per_device, osd_ids=[])
osds = batch.get_physical_osds(mock_devices_available, args)
for osd, dev in zip(osds, mock_devices_available):
pass
def test_get_physical_fast_allocs_length(self, factory,
+ conf_ceph_stub,
mock_devices_available):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
args = factory(block_db_slots=None, get_block_db_size=None)
fast = batch.get_physical_fast_allocs(mock_devices_available,
'block_db', 2, 2, args)
slots,
occupied_prior,
factory,
+ conf_ceph_stub,
mock_device_generator):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
occupied_prior = min(occupied_prior, slots)
devs = [mock_device_generator() for _ in range(num_devs)]
already_assigned = 0