action='store_true',
help='Only prepare all OSDs, do not activate',
)
+ parser.add_argument(
+ '--osd-ids',
+ nargs='*',
+ default=[],
+ help='Reuse existing OSD ids',
+ )
self.args = parser.parse_args(argv)
self.parser = parser
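For context, nargs='*' means the new flag accepts zero or more ids on the command line and falls back to an empty list when omitted. A minimal, stand-alone sketch of the parsing behaviour (the parser below is a stand-in, not the real batch parser):

import argparse

# Hypothetical stand-in parser mirroring the new --osd-ids option.
parser = argparse.ArgumentParser()
parser.add_argument(
    '--osd-ids',
    nargs='*',
    default=[],
    help='Reuse existing OSD ids',
)

# '--osd-ids 3 7' parses into ['3', '7']; omitting the flag keeps [].
print(parser.parse_args(['--osd-ids', '3', '7']).osd_ids)  # ['3', '7']
print(parser.parse_args([]).osd_ids)                       # []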
from ceph_volume.devices.lvm.create import Create
from ceph_volume.devices.lvm.prepare import Prepare
from ceph_volume.util import templates
+from ceph_volume.util.prepare import osd_id_available
from ceph_volume.exceptions import SizeAllocationError
for osd in self.computed['osds']:
string += templates.osd_header
+ if 'osd_id' in osd:
+ string += templates.osd_reused_id.format(
+ id_=osd['osd_id'])
string += templates.osd_component.format(
_type='[data]',
path=osd['data']['path'],
# make sure that data devices do not have any LVs
validators.no_lvm_membership(self.data_devs)
+ if self.osd_ids:
+ self._validate_osd_ids()
+
def compute(self):
"""
Go through the rules needed to properly size the lvs, return
if self.args.crush_device_class:
command.extend(['--crush-device-class', self.args.crush_device_class])
+ if self.osd_ids:
+ command.extend(['--osd-id', self.osd_ids.pop(0)])
+
if self.args.prepare:
Prepare(command).main()
else:
string += templates.osd_component_titles
for osd in self.computed['osds']:
string += templates.osd_header
+ if 'osd_id' in osd:
+ string += templates.osd_reused_id.format(
+ id_=osd['osd_id'])
string += templates.osd_component.format(
_type='[data]',
path=osd['data']['path'],
osd['block.wal']['human_readable_size'] = str(self.block_wal_size)
osd['block.wal']['percentage'] = self.wal_vg_extents['percentages']
+ if self.osd_ids:
+ osd['osd_id'] = self.osd_ids.pop(0)
+
osds.append(osd)
self.computed['changed'] = len(osds) > 0
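Each computed OSD consumes the next requested id front to back, so the ids map onto the created OSDs in the order they were given. An illustrative sketch of the consumption pattern (the dict layout is simplified, not the real strategy code):

# Illustrative only: one id is consumed per computed OSD, in order.
osd_ids = ['3', '7']
computed_osds = []
for path in ['/dev/sda', '/dev/sdb']:
    osd = {'data': {'path': path}}
    if osd_ids:
        osd['osd_id'] = osd_ids.pop(0)
    computed_osds.append(osd)
# computed_osds[0]['osd_id'] == '3', computed_osds[1]['osd_id'] == '7'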
command.append('--no-systemd')
if self.args.crush_device_class:
command.extend(['--crush-device-class', self.args.crush_device_class])
+ if 'osd_id' in osd:
+ command.extend(['--osd-id', osd['osd_id']])
if self.args.prepare:
Prepare(command).main()
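When a computed OSD carries a reused id, execute() forwards it to the per-OSD subcommand as --osd-id. A hedged sketch of the resulting argument list (device path and flag selection are illustrative; the real code appends more options):

# Illustrative command assembly for a single computed OSD.
osd = {'osd_id': '3', 'data': {'path': '/dev/sda'}}
command = ['--bluestore', '--data', osd['data']['path'], '--no-systemd']
if 'osd_id' in osd:
    command.extend(['--osd-id', osd['osd_id']])
# 'command' is roughly what Prepare(command).main() or
# Create(command).main() then receives.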
if self.wal_devs:
self._validate_wal_devs()
+ if self.osd_ids:
+ self._validate_osd_ids()
+
def _validate_db_devs(self):
# do not allow non-common VG to continue
validators.has_common_vg(self.db_or_journal_devs)
self.block_wal_size,
)
raise RuntimeError(msg)
+
for osd in self.computed['osds']:
string += templates.osd_header
+ if 'osd_id' in osd:
+ string += templates.osd_reused_id.format(
+ id_=osd['osd_id'])
string += templates.osd_component.format(
_type='[data]',
path=osd['data']['path'],
# make sure that data devices do not have any LVs
validators.no_lvm_membership(self.data_devs)
+ if self.osd_ids:
+ self._validate_osd_ids()
+
def compute(self):
"""
Go through the rules needed to properly size the lvs, return
osd['journal']['size'] = journal_size.b.as_int()
osd['journal']['percentage'] = int(100 - data_percentage)
osd['journal']['human_readable_size'] = str(journal_size)
+
+ if self.osd_ids:
+ osd['osd_id'] = self.osd_ids.pop(0)
+
osds.append(osd)
self.computed['changed'] = len(osds) > 0
command.append('--no-systemd')
if self.args.crush_device_class:
command.extend(['--crush-device-class', self.args.crush_device_class])
+ if 'osd_id' in osd:
+ command.extend(['--osd-id', osd['osd_id']])
if self.args.prepare:
Prepare(command).main()
for osd in self.computed['osds']:
string += templates.osd_header
+ if 'osd_id' in osd:
+ string += templates.osd_reused_id.format(
+ id_=osd['osd_id'])
string += templates.osd_component.format(
_type='[data]',
path=osd['data']['path'],
)
raise RuntimeError(msg)
+ if self.osd_ids:
+ self._validate_osd_ids()
+
def compute(self):
"""
Go through the rules needed to properly size the lvs, return
osd['journal']['size'] = self.journal_size.b.as_int()
osd['journal']['percentage'] = int(self.journal_size.gb * 100 / vg_free)
osd['journal']['human_readable_size'] = str(self.journal_size)
+
+ if self.osd_ids:
+ osd['osd_id'] = self.osd_ids.pop(0)
+
osds.append(osd)
self.computed['changed'] = len(osds) > 0
command.append('--no-systemd')
if self.args.crush_device_class:
command.extend(['--crush-device-class', self.args.crush_device_class])
+ if 'osd_id' in osd:
+ command.extend(['--osd-id', osd['osd_id']])
if self.args.prepare:
Prepare(command).main()
import json
+from ceph_volume.util.prepare import osd_id_available
class Strategy(object):
empty list for wal_devs.
'''
self.args = args
+ self.osd_ids = args.osd_ids
self.osds_per_device = args.osds_per_device
self.devices = data_devs + wal_devs + db_or_journal_devs
self.data_devs = data_devs
report['filtered_devices'] = filtered_devices
print(json.dumps(self.computed, indent=4, sort_keys=True))
+ def _validate_osd_ids(self):
+ unavailable_ids = [id_ for id_ in self.osd_ids if
+ not osd_id_available(id_)]
+ if unavailable_ids:
+ msg = ("Not all specfied OSD ids are available: {}"
+ "unavailable").format(",".join(unavailable_ids))
+ raise RuntimeError(msg)
+
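The validator fails the whole batch up front if any requested id cannot be reused. A self-contained sketch of the same check, with osd_id_available stubbed out since the real function queries the cluster:

# Stub standing in for ceph_volume.util.prepare.osd_id_available, which
# in the real code asks the cluster whether an id can be reused.
def osd_id_available_stub(id_):
    return id_ in ('3', '7')  # pretend only ids 3 and 7 are reusable

requested = ['3', '999']
unavailable = [id_ for id_ in requested if not osd_id_available_stub(id_)]
if unavailable:
    raise RuntimeError(
        "Not all specified OSD ids are available: {} "
        "unavailable".format(",".join(unavailable)))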
@property
def total_osds(self):
return len(self.data_devs) * self.osds_per_device
class TestSingleType(object):
def test_hdd_device_is_large_enough(self, fakedevice, factory):
- args = factory(filtered_devices=[], osds_per_device=1, block_db_size=None)
+ args = factory(filtered_devices=[], osds_per_device=1,
+ block_db_size=None, osd_ids=[])
devices = [
fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
]
assert computed_osd['data']['path'] == '/dev/sda'
def test_sdd_device_is_large_enough(self, fakedevice, factory):
- args = factory(filtered_devices=[], osds_per_device=1, block_db_size=None)
+ args = factory(filtered_devices=[], osds_per_device=1,
+ block_db_size=None, osd_ids=[])
devices = [
fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000))
]
assert computed_osd['data']['path'] == '/dev/sda'
def test_device_cannot_have_many_osds_per_device(self, fakedevice, factory):
- args = factory(filtered_devices=[], osds_per_device=3, block_db_size=None)
+ args = factory(filtered_devices=[], osds_per_device=3,
+ block_db_size=None, osd_ids=[])
devices = [
fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
]
assert 'Unable to use device 5.66 GB /dev/sda' in str(error)
def test_device_is_lvm_member_fails(self, fakedevice, factory):
- args = factory(filtered_devices=[], osds_per_device=1, block_db_size=None)
+ args = factory(filtered_devices=[], osds_per_device=1,
+ block_db_size=None, osd_ids=[])
devices = [
fakedevice(used_by_ceph=False, is_lvm_member=True, sys_api=dict(rotational='1', size=6073740000))
]
# 3GB block.db in ceph.conf
conf_ceph(get_safe=lambda *a: 3147483640)
args = factory(filtered_devices=[], osds_per_device=1,
- block_db_size=None, block_wal_size=None)
+ block_db_size=None, block_wal_size=None,
+ osd_ids=[])
ssd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000))
hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
devices = [ssd, hdd]
# 7GB block.db in ceph.conf
conf_ceph(get_safe=lambda *a: 7747483640)
args = factory(filtered_devices=[], osds_per_device=1,
- block_db_size=None, block_wal_size=None)
+ block_db_size=None, block_wal_size=None,
+ osd_ids=[])
ssd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000))
hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
devices = [ssd, hdd]
# 3GB block.db in ceph.conf
conf_ceph(get_safe=lambda *a: 3147483640)
args = factory(filtered_devices=[], osds_per_device=2,
- block_db_size=None, block_wal_size=None)
+ block_db_size=None, block_wal_size=None,
+ osd_ids=[])
ssd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=60737400000))
hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
devices = [ssd, hdd]
def test_hdd_device_is_large_enough(self, stub_vgs, fakedevice, factory, conf_ceph):
conf_ceph(get_safe=lambda *a: None)
args = factory(filtered_devices=[], osds_per_device=1,
- block_db_size=None, block_wal_size=None)
+ block_db_size=None, block_wal_size=None,
+ osd_ids=[])
ssd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000))
hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
devices = [ssd, hdd]
def test_multi_hdd_device_is_large_enough(self, stub_vgs, fakedevice, factory, conf_ceph):
conf_ceph(get_safe=lambda *a: None)
args = factory(filtered_devices=[], osds_per_device=2,
- block_db_size=None, block_wal_size=None)
+ block_db_size=None, block_wal_size=None,
+ osd_ids=[])
ssd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=60073740000))
hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=60073740000))
devices = [ssd, hdd]
def test_multi_hdd_device_is_not_large_enough(self, stub_vgs, fakedevice, factory, conf_ceph):
conf_ceph(get_safe=lambda *a: None)
args = factory(filtered_devices=[], osds_per_device=2,
- block_db_size=None, block_wal_size=None)
+ block_db_size=None, block_wal_size=None,
+ osd_ids=[])
ssd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=60737400000))
hdd = fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
devices = [ssd, hdd]
def test_hdd_device_is_large_enough(self, fakedevice, factory, conf_ceph):
conf_ceph(get_safe=lambda *a: '5120')
- args = factory(filtered_devices=[], osds_per_device=1, journal_size=None)
+ args = factory(filtered_devices=[], osds_per_device=1,
+ journal_size=None, osd_ids=[])
devices = [
fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=12073740000))
]
def test_hdd_device_with_large_journal(self, fakedevice, factory, conf_ceph):
conf_ceph(get_safe=lambda *a: '5120')
- args = factory(filtered_devices=[], osds_per_device=1, journal_size=None)
+ args = factory(filtered_devices=[], osds_per_device=1,
+ journal_size=None, osd_ids=[])
devices = [
fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
]
def test_ssd_device_is_large_enough(self, fakedevice, factory, conf_ceph):
conf_ceph(get_safe=lambda *a: '5120')
- args = factory(filtered_devices=[], osds_per_device=1, journal_size=None)
+ args = factory(filtered_devices=[], osds_per_device=1,
+ journal_size=None, osd_ids=[])
devices = [
fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=12073740000))
]
def test_ssd_device_with_large_journal(self, fakedevice, factory, conf_ceph):
conf_ceph(get_safe=lambda *a: '5120')
- args = factory(filtered_devices=[], osds_per_device=1, journal_size=None)
+ args = factory(filtered_devices=[], osds_per_device=1,
+ journal_size=None, osd_ids=[])
devices = [
fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000))
]
def test_ssd_device_multi_osd(self, fakedevice, factory, conf_ceph):
conf_ceph(get_safe=lambda *a: '5120')
- args = factory(filtered_devices=[], osds_per_device=4, journal_size=None)
+ args = factory(filtered_devices=[], osds_per_device=4,
+ journal_size=None, osd_ids=[])
devices = [
fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=16073740000))
]
def test_hdd_device_multi_osd(self, fakedevice, factory, conf_ceph):
conf_ceph(get_safe=lambda *a: '5120')
- args = factory(filtered_devices=[], osds_per_device=4, journal_size=None)
+ args = factory(filtered_devices=[], osds_per_device=4,
+ journal_size=None, osd_ids=[])
devices = [
fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=16073740000))
]
def test_device_is_lvm_member_fails(self, fakedevice, factory, conf_ceph):
conf_ceph(get_safe=lambda *a: '5120')
- args = factory(filtered_devices=[], osds_per_device=1, journal_size=None)
+ args = factory(filtered_devices=[], osds_per_device=1,
+ journal_size=None, osd_ids=[])
devices = [
fakedevice(used_by_ceph=False, is_lvm_member=True, sys_api=dict(rotational='1', size=12073740000))
]
def test_hdd_device_with_small_configured_journal(self, fakedevice, factory, conf_ceph):
conf_ceph(get_safe=lambda *a: '120')
- args = factory(filtered_devices=[], osds_per_device=1, journal_size=None)
+ args = factory(filtered_devices=[], osds_per_device=1,
+ journal_size=None, osd_ids=[])
devices = [
fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
]
def test_ssd_device_with_small_configured_journal(self, fakedevice, factory, conf_ceph):
conf_ceph(get_safe=lambda *a: '120')
- args = factory(filtered_devices=[], osds_per_device=1, journal_size=None)
+ args = factory(filtered_devices=[], osds_per_device=1,
+ journal_size=None, osd_ids=[])
devices = [
fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000))
]
def test_minimum_size_is_not_met(self, stub_vgs, fakedevice, factory, conf_ceph):
conf_ceph(get_safe=lambda *a: '120')
- args = factory(filtered_devices=[], osds_per_device=1, journal_size=None)
+ args = factory(filtered_devices=[], osds_per_device=1,
+ journal_size=None, osd_ids=[])
devices = [
fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000)),
fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
def test_ssd_device_is_not_large_enough(self, stub_vgs, fakedevice, factory, conf_ceph):
conf_ceph(get_safe=lambda *a: '7120')
- args = factory(filtered_devices=[], osds_per_device=1, journal_size=None)
+ args = factory(filtered_devices=[], osds_per_device=1,
+ journal_size=None, osd_ids=[])
devices = [
fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000)),
fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
def test_hdd_device_is_lvm_member_fails(self, stub_vgs, fakedevice, factory, conf_ceph):
conf_ceph(get_safe=lambda *a: '5120')
- args = factory(filtered_devices=[], osds_per_device=1, journal_size=None)
+ args = factory(filtered_devices=[], osds_per_device=1,
+ journal_size=None, osd_ids=[])
devices = [
fakedevice(used_by_ceph=False, is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000)),
fakedevice(used_by_ceph=False, is_lvm_member=True, sys_api=dict(rotational='1', size=6073740000))
])
conf_ceph(get_safe=lambda *a: '5120')
- args = factory(filtered_devices=[], osds_per_device=1, journal_size=None)
+ args = factory(filtered_devices=[], osds_per_device=1,
+ journal_size=None, osd_ids=[])
devices = [ssd, hdd]
result = filestore.MixedType.with_auto_devices(args, devices).computed['osds'][0]
assert result['journal']['path'] == 'vg: fast'
])
conf_ceph(get_safe=lambda *a: '5120')
- args = factory(filtered_devices=[], osds_per_device=1, journal_size=None)
+ args = factory(filtered_devices=[], osds_per_device=1,
+ journal_size=None, osd_ids=[])
devices = [ssd1, ssd2, hdd]
with pytest.raises(RuntimeError) as error:
filestore.MixedType.with_auto_devices(args, devices)
def test_ssd_device_fails_multiple_osds(self, stub_vgs, fakedevice, factory, conf_ceph):
conf_ceph(get_safe=lambda *a: '15120')
- args = factory(filtered_devices=[], osds_per_device=2, journal_size=None)
+ args = factory(filtered_devices=[], osds_per_device=2,
+ journal_size=None, osd_ids=[])
devices = [
fakedevice(is_lvm_member=False, sys_api=dict(rotational='0', size=16073740000)),
fakedevice(is_lvm_member=False, sys_api=dict(rotational='1', size=16073740000))
Type Path LV Size % of device"""
+osd_reused_id = """
+ OSD id {id_: <55}"""
+
+
osd_component = """
{_type: <15} {path: <55} {size: <15} {percent}%"""
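For reference, the new report line renders the reused id left-aligned next to the other per-OSD fields. A small self-contained sketch (the template is copied here only for illustration; real callers import it from ceph_volume.util.templates):

# Copy of the new template, for illustration only.
osd_reused_id = """
  OSD id {id_: <55}"""

# Prints a leading newline followed by an '  OSD id 3' line padded to
# the report's column width.
print(osd_reused_id.format(id_='3'))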