Newer versions of LVM allow ``--reportformat=json``, but older versions,
like the one included in Xenial do not. LVM has the ability to filter and
format its output so we assume the output will be in a format this parser
- can handle (using ',' as a delimiter)
+ can handle (using ';' as a delimiter)
:param fields: A string, possibly using ',' to group many items, as it
would be used on the CLI
# splitting on ';' because that is what the lvm call uses as
# '--separator'
output_items = [i.strip() for i in line.split(';')]
- # map the output to the fiels
+ # map the output to the fields
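+ # e.g. (illustrative): fields='vg_name,pv_count' and an output line
+ # 'vg0;1' map to {'vg_name': 'vg0', 'pv_count': '1'}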
report.append(
dict(zip(field_items, output_items))
)
To normalize sizing, the units are forced to 'g', which is equivalent to
gigabytes and uses multiples of 1024 (as opposed to 1000)
"""
+ # TODO: add vg_extent_size here to make it available in the VolumeGroup class
fields = 'vg_name,pv_count,lv_count,snap_count,vg_attr,vg_size,vg_free,vg_free_count'
stdout, stderr, returncode = process.call(
['vgs', '--noheadings', '--readonly', '--units=g', '--separator=";"', '-o', fields],
return vgs.get(vg_name=vg_name, vg_tags=vg_tags)
+def get_device_vgs(device, name_prefix=''):
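+ """
+ Return VolumeGroup objects for the VGs that ``device`` is a physical
+ volume of, keeping only VG names that start with ``name_prefix``,
+ e.g. (values illustrative)::
+
+     get_device_vgs('/dev/sdb', 'ceph')
+ """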
+ fields = 'vg_name,pv_count,lv_count,snap_count,vg_attr,vg_size,vg_free,vg_free_count'
+ stdout, stderr, returncode = process.call(
+ ['pvs', '--noheadings', '--readonly', '--units=g', '--separator=";"',
+ '-o', fields, device],
+ verbose_on_failure=False
+ )
+ vgs = _output_parser(stdout, fields)
+ return [VolumeGroup(**vg) for vg in vgs if vg['vg_name'].startswith(name_prefix)]
+
+
#################################
#
# Code for LVM Logical Volumes
return lvs[0]
-def create_lv(name, group, extents=None, size=None, tags=None, uuid_name=False, pv=None):
+def create_lv(name_prefix, uuid, vg=None, device=None, extents=None, size=None, tags=None):
"""
Create a Logical Volume in a Volume Group. Command looks like::
lvcreate -L 50G -n gfslv vg0
- ``name``, ``group``, are required. If ``size`` is provided it must follow
+ ``name_prefix`` is required. If ``size`` is provided it must follow
lvm's size notation (like 1G, or 20M). Tags are an optional dictionary and are expected to
conform to the convention of prefixing them with "ceph." like::
{"ceph.block_device": "/dev/ceph/osd-1"}
- :param uuid_name: Optionally combine the ``name`` with UUID to ensure uniqueness
- """
- if uuid_name:
- name = '%s-%s' % (name, uuid.uuid4())
- if tags is None:
- tags = {
- "ceph.osd_id": "null",
- "ceph.type": "null",
- "ceph.cluster_fsid": "null",
- "ceph.osd_fsid": "null",
- }
+ :param name_prefix: name prefix for the LV, typically something like ceph-osd-block
+ :param uuid: UUID to ensure uniqueness; is combined with name_prefix to
+ form the LV name
+ :param vg: optional, an existing VG in which to create the LV
+ :param device: optional, device to use. Either device or vg must be passed
+ :param extents: optional, how many lvm extents to use
+ :param size: optional, LV size, must follow lvm's size notation, supersedes
+ extents
+ :param tags: optional, a dict of lvm tags to set on the LV
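+
+ A minimal usage sketch (device and tag values are illustrative)::
+
+     create_lv('osd-block', '<osd-fsid>', device='/dev/sdb',
+               tags={'ceph.type': 'block'})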
+ """
+ name = '{}-{}'.format(name_prefix, uuid)
+ if not vg:
+ if not device:
+ raise RuntimeError("Must specify either vg or device; none given")
+ # check if a vg whose name starts with ceph already exists
+ vgs = get_device_vgs(device, 'ceph')
+ if vgs:
+ vg = vgs[0].vg_name
+ else:
+ # create one if not
+ vg = create_vg(device, name_prefix='ceph').vg_name
+ assert vg
- # XXX add CEPH_VOLUME_LVM_DEBUG to enable -vvvv on lv operations
- type_path_tag = {
- 'journal': 'ceph.journal_device',
- 'data': 'ceph.data_device',
- 'block': 'ceph.block_device',
- 'wal': 'ceph.wal_device',
- 'db': 'ceph.db_device',
- 'lockbox': 'ceph.lockbox_device', # XXX might not ever need this lockbox sorcery
- }
if size:
command = [
'lvcreate',
'--yes',
'-L',
- '%s' % size,
- '-n', name, group
+ '{}'.format(size),
+ '-n', name, vg
]
elif extents:
command = [
'lvcreate',
'--yes',
'-l',
- '%s' % extents,
- '-n', name, group
+ '{}'.format(extents),
+ '-n', name, vg
]
# create the lv with all the space available; this needs its own branch
# because the lvcreate call is different ('-l 100%FREE' instead of a size)
'--yes',
'-l',
'100%FREE',
- '-n', name, group
+ '-n', name, vg
]
- if pv:
- command.append(pv)
process.run(command)
- lv = get_lv(lv_name=name, vg_name=group)
- lv.set_tags(tags)
+ lv = get_lv(lv_name=name, vg_name=vg)
+ if tags is None:
+ tags = {
+ "ceph.osd_id": "null",
+ "ceph.type": "null",
+ "ceph.cluster_fsid": "null",
+ "ceph.osd_fsid": "null",
+ }
# when creating a distinct type, the caller doesn't know what the path will
# be, so this function sets it after creation using the mapping below
+ # XXX add CEPH_VOLUME_LVM_DEBUG to enable -vvvv on lv operations
+ type_path_tag = {
+ 'journal': 'ceph.journal_device',
+ 'data': 'ceph.data_device',
+ 'block': 'ceph.block_device',
+ 'wal': 'ceph.wal_device',
+ 'db': 'ceph.db_device',
+ 'lockbox': 'ceph.lockbox_device', # XXX might not ever need this lockbox sorcery
+ }
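+ # e.g. (illustrative): tags={'ceph.type': 'block'} resolves to
+ # 'ceph.block_device', which is then set to the new LV's path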
path_tag = type_path_tag.get(tags.get('ceph.type'))
if path_tag:
- lv.set_tags(
- {path_tag: lv.lv_path}
- )
+ tags.update({path_tag: lv.lv_path})
+
+ lv.set_tags(tags)
+
return lv
return len(lvs) > 0
return False
+def get_lv_by_name(name):
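+ """
+ Return a list of Volume objects for LVs whose name matches ``name``
+ exactly, across all VGs; an empty list when nothing matches.
+ """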
+ stdout, stderr, returncode = process.call(
+ ['lvs', '--noheadings', '-o', LV_FIELDS, '-S',
+ 'lv_name={}'.format(name)],
+ verbose_on_failure=False
+ )
+ lvs = _output_parser(stdout, LV_FIELDS)
+ return [Volume(**lv) for lv in lvs]
def get_lv(lv_name=None, vg_name=None, lv_path=None, lv_uuid=None, lv_tags=None, lvs=None):
"""
for part in range(0, sizing['parts']):
size = sizing['sizes']
extents = sizing['extents']
- lv_name = '%s-%s' % (name_prefix, uuid.uuid4())
lvs.append(
- create_lv(lv_name, volume_group.name, extents=extents, tags=tags)
+ create_lv(name_prefix, uuid.uuid4(), vg=volume_group.name, extents=extents, tags=tags)
)
return lvs
if not device_uuid:
return None
- device_lv = lvs.get(lv_uuid=device_uuid)
+ device_lv = lvs.get(lv_tags={'ceph.type': device_type})
if device_lv:
if is_encrypted:
encryption_utils.luks_open(dmcrypt_secret, device_lv.lv_path, device_uuid)
Create an OSD by assigning an ID and FSID, registering them with the
cluster with an ID and FSID, formatting and mounting the volume, adding
all the metadata to the logical volumes using LVM tags, and starting
- the OSD daemon.
+ the OSD daemon. This is a convenience command that combines the prepare
+ and activate steps.
- Existing logical volume (lv) or device:
+ Encryption is supported via dmcrypt and the --dmcrypt flag.
- ceph-volume lvm create --data {vg name/lv name} --journal /path/to/device
+ Existing logical volume (lv):
- Or:
+ ceph-volume lvm create --data {vg/lv}
- ceph-volume lvm create --data {vg name/lv name} --journal {vg name/lv name}
+ Existing block device (a logical volume will be created):
+ ceph-volume lvm create --data /path/to/device
+
+ Optionally, can consume db and wal block devices, partitions or logical
+ volumes. A device will get a logical volume, partitions and existing
+ logical volumes will be used as is:
+
+ ceph-volume lvm create --data {vg/lv} --block.wal {partition} --block.db {/path/to/device}
""")
parser = create_parser(
prog='ceph-volume lvm create',
:param argument: The command-line value that will need to be split to
retrieve the actual lv
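+
+ e.g. (illustrative): 'ceph-vg/osd-block' splits into the vg name
+ 'ceph-vg' and the lv name 'osd-block'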
"""
+ # TODO: is this efficient?
try:
vg_name, lv_name = argument.split('/')
except (ValueError, AttributeError):
tags['ceph.%s_uuid' % device_type] = uuid
tags['ceph.%s_device' % device_type] = path
lv.set_tags(tags)
+ elif disk.is_device(device_name):
+ # We got a disk, create an lv
+ lv_type = "osd-{}".format(device_type)
+ uuid = system.generate_uuid()
+ lv = api.create_lv(
+ lv_type,
+ uuid,
+ device=device_name,
+ tags={'ceph.type': device_type})
+ path = lv.lv_path
+ tags['ceph.{}_uuid'.format(device_type)] = uuid
+ tags['ceph.{}_device'.format(device_type)] = path
+ lv.set_tags(tags)
else:
# otherwise assume this is a regular disk partition
uuid = self.get_ptuuid(device_name)
tags['ceph.%s_device' % device_type] = path
return path, uuid, tags
- def prepare_device(self, arg, device_type, cluster_fsid, osd_fsid):
+ def prepare_device(self, device, device_type, osd_uuid):
"""
- Check if ``arg`` is a device or partition to create an LV out of it
+ Check if ``device`` is a device or partition to create an LV out of it
with a distinct volume group name, assigning LV tags on it and
- :param arg: The value of ``--data`` when parsing args
+ :param device: The value of ``--data`` when parsing args
:param device_type: Usually, either ``data`` or ``block`` (filestore vs. bluestore)
- :param cluster_fsid: The cluster fsid/uuid
- :param osd_fsid: The OSD fsid/uuid
+ :param osd_uuid: The OSD uuid
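+
+ A bluestore call looks like (mirrors the caller below)::
+
+     self.prepare_device(self.args.data, 'block', osd_fsid)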
"""
- if disk.is_partition(arg) or disk.is_device(arg):
+ if disk.is_partition(device) or disk.is_device(device):
- # we must create a vg, and then a single lv
+ # create_lv will create or reuse a vg on the device, and a single lv in it
- vg = api.create_vg(arg)
- lv_name = "osd-%s-%s" % (device_type, osd_fsid)
+ lv_name_prefix = "osd-{}".format(device_type)
return api.create_lv(
- lv_name,
- vg.name, # the volume group
+ lv_name_prefix,
+ osd_uuid,
+ device=device,
tags={'ceph.type': device_type})
else:
error = [
- 'Cannot use device (%s).' % arg,
+ 'Cannot use device ({}).'.format(device),
'A vg/lv path or an existing device is needed']
raise RuntimeError(' '.join(error))
- raise RuntimeError('no data logical volume found with: %s' % arg)
+ raise RuntimeError('no data logical volume found with: {}'.format(device))
def safe_prepare(self, args=None):
"""
'ceph.crush_device_class': crush_device_class,
}
if self.args.filestore:
+ # TODO: allow auto creation of journal on passed device; only works
+ # when a physical device is passed, not an LV
if not self.args.journal:
raise RuntimeError('--journal is required when using --filestore')
data_lv = self.get_lv(self.args.data)
if not data_lv:
- data_lv = self.prepare_device(self.args.data, 'data', cluster_fsid, osd_fsid)
+ data_lv = self.prepare_device(self.args.data, 'data', osd_fsid)
tags['ceph.data_device'] = data_lv.lv_path
tags['ceph.data_uuid'] = data_lv.lv_uuid
elif self.args.bluestore:
block_lv = self.get_lv(self.args.data)
if not block_lv:
- block_lv = self.prepare_device(self.args.data, 'block', cluster_fsid, osd_fsid)
+ block_lv = self.prepare_device(self.args.data, 'block', osd_fsid)
tags['ceph.block_device'] = block_lv.lv_path
tags['ceph.block_uuid'] = block_lv.lv_uuid
ceph-volume lvm prepare --data {vg/lv}
- Existing block device, that will be made a group and logical volume:
+ Existing block device (a logical volume will be created):
ceph-volume lvm prepare --data /path/to/device
- Optionally, can consume db and wal partitions or logical volumes:
+ Optionally, can consume db and wal devices, partitions or logical
+ volumes. A device will get a logical volume, partitions and existing
+ logical volumes will be used as is:
- ceph-volume lvm prepare --data {vg/lv} --block.wal {partition} --block.db {vg/lv}
+ ceph-volume lvm prepare --data {vg/lv} --block.wal {partition} --block.db {/path/to/device}
""")
parser = prepare_parser(
prog='ceph-volume lvm prepare',
from .strategies import MixedStrategy
from ceph_volume.devices.lvm.create import Create
from ceph_volume.devices.lvm.prepare import Prepare
-from ceph_volume.util import templates
+from ceph_volume.util import templates, system
from ceph_volume.exceptions import SizeAllocationError
data_path = osd['data']['path']
data_vg = data_vgs[data_path]
data_lv_extents = data_vg.sizing(parts=self.osds_per_device)['extents']
+ data_uuid = system.generate_uuid()
data_lv = lvm.create_lv(
- 'osd-block', data_vg.name, extents=data_lv_extents, uuid_name=True
- )
+ 'osd-block', data_uuid, vg=data_vg.name, extents=data_lv_extents)
command = [
'--bluestore',
'--data', "%s/%s" % (data_lv.vg_name, data_lv.name),
]
if 'block.db' in osd:
+ db_uuid = system.generate_uuid()
db_lv = lvm.create_lv(
- 'osd-block-db', db_vg.name, extents=db_lv_extents, uuid_name=True
- )
+ 'osd-block-db', db_uuid, vg=db_vg.name, extents=db_lv_extents)
command.extend([ '--block.db',
'{}/{}'.format(db_lv.vg_name, db_lv.name)])
if 'block.wal' in osd:
+ wal_uuid = system.generate_uuid()
wal_lv = lvm.create_lv(
- 'osd-block-wal', wal_vg.name, extents=wal_lv_extents, uuid_name=True
- )
+ 'osd-block-wal', wal_uuid, vg=wal_vg.name, extents=wal_lv_extents)
command.extend(
['--block.wal',
'{}/{}'.format(wal_lv.vg_name, wal_lv.name)
from .strategies import MixedStrategy
from ceph_volume.devices.lvm.create import Create
from ceph_volume.devices.lvm.prepare import Prepare
-from ceph_volume.util import templates
+from ceph_volume.util import templates, system
from ceph_volume.exceptions import SizeAllocationError
device_vg = device_vgs[data_path]
data_lv_extents = device_vg.sizing(size=data_lv_size)['extents']
journal_lv_extents = device_vg.sizing(size=self.journal_size.gb.as_int())['extents']
+ data_uuid = system.generate_uuid()
data_lv = lvm.create_lv(
- 'osd-data', device_vg.name, extents=data_lv_extents, uuid_name=True
- )
+ 'osd-data', data_uuid, vg=device_vg.name, extents=data_lv_extents)
+ journal_uuid = system.generate_uuid()
journal_lv = lvm.create_lv(
- 'osd-journal', device_vg.name, extents=journal_lv_extents, uuid_name=True
- )
+ 'osd-journal', journal_uuid, vg=device_vg.name, extents=journal_lv_extents)
command = ['--filestore', '--data']
command.append('%s/%s' % (device_vg.name, data_lv.name))
data_path = osd['data']['path']
data_vg = data_vgs[data_path]
data_lv_extents = data_vg.sizing(parts=1)['extents']
+ data_uuid = system.generate_uuid()
data_lv = lvm.create_lv(
- 'osd-data', data_vg.name, extents=data_lv_extents, uuid_name=True
- )
+ 'osd-data', data_uuid, vg=data_vg.name, extents=data_lv_extents)
+ journal_uuid = system.generate_uuid()
journal_lv = lvm.create_lv(
- 'osd-journal', journal_vg.name, size=journal_size, uuid_name=True
- )
+ 'osd-journal', journal_uuid, vg=journal_vg.name, size=journal_size)
command = ['--filestore', '--data']
command.append('%s/%s' % (data_vg.name, data_lv.name))
def setup(self):
self.foo_volume = api.Volume(lv_name='foo', lv_path='/path', vg_name='foo_group', lv_tags='')
- def test_uses_size(self, monkeypatch, capture):
- monkeypatch.setattr(process, 'run', capture)
- monkeypatch.setattr(process, 'call', capture)
- monkeypatch.setattr(api, 'get_lv', lambda *a, **kw: self.foo_volume)
- api.create_lv('foo', 'foo_group', size='5G', tags={'ceph.type': 'data'})
- expected = ['lvcreate', '--yes', '-L', '5G', '-n', 'foo', 'foo_group']
- assert capture.calls[0]['args'][0] == expected
-
- def test_with_pv(self, monkeypatch, capture):
- monkeypatch.setattr(process, 'run', capture)
- monkeypatch.setattr(process, 'call', capture)
- monkeypatch.setattr(api, 'get_lv', lambda *a, **kw: self.foo_volume)
- api.create_lv('foo', 'foo_group', size='5G', tags={'ceph.type': 'data'}, pv='/path')
- expected = ['lvcreate', '--yes', '-L', '5G', '-n', 'foo', 'foo_group', '/path']
- assert capture.calls[0]['args'][0] == expected
-
- def test_calls_to_set_type_tag(self, monkeypatch, capture):
- monkeypatch.setattr(process, 'run', capture)
- monkeypatch.setattr(process, 'call', capture)
- monkeypatch.setattr(api, 'get_lv', lambda *a, **kw: self.foo_volume)
- api.create_lv('foo', 'foo_group', size='5G', tags={'ceph.type': 'data'})
- ceph_tag = ['lvchange', '--addtag', 'ceph.type=data', '/path']
- assert capture.calls[1]['args'][0] == ceph_tag
-
- def test_calls_to_set_data_tag(self, monkeypatch, capture):
- monkeypatch.setattr(process, 'run', capture)
- monkeypatch.setattr(process, 'call', capture)
- monkeypatch.setattr(api, 'get_lv', lambda *a, **kw: self.foo_volume)
- api.create_lv('foo', 'foo_group', size='5G', tags={'ceph.type': 'data'})
- data_tag = ['lvchange', '--addtag', 'ceph.data_device=/path', '/path']
- assert capture.calls[2]['args'][0] == data_tag
-
- def test_uses_uuid(self, monkeypatch, capture):
- monkeypatch.setattr(process, 'run', capture)
- monkeypatch.setattr(process, 'call', capture)
- monkeypatch.setattr(api, 'get_lv', lambda *a, **kw: self.foo_volume)
- api.create_lv('foo', 'foo_group', size='5G', tags={'ceph.type': 'data'}, uuid_name=True)
- result = capture.calls[0]['args'][0][5]
- assert result.startswith('foo-')
- assert len(result) == 40
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.get_lv')
+ def test_uses_size(self, m_get_lv, m_call, m_run, monkeypatch):
+ m_get_lv.return_value = self.foo_volume
+ api.create_lv('foo', 0, vg='foo_group', size='5G', tags={'ceph.type': 'data'})
+ expected = ['lvcreate', '--yes', '-L', '5G', '-n', 'foo-0', 'foo_group']
+ m_run.assert_called_with(expected)
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.get_lv')
+ def test_uses_extents(self, m_get_lv, m_call, m_run, monkeypatch):
+ m_get_lv.return_value = self.foo_volume
+ api.create_lv('foo', 0, vg='foo_group', extents='50', tags={'ceph.type': 'data'})
+ expected = ['lvcreate', '--yes', '-l', '50', '-n', 'foo-0', 'foo_group']
+ m_run.assert_called_with(expected)
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.get_lv')
+ def test_uses_all(self, m_get_lv, m_call, m_run, monkeypatch):
+ m_get_lv.return_value = self.foo_volume
+ api.create_lv('foo', 0, vg='foo_group', tags={'ceph.type': 'data'})
+ expected = ['lvcreate', '--yes', '-l', '100%FREE', '-n', 'foo-0', 'foo_group']
+ m_run.assert_called_with(expected)
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.Volume.set_tags')
+ @patch('ceph_volume.api.lvm.get_lv')
+ def test_calls_to_set_tags_default(self, m_get_lv, m_set_tags, m_call, m_run, monkeypatch):
+ m_get_lv.return_value = self.foo_volume
+ api.create_lv('foo', 0, vg='foo_group', size='5G')
+ tags = {
+ "ceph.osd_id": "null",
+ "ceph.type": "null",
+ "ceph.cluster_fsid": "null",
+ "ceph.osd_fsid": "null",
+ }
+ m_set_tags.assert_called_with(tags)
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.Volume.set_tags')
+ @patch('ceph_volume.api.lvm.get_lv')
+ def test_calls_to_set_tags_arg(self, m_get_lv, m_set_tags, m_call, m_run, monkeypatch):
+ m_get_lv.return_value = self.foo_volume
+ api.create_lv('foo', 0, vg='foo_group', size='5G', tags={'ceph.type': 'data'})
+ tags = {
+ "ceph.type": "data",
+ "ceph.data_device": "/path"
+ }
+ m_set_tags.assert_called_with(tags)
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.get_device_vgs')
+ @patch('ceph_volume.api.lvm.create_vg')
+ @patch('ceph_volume.api.lvm.get_lv')
+ def test_create_vg(self, m_get_lv, m_create_vg, m_get_device_vgs, m_call,
+ m_run, monkeypatch):
+ m_get_lv.return_value = self.foo_volume
+ m_get_device_vgs.return_value = []
+ api.create_lv('foo', 0, device='dev/foo', size='5G', tags={'ceph.type': 'data'})
+ m_create_vg.assert_called_with('dev/foo', name_prefix='ceph')
class TestTags(object):
def test_cannot_use_device(self):
with pytest.raises(RuntimeError) as error:
lvm.prepare.Prepare([]).prepare_device(
- '/dev/var/foo', 'data', 'asdf', '0')
+ '/dev/var/foo', 'data', '0')
assert 'Cannot use device (/dev/var/foo)' in str(error.value)
assert 'A vg/lv path or an existing device is needed' in str(error.value)