import argparse
from ceph_volume import terminal
-from ceph_volume.objectstore.lvmbluestore import LvmBlueStore as LVMActivate
-from ceph_volume.objectstore.rawbluestore import RawBlueStore as RAWActivate
+from ceph_volume.objectstore.lvm import Lvm as LVMActivate
+from ceph_volume.objectstore.raw import Raw as RAWActivate
from ceph_volume.devices.simple.activate import Activate as SimpleActivate
def _execute(self, plan: List["OSD"]) -> None:
defaults = common.get_default_args()
global_args = [
+ 'objectstore',
'bluestore',
'dmcrypt',
'with_tpm',
return plan
requested_osds = self.args.osds_per_device * len(phys_devs) + len(lvm_devs)
- if self.args.objectstore == 'bluestore':
- fast_type = 'block_db'
+ fast_type = 'block_db'
fast_allocations = self.fast_allocations(fast_devices,
requested_osds,
num_osds,
Args:
path (str): The path to the device to zap.
"""
- zap_bluestore(path)
+ if disk.has_bluestore_label(path):
+ zap_bluestore(path)
wipefs(path)
zap_data(path)
-from . import lvmbluestore
-from . import rawbluestore
+from . import lvm
+from . import raw
from typing import Any, Dict
+from enum import Enum
+
+
+class ObjectStore(str, Enum):
+ bluestore: str = 'bluestore'
+ seastore: str = 'seastore'
+
mapping: Dict[str, Any] = {
'LVM': {
- 'bluestore': lvmbluestore.LvmBlueStore
+ ObjectStore.bluestore: lvm.Lvm,
+ ObjectStore.seastore: lvm.Lvm
},
'RAW': {
- 'bluestore': rawbluestore.RawBlueStore
+ ObjectStore.bluestore: raw.Raw
}
}
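+# Illustrative sketch (not part of this change): callers can look up the
+# handler class by method and objectstore, e.g.:
+#
+#   objectstore_class = mapping['LVM'][args.objectstore]
+#   objectstore_class(args).safe_prepare()
+#
+# Since ObjectStore subclasses str, the enum members and plain strings such
+# as 'bluestore' can be used interchangeably as keys.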
self.osd_id: str = ''
self.osd_fsid: str = ''
self.cephx_lockbox_secret: str = ''
- self.objectstore: str = ''
+ self.objectstore: str = getattr(args, 'objectstore', '')
self.osd_mkfs_cmd: List[str] = []
self.block_device_path: str = ''
self.dmcrypt_key: str = encryption_utils.create_dmcrypt_key()
self.with_tpm: int = int(getattr(self.args, 'with_tpm', False))
self.method: str = ''
+ self.osd_path: str = ''
+ self.key: Optional[str] = None
+ self.wal_device_path: str = ''
+ self.db_device_path: str = ''
+ self.block_lv: Optional[Volume] = None
if getattr(self.args, 'dmcrypt', False):
self.encrypted = 1
if not self.with_tpm:
raise NotImplementedError()
def add_objectstore_opts(self) -> None:
- raise NotImplementedError()
+ """
+ Extend the ``ceph-osd --mkfs`` command with objectstore-specific
+ options. The full call typically looks like:
+
+ ceph-osd --cluster ceph --mkfs --mkkey -i 0 \
+ --monmap /var/lib/ceph/osd/ceph-0/activate.monmap \
+ --osd-data /var/lib/ceph/osd/ceph-0 \
+ --osd-uuid 8d208665-89ae-4733-8888-5d3bfbeeec6c \
+ --keyring /var/lib/ceph/osd/ceph-0/keyring \
+ --setuser ceph --setgroup ceph
+
+ In some cases the keyring is required; when it is passed in as a
+ keyword argument it is used as part of the ceph-osd command.
+ """
+
+ if self.wal_device_path:
+ self.osd_mkfs_cmd.extend(
+ ['--bluestore-block-wal-path', self.wal_device_path]
+ )
+ system.chown(self.wal_device_path)
+
+ if self.db_device_path:
+ self.osd_mkfs_cmd.extend(
+ ['--bluestore-block-db-path', self.db_device_path]
+ )
+ system.chown(self.db_device_path)
+
+ if self.get_osdspec_affinity():
+ self.osd_mkfs_cmd.extend(['--osdspec-affinity',
+ self.get_osdspec_affinity()])
+
+ def unlink_bs_symlinks(self) -> None:
+ for link_name in ['block', 'block.db', 'block.wal']:
+ link_path = os.path.join(self.osd_path, link_name)
+ if os.path.exists(link_path):
+ os.unlink(link_path)
def prepare_osd_req(self, tmpfs: bool = True) -> None:
# create the directory
]
if self.cephx_secret is not None:
self.osd_mkfs_cmd.extend(['--keyfile', '-'])
- try:
- self.add_objectstore_opts()
- except NotImplementedError:
- logger.info("No specific objectstore options to add.")
+
+ self.add_objectstore_opts()
self.osd_mkfs_cmd.extend(self.supplementary_command)
return self.osd_mkfs_cmd
raise RuntimeError('Command failed with exit code %s: %s' %
(returncode, ' '.join(cmd)))
+ mapping: Dict[str, Any] = {'raw': ['data', 'block_db', 'block_wal'],
+ 'lvm': ['ceph.block_device', 'ceph.db_device', 'ceph.wal_device']}
+ if self.args.dmcrypt:
+ for dev_type in mapping[self.method]:
+ if self.method == 'raw':
+ path = self.args.__dict__.get(dev_type, None)
+ else:
+ if self.block_lv is not None:
+ path = self.block_lv.tags.get(dev_type, None)
+ else:
+ raise RuntimeError('Unexpected error while running bluestore mkfs.')
+ if path is not None:
+ encryption_utils.CephLuks2(path).config_luks2({'subsystem': f'ceph_fsid={self.osd_fsid}'})
+
def activate(self) -> None:
raise NotImplementedError()
device, '--unlock-key-file', temp_file_name,
'--tpm2-pcrs', '9+12', '--wipe-slot', 'tpm2']
process.call(cmd, run_on_host=True, show_command=True)
+
+ def add_label(self, key: str,
+ value: str,
+ device: str) -> None:
+ """Add a label to a BlueStore device.
+ Args:
+ key (str): The name of the label being added.
+ value (str): Value of the label being added.
+ device (str): The path of the BlueStore device.
+ Raises:
+ RuntimeError: If the `ceph-bluestore-tool` command fails.
+ """
+
+ command: List[str] = ['ceph-bluestore-tool',
+ 'set-label-key',
+ '-k',
+ key,
+ '-v',
+ value,
+ '--dev',
+ device]
+
+ _, err, rc = process.call(command,
+ terminal_verbose=True,
+ show_command=True)
+ if rc:
+ raise RuntimeError(f"Can't add BlueStore label '{key}' to device {device}: {err}")
\ No newline at end of file
+++ /dev/null
-import logging
-import os
-from .baseobjectstore import BaseObjectStore
-from ceph_volume.util import system
-from ceph_volume.util.encryption import CephLuks2
-from ceph_volume import process
-from typing import Any, Dict, List, Optional, TYPE_CHECKING
-
-if TYPE_CHECKING:
- import argparse
- from ceph_volume.api.lvm import Volume
-
-logger = logging.getLogger(__name__)
-
-
-class BlueStore(BaseObjectStore):
- def __init__(self, args: "argparse.Namespace") -> None:
- super().__init__(args)
- self.args: "argparse.Namespace" = args
- self.objectstore = 'bluestore'
- self.osd_id: str = ''
- self.osd_fsid: str = ''
- self.osd_path: str = ''
- self.key: Optional[str] = None
- self.block_device_path: str = ''
- self.wal_device_path: str = ''
- self.db_device_path: str = ''
- self.block_lv: Optional[Volume] = None
-
- def add_objectstore_opts(self) -> None:
- """
- Create the files for the OSD to function. A normal call will look like:
-
- ceph-osd --cluster ceph --mkfs --mkkey -i 0 \
- --monmap /var/lib/ceph/osd/ceph-0/activate.monmap \
- --osd-data /var/lib/ceph/osd/ceph-0 \
- --osd-uuid 8d208665-89ae-4733-8888-5d3bfbeeec6c \
- --keyring /var/lib/ceph/osd/ceph-0/keyring \
- --setuser ceph --setgroup ceph
-
- In some cases it is required to use the keyring, when it is passed
- in as a keyword argument it is used as part of the ceph-osd command
- """
-
- if self.wal_device_path:
- self.osd_mkfs_cmd.extend(
- ['--bluestore-block-wal-path', self.wal_device_path]
- )
- system.chown(self.wal_device_path)
-
- if self.db_device_path:
- self.osd_mkfs_cmd.extend(
- ['--bluestore-block-db-path', self.db_device_path]
- )
- system.chown(self.db_device_path)
-
- if self.get_osdspec_affinity():
- self.osd_mkfs_cmd.extend(['--osdspec-affinity',
- self.get_osdspec_affinity()])
-
- def unlink_bs_symlinks(self) -> None:
- for link_name in ['block', 'block.db', 'block.wal']:
- link_path = os.path.join(self.osd_path, link_name)
- if os.path.exists(link_path):
- os.unlink(os.path.join(self.osd_path, link_name))
-
-
- def add_label(self, key: str,
- value: str,
- device: str) -> None:
- """Add a label to a BlueStore device.
- Args:
- key (str): The name of the label being added.
- value (str): Value of the label being added.
- device (str): The path of the BlueStore device.
- Raises:
- RuntimeError: If `ceph-bluestore-tool` command doesn't success.
- """
-
- command: List[str] = ['ceph-bluestore-tool',
- 'set-label-key',
- '-k',
- key,
- '-v',
- value,
- '--dev',
- device]
-
- _, err, rc = process.call(command,
- terminal_verbose=True,
- show_command=True)
- if rc:
- raise RuntimeError(f"Can't add BlueStore label '{key}' to device {device}: {err}")
-
- def osd_mkfs(self) -> None:
- super().osd_mkfs()
- mapping: Dict[str, Any] = {'raw': ['data', 'block_db', 'block_wal'],
- 'lvm': ['ceph.block_device', 'ceph.db_device', 'ceph.wal_device']}
- if self.args.dmcrypt:
- for dev_type in mapping[self.method]:
- if self.method == 'raw':
- path = self.args.__dict__.get(dev_type, None)
- else:
- if self.block_lv is not None:
- path = self.block_lv.tags.get(dev_type, None)
- else:
- raise RuntimeError('Unexpected error while running bluestore mkfs.')
- if path is not None:
- CephLuks2(path).config_luks2({'subsystem': f'ceph_fsid={self.osd_fsid}'})
--- /dev/null
+import json
+import logging
+import os
+from ceph_volume import conf, terminal, decorators, configuration, process
+from ceph_volume.api import lvm as api
+from ceph_volume.util import prepare as prepare_utils
+from ceph_volume.util import encryption as encryption_utils
+from ceph_volume.util import system, disk
+from ceph_volume.systemd import systemctl
+from ceph_volume.devices.lvm.common import rollback_osd
+from ceph_volume.devices.lvm.listing import direct_report
+from .baseobjectstore import BaseObjectStore
+from typing import Dict, Any, Optional, List, TYPE_CHECKING
+
+if TYPE_CHECKING:
+ import argparse
+ from ceph_volume.api.lvm import Volume
+
+logger = logging.getLogger(__name__)
+
+
+class Lvm(BaseObjectStore):
+ def __init__(self, args: "argparse.Namespace") -> None:
+ super().__init__(args)
+ self.method = 'lvm'
+ self.tags: Dict[str, Any] = {}
+
+ def pre_prepare(self) -> None:
+ if self.encrypted and not self.with_tpm:
+ self.secrets['dmcrypt_key'] = self.dmcrypt_key
+
+ cluster_fsid = self.get_cluster_fsid()
+
+ self.osd_fsid = self.args.osd_fsid or system.generate_uuid()
+ crush_device_class = self.args.crush_device_class
+ if crush_device_class:
+ self.secrets['crush_device_class'] = crush_device_class
+ # reuse a given ID if it exists, otherwise create a new ID
+ self.osd_id = prepare_utils.create_id(self.osd_fsid,
+ json.dumps(self.secrets),
+ osd_id=self.args.osd_id)
+ self.tags = {
+ 'ceph.osd_fsid': self.osd_fsid,
+ 'ceph.osd_id': self.osd_id,
+ 'ceph.cluster_fsid': cluster_fsid,
+ 'ceph.cluster_name': conf.cluster,
+ 'ceph.crush_device_class': crush_device_class,
+ 'ceph.osdspec_affinity': self.get_osdspec_affinity()
+ }
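+ # These ceph.* key/value pairs are persisted as LVM tags on the data LV
+ # (via set_tags() further down); that is how ceph-volume records OSD
+ # metadata for later activation and `ceph-volume lvm list`.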
+
+ try:
+ vg_name, lv_name = self.args.data.split('/')
+ self.block_lv = api.get_single_lv(filters={'lv_name': lv_name,
+ 'vg_name': vg_name})
+ except ValueError:
+ self.block_lv = None
+
+ if not self.block_lv:
+ self.block_lv = self.prepare_data_device('block', self.osd_fsid)
+ self.block_device_path = self.block_lv.__dict__['lv_path']
+
+ self.tags['ceph.objectstore'] = self.objectstore
+ self.tags['ceph.block_device'] = self.block_lv.__dict__['lv_path']
+ self.tags['ceph.block_uuid'] = self.block_lv.__dict__['lv_uuid']
+ self.tags['ceph.cephx_lockbox_secret'] = self.cephx_lockbox_secret
+ self.tags['ceph.encrypted'] = self.encrypted
+ self.tags['ceph.with_tpm'] = 1 if self.with_tpm else 0
+ self.tags['ceph.vdo'] = api.is_vdo(self.block_lv.__dict__['lv_path'])
+
+ def prepare_data_device(self,
+ device_type: str,
+ osd_uuid: str) -> Optional["Volume"]:
+ """
+ Check if ``arg`` is a device or partition to create an LV out of it
+ with a distinct volume group name, assigning LV tags on it and
+ ultimately, returning the logical volume object. Failing to detect
+ a device or partition will result in an error.
+
+ :param arg: The value of ``--data`` when parsing args
+ :param device_type: Usually ``block``
+ :param osd_uuid: The OSD uuid
+ """
+
+ device = self.args.data
+ if disk.is_partition(device) or disk.is_device(device):
+ # we must create a vg, and then a single lv
+ lv_name_prefix = "osd-{}".format(device_type)
+ kwargs = {
+ 'device': device,
+ 'tags': {'ceph.type': device_type},
+ 'slots': self.args.data_slots,
+ }
+ logger.debug('data device size: {}'.format(self.args.data_size))
+ if self.args.data_size != 0:
+ kwargs['size'] = self.args.data_size
+ return api.create_lv(
+ lv_name_prefix,
+ osd_uuid,
+ **kwargs)
+ else:
+ error = [
+ 'Cannot use device ({}).'.format(device),
+ 'A vg/lv path or an existing device is needed']
+ raise RuntimeError(' '.join(error))
+
+ def safe_prepare(self,
+ args: Optional["argparse.Namespace"] = None) -> None:
+ """
+ An intermediate step between `main()` and `prepare()` so that we can
+ capture `self.osd_id` in case we need to roll back.
+
+ :param args: Injected args, usually from `lvm create` which compounds
+ both `prepare` and `create`
+ """
+ if args is not None:
+ self.args = args
+
+ try:
+ self.prepare()
+ except Exception:
+ logger.exception('lvm prepare was unable to complete')
+ logger.info('will rollback OSD ID creation')
+ rollback_osd(self.osd_id)
+ raise
+ terminal.success("ceph-volume lvm prepare successful for: %s" %
+ self.args.data)
+
+ @decorators.needs_root
+ def prepare(self) -> None:
+ # 1/
+ # Need to be reworked (move it to the parent class + call super()? )
+ self.pre_prepare()
+
+ # 2/
+ self.setup_metadata_devices()
+ self.tags['ceph.type'] = 'block'
+ if self.block_lv is not None:
+ self.block_lv.set_tags(self.tags)
+
+ # 3/ encryption-only operations
+ if self.encrypted:
+ self.prepare_dmcrypt()
+
+ # 4/ osd_prepare req
+ self.prepare_osd_req()
+
+ # 5/ bluestore mkfs
+ # prepare the osd filesystem
+ self.osd_mkfs()
+
+ def prepare_dmcrypt(self) -> None:
+ # If encrypted, there is no need to create the lockbox keyring file
+ # because bluestore re-creates the files and does not have support
+ # for other files like the custom lockbox one. This will need to be
+ # done on activation. Format and open ('decrypt') the devices and
+ # re-assign the device and journal variables so that the rest of the
+ # process can use the mapper paths.
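+ # e.g. a block_device_path of '/dev/<vg>/<lv>' is replaced with the opened
+ # mapper path '/dev/mapper/<block_uuid>' so the remaining preparation
+ # steps operate on the decrypted device (see luks_format_and_open below).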
+
+ device_types = ('block', 'db', 'wal')
+
+ for device_type in device_types:
+ attr_name: str = f'{device_type}_device_path'
+ path: str = self.__dict__[attr_name]
+ if path:
+ self.__dict__[attr_name] = self.luks_format_and_open(path,
+ device_type,
+ self.tags)
+
+ def luks_format_and_open(self,
+ device: str,
+ device_type: str,
+ tags: Dict[str, Any]) -> str:
+ """
+ Helper for devices that are encrypted. The operations needed for
+ block, db, wal devices are all the same
+ """
+ if not device:
+ return ''
+ tag_name = 'ceph.%s_uuid' % device_type
+ uuid = tags[tag_name]
+ # format data device
+ encryption_utils.luks_format(
+ self.dmcrypt_key,
+ device
+ )
+
+ if self.with_tpm:
+ self.enroll_tpm2(device)
+
+ encryption_utils.luks_open(
+ self.dmcrypt_key,
+ device,
+ uuid,
+ self.with_tpm)
+
+ return '/dev/mapper/%s' % uuid
+
+ def setup_metadata_devices(self) -> None:
+ """
+ Check if ``device`` is an lv, if so, set the tags, making sure to
+ update the tags with the lv_uuid and lv_path which the incoming tags
+ will not have.
+
+ If the device is not a logical volume, then retrieve the partition UUID
+ by querying ``blkid``
+ """
+ s: Dict[str, Any] = {
+ 'db': {
+ 'attr_map': 'db_device_path',
+ 'device_name': self.args.block_db,
+ 'device_size': self.args.block_db_size,
+ 'device_slots': self.args.block_db_slots,
+ },
+ 'wal': {
+ 'attr_map': 'wal_device_path',
+ 'device_name': self.args.block_wal,
+ 'device_size': self.args.block_wal_size,
+ 'device_slots': self.args.block_wal_slots,
+ }
+ }
+ for device_type, device_args in s.items():
+ device_name: str = device_args.get('device_name', None)
+ size: int = device_args.get('device_size')
+ slots: int = device_args.get('device_slots')
+ if device_name is None:
+ continue
+ _tags: Dict[str, Any] = self.tags.copy()
+ _tags['ceph.type'] = device_type
+ _tags['ceph.vdo'] = api.is_vdo(device_name)
+
+ try:
+ vg_name, lv_name = device_name.split('/')
+ lv = api.get_single_lv(filters={'lv_name': lv_name,
+ 'vg_name': vg_name})
+ except ValueError:
+ lv = None
+
+ if lv:
+ _tags['ceph.%s_uuid' % device_type] = lv.lv_uuid
+ _tags['ceph.%s_device' % device_type] = lv.lv_path
+ lv.set_tags(_tags)
+ elif disk.is_partition(device_name) or disk.is_device(device_name):
+ # We got a disk or a partition, create an lv
+ path = device_name
+ lv_type = "osd-{}".format(device_type)
+ name_uuid = system.generate_uuid()
+ kwargs = {
+ 'name_prefix': lv_type,
+ 'uuid': name_uuid,
+ 'vg': None,
+ 'device': device_name,
+ 'slots': slots,
+ 'extents': None,
+ 'size': None,
+ 'tags': _tags,
+ }
+ # TODO use get_block_db_size and co here to get configured size in
+ # conf file
+ if size != 0:
+ kwargs['size'] = size
+ # We do not create LV if this is a partition
+ if not disk.is_partition(device_name):
+ lv = api.create_lv(**kwargs)
+ if lv is not None:
+ path, lv_uuid = lv.lv_path, lv.lv_uuid
+ for key, value in {
+ f"ceph.{device_type}_uuid": lv_uuid,
+ f"ceph.{device_type}_device": path,
+ }.items():
+ _tags[key] = value
+ self.tags[key] = value
+ lv.set_tags(_tags)
+ setattr(self, f'{device_type}_device_path', path)
+
+ def get_osd_device_path(self,
+ osd_lvs: List["Volume"],
+ device_type: str,
+ dmcrypt_secret: str = '') -> Optional[str]:
+ """
+ ``device_type`` can be one of ``db``, ``wal`` or ``block`` so that we
+ can query LVs on the system and fall back to querying the uuid if that
+ is not present.
+
+ Return a path if possible, otherwise ``None``, since some of
+ these devices are optional.
+ """
+ # TODO(guits): this should be moved in a new function get_device_uuid_from_lv()
+ osd_block_lv = None
+ for lv in osd_lvs:
+ if lv.tags.get('ceph.type') == 'block':
+ osd_block_lv = lv
+ break
+ if osd_block_lv:
+ is_encrypted = osd_block_lv.tags.get('ceph.encrypted', '0') == '1'
+ logger.debug('Found block device (%s) with encryption: %s',
+ osd_block_lv.name, is_encrypted)
+ uuid_tag = 'ceph.%s_uuid' % device_type
+ device_uuid = osd_block_lv.tags.get(uuid_tag, '')
+ if not device_uuid:
+ return None
+
+ device_lv: Optional["Volume"] = None
+ for lv in osd_lvs:
+ if lv.tags.get('ceph.type') == device_type:
+ device_lv = lv
+ break
+ if device_lv:
+ if is_encrypted:
+ encryption_utils.luks_open(dmcrypt_secret,
+ device_lv.__dict__['lv_path'],
+ device_uuid)
+ return '/dev/mapper/%s' % device_uuid
+ return device_lv.__dict__['lv_path']
+
+ # this could be a regular device, so query it with blkid
+ physical_device = disk.get_device_from_partuuid(device_uuid)
+ if physical_device:
+ if is_encrypted:
+ encryption_utils.luks_open(dmcrypt_secret,
+ physical_device,
+ device_uuid)
+ return '/dev/mapper/%s' % device_uuid
+ return physical_device
+
+ raise RuntimeError('could not find %s with uuid %s' % (device_type,
+ device_uuid))
+
+ def _activate(self,
+ osd_lvs: List["Volume"],
+ no_systemd: bool = False,
+ no_tmpfs: bool = False) -> None:
+ for lv in osd_lvs:
+ if lv.tags.get('ceph.type') == 'block':
+ osd_block_lv = lv
+ break
+ else:
+ raise RuntimeError('could not find a bluestore OSD to activate')
+
+ is_encrypted = osd_block_lv.tags.get('ceph.encrypted', '0') == '1'
+ dmcrypt_secret = ''
+ osd_id = osd_block_lv.tags['ceph.osd_id']
+ conf.cluster = osd_block_lv.tags['ceph.cluster_name']
+ osd_fsid = osd_block_lv.tags['ceph.osd_fsid']
+ configuration.load_ceph_conf_path(
+ osd_block_lv.tags['ceph.cluster_name'])
+ configuration.load()
+
+ # mount on tmpfs the osd directory
+ self.osd_path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
+ if not system.path_is_mounted(self.osd_path):
+ # mkdir -p and mount as tmpfs
+ prepare_utils.create_osd_path(osd_id, tmpfs=not no_tmpfs)
+
+ # XXX This needs to be removed once ceph-bluestore-tool can deal with
+ # symlinks that exist in the osd dir
+ self.unlink_bs_symlinks()
+
+ # encryption is handled here, before priming the OSD dir
+ if is_encrypted:
+ osd_lv_path = '/dev/mapper/%s' % osd_block_lv.__dict__['lv_uuid']
+ lockbox_secret = osd_block_lv.tags['ceph.cephx_lockbox_secret']
+ self.with_tpm = osd_block_lv.tags.get('ceph.with_tpm') == '1'
+ if not self.with_tpm:
+ encryption_utils.write_lockbox_keyring(osd_id,
+ osd_fsid,
+ lockbox_secret)
+ dmcrypt_secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid)
+ lv_path: str = osd_block_lv.__dict__['lv_path']
+ if disk.has_holders(lv_path):
+ real_path_device = os.path.realpath(lv_path)
+ holders = disk.get_block_device_holders()
+
+ if real_path_device in holders.keys() and real_path_device in holders.values():
+ osd_lv_path = disk.get_lvm_mapper_path_from_dm(next(k for k, v in holders.items() if v == real_path_device))
+ else:
+ encryption_utils.luks_open(dmcrypt_secret,
+ osd_block_lv.__dict__['lv_path'],
+ osd_block_lv.__dict__['lv_uuid'],
+ with_tpm=self.with_tpm)
+ else:
+ osd_lv_path = osd_block_lv.__dict__['lv_path']
+
+ db_device_path = \
+ self.get_osd_device_path(osd_lvs, 'db',
+ dmcrypt_secret=dmcrypt_secret)
+ wal_device_path = \
+ self.get_osd_device_path(osd_lvs,
+ 'wal',
+ dmcrypt_secret=dmcrypt_secret)
+
+ # Once symlinks are removed, the osd dir can be primed again.
+ # chown first, regardless of what currently exists so that
+ # ``prime-osd-dir`` can succeed even if permissions are
+ # somehow messed up.
+ system.chown(self.osd_path)
+ objectstore = osd_block_lv.tags.get('ceph.objectstore', 'bluestore')
+ if objectstore == 'bluestore':
+ prime_command = [
+ 'ceph-bluestore-tool', '--cluster=%s' % conf.cluster,
+ 'prime-osd-dir', '--dev', osd_lv_path,
+ '--path', self.osd_path, '--no-mon-config']
+
+ process.run(prime_command)
+ # always re-do the symlink regardless if it exists, so that the block,
+ # block.wal, and block.db devices that may have changed can be mapped
+ # correctly every time
+ process.run(['ln',
+ '-snf',
+ osd_lv_path,
+ os.path.join(self.osd_path, 'block')])
+ system.chown(os.path.join(self.osd_path, 'block'))
+ system.chown(self.osd_path)
+ if db_device_path:
+ destination = os.path.join(self.osd_path, 'block.db')
+ process.run(['ln', '-snf', db_device_path, destination])
+ system.chown(db_device_path)
+ system.chown(destination)
+ if wal_device_path:
+ destination = os.path.join(self.osd_path, 'block.wal')
+ process.run(['ln', '-snf', wal_device_path, destination])
+ system.chown(wal_device_path)
+ system.chown(destination)
+
+ if no_systemd is False:
+ # enable the ceph-volume unit for this OSD
+ systemctl.enable_volume(osd_id, osd_fsid, 'lvm')
+
+ # enable the OSD
+ systemctl.enable_osd(osd_id)
+
+ # start the OSD
+ systemctl.start_osd(osd_id)
+ terminal.success("ceph-volume lvm activate successful for osd ID: %s" %
+ osd_id)
+
+ @decorators.needs_root
+ def activate_all(self) -> None:
+ listed_osds = direct_report()
+ osds = {}
+ for osd_id, devices in listed_osds.items():
+ # the metadata for all devices in each OSD will contain
+ # the FSID which is required for activation
+ for device in devices:
+ fsid = device.get('tags', {}).get('ceph.osd_fsid')
+ if fsid:
+ osds[fsid] = osd_id
+ break
+ if not osds:
+ terminal.warning('Was unable to find any OSDs to activate')
+ terminal.warning('Verify OSDs are present with '
+ '"ceph-volume lvm list"')
+ return
+ for osd_fsid, osd_id in osds.items():
+ if not self.args.no_systemd and systemctl.osd_is_active(osd_id):
+ terminal.warning(
+ 'OSD ID %s FSID %s process is active. '
+ 'Skipping activation' % (osd_id, osd_fsid)
+ )
+ else:
+ terminal.info('Activating OSD ID %s FSID %s' % (osd_id,
+ osd_fsid))
+ self.activate(self.args, osd_id=osd_id, osd_fsid=osd_fsid)
+
+ @decorators.needs_root
+ def activate(self,
+ args: Optional["argparse.Namespace"] = None,
+ osd_id: Optional[str] = None,
+ osd_fsid: Optional[str] = None) -> None:
+ """
+ :param args: The parsed arguments coming from the CLI
+ :param osd_id: When activating all, this gets populated with an
+ existing OSD ID
+ :param osd_fsid: When activating all, this gets populated with an
+ existing OSD FSID
+ """
+ osd_id = osd_id if osd_id else self.args.osd_id
+ osd_fsid = osd_fsid if osd_fsid else self.args.osd_fsid
+
+ if osd_id and osd_fsid:
+ tags = {'ceph.osd_id': osd_id, 'ceph.osd_fsid': osd_fsid}
+ elif not osd_id and osd_fsid:
+ tags = {'ceph.osd_fsid': osd_fsid}
+ elif osd_id and not osd_fsid:
+ raise RuntimeError('could not activate osd.{}, please provide the '
+ 'osd_fsid too'.format(osd_id))
+ else:
+ raise RuntimeError('Please provide both osd_id and osd_fsid')
+ lvs = api.get_lvs(tags=tags)
+ if not lvs:
+ raise RuntimeError('could not find osd.%s with osd_fsid %s' %
+ (osd_id, osd_fsid))
+
+ self._activate(lvs, self.args.no_systemd, getattr(self.args,
+ 'no_tmpfs',
+ False))
+++ /dev/null
-import json
-import logging
-import os
-from ceph_volume import conf, terminal, decorators, configuration, process
-from ceph_volume.api import lvm as api
-from ceph_volume.util import prepare as prepare_utils
-from ceph_volume.util import encryption as encryption_utils
-from ceph_volume.util import system, disk
-from ceph_volume.systemd import systemctl
-from ceph_volume.devices.lvm.common import rollback_osd
-from ceph_volume.devices.lvm.listing import direct_report
-from .bluestore import BlueStore
-from typing import Dict, Any, Optional, List, TYPE_CHECKING
-
-if TYPE_CHECKING:
- import argparse
- from ceph_volume.api.lvm import Volume
-
-logger = logging.getLogger(__name__)
-
-
-class LvmBlueStore(BlueStore):
- def __init__(self, args: "argparse.Namespace") -> None:
- super().__init__(args)
- self.method = 'lvm'
- self.tags: Dict[str, Any] = {}
-
- def pre_prepare(self) -> None:
- if self.encrypted and not self.with_tpm:
- self.secrets['dmcrypt_key'] = self.dmcrypt_key
-
- cluster_fsid = self.get_cluster_fsid()
-
- self.osd_fsid = self.args.osd_fsid or system.generate_uuid()
- crush_device_class = self.args.crush_device_class
- if crush_device_class:
- self.secrets['crush_device_class'] = crush_device_class
- # reuse a given ID if it exists, otherwise create a new ID
- self.osd_id = prepare_utils.create_id(self.osd_fsid,
- json.dumps(self.secrets),
- osd_id=self.args.osd_id)
- self.tags = {
- 'ceph.osd_fsid': self.osd_fsid,
- 'ceph.osd_id': self.osd_id,
- 'ceph.cluster_fsid': cluster_fsid,
- 'ceph.cluster_name': conf.cluster,
- 'ceph.crush_device_class': crush_device_class,
- 'ceph.osdspec_affinity': self.get_osdspec_affinity()
- }
-
- try:
- vg_name, lv_name = self.args.data.split('/')
- self.block_lv = api.get_single_lv(filters={'lv_name': lv_name,
- 'vg_name': vg_name})
- except ValueError:
- self.block_lv = None
-
- if not self.block_lv:
- self.block_lv = self.prepare_data_device('block', self.osd_fsid)
- self.block_device_path = self.block_lv.__dict__['lv_path']
-
- self.tags['ceph.block_device'] = self.block_lv.__dict__['lv_path']
- self.tags['ceph.block_uuid'] = self.block_lv.__dict__['lv_uuid']
- self.tags['ceph.cephx_lockbox_secret'] = self.cephx_lockbox_secret
- self.tags['ceph.encrypted'] = self.encrypted
- self.tags['ceph.with_tpm'] = 1 if self.with_tpm else 0
- self.tags['ceph.vdo'] = api.is_vdo(self.block_lv.__dict__['lv_path'])
-
- def prepare_data_device(self,
- device_type: str,
- osd_uuid: str) -> Optional["Volume"]:
- """
- Check if ``arg`` is a device or partition to create an LV out of it
- with a distinct volume group name, assigning LV tags on it and
- ultimately, returning the logical volume object. Failing to detect
- a device or partition will result in error.
-
- :param arg: The value of ``--data`` when parsing args
- :param device_type: Usually ``block``
- :param osd_uuid: The OSD uuid
- """
-
- device = self.args.data
- if disk.is_partition(device) or disk.is_device(device):
- # we must create a vg, and then a single lv
- lv_name_prefix = "osd-{}".format(device_type)
- kwargs = {
- 'device': device,
- 'tags': {'ceph.type': device_type},
- 'slots': self.args.data_slots,
- }
- logger.debug('data device size: {}'.format(self.args.data_size))
- if self.args.data_size != 0:
- kwargs['size'] = self.args.data_size
- return api.create_lv(
- lv_name_prefix,
- osd_uuid,
- **kwargs)
- else:
- error = [
- 'Cannot use device ({}).'.format(device),
- 'A vg/lv path or an existing device is needed']
- raise RuntimeError(' '.join(error))
-
- def safe_prepare(self,
- args: Optional["argparse.Namespace"] = None) -> None:
- """
- An intermediate step between `main()` and `prepare()` so that we can
- capture the `self.osd_id` in case we need to rollback
-
- :param args: Injected args, usually from `lvm create` which compounds
- both `prepare` and `create`
- """
- if args is not None:
- self.args = args
-
- try:
- self.prepare()
- except Exception:
- logger.exception('lvm prepare was unable to complete')
- logger.info('will rollback OSD ID creation')
- rollback_osd(self.osd_id)
- raise
- terminal.success("ceph-volume lvm prepare successful for: %s" %
- self.args.data)
-
- @decorators.needs_root
- def prepare(self) -> None:
- # 1/
- # Need to be reworked (move it to the parent class + call super()? )
- self.pre_prepare()
-
- # 2/
- self.setup_metadata_devices()
- self.tags['ceph.type'] = 'block'
- if self.block_lv is not None:
- self.block_lv.set_tags(self.tags)
-
- # 3/ encryption-only operations
- if self.encrypted:
- self.prepare_dmcrypt()
-
- # 4/ osd_prepare req
- self.prepare_osd_req()
-
- # 5/ bluestore mkfs
- # prepare the osd filesystem
- self.osd_mkfs()
-
- def prepare_dmcrypt(self) -> None:
- # If encrypted, there is no need to create the lockbox keyring file
- # because bluestore re-creates the files and does not have support
- # for other files like the custom lockbox one. This will need to be
- # done on activation. Format and open ('decrypt' devices) and
- # re-assign the device and journal variables so that the rest of the
- # process can use the mapper paths
-
- device_types = ('block', 'db', 'wal')
-
- for device_type in device_types:
- attr_name: str = f'{device_type}_device_path'
- path: str = self.__dict__[attr_name]
- if path:
- self.__dict__[attr_name] = self.luks_format_and_open(path,
- device_type,
- self.tags)
-
- def luks_format_and_open(self,
- device: str,
- device_type: str,
- tags: Dict[str, Any]) -> str:
- """
- Helper for devices that are encrypted. The operations needed for
- block, db, wal devices are all the same
- """
- if not device:
- return ''
- tag_name = 'ceph.%s_uuid' % device_type
- uuid = tags[tag_name]
- # format data device
- encryption_utils.luks_format(
- self.dmcrypt_key,
- device
- )
-
- if self.with_tpm:
- self.enroll_tpm2(device)
-
- encryption_utils.luks_open(
- self.dmcrypt_key,
- device,
- uuid,
- self.with_tpm)
-
- return '/dev/mapper/%s' % uuid
-
- def setup_metadata_devices(self) -> None:
- """
- Check if ``device`` is an lv, if so, set the tags, making sure to
- update the tags with the lv_uuid and lv_path which the incoming tags
- will not have.
-
- If the device is not a logical volume, then retrieve the partition UUID
- by querying ``blkid``
- """
- s: Dict[str, Any] = {
- 'db': {
- 'attr_map': 'db_device_path',
- 'device_name': self.args.block_db,
- 'device_size': self.args.block_db_size,
- 'device_slots': self.args.block_db_slots,
- },
- 'wal': {
- 'attr_map': 'wal_device_path',
- 'device_name': self.args.block_wal,
- 'device_size': self.args.block_wal_size,
- 'device_slots': self.args.block_wal_slots,
- }
- }
- for device_type, device_args in s.items():
- device_name: str = device_args.get('device_name', None)
- size: int = device_args.get('device_size')
- slots: int = device_args.get('device_slots')
- if device_name is None:
- continue
- _tags: Dict[str, Any] = self.tags.copy()
- _tags['ceph.type'] = device_type
- _tags['ceph.vdo'] = api.is_vdo(device_name)
-
- try:
- vg_name, lv_name = device_name.split('/')
- lv = api.get_single_lv(filters={'lv_name': lv_name,
- 'vg_name': vg_name})
- except ValueError:
- lv = None
-
- if lv:
- _tags['ceph.%s_uuid' % device_type] = lv.lv_uuid
- _tags['ceph.%s_device' % device_type] = lv.lv_path
- lv.set_tags(_tags)
- elif disk.is_partition(device_name) or disk.is_device(device_name):
- # We got a disk or a partition, create an lv
- path = device_name
- lv_type = "osd-{}".format(device_type)
- name_uuid = system.generate_uuid()
- kwargs = {
- 'name_prefix': lv_type,
- 'uuid': name_uuid,
- 'vg': None,
- 'device': device_name,
- 'slots': slots,
- 'extents': None,
- 'size': None,
- 'tags': _tags,
- }
- # TODO use get_block_db_size and co here to get configured size in
- # conf file
- if size != 0:
- kwargs['size'] = size
- # We do not create LV if this is a partition
- if not disk.is_partition(device_name):
- lv = api.create_lv(**kwargs)
- if lv is not None:
- path, lv_uuid = lv.lv_path, lv.lv_uuid
- for key, value in {
- f"ceph.{device_type}_uuid": lv_uuid,
- f"ceph.{device_type}_device": path,
- }.items():
- _tags[key] = value
- self.tags[key] = value
- lv.set_tags(_tags)
- setattr(self, f'{device_type}_device_path', path)
-
- def get_osd_device_path(self,
- osd_lvs: List["Volume"],
- device_type: str,
- dmcrypt_secret: str = '') -> Optional[str]:
- """
- ``device_type`` can be one of ``db``, ``wal`` or ``block`` so that we
- can query LVs on system and fallback to querying the uuid if that is
- not present.
-
- Return a path if possible, failing to do that a ``None``, since some of
- these devices are optional.
- """
- # TODO(guits): this should be moved in a new function get_device_uuid_from_lv()
- osd_block_lv = None
- for lv in osd_lvs:
- if lv.tags.get('ceph.type') == 'block':
- osd_block_lv = lv
- break
- if osd_block_lv:
- is_encrypted = osd_block_lv.tags.get('ceph.encrypted', '0') == '1'
- logger.debug('Found block device (%s) with encryption: %s',
- osd_block_lv.name, is_encrypted)
- uuid_tag = 'ceph.%s_uuid' % device_type
- device_uuid = osd_block_lv.tags.get(uuid_tag, '')
- if not device_uuid:
- return None
-
- device_lv: Optional["Volume"] = None
- for lv in osd_lvs:
- if lv.tags.get('ceph.type') == device_type:
- device_lv = lv
- break
- if device_lv:
- if is_encrypted:
- encryption_utils.luks_open(dmcrypt_secret,
- device_lv.__dict__['lv_path'],
- device_uuid)
- return '/dev/mapper/%s' % device_uuid
- return device_lv.__dict__['lv_path']
-
- # this could be a regular device, so query it with blkid
- physical_device = disk.get_device_from_partuuid(device_uuid)
- if physical_device:
- if is_encrypted:
- encryption_utils.luks_open(dmcrypt_secret,
- physical_device,
- device_uuid)
- return '/dev/mapper/%s' % device_uuid
- return physical_device
-
- raise RuntimeError('could not find %s with uuid %s' % (device_type,
- device_uuid))
-
- def _activate(self,
- osd_lvs: List["Volume"],
- no_systemd: bool = False,
- no_tmpfs: bool = False) -> None:
- for lv in osd_lvs:
- if lv.tags.get('ceph.type') == 'block':
- osd_block_lv = lv
- break
- else:
- raise RuntimeError('could not find a bluestore OSD to activate')
-
- is_encrypted = osd_block_lv.tags.get('ceph.encrypted', '0') == '1'
- dmcrypt_secret = ''
- osd_id = osd_block_lv.tags['ceph.osd_id']
- conf.cluster = osd_block_lv.tags['ceph.cluster_name']
- osd_fsid = osd_block_lv.tags['ceph.osd_fsid']
- configuration.load_ceph_conf_path(
- osd_block_lv.tags['ceph.cluster_name'])
- configuration.load()
-
- # mount on tmpfs the osd directory
- self.osd_path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
- if not system.path_is_mounted(self.osd_path):
- # mkdir -p and mount as tmpfs
- prepare_utils.create_osd_path(osd_id, tmpfs=not no_tmpfs)
-
- # XXX This needs to be removed once ceph-bluestore-tool can deal with
- # symlinks that exist in the osd dir
- self.unlink_bs_symlinks()
-
- # encryption is handled here, before priming the OSD dir
- if is_encrypted:
- osd_lv_path = '/dev/mapper/%s' % osd_block_lv.__dict__['lv_uuid']
- lockbox_secret = osd_block_lv.tags['ceph.cephx_lockbox_secret']
- self.with_tpm = osd_block_lv.tags.get('ceph.with_tpm') == '1'
- if not self.with_tpm:
- encryption_utils.write_lockbox_keyring(osd_id,
- osd_fsid,
- lockbox_secret)
- dmcrypt_secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid)
- lv_path: str = osd_block_lv.__dict__['lv_path']
- if disk.has_holders(lv_path):
- real_path_device = os.path.realpath(lv_path)
- holders = disk.get_block_device_holders()
-
- if real_path_device in holders.keys() and real_path_device in holders.values():
- osd_lv_path = disk.get_lvm_mapper_path_from_dm(next(k for k, v in holders.items() if v == real_path_device))
- else:
- encryption_utils.luks_open(dmcrypt_secret,
- osd_block_lv.__dict__['lv_path'],
- osd_block_lv.__dict__['lv_uuid'],
- with_tpm=self.with_tpm)
- else:
- osd_lv_path = osd_block_lv.__dict__['lv_path']
-
- db_device_path = \
- self.get_osd_device_path(osd_lvs, 'db',
- dmcrypt_secret=dmcrypt_secret)
- wal_device_path = \
- self.get_osd_device_path(osd_lvs,
- 'wal',
- dmcrypt_secret=dmcrypt_secret)
-
- # Once symlinks are removed, the osd dir can be 'primed again.
- # chown first, regardless of what currently exists so that
- # ``prime-osd-dir`` can succeed even if permissions are
- # somehow messed up.
- system.chown(self.osd_path)
- prime_command = [
- 'ceph-bluestore-tool', '--cluster=%s' % conf.cluster,
- 'prime-osd-dir', '--dev', osd_lv_path,
- '--path', self.osd_path, '--no-mon-config']
-
- process.run(prime_command)
- # always re-do the symlink regardless if it exists, so that the block,
- # block.wal, and block.db devices that may have changed can be mapped
- # correctly every time
- process.run(['ln',
- '-snf',
- osd_lv_path,
- os.path.join(self.osd_path, 'block')])
- system.chown(os.path.join(self.osd_path, 'block'))
- system.chown(self.osd_path)
- if db_device_path:
- destination = os.path.join(self.osd_path, 'block.db')
- process.run(['ln', '-snf', db_device_path, destination])
- system.chown(db_device_path)
- system.chown(destination)
- if wal_device_path:
- destination = os.path.join(self.osd_path, 'block.wal')
- process.run(['ln', '-snf', wal_device_path, destination])
- system.chown(wal_device_path)
- system.chown(destination)
-
- if no_systemd is False:
- # enable the ceph-volume unit for this OSD
- systemctl.enable_volume(osd_id, osd_fsid, 'lvm')
-
- # enable the OSD
- systemctl.enable_osd(osd_id)
-
- # start the OSD
- systemctl.start_osd(osd_id)
- terminal.success("ceph-volume lvm activate successful for osd ID: %s" %
- osd_id)
-
- @decorators.needs_root
- def activate_all(self) -> None:
- listed_osds = direct_report()
- osds = {}
- for osd_id, devices in listed_osds.items():
- # the metadata for all devices in each OSD will contain
- # the FSID which is required for activation
- for device in devices:
- fsid = device.get('tags', {}).get('ceph.osd_fsid')
- if fsid:
- osds[fsid] = osd_id
- break
- if not osds:
- terminal.warning('Was unable to find any OSDs to activate')
- terminal.warning('Verify OSDs are present with '
- '"ceph-volume lvm list"')
- return
- for osd_fsid, osd_id in osds.items():
- if not self.args.no_systemd and systemctl.osd_is_active(osd_id):
- terminal.warning(
- 'OSD ID %s FSID %s process is active. '
- 'Skipping activation' % (osd_id, osd_fsid)
- )
- else:
- terminal.info('Activating OSD ID %s FSID %s' % (osd_id,
- osd_fsid))
- self.activate(self.args, osd_id=osd_id, osd_fsid=osd_fsid)
-
- @decorators.needs_root
- def activate(self,
- args: Optional["argparse.Namespace"] = None,
- osd_id: Optional[str] = None,
- osd_fsid: Optional[str] = None) -> None:
- """
- :param args: The parsed arguments coming from the CLI
- :param osd_id: When activating all, this gets populated with an
- existing OSD ID
- :param osd_fsid: When activating all, this gets populated with an
- existing OSD FSID
- """
- osd_id = osd_id if osd_id else self.args.osd_id
- osd_fsid = osd_fsid if osd_fsid else self.args.osd_fsid
-
- if osd_id and osd_fsid:
- tags = {'ceph.osd_id': osd_id, 'ceph.osd_fsid': osd_fsid}
- elif not osd_id and osd_fsid:
- tags = {'ceph.osd_fsid': osd_fsid}
- elif osd_id and not osd_fsid:
- raise RuntimeError('could not activate osd.{}, please provide the '
- 'osd_fsid too'.format(osd_id))
- else:
- raise RuntimeError('Please provide both osd_id and osd_fsid')
- lvs = api.get_lvs(tags=tags)
- if not lvs:
- raise RuntimeError('could not find osd.%s with osd_fsid %s' %
- (osd_id, osd_fsid))
-
- self._activate(lvs, self.args.no_systemd, getattr(self.args,
- 'no_tmpfs',
- False))
--- /dev/null
+import logging
+import json
+import os
+from .baseobjectstore import BaseObjectStore
+from ceph_volume import terminal, decorators, conf, process
+from ceph_volume.util import system, disk
+from ceph_volume.util import prepare as prepare_utils
+from ceph_volume.util import encryption as encryption_utils
+from ceph_volume.util.device import Device
+from ceph_volume.devices.lvm.common import rollback_osd
+from ceph_volume.devices.raw.list import direct_report
+from typing import Any, Dict, List, Optional, TYPE_CHECKING
+
+if TYPE_CHECKING:
+ import argparse
+
+logger = logging.getLogger(__name__)
+
+
+class Raw(BaseObjectStore):
+ def __init__(self, args: "argparse.Namespace") -> None:
+ super().__init__(args)
+ self.method = 'raw'
+ self.devices: List[str] = getattr(args, 'devices', [])
+ self.osd_id = getattr(self.args, 'osd_id', '')
+ self.osd_fsid = getattr(self.args, 'osd_fsid', '')
+ self.block_device_path = getattr(self.args, 'data', '')
+ self.db_device_path = getattr(self.args, 'block_db', '')
+ self.wal_device_path = getattr(self.args, 'block_wal', '')
+
+ def prepare_dmcrypt(self) -> None:
+ """
+ Helper for devices that are encrypted. The operations needed for
+ block, db and wal devices are all the same.
+ """
+
+ for device, device_type in [(self.block_device_path, 'block'),
+ (self.db_device_path, 'db'),
+ (self.wal_device_path, 'wal')]:
+
+ if device:
+ kname = disk.lsblk(device)['KNAME']
+ mapping = 'ceph-{}-{}-{}-dmcrypt'.format(self.osd_fsid,
+ kname,
+ device_type)
+ # format data device
+ encryption_utils.luks_format(
+ self.dmcrypt_key,
+ device
+ )
+ if self.with_tpm:
+ self.enroll_tpm2(device)
+ encryption_utils.luks_open(
+ self.dmcrypt_key,
+ device,
+ mapping,
+ self.with_tpm
+ )
+ self.__dict__[f'{device_type}_device_path'] = \
+ '/dev/mapper/{}'.format(mapping) # TODO(guits): need to preserve path or find a way to get the parent device from the mapper ?
+
+ def safe_prepare(self,
+ args: Optional["argparse.Namespace"] = None) -> None:
+ """
+ An intermediate step between `main()` and `prepare()` so that we can
+ capture `self.osd_id` in case we need to roll back.
+
+ :param args: Injected args, usually from `raw create` which compounds
+ both `prepare` and `create`
+ """
+ if args is not None:
+ self.args = args # This should be moved (to __init__ ?)
+ try:
+ self.prepare()
+ except Exception:
+ logger.exception('raw prepare was unable to complete')
+ logger.info('will rollback OSD ID creation')
+ rollback_osd(self.osd_id)
+ raise
+ dmcrypt_log = 'dmcrypt' if hasattr(args, 'dmcrypt') else 'clear'
+ terminal.success("ceph-volume raw {} prepare "
+ "successful for: {}".format(dmcrypt_log,
+ self.args.data))
+
+ @decorators.needs_root
+ def prepare(self) -> None:
+ self.osd_fsid = system.generate_uuid()
+ crush_device_class = self.args.crush_device_class
+ if self.encrypted and not self.with_tpm:
+ self.dmcrypt_key = os.getenv('CEPH_VOLUME_DMCRYPT_SECRET', '')
+ self.secrets['dmcrypt_key'] = self.dmcrypt_key
+ if crush_device_class:
+ self.secrets['crush_device_class'] = crush_device_class
+
+ tmpfs = not self.args.no_tmpfs
+
+ # reuse a given ID if it exists, otherwise create a new ID
+ self.osd_id = prepare_utils.create_id(
+ self.osd_fsid, json.dumps(self.secrets), self.osd_id)
+
+ if self.encrypted:
+ self.prepare_dmcrypt()
+
+ self.prepare_osd_req(tmpfs=tmpfs)
+
+ # prepare the osd filesystem
+ self.osd_mkfs()
+
+ def _activate(self, osd_id: str, osd_fsid: str) -> None:
+ # mount on tmpfs the osd directory
+ self.osd_path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
+ if not system.path_is_mounted(self.osd_path):
+ # mkdir -p and mount as tmpfs
+ prepare_utils.create_osd_path(osd_id, tmpfs=not self.args.no_tmpfs)
+
+ # XXX This needs to be removed once ceph-bluestore-tool can deal with
+ # symlinks that exist in the osd dir
+
+ self.unlink_bs_symlinks()
+
+ # Once symlinks are removed, the osd dir can be primed again. chown
+ # first, regardless of what currently exists so that ``prime-osd-dir``
+ # can succeed even if permissions are somehow messed up
+ system.chown(self.osd_path)
+ prime_command = [
+ 'ceph-bluestore-tool',
+ 'prime-osd-dir',
+ '--path', self.osd_path,
+ '--no-mon-config',
+ '--dev', self.block_device_path,
+ ]
+ process.run(prime_command)
+
+ # always re-do the symlink regardless if it exists, so that the block,
+ # block.wal, and block.db devices that may have changed can be mapped
+ # correctly every time
+ prepare_utils.link_block(self.block_device_path, osd_id)
+
+ if self.db_device_path:
+ prepare_utils.link_db(self.db_device_path, osd_id, osd_fsid)
+
+ if self.wal_device_path:
+ prepare_utils.link_wal(self.wal_device_path, osd_id, osd_fsid)
+
+ system.chown(self.osd_path)
+ terminal.success("ceph-volume raw activate "
+ "successful for osd ID: %s" % osd_id)
+
+ @decorators.needs_root
+ def activate(self) -> None:
+ """Activate Ceph OSDs on the system.
+
+ This function activates Ceph Object Storage Daemons (OSDs) on the system.
+ It iterates over all block devices, checking if they have a LUKS2 signature and
+ are encrypted for Ceph. If a device's OSD fsid matches and it is enrolled with TPM2,
+ the function pre-activates it. After collecting the relevant devices, it attempts to
+ activate any OSDs found.
+
+ Raises:
+ RuntimeError: If no matching OSDs are found to activate.
+ """
+ assert self.devices or self.osd_id or self.osd_fsid
+
+ activated_any: bool = False
+
+ for d in disk.lsblk_all(abspath=True):
+ device: str = d.get('NAME', '')
+ luks2 = encryption_utils.CephLuks2(device)
+ if luks2.is_ceph_encrypted:
+ if luks2.is_tpm2_enrolled and self.osd_fsid == luks2.osd_fsid:
+ self.pre_activate_tpm2(device)
+ found = direct_report(self.devices)
+
+ holders = disk.get_block_device_holders()
+ for osd_uuid, meta in found.items():
+ realpath_device = os.path.realpath(meta['device'])
+ parent_device = holders.get(realpath_device)
+ if parent_device and any('ceph.cluster_fsid' in lv.lv_tags for lv in Device(parent_device).lvs):
+ continue
+ osd_id = meta['osd_id']
+ if self.osd_id is not None and str(osd_id) != str(self.osd_id):
+ continue
+ if self.osd_fsid is not None and osd_uuid != self.osd_fsid:
+ continue
+ self.block_device_path = meta.get('device')
+ self.db_device_path = meta.get('device_db', '')
+ self.wal_device_path = meta.get('device_wal', '')
+ logger.info(f'Activating osd.{osd_id} uuid {osd_uuid} cluster {meta["ceph_fsid"]}')
+ self._activate(osd_id, osd_uuid)
+ activated_any = True
+
+ if not activated_any:
+ raise RuntimeError('did not find any matching OSD to activate')
+
+ def pre_activate_tpm2(self, device: str) -> None:
+ """Pre-activate a TPM2-encrypted device for Ceph.
+
+ This function pre-activates a TPM2-encrypted device for Ceph by opening the
+ LUKS encryption, checking the BlueStore header, and renaming the device
+ mapper according to the BlueStore mapping type.
+
+ Args:
+ device (str): The path to the device to be pre-activated.
+
+ Raises:
+ RuntimeError: If the device does not have a BlueStore signature.
+ """
+ bs_mapping_type: Dict[str, str] = {'bluefs db': 'db',
+ 'bluefs wal': 'wal',
+ 'main': 'block'}
+ self.with_tpm = 1
+ self.temp_mapper: str = f'activating-{os.path.basename(device)}'
+ self.temp_mapper_path: str = f'/dev/mapper/{self.temp_mapper}'
+ if not disk.BlockSysFs(device).has_active_dmcrypt_mapper:
+ encryption_utils.luks_open(
+ '',
+ device,
+ self.temp_mapper,
+ self.with_tpm
+ )
+ bluestore_header: Dict[str, Any] = disk.get_bluestore_header(self.temp_mapper_path)
+ if not bluestore_header:
+ raise RuntimeError(f"{device} doesn't have BlueStore signature.")
+
+ kname: str = disk.get_parent_device_from_mapper(self.temp_mapper_path, abspath=False)
+ device_type = bs_mapping_type[bluestore_header[self.temp_mapper_path]['description']]
+ new_mapper: str = f'ceph-{self.osd_fsid}-{kname}-{device_type}-dmcrypt'
+ self.block_device_path = f'/dev/mapper/{new_mapper}'
+ self.devices.append(self.block_device_path)
+ # An option could be to simply rename the mapper but the uuid remains unchanged in sysfs
+ encryption_utils.luks_close(self.temp_mapper)
+ encryption_utils.luks_open('', device, new_mapper, self.with_tpm)
+++ /dev/null
-import logging
-import json
-import os
-from .bluestore import BlueStore
-from ceph_volume import terminal, decorators, conf, process
-from ceph_volume.util import system, disk
-from ceph_volume.util import prepare as prepare_utils
-from ceph_volume.util import encryption as encryption_utils
-from ceph_volume.util.device import Device
-from ceph_volume.devices.lvm.common import rollback_osd
-from ceph_volume.devices.raw.list import direct_report
-from typing import Any, Dict, List, Optional, TYPE_CHECKING
-
-if TYPE_CHECKING:
- import argparse
-
-logger = logging.getLogger(__name__)
-
-
-class RawBlueStore(BlueStore):
- def __init__(self, args: "argparse.Namespace") -> None:
- super().__init__(args)
- self.method = 'raw'
- self.devices: List[str] = getattr(args, 'devices', [])
- self.osd_id = getattr(self.args, 'osd_id', '')
- self.osd_fsid = getattr(self.args, 'osd_fsid', '')
- self.block_device_path = getattr(self.args, 'data', '')
- self.db_device_path = getattr(self.args, 'block_db', '')
- self.wal_device_path = getattr(self.args, 'block_wal', '')
-
- def prepare_dmcrypt(self) -> None:
- """
- Helper for devices that are encrypted. The operations needed for
- block, db, wal, devices are all the same
- """
-
- for device, device_type in [(self.block_device_path, 'block'),
- (self.db_device_path, 'db'),
- (self.wal_device_path, 'wal')]:
-
- if device:
- kname = disk.lsblk(device)['KNAME']
- mapping = 'ceph-{}-{}-{}-dmcrypt'.format(self.osd_fsid,
- kname,
- device_type)
- # format data device
- encryption_utils.luks_format(
- self.dmcrypt_key,
- device
- )
- if self.with_tpm:
- self.enroll_tpm2(device)
- encryption_utils.luks_open(
- self.dmcrypt_key,
- device,
- mapping,
- self.with_tpm
- )
- self.__dict__[f'{device_type}_device_path'] = \
- '/dev/mapper/{}'.format(mapping) # TODO(guits): need to preserve path or find a way to get the parent device from the mapper ?
-
- def safe_prepare(self,
- args: Optional["argparse.Namespace"] = None) -> None:
- """
- An intermediate step between `main()` and `prepare()` so that we can
- capture the `self.osd_id` in case we need to rollback
-
- :param args: Injected args, usually from `raw create` which compounds
- both `prepare` and `create`
- """
- if args is not None:
- self.args = args # This should be moved (to __init__ ?)
- try:
- self.prepare()
- except Exception:
- logger.exception('raw prepare was unable to complete')
- logger.info('will rollback OSD ID creation')
- rollback_osd(self.osd_id)
- raise
- dmcrypt_log = 'dmcrypt' if hasattr(args, 'dmcrypt') else 'clear'
- terminal.success("ceph-volume raw {} prepare "
- "successful for: {}".format(dmcrypt_log,
- self.args.data))
-
- @decorators.needs_root
- def prepare(self) -> None:
- self.osd_fsid = system.generate_uuid()
- crush_device_class = self.args.crush_device_class
- if self.encrypted and not self.with_tpm:
- self.dmcrypt_key = os.getenv('CEPH_VOLUME_DMCRYPT_SECRET', '')
- self.secrets['dmcrypt_key'] = self.dmcrypt_key
- if crush_device_class:
- self.secrets['crush_device_class'] = crush_device_class
-
- tmpfs = not self.args.no_tmpfs
-
- # reuse a given ID if it exists, otherwise create a new ID
- self.osd_id = prepare_utils.create_id(
- self.osd_fsid, json.dumps(self.secrets), self.osd_id)
-
- if self.encrypted:
- self.prepare_dmcrypt()
-
- self.prepare_osd_req(tmpfs=tmpfs)
-
- # prepare the osd filesystem
- self.osd_mkfs()
-
- def _activate(self, osd_id: str, osd_fsid: str) -> None:
- # mount on tmpfs the osd directory
- self.osd_path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
- if not system.path_is_mounted(self.osd_path):
- # mkdir -p and mount as tmpfs
- prepare_utils.create_osd_path(osd_id, tmpfs=not self.args.no_tmpfs)
-
- # XXX This needs to be removed once ceph-bluestore-tool can deal with
- # symlinks that exist in the osd dir
-
- self.unlink_bs_symlinks()
-
- # Once symlinks are removed, the osd dir can be 'primed again. chown
- # first, regardless of what currently exists so that ``prime-osd-dir``
- # can succeed even if permissions are somehow messed up
- system.chown(self.osd_path)
- prime_command = [
- 'ceph-bluestore-tool',
- 'prime-osd-dir',
- '--path', self.osd_path,
- '--no-mon-config',
- '--dev', self.block_device_path,
- ]
- process.run(prime_command)
-
- # always re-do the symlink regardless if it exists, so that the block,
- # block.wal, and block.db devices that may have changed can be mapped
- # correctly every time
- prepare_utils.link_block(self.block_device_path, osd_id)
-
- if self.db_device_path:
- prepare_utils.link_db(self.db_device_path, osd_id, osd_fsid)
-
- if self.wal_device_path:
- prepare_utils.link_wal(self.wal_device_path, osd_id, osd_fsid)
-
- system.chown(self.osd_path)
- terminal.success("ceph-volume raw activate "
- "successful for osd ID: %s" % osd_id)
-
- @decorators.needs_root
- def activate(self) -> None:
- """Activate Ceph OSDs on the system.
-
- This function activates Ceph Object Storage Daemons (OSDs) on the system.
- It iterates over all block devices, checking if they have a LUKS2 signature and
- are encrypted for Ceph. If a device's OSD fsid matches and it is enrolled with TPM2,
- the function pre-activates it. After collecting the relevant devices, it attempts to
- activate any OSDs found.
-
- Raises:
- RuntimeError: If no matching OSDs are found to activate.
- """
- assert self.devices or self.osd_id or self.osd_fsid
-
- activated_any: bool = False
-
- for d in disk.lsblk_all(abspath=True):
- device: str = d.get('NAME', '')
- luks2 = encryption_utils.CephLuks2(device)
- if luks2.is_ceph_encrypted:
- if luks2.is_tpm2_enrolled and self.osd_fsid == luks2.osd_fsid:
- self.pre_activate_tpm2(device)
- found = direct_report(self.devices)
-
- holders = disk.get_block_device_holders()
- for osd_uuid, meta in found.items():
- realpath_device = os.path.realpath(meta['device'])
- parent_device = holders.get(realpath_device)
- if parent_device and any('ceph.cluster_fsid' in lv.lv_tags for lv in Device(parent_device).lvs):
- continue
- osd_id = meta['osd_id']
- if self.osd_id is not None and str(osd_id) != str(self.osd_id):
- continue
- if self.osd_fsid is not None and osd_uuid != self.osd_fsid:
- continue
- self.block_device_path = meta.get('device')
- self.db_device_path = meta.get('device_db', '')
- self.wal_device_path = meta.get('device_wal', '')
- logger.info(f'Activating osd.{osd_id} uuid {osd_uuid} cluster {meta["ceph_fsid"]}')
- self._activate(osd_id, osd_uuid)
- activated_any = True
-
- if not activated_any:
- raise RuntimeError('did not find any matching OSD to activate')
-
- def pre_activate_tpm2(self, device: str) -> None:
- """Pre-activate a TPM2-encrypted device for Ceph.
-
- This function pre-activates a TPM2-encrypted device for Ceph by opening the
- LUKS encryption, checking the BlueStore header, and renaming the device
- mapper according to the BlueStore mapping type.
-
- Args:
- device (str): The path to the device to be pre-activated.
-
- Raises:
- RuntimeError: If the device does not have a BlueStore signature.
- """
- bs_mapping_type: Dict[str, str] = {'bluefs db': 'db',
- 'bluefs wal': 'wal',
- 'main': 'block'}
- self.with_tpm = 1
- self.temp_mapper: str = f'activating-{os.path.basename(device)}'
- self.temp_mapper_path: str = f'/dev/mapper/{self.temp_mapper}'
- if not disk.BlockSysFs(device).has_active_dmcrypt_mapper:
- encryption_utils.luks_open(
- '',
- device,
- self.temp_mapper,
- self.with_tpm
- )
- bluestore_header: Dict[str, Any] = disk.get_bluestore_header(self.temp_mapper_path)
- if not bluestore_header:
- raise RuntimeError(f"{device} doesn't have BlueStore signature.")
-
- kname: str = disk.get_parent_device_from_mapper(self.temp_mapper_path, abspath=False)
- device_type = bs_mapping_type[bluestore_header[self.temp_mapper_path]['description']]
- new_mapper: str = f'ceph-{self.osd_fsid}-{kname}-{device_type}-dmcrypt'
- self.block_device_path = f'/dev/mapper/{new_mapper}'
- self.devices.append(self.block_device_path)
- # An option could be to simply rename the mapper but the uuid remains unchanged in sysfs
- encryption_utils.luks_close(self.temp_mapper)
- encryption_utils.luks_open('', device, new_mapper, self.with_tpm)
--- /dev/null
+import os
+import pytest
+from unittest.mock import patch
+from ceph_volume import process, exceptions
+from ceph_volume.api import lvm as api
+
+
+class TestParseTags(object):
+
+ def test_no_tags_means_empty_dict(self):
+ result = api.parse_tags('')
+ assert result == {}
+
+ def test_single_tag_gets_parsed(self):
+ result = api.parse_tags('ceph.osd_something=1')
+ assert result == {'ceph.osd_something': '1'}
+
+ def test_non_ceph_tags_are_skipped(self):
+ result = api.parse_tags('foo')
+ assert result == {}
+
+ def test_mixed_non_ceph_tags(self):
+ result = api.parse_tags('foo,ceph.bar=1')
+ assert result == {'ceph.bar': '1'}
+
+ def test_multiple_csv_expands_in_dict(self):
+ result = api.parse_tags('ceph.osd_something=1,ceph.foo=2,ceph.fsid=0000')
+ # assert them piecemeal to avoid relying on dict ordering
+ assert result['ceph.osd_something'] == '1'
+ assert result['ceph.foo'] == '2'
+ assert result['ceph.fsid'] == '0000'
+
+
+class TestVolume:
+ def test_is_ceph_device(self):
+ lv_tags = "ceph.type=data,ceph.osd_id=0"
+ osd = api.Volume(lv_name='osd/volume', lv_tags=lv_tags)
+ assert api.is_ceph_device(osd)
+
+ @pytest.mark.parametrize('dev',
+ [api.VolumeGroup(vg_name='foo'),
+ api.Volume(lv_name='vg/no_osd',
+ lv_tags='',
+ lv_path='lv/path'),
+ api.Volume(lv_name='vg/no_osd',
+ lv_tags='ceph.osd_id=null',
+ lv_path='lv/path')])
+ def test_is_not_ceph_device(self, dev):
+ assert not api.is_ceph_device(dev)
+
+ def test_no_empty_lv_name(self):
+ with pytest.raises(ValueError):
+ api.Volume(lv_name='', lv_tags='')
+
+
+class TestVolumeGroup(object):
+
+ def test_volume_group_no_empty_name(self):
+ with pytest.raises(ValueError):
+ api.VolumeGroup(vg_name='')
+
+
+class TestVolumeGroupFree(object):
+
+ def test_integer_gets_produced(self):
+ vg = api.VolumeGroup(vg_name='nosize', vg_free_count=100, vg_extent_size=4194304)
+ assert vg.free == 100 * 4194304
+
+
+class TestCreateLVs(object):
+
+ def setup_method(self):
+ self.vg = api.VolumeGroup(vg_name='ceph',
+ vg_extent_size=1073741824,
+ vg_extent_count=99999999,
+ vg_free_count=999)
+
+ def test_creates_correct_lv_number_from_parts(self, monkeypatch):
+ monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw))
+ lvs = api.create_lvs(self.vg, parts=4)
+ assert len(lvs) == 4
+
+ def test_suffixes_the_size_arg(self, monkeypatch):
+ monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw))
+ lvs = api.create_lvs(self.vg, parts=4)
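+ # presumably 999 free extents split across 4 parts -> 999 // 4 == 249 each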
+ assert lvs[0][1]['extents'] == 249
+
+ def test_only_uses_free_size(self, monkeypatch):
+ monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw))
+ vg = api.VolumeGroup(vg_name='ceph',
+ vg_extent_size=1073741824,
+ vg_extent_count=99999999,
+ vg_free_count=1000)
+ lvs = api.create_lvs(vg, parts=4)
+ assert lvs[0][1]['extents'] == 250
+
+ def test_null_tags_are_set_by_default(self, monkeypatch):
+ monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw))
+ kwargs = api.create_lvs(self.vg, parts=4)[0][1]
+ assert list(kwargs['tags'].values()) == ['null', 'null', 'null', 'null']
+
+ def test_fallback_to_one_part(self, monkeypatch):
+ monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw))
+ lvs = api.create_lvs(self.vg)
+ assert len(lvs) == 1
+
+
+class TestVolumeGroupSizing(object):
+
+ def setup_method(self):
+ self.vg = api.VolumeGroup(vg_name='ceph',
+ vg_extent_size=1073741824,
+ vg_free_count=1024)
+
+ def test_parts_and_size_errors(self):
+ with pytest.raises(ValueError) as error:
+ self.vg.sizing(parts=4, size=10)
+ assert "Cannot process sizing" in str(error.value)
+
+ def test_zero_parts_produces_100_percent(self):
+ result = self.vg.sizing(parts=0)
+ assert result['percentages'] == 100
+
+ def test_two_parts_produces_50_percent(self):
+ result = self.vg.sizing(parts=2)
+ assert result['percentages'] == 50
+
+ def test_two_parts_produces_half_size(self):
+ result = self.vg.sizing(parts=2)
+ assert result['sizes'] == 512
+
+ def test_half_size_produces_round_sizes(self):
+ result = self.vg.sizing(size=512)
+ assert result['sizes'] == 512
+ assert result['percentages'] == 50
+ assert result['parts'] == 2
+
+ def test_bit_more_than_half_size_allocates_full_size(self):
+ # 513 doesn't fit twice into the 1024 available, so sizing just falls
+ # back to using the whole device
+ result = self.vg.sizing(size=513)
+ assert result['sizes'] == 1024
+ assert result['percentages'] == 100
+ assert result['parts'] == 1
+
+ def test_extents_are_halfed_rounded_down(self):
+ result = self.vg.sizing(size=512)
+ assert result['extents'] == 512
+
+ def test_bit_less_size_rounds_down(self):
+ result = self.vg.sizing(size=129)
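+ # Assumed arithmetic, for illustration only: 1024 free 1GiB extents // 129
+ # -> 7 parts, 1024 // 7 -> 146 GiB per part, and 100 // 7 -> 14 percent,
+ # everything rounded down.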
+ assert result['sizes'] == 146
+ assert result['percentages'] == 14
+ assert result['parts'] == 7
+
+ def test_unable_to_allocate_past_free_size(self):
+ with pytest.raises(exceptions.SizeAllocationError):
+ self.vg.sizing(size=2048)
+
+
+class TestRemoveLV(object):
+
+ def test_removes_lv(self, monkeypatch):
+ def mock_call(cmd, **kw):
+ return ('', '', 0)
+ monkeypatch.setattr(process, 'call', mock_call)
+ assert api.remove_lv("vg/lv")
+
+ def test_removes_lv_object(self, fake_call):
+ foo_volume = api.Volume(lv_name='foo', lv_path='/path', vg_name='foo_group', lv_tags='')
+ api.remove_lv(foo_volume)
+ # last argument from the list passed to process.call
+ assert fake_call.calls[0]['args'][0][-1] == '/path'
+
+ def test_fails_to_remove_lv(self, monkeypatch):
+ def mock_call(cmd, **kw):
+ return ('', '', 1)
+ monkeypatch.setattr(process, 'call', mock_call)
+ with pytest.raises(RuntimeError):
+ api.remove_lv("vg/lv")
+
+
+class TestCreateLV(object):
+
+ def setup_method(self):
+ self.foo_volume = api.Volume(lv_name='foo', lv_path='/path', vg_name='foo_group', lv_tags='')
+ self.foo_group = api.VolumeGroup(vg_name='foo_group',
+ vg_extent_size="4194304",
+ vg_extent_count="100",
+ vg_free_count="100")
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ def test_uses_size(self, m_get_single_lv, m_call, m_run, monkeypatch):
+ m_get_single_lv.return_value = self.foo_volume
+ api.create_lv('foo', '1234-abcd', vg=self.foo_group, size=419430400, tags={'ceph.type': 'data'})
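+ # illustrative: 419430400 bytes == 100 extents * 4194304 bytes, hence '-l 100'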
+ expected = ['lvcreate', '--yes', '-l', '100', '-n', 'foo-1234-abcd', 'foo_group']
+ m_run.assert_called_with(expected, run_on_host=True)
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ def test_uses_size_adjust_if_1percent_over(self, m_get_single_lv, m_call, m_run, monkeypatch):
+ foo_volume = api.Volume(lv_name='foo', lv_path='/path', vg_name='foo_group', lv_tags='')
+ foo_group = api.VolumeGroup(vg_name='foo_group',
+ vg_extent_size="4194304",
+ vg_extent_count="1000",
+ vg_free_count="1000")
+ m_get_single_lv.return_value = foo_volume
+ # 4232052736 is just under 1% over the available 4194304000 bytes
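+ # Assumed arithmetic, for illustration only: 1000 extents * 4194304 bytes
+ # = 4194304000 bytes available, and 4194304000 * 1.01 = 4236247040 is
+ # still above the requested size, so create_lv is expected to clamp the
+ # request to the full 1000 extents below.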
+ api.create_lv('foo', '1234-abcd', vg=foo_group, size=4232052736, tags={'ceph.type': 'data'})
+ expected = ['lvcreate', '--yes', '-l', '1000', '-n', 'foo-1234-abcd', 'foo_group']
+ m_run.assert_called_with(expected, run_on_host=True)
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ def test_uses_size_too_large(self, m_get_single_lv, m_call, m_run, monkeypatch):
+ m_get_single_lv.return_value = self.foo_volume
+ with pytest.raises(RuntimeError):
+ api.create_lv('foo', '1234-abcd', vg=self.foo_group, size=5368709120, tags={'ceph.type': 'data'})
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ def test_uses_extents(self, m_get_single_lv, m_call, m_run, monkeypatch):
+ m_get_single_lv.return_value = self.foo_volume
+ api.create_lv('foo', '1234-abcd', vg=self.foo_group, extents='50', tags={'ceph.type': 'data'})
+ expected = ['lvcreate', '--yes', '-l', '50', '-n', 'foo-1234-abcd', 'foo_group']
+ m_run.assert_called_with(expected, run_on_host=True)
+
+ @pytest.mark.parametrize("test_input,expected",
+ [(2, 50),
+ (3, 33),])
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ def test_uses_slots(self, m_get_single_lv, m_call, m_run, monkeypatch, test_input, expected):
+ m_get_single_lv.return_value = self.foo_volume
+ api.create_lv('foo', '1234-abcd', vg=self.foo_group, slots=test_input, tags={'ceph.type': 'data'})
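+ # slots appear to be converted to a percentage of the VG: 100 // slots
+ # (50 and 33 for the parametrized cases)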
+ expected = ['lvcreate', '--yes', '-l', str(expected), '-n', 'foo-1234-abcd', 'foo_group']
+ m_run.assert_called_with(expected, run_on_host=True)
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ def test_uses_all(self, m_get_single_lv, m_call, m_run, monkeypatch):
+ m_get_single_lv.return_value = self.foo_volume
+ api.create_lv('foo', '1234-abcd', vg=self.foo_group, tags={'ceph.type': 'data'})
+ expected = ['lvcreate', '--yes', '-l', '100%FREE', '-n', 'foo-1234-abcd', 'foo_group']
+ m_run.assert_called_with(expected, run_on_host=True)
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.Volume.set_tags')
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ def test_calls_to_set_tags_default(self, m_get_single_lv, m_set_tags, m_call, m_run, monkeypatch):
+ m_get_single_lv.return_value = self.foo_volume
+ api.create_lv('foo', '1234-abcd', vg=self.foo_group)
+ tags = {
+ "ceph.osd_id": "null",
+ "ceph.type": "null",
+ "ceph.cluster_fsid": "null",
+ "ceph.osd_fsid": "null",
+ }
+ m_set_tags.assert_called_with(tags)
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.Volume.set_tags')
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ def test_calls_to_set_tags_arg(self, m_get_single_lv, m_set_tags, m_call, m_run, monkeypatch):
+ m_get_single_lv.return_value = self.foo_volume
+ api.create_lv('foo', '1234-abcd', vg=self.foo_group, tags={'ceph.type': 'data'})
+ tags = {
+ "ceph.type": "data",
+ "ceph.data_device": "/path"
+ }
+ m_set_tags.assert_called_with(tags)
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.get_device_vgs')
+ @patch('ceph_volume.api.lvm.create_vg')
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ def test_create_vg(self, m_get_single_lv, m_create_vg, m_get_device_vgs, m_call,
+ m_run, monkeypatch):
+ m_get_single_lv.return_value = self.foo_volume
+ m_get_device_vgs.return_value = []
+ api.create_lv('foo', '1234-abcd', device='dev/foo', size='5G', tags={'ceph.type': 'data'})
+ m_create_vg.assert_called_with('dev/foo', name_prefix='ceph')
+
+
+class TestTags(object):
+
+ def setup_method(self):
+ self.foo_volume_clean = api.Volume(lv_name='foo_clean', lv_path='/pathclean',
+ vg_name='foo_group',
+ lv_tags='')
+ self.foo_volume = api.Volume(lv_name='foo', lv_path='/path',
+ vg_name='foo_group',
+ lv_tags='ceph.foo0=bar0,ceph.foo1=bar1,ceph.foo2=bar2')
+
+ def test_set_tag(self, monkeypatch, capture):
+ monkeypatch.setattr(process, 'run', capture)
+ monkeypatch.setattr(process, 'call', capture)
+ self.foo_volume_clean.set_tag('foo', 'bar')
+ expected = ['lvchange', '--addtag', 'foo=bar', '/pathclean']
+ assert capture.calls[0]['args'][0] == expected
+ assert self.foo_volume_clean.tags == {'foo': 'bar'}
+
+ def test_set_clear_tag(self, monkeypatch, capture):
+ monkeypatch.setattr(process, 'run', capture)
+ monkeypatch.setattr(process, 'call', capture)
+ self.foo_volume_clean.set_tag('foo', 'bar')
+ assert self.foo_volume_clean.tags == {'foo': 'bar'}
+ self.foo_volume_clean.clear_tag('foo')
+ expected = ['lvchange', '--deltag', 'foo=bar', '/pathclean']
+ assert self.foo_volume_clean.tags == {}
+ assert capture.calls[1]['args'][0] == expected
+
+ def test_set_tags(self, monkeypatch, capture):
+ monkeypatch.setattr(process, 'run', capture)
+ monkeypatch.setattr(process, 'call', capture)
+ tags = {'ceph.foo0': 'bar0', 'ceph.foo1': 'bar1', 'ceph.foo2': 'bar2'}
+ assert self.foo_volume.tags == tags
+
+ tags = {'ceph.foo0': 'bar0', 'ceph.foo1': 'baz1', 'ceph.foo2': 'baz2'}
+ self.foo_volume.set_tags(tags)
+ assert self.foo_volume.tags == tags
+
+ self.foo_volume.set_tag('ceph.foo1', 'other1')
+ tags['ceph.foo1'] = 'other1'
+ assert self.foo_volume.tags == tags
+
+ expected = [
+ sorted(['lvchange', '--deltag', 'ceph.foo0=bar0', '--deltag',
+ 'ceph.foo1=bar1', '--deltag', 'ceph.foo2=bar2', '/path']),
+ sorted(['lvchange', '--deltag', 'ceph.foo1=baz1', '/path']),
+ sorted(['lvchange', '--addtag', 'ceph.foo0=bar0', '--addtag',
+ 'ceph.foo1=baz1', '--addtag', 'ceph.foo2=baz2', '/path']),
+ sorted(['lvchange', '--addtag', 'ceph.foo1=other1', '/path']),
+ ]
+ # The order isn't guaranteed
+ for call in capture.calls:
+ assert sorted(call['args'][0]) in expected
+ assert len(capture.calls) == len(expected)
+
+ def test_clear_tags(self, monkeypatch, capture):
+ monkeypatch.setattr(process, 'run', capture)
+ monkeypatch.setattr(process, 'call', capture)
+ tags = {'ceph.foo0': 'bar0', 'ceph.foo1': 'bar1', 'ceph.foo2': 'bar2'}
+
+ self.foo_volume_clean.set_tags(tags)
+ assert self.foo_volume_clean.tags == tags
+ self.foo_volume_clean.clear_tags()
+ assert self.foo_volume_clean.tags == {}
+
+ expected = [
+ sorted(['lvchange', '--addtag', 'ceph.foo0=bar0', '--addtag',
+ 'ceph.foo1=bar1', '--addtag', 'ceph.foo2=bar2',
+ '/pathclean']),
+ sorted(['lvchange', '--deltag', 'ceph.foo0=bar0', '--deltag',
+ 'ceph.foo1=bar1', '--deltag', 'ceph.foo2=bar2',
+ '/pathclean']),
+ ]
+ # The order isn't guaranteed
+ for call in capture.calls:
+ assert sorted(call['args'][0]) in expected
+ assert len(capture.calls) == len(expected)
+
+
+class TestExtendVG(object):
+
+ def setup_method(self):
+ self.foo_volume = api.VolumeGroup(vg_name='foo', lv_tags='')
+
+ def test_uses_single_device_in_list(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
+ api.extend_vg(self.foo_volume, ['/dev/sda'])
+ expected = ['vgextend', '--force', '--yes', 'foo', '/dev/sda']
+ assert fake_run.calls[0]['args'][0] == expected
+
+ def test_uses_single_device(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
+ api.extend_vg(self.foo_volume, '/dev/sda')
+ expected = ['vgextend', '--force', '--yes', 'foo', '/dev/sda']
+ assert fake_run.calls[0]['args'][0] == expected
+
+ def test_uses_multiple_devices(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
+ api.extend_vg(self.foo_volume, ['/dev/sda', '/dev/sdb'])
+ expected = ['vgextend', '--force', '--yes', 'foo', '/dev/sda', '/dev/sdb']
+ assert fake_run.calls[0]['args'][0] == expected
+
+
+class TestReduceVG(object):
+
+ def setup_method(self):
+ self.foo_volume = api.VolumeGroup(vg_name='foo', lv_tags='')
+
+ def test_uses_single_device_in_list(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
+ api.reduce_vg(self.foo_volume, ['/dev/sda'])
+ expected = ['vgreduce', '--force', '--yes', 'foo', '/dev/sda']
+ assert fake_run.calls[0]['args'][0] == expected
+
+ def test_uses_single_device(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
+ api.reduce_vg(self.foo_volume, '/dev/sda')
+ expected = ['vgreduce', '--force', '--yes', 'foo', '/dev/sda']
+ assert fake_run.calls[0]['args'][0] == expected
+
+ def test_uses_multiple_devices(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
+ api.reduce_vg(self.foo_volume, ['/dev/sda', '/dev/sdb'])
+ expected = ['vgreduce', '--force', '--yes', 'foo', '/dev/sda', '/dev/sdb']
+ assert fake_run.calls[0]['args'][0] == expected
+
+
+class TestCreateVG(object):
+
+ def setup_method(self):
+ self.foo_volume = api.VolumeGroup(vg_name='foo', lv_tags='')
+
+ def test_no_name(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
+ api.create_vg('/dev/sda')
+ result = fake_run.calls[0]['args'][0]
+ assert '/dev/sda' in result
+ assert result[-2].startswith('ceph-')
+
+ def test_devices_list(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
+ api.create_vg(['/dev/sda', '/dev/sdb'], name='ceph')
+ result = fake_run.calls[0]['args'][0]
+ expected = ['vgcreate', '--force', '--yes', 'ceph', '/dev/sda', '/dev/sdb']
+ assert result == expected
+
+ def test_name_prefix(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
+ api.create_vg('/dev/sda', name_prefix='master')
+ result = fake_run.calls[0]['args'][0]
+ assert '/dev/sda' in result
+ assert result[-2].startswith('master-')
+
+ def test_specific_name(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
+ api.create_vg('/dev/sda', name='master')
+ result = fake_run.calls[0]['args'][0]
+ assert '/dev/sda' in result
+ assert result[-2] == 'master'
+
+#
+# The following tests are pretty gnarly. VDO detection is very convoluted and
+# involves correlating information from device mappers, realpaths, slaves of
+# those mappers, and parents or related mappers. This makes it very hard to
+# patch nicely or keep tests short and readable. These tests try to ensure
+# correctness; the better approach would be to do some functional testing
+# with VDO.
+#
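+# Rough sketch of the sysfs layout these tests emulate (an illustration of
+# the fixtures below, not an authoritative description of VDO):
+#
+# /sys/kvdo -> the kvdo module is loaded
+# /sys/block/vdo0/slaves/dm-4 -> dm-4 is a slave of the vdo0 mapper
+# /sys/block/dm-4/slaves/dm-3 -> dm-4 sits above (is a parent of) dm-3
+#
+# _vdo_slaves() and _vdo_parents() walk those directories, and _is_vdo()
+# reports a match when the queried device shows up in either listing.
+#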
+
+
+@pytest.fixture
+def disable_kvdo_path(monkeypatch):
+ monkeypatch.setattr('os.path.isdir', lambda x, **kw: False)
+
+
+@pytest.fixture
+def enable_kvdo_path(monkeypatch):
+ monkeypatch.setattr('os.path.isdir', lambda x, **kw: True)
+
+
+# Stub for os.listdir
+
+
+class ListDir(object):
+
+ def __init__(self, paths):
+ self.paths = paths
+ self._normalize_paths()
+ self.listdir = os.listdir
+
+ def _normalize_paths(self):
+ # iterate over a copy so rewriting keys can't change the dict mid-loop
+ for k, v in list(self.paths.items()):
+ self.paths[k.rstrip('/')] = v.rstrip('/')
+
+ def add(self, original, fake):
+ self.paths[original.rstrip('/')] = fake.rstrip('/')
+
+ def __call__(self, path):
+ return self.listdir(self.paths[path.rstrip('/')])
+
+
+@pytest.fixture(scope='function')
+def listdir(monkeypatch):
+ def apply(paths=None, stub=None):
+ if not stub:
+ stub = ListDir(paths)
+ if paths:
+ for original, fake in paths.items():
+ stub.add(original, fake)
+
+ monkeypatch.setattr('os.listdir', stub)
+ return apply
+
+
+@pytest.fixture(scope='function')
+def makedirs(tmpdir):
+ def create(directory):
+ path = os.path.join(str(tmpdir), directory)
+ os.makedirs(path)
+ return path
+ create.base = str(tmpdir)
+ return create
+
+
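+# How the helpers above fit together (an illustrative sketch; the real calls
+# appear in the tests below):
+#
+# kvdo_path = makedirs('sys/kvdo') # real directory under tmpdir
+# listdir(paths={'/sys/kvdo': kvdo_path}) # patch os.listdir
+# os.listdir('/sys/kvdo') # now lists the tmpdir copy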
+class TestIsVdo(object):
+
+ def test_no_vdo_dir(self, disable_kvdo_path):
+ assert api._is_vdo('/path') is False
+
+ def test_exceptions_return_false(self, monkeypatch):
+ def throw():
+ raise Exception()
+ monkeypatch.setattr('ceph_volume.api.lvm._is_vdo', throw)
+ assert api.is_vdo('/path') == '0'
+
+ def test_is_vdo_returns_a_string(self, monkeypatch):
+ monkeypatch.setattr('ceph_volume.api.lvm._is_vdo', lambda x, **kw: True)
+ assert api.is_vdo('/path') == '1'
+
+ def test_kvdo_dir_no_devices(self, makedirs, enable_kvdo_path, listdir, monkeypatch):
+ kvdo_path = makedirs('sys/kvdo')
+ listdir(paths={'/sys/kvdo': kvdo_path})
+ monkeypatch.setattr('ceph_volume.api.lvm._vdo_slaves', lambda x, **kw: [])
+ monkeypatch.setattr('ceph_volume.api.lvm._vdo_parents', lambda x, **kw: [])
+ assert api._is_vdo('/dev/mapper/vdo0') is False
+
+ def test_vdo_slaves_found_and_matched(self, makedirs, enable_kvdo_path, listdir, monkeypatch):
+ kvdo_path = makedirs('sys/kvdo')
+ listdir(paths={'/sys/kvdo': kvdo_path})
+ monkeypatch.setattr('ceph_volume.api.lvm._vdo_slaves', lambda x, **kw: ['/dev/dm-3'])
+ monkeypatch.setattr('ceph_volume.api.lvm._vdo_parents', lambda x, **kw: [])
+ assert api._is_vdo('/dev/dm-3') is True
+
+ def test_vdo_parents_found_and_matched(self, makedirs, enable_kvdo_path, listdir, monkeypatch):
+ kvdo_path = makedirs('sys/kvdo')
+ listdir(paths={'/sys/kvdo': kvdo_path})
+ monkeypatch.setattr('ceph_volume.api.lvm._vdo_slaves', lambda x, **kw: [])
+ monkeypatch.setattr('ceph_volume.api.lvm._vdo_parents', lambda x, **kw: ['/dev/dm-4'])
+ assert api._is_vdo('/dev/dm-4') is True
+
+
+class TestVdoSlaves(object):
+
+ def test_slaves_are_not_found(self, makedirs, listdir, monkeypatch):
+ slaves_path = makedirs('sys/block/vdo0/slaves')
+ listdir(paths={'/sys/block/vdo0/slaves': slaves_path})
+ monkeypatch.setattr('ceph_volume.api.lvm.os.path.exists', lambda x, **kw: True)
+ result = sorted(api._vdo_slaves(['vdo0']))
+ assert '/dev/mapper/vdo0' in result
+ assert 'vdo0' in result
+
+ def test_slaves_are_found(self, makedirs, listdir, monkeypatch):
+ slaves_path = makedirs('sys/block/vdo0/slaves')
+ makedirs('sys/block/vdo0/slaves/dm-4')
+ makedirs('dev/mapper/vdo0')
+ listdir(paths={'/sys/block/vdo0/slaves': slaves_path})
+ monkeypatch.setattr('ceph_volume.api.lvm.os.path.exists', lambda x, **kw: True)
+ result = sorted(api._vdo_slaves(['vdo0']))
+ assert '/dev/dm-4' in result
+ assert 'dm-4' in result
+
+
+class TestVDOParents(object):
+
+ def test_parents_are_found(self, makedirs, listdir):
+ block_path = makedirs('sys/block')
+ slaves_path = makedirs('sys/block/dm-4/slaves')
+ makedirs('sys/block/dm-4/slaves/dm-3')
+ listdir(paths={
+ '/sys/block/dm-4/slaves': slaves_path,
+ '/sys/block': block_path})
+ result = api._vdo_parents(['dm-3'])
+ assert '/dev/dm-4' in result
+ assert 'dm-4' in result
+
+ def test_parents_are_not_found(self, makedirs, listdir):
+ block_path = makedirs('sys/block')
+ slaves_path = makedirs('sys/block/dm-4/slaves')
+ makedirs('sys/block/dm-4/slaves/dm-5')
+ listdir(paths={
+ '/sys/block/dm-4/slaves': slaves_path,
+ '/sys/block': block_path})
+ result = api._vdo_parents(['dm-3'])
+ assert result == []
+
+
+class TestSplitNameParser(object):
+
+ def test_keys_are_parsed_without_prefix(self):
+ line = ["DM_VG_NAME='/dev/mapper/vg';DM_LV_NAME='lv';DM_LV_LAYER=''"]
+ result = api._splitname_parser(line)
+ assert result['VG_NAME'] == 'vg'
+ assert result['LV_NAME'] == 'lv'
+ assert result['LV_LAYER'] == ''
+
+ def test_vg_name_sans_mapper(self):
+ line = ["DM_VG_NAME='/dev/mapper/vg';DM_LV_NAME='lv';DM_LV_LAYER=''"]
+ result = api._splitname_parser(line)
+ assert '/dev/mapper' not in result['VG_NAME']
+
+
+class TestGetDeviceVgs(object):
+
+ @patch('ceph_volume.process.call')
+ @patch('ceph_volume.api.lvm._output_parser')
+ def test_get_device_vgs_with_empty_pv(self, patched_output_parser, pcall):
+ patched_output_parser.return_value = [{'vg_name': ''}]
+ pcall.return_value = ('', '', '')
+ vgs = api.get_device_vgs('/dev/foo')
+ assert vgs == []
+
+class TestGetDeviceLvs(object):
+
+ @patch('ceph_volume.process.call')
+ @patch('ceph_volume.api.lvm._output_parser')
+ def test_get_device_lvs_with_empty_vg(self, patched_output_parser, pcall):
+ patched_output_parser.return_value = [{'lv_name': ''}]
+ pcall.return_value = ('', '', '')
+ vgs = api.get_device_lvs('/dev/foo')
+ assert vgs == []
+
+
+# NOTE: api.convert_filters_to_str() and api.convert_tags_to_str() should get
+# tested automatically while testing api.make_filters_lvmcmd_ready()
+class TestMakeFiltersLVMCMDReady(object):
+
+ def test_with_no_filters_and_no_tags(self):
+ retval = api.make_filters_lvmcmd_ready(None, None)
+
+ assert isinstance(retval, str)
+ assert retval == ''
+
+ def test_with_filters_and_no_tags(self):
+ filters = {'lv_name': 'lv1', 'lv_path': '/dev/sda'}
+
+ retval = api.make_filters_lvmcmd_ready(filters, None)
+
+ assert isinstance(retval, str)
+ for k, v in filters.items():
+ assert k in retval
+ assert v in retval
+
+ def test_with_no_filters_and_with_tags(self):
+ tags = {'ceph.type': 'data', 'ceph.osd_id': '0'}
+
+ retval = api.make_filters_lvmcmd_ready(None, tags)
+
+ assert isinstance(retval, str)
+ assert 'tags' in retval
+ for k, v in tags.items():
+ assert k in retval
+ assert v in retval
+ assert retval.find('tags') < retval.find(k) < retval.find(v)
+
+ def test_with_filters_and_tags(self):
+ filters = {'lv_name': 'lv1', 'lv_path': '/dev/sda'}
+ tags = {'ceph.type': 'data', 'ceph.osd_id': '0'}
+
+ retval = api.make_filters_lvmcmd_ready(filters, tags)
+
+ assert isinstance(retval, str)
+ for f, t in zip(filters.items(), tags.items()):
+ assert f[0] in retval
+ assert f[1] in retval
+ assert t[0] in retval
+ assert t[1] in retval
+ assert retval.find(f[0]) < retval.find(f[1]) < \
+ retval.find('tags') < retval.find(t[0]) < retval.find(t[1])
+
+
+class TestGetPVs(object):
+
+ def test_get_pvs(self, monkeypatch):
+ pv1 = api.PVolume(pv_name='/dev/sda', pv_uuid='0000', pv_tags={},
+ vg_name='vg1')
+ pv2 = api.PVolume(pv_name='/dev/sdb', pv_uuid='0001', pv_tags={},
+ vg_name='vg2')
+ pvs = [pv1, pv2]
+ stdout = ['{};{};{};{};;'.format(pv1.pv_name, pv1.pv_tags, pv1.pv_uuid, pv1.vg_name),
+ '{};{};{};{};;'.format(pv2.pv_name, pv2.pv_tags, pv2.pv_uuid, pv2.vg_name)]
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
+
+ pvs_ = api.get_pvs()
+ assert len(pvs_) == len(pvs)
+ for pv, pv_ in zip(pvs, pvs_):
+ assert pv_.pv_name == pv.pv_name
+
+ def test_get_pvs_single_pv(self, monkeypatch):
+ pv1 = api.PVolume(pv_name='/dev/sda', pv_uuid='0000', pv_tags={},
+ vg_name='vg1')
+ pvs = [pv1]
+ stdout = ['{};;;;;;'.format(pv1.pv_name)]
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
+
+ pvs_ = api.get_pvs()
+ assert len(pvs_) == 1
+ assert pvs_[0].pv_name == pvs[0].pv_name
+
+ def test_get_pvs_empty(self, monkeypatch):
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: ('', '', 0))
+ assert api.get_pvs() == []
+
+
+class TestGetVGs(object):
+
+ def test_get_vgs(self, monkeypatch):
+ vg1 = api.VolumeGroup(vg_name='vg1')
+ vg2 = api.VolumeGroup(vg_name='vg2')
+ vgs = [vg1, vg2]
+ stdout = ['{};;;;;;'.format(vg1.vg_name),
+ '{};;;;;;'.format(vg2.vg_name)]
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
+
+ vgs_ = api.get_vgs()
+ assert len(vgs_) == len(vgs)
+ for vg, vg_ in zip(vgs, vgs_):
+ assert vg_.vg_name == vg.vg_name
+
+ def test_get_vgs_single_vg(self, monkeypatch):
+ vg1 = api.VolumeGroup(vg_name='vg')
+ vgs = [vg1]
+ stdout = ['{};;;;;;'.format(vg1.vg_name)]
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
+
+ vgs_ = api.get_vgs()
+ assert len(vgs_) == 1
+ assert vgs_[0].vg_name == vgs[0].vg_name
+
+ def test_get_vgs_empty(self, monkeypatch):
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: ('', '', 0))
+ assert api.get_vgs() == []
+
+
+class TestGetLVs(object):
+
+ def test_get_lvs(self, monkeypatch):
+ lv1 = api.Volume(lv_tags='ceph.type=data', lv_path='/dev/vg1/lv1',
+ lv_name='lv1', vg_name='vg1')
+ lv2 = api.Volume(lv_tags='ceph.type=data', lv_path='/dev/vg2/lv2',
+ lv_name='lv2', vg_name='vg2')
+ lvs = [lv1, lv2]
+ stdout = ['{};{};{};{}'.format(lv1.lv_tags, lv1.lv_path, lv1.lv_name,
+ lv1.vg_name),
+ '{};{};{};{}'.format(lv2.lv_tags, lv2.lv_path, lv2.lv_name,
+ lv2.vg_name)]
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
+
+ lvs_ = api.get_lvs()
+ assert len(lvs_) == len(lvs)
+ for lv, lv_ in zip(lvs, lvs_):
+ assert lv.__dict__ == lv_.__dict__
+
+ def test_get_lvs_single_lv(self, monkeypatch):
+ stdout = ['ceph.type=data;/dev/vg/lv;lv;vg']
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
+ lvs = []
+ lvs.append((api.Volume(lv_tags='ceph.type=data',
+ lv_path='/dev/vg/lv',
+ lv_name='lv', vg_name='vg')))
+
+ lvs_ = api.get_lvs()
+ assert len(lvs_) == len(lvs)
+ assert lvs[0].__dict__ == lvs_[0].__dict__
+
+ def test_get_lvs_empty(self, monkeypatch):
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: ('', '', 0))
+ assert api.get_lvs() == []
+
+
+class TestGetSinglePV(object):
+
+ @patch('ceph_volume.api.lvm.get_pvs')
+ def test_get_single_pv_multiple_matches_raises_runtimeerror(self, m_get_pvs):
+ fake_pvs = []
+ fake_pvs.append(api.PVolume(pv_name='/dev/sda', pv_tags={}))
+ fake_pvs.append(api.PVolume(pv_name='/dev/sdb', pv_tags={}))
+
+ m_get_pvs.return_value = fake_pvs
+
+ with pytest.raises(RuntimeError) as e:
+ api.get_single_pv()
+ assert "matched more than 1 PV present on this host." in str(e.value)
+
+ @patch('ceph_volume.api.lvm.get_pvs')
+ def test_get_single_pv_no_match_returns_none(self, m_get_pvs):
+ m_get_pvs.return_value = []
+
+ pv = api.get_single_pv()
+ assert pv is None
+
+ @patch('ceph_volume.api.lvm.get_pvs')
+ def test_get_single_pv_one_match(self, m_get_pvs):
+ fake_pvs = []
+ fake_pvs.append(api.PVolume(pv_name='/dev/sda', pv_tags={}))
+ m_get_pvs.return_value = fake_pvs
+
+ pv = api.get_single_pv()
+
+ assert isinstance(pv, api.PVolume)
+ assert pv.name == '/dev/sda'
+
+
+class TestGetSingleVG(object):
+
+ @patch('ceph_volume.api.lvm.get_vgs')
+ def test_get_single_vg_multiple_matches_raises_runtimeerror(self, m_get_vgs):
+ fake_vgs = []
+ fake_vgs.append(api.VolumeGroup(vg_name='vg1'))
+ fake_vgs.append(api.VolumeGroup(vg_name='vg2'))
+
+ m_get_vgs.return_value = fake_vgs
+
+ with pytest.raises(RuntimeError) as e:
+ api.get_single_vg()
+ assert "matched more than 1 VG present on this host." in str(e.value)
+
+ @patch('ceph_volume.api.lvm.get_vgs')
+ def test_get_single_vg_no_match_returns_none(self, m_get_vgs):
+ m_get_vgs.return_value = []
+
+ vg = api.get_single_vg()
+ assert vg is None
+
+ @patch('ceph_volume.api.lvm.get_vgs')
+ def test_get_single_vg_one_match(self, m_get_vgs):
+ fake_vgs = []
+ fake_vgs.append(api.VolumeGroup(vg_name='vg1'))
+ m_get_vgs.return_value = fake_vgs
+
+ vg = api.get_single_vg()
+
+ assert isinstance(vg, api.VolumeGroup)
+ assert vg.name == 'vg1'
+
+class TestGetSingleLV(object):
+
+ @patch('ceph_volume.api.lvm.get_lvs')
+ def test_get_single_lv_multiple_matches_raises_runtimeerror(self, m_get_lvs):
+ fake_lvs = []
+ fake_lvs.append(api.Volume(lv_name='lv1',
+ lv_path='/dev/vg1/lv1',
+ vg_name='vg1',
+ lv_tags='',
+ lv_uuid='fake-uuid'))
+ fake_lvs.append(api.Volume(lv_name='lv1',
+ lv_path='/dev/vg2/lv1',
+ vg_name='vg2',
+ lv_tags='',
+ lv_uuid='fake-uuid'))
+ m_get_lvs.return_value = fake_lvs
+
+ with pytest.raises(RuntimeError) as e:
+ api.get_single_lv()
+ assert "matched more than 1 LV present on this host" in str(e.value)
+
+ @patch('ceph_volume.api.lvm.get_lvs')
+ def test_get_single_lv_no_match_returns_none(self, m_get_lvs):
+ m_get_lvs.return_value = []
+
+ lv = api.get_single_lv()
+ assert lv is None
+
+ @patch('ceph_volume.api.lvm.get_lvs')
+ def test_get_single_lv_one_match(self, m_get_lvs):
+ fake_lvs = []
+ fake_lvs.append(api.Volume(lv_name='lv1', lv_path='/dev/vg1/lv1', vg_name='vg1', lv_tags='', lv_uuid='fake-uuid'))
+ m_get_lvs.return_value = fake_lvs
+
+ lv_ = api.get_single_lv()
+
+ assert isinstance(lv_, api.Volume)
+ assert lv_.name == 'lv1'
+++ /dev/null
-import os
-import pytest
-from unittest.mock import patch
-from ceph_volume import process, exceptions
-from ceph_volume.api import lvm as api
-
-
-class TestParseTags(object):
-
- def test_no_tags_means_empty_dict(self):
- result = api.parse_tags('')
- assert result == {}
-
- def test_single_tag_gets_parsed(self):
- result = api.parse_tags('ceph.osd_something=1')
- assert result == {'ceph.osd_something': '1'}
-
- def test_non_ceph_tags_are_skipped(self):
- result = api.parse_tags('foo')
- assert result == {}
-
- def test_mixed_non_ceph_tags(self):
- result = api.parse_tags('foo,ceph.bar=1')
- assert result == {'ceph.bar': '1'}
-
- def test_multiple_csv_expands_in_dict(self):
- result = api.parse_tags('ceph.osd_something=1,ceph.foo=2,ceph.fsid=0000')
- # assert them piecemeal to avoid the un-ordered dict nature
- assert result['ceph.osd_something'] == '1'
- assert result['ceph.foo'] == '2'
- assert result['ceph.fsid'] == '0000'
-
-
-class TestVolume:
- def test_is_ceph_device(self):
- lv_tags = "ceph.type=data,ceph.osd_id=0"
- osd = api.Volume(lv_name='osd/volume', lv_tags=lv_tags)
- assert api.is_ceph_device(osd)
-
- @pytest.mark.parametrize('dev',
- [api.VolumeGroup(vg_name='foo'),
- api.Volume(lv_name='vg/no_osd',
- lv_tags='',
- lv_path='lv/path'),
- api.Volume(lv_name='vg/no_osd',
- lv_tags='ceph.osd_id=null',
- lv_path='lv/path')])
- def test_is_not_ceph_device(self, dev):
- assert not api.is_ceph_device(dev)
-
- def test_no_empty_lv_name(self):
- with pytest.raises(ValueError):
- api.Volume(lv_name='', lv_tags='')
-
-
-class TestVolumeGroup(object):
-
- def test_volume_group_no_empty_name(self):
- with pytest.raises(ValueError):
- api.VolumeGroup(vg_name='')
-
-
-class TestVolumeGroupFree(object):
-
- def test_integer_gets_produced(self):
- vg = api.VolumeGroup(vg_name='nosize', vg_free_count=100, vg_extent_size=4194304)
- assert vg.free == 100 * 4194304
-
-
-class TestCreateLVs(object):
-
- def setup_method(self):
- self.vg = api.VolumeGroup(vg_name='ceph',
- vg_extent_size=1073741824,
- vg_extent_count=99999999,
- vg_free_count=999)
-
- def test_creates_correct_lv_number_from_parts(self, monkeypatch):
- monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw))
- lvs = api.create_lvs(self.vg, parts=4)
- assert len(lvs) == 4
-
- def test_suffixes_the_size_arg(self, monkeypatch):
- monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw))
- lvs = api.create_lvs(self.vg, parts=4)
- assert lvs[0][1]['extents'] == 249
-
- def test_only_uses_free_size(self, monkeypatch):
- monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw))
- vg = api.VolumeGroup(vg_name='ceph',
- vg_extent_size=1073741824,
- vg_extent_count=99999999,
- vg_free_count=1000)
- lvs = api.create_lvs(vg, parts=4)
- assert lvs[0][1]['extents'] == 250
-
- def test_null_tags_are_set_by_default(self, monkeypatch):
- monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw))
- kwargs = api.create_lvs(self.vg, parts=4)[0][1]
- assert list(kwargs['tags'].values()) == ['null', 'null', 'null', 'null']
-
- def test_fallback_to_one_part(self, monkeypatch):
- monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw))
- lvs = api.create_lvs(self.vg)
- assert len(lvs) == 1
-
-
-class TestVolumeGroupSizing(object):
-
- def setup_method(self):
- self.vg = api.VolumeGroup(vg_name='ceph',
- vg_extent_size=1073741824,
- vg_free_count=1024)
-
- def test_parts_and_size_errors(self):
- with pytest.raises(ValueError) as error:
- self.vg.sizing(parts=4, size=10)
- assert "Cannot process sizing" in str(error.value)
-
- def test_zero_parts_produces_100_percent(self):
- result = self.vg.sizing(parts=0)
- assert result['percentages'] == 100
-
- def test_two_parts_produces_50_percent(self):
- result = self.vg.sizing(parts=2)
- assert result['percentages'] == 50
-
- def test_two_parts_produces_half_size(self):
- result = self.vg.sizing(parts=2)
- assert result['sizes'] == 512
-
- def test_half_size_produces_round_sizes(self):
- result = self.vg.sizing(size=512)
- assert result['sizes'] == 512
- assert result['percentages'] == 50
- assert result['parts'] == 2
-
- def test_bit_more_than_half_size_allocates_full_size(self):
- # 513 can't allocate more than 1, so it just fallsback to using the
- # whole device
- result = self.vg.sizing(size=513)
- assert result['sizes'] == 1024
- assert result['percentages'] == 100
- assert result['parts'] == 1
-
- def test_extents_are_halfed_rounded_down(self):
- result = self.vg.sizing(size=512)
- assert result['extents'] == 512
-
- def test_bit_less_size_rounds_down(self):
- result = self.vg.sizing(size=129)
- assert result['sizes'] == 146
- assert result['percentages'] == 14
- assert result['parts'] == 7
-
- def test_unable_to_allocate_past_free_size(self):
- with pytest.raises(exceptions.SizeAllocationError):
- self.vg.sizing(size=2048)
-
-
-class TestRemoveLV(object):
-
- def test_removes_lv(self, monkeypatch):
- def mock_call(cmd, **kw):
- return ('', '', 0)
- monkeypatch.setattr(process, 'call', mock_call)
- assert api.remove_lv("vg/lv")
-
- def test_removes_lv_object(self, fake_call):
- foo_volume = api.Volume(lv_name='foo', lv_path='/path', vg_name='foo_group', lv_tags='')
- api.remove_lv(foo_volume)
- # last argument from the list passed to process.call
- assert fake_call.calls[0]['args'][0][-1] == '/path'
-
- def test_fails_to_remove_lv(self, monkeypatch):
- def mock_call(cmd, **kw):
- return ('', '', 1)
- monkeypatch.setattr(process, 'call', mock_call)
- with pytest.raises(RuntimeError):
- api.remove_lv("vg/lv")
-
-
-class TestCreateLV(object):
-
- def setup_method(self):
- self.foo_volume = api.Volume(lv_name='foo', lv_path='/path', vg_name='foo_group', lv_tags='')
- self.foo_group = api.VolumeGroup(vg_name='foo_group',
- vg_extent_size="4194304",
- vg_extent_count="100",
- vg_free_count="100")
-
- @patch('ceph_volume.api.lvm.process.run')
- @patch('ceph_volume.api.lvm.process.call')
- @patch('ceph_volume.api.lvm.get_single_lv')
- def test_uses_size(self, m_get_single_lv, m_call, m_run, monkeypatch):
- m_get_single_lv.return_value = self.foo_volume
- api.create_lv('foo', '1234-abcd', vg=self.foo_group, size=419430400, tags={'ceph.type': 'data'})
- expected = (['lvcreate', '--yes', '-l', '100', '-n', 'foo-1234-abcd', 'foo_group'])
- m_run.assert_called_with(expected, run_on_host=True)
-
- @patch('ceph_volume.api.lvm.process.run')
- @patch('ceph_volume.api.lvm.process.call')
- @patch('ceph_volume.api.lvm.get_single_lv')
- def test_uses_size_adjust_if_1percent_over(self, m_get_single_lv, m_call, m_run, monkeypatch):
- foo_volume = api.Volume(lv_name='foo', lv_path='/path', vg_name='foo_group', lv_tags='')
- foo_group = api.VolumeGroup(vg_name='foo_group',
- vg_extent_size="4194304",
- vg_extent_count="1000",
- vg_free_count="1000")
- m_get_single_lv.return_value = foo_volume
- # 423624704 should be just under 1% off of the available size 419430400
- api.create_lv('foo', '1234-abcd', vg=foo_group, size=4232052736, tags={'ceph.type': 'data'})
- expected = ['lvcreate', '--yes', '-l', '1000', '-n', 'foo-1234-abcd', 'foo_group']
- m_run.assert_called_with(expected, run_on_host=True)
-
- @patch('ceph_volume.api.lvm.process.run')
- @patch('ceph_volume.api.lvm.process.call')
- @patch('ceph_volume.api.lvm.get_single_lv')
- def test_uses_size_too_large(self, m_get_single_lv, m_call, m_run, monkeypatch):
- m_get_single_lv.return_value = self.foo_volume
- with pytest.raises(RuntimeError):
- api.create_lv('foo', '1234-abcd', vg=self.foo_group, size=5368709120, tags={'ceph.type': 'data'})
-
- @patch('ceph_volume.api.lvm.process.run')
- @patch('ceph_volume.api.lvm.process.call')
- @patch('ceph_volume.api.lvm.get_single_lv')
- def test_uses_extents(self, m_get_single_lv, m_call, m_run, monkeypatch):
- m_get_single_lv.return_value = self.foo_volume
- api.create_lv('foo', '1234-abcd', vg=self.foo_group, extents='50', tags={'ceph.type': 'data'})
- expected = ['lvcreate', '--yes', '-l', '50', '-n', 'foo-1234-abcd', 'foo_group']
- m_run.assert_called_with(expected, run_on_host=True)
-
- @pytest.mark.parametrize("test_input,expected",
- [(2, 50),
- (3, 33),])
- @patch('ceph_volume.api.lvm.process.run')
- @patch('ceph_volume.api.lvm.process.call')
- @patch('ceph_volume.api.lvm.get_single_lv')
- def test_uses_slots(self, m_get_single_lv, m_call, m_run, monkeypatch, test_input, expected):
- m_get_single_lv.return_value = self.foo_volume
- api.create_lv('foo', '1234-abcd', vg=self.foo_group, slots=test_input, tags={'ceph.type': 'data'})
- expected = ['lvcreate', '--yes', '-l', str(expected), '-n', 'foo-1234-abcd', 'foo_group']
- m_run.assert_called_with(expected, run_on_host=True)
-
- @patch('ceph_volume.api.lvm.process.run')
- @patch('ceph_volume.api.lvm.process.call')
- @patch('ceph_volume.api.lvm.get_single_lv')
- def test_uses_all(self, m_get_single_lv, m_call, m_run, monkeypatch):
- m_get_single_lv.return_value = self.foo_volume
- api.create_lv('foo', '1234-abcd', vg=self.foo_group, tags={'ceph.type': 'data'})
- expected = ['lvcreate', '--yes', '-l', '100%FREE', '-n', 'foo-1234-abcd', 'foo_group']
- m_run.assert_called_with(expected, run_on_host=True)
-
- @patch('ceph_volume.api.lvm.process.run')
- @patch('ceph_volume.api.lvm.process.call')
- @patch('ceph_volume.api.lvm.Volume.set_tags')
- @patch('ceph_volume.api.lvm.get_single_lv')
- def test_calls_to_set_tags_default(self, m_get_single_lv, m_set_tags, m_call, m_run, monkeypatch):
- m_get_single_lv.return_value = self.foo_volume
- api.create_lv('foo', '1234-abcd', vg=self.foo_group)
- tags = {
- "ceph.osd_id": "null",
- "ceph.type": "null",
- "ceph.cluster_fsid": "null",
- "ceph.osd_fsid": "null",
- }
- m_set_tags.assert_called_with(tags)
-
- @patch('ceph_volume.api.lvm.process.run')
- @patch('ceph_volume.api.lvm.process.call')
- @patch('ceph_volume.api.lvm.Volume.set_tags')
- @patch('ceph_volume.api.lvm.get_single_lv')
- def test_calls_to_set_tags_arg(self, m_get_single_lv, m_set_tags, m_call, m_run, monkeypatch):
- m_get_single_lv.return_value = self.foo_volume
- api.create_lv('foo', '1234-abcd', vg=self.foo_group, tags={'ceph.type': 'data'})
- tags = {
- "ceph.type": "data",
- "ceph.data_device": "/path"
- }
- m_set_tags.assert_called_with(tags)
-
- @patch('ceph_volume.api.lvm.process.run')
- @patch('ceph_volume.api.lvm.process.call')
- @patch('ceph_volume.api.lvm.get_device_vgs')
- @patch('ceph_volume.api.lvm.create_vg')
- @patch('ceph_volume.api.lvm.get_single_lv')
- def test_create_vg(self, m_get_single_lv, m_create_vg, m_get_device_vgs, m_call,
- m_run, monkeypatch):
- m_get_single_lv.return_value = self.foo_volume
- m_get_device_vgs.return_value = []
- api.create_lv('foo', '1234-abcd', device='dev/foo', size='5G', tags={'ceph.type': 'data'})
- m_create_vg.assert_called_with('dev/foo', name_prefix='ceph')
-
-
-class TestTags(object):
-
- def setup_method(self):
- self.foo_volume_clean = api.Volume(lv_name='foo_clean', lv_path='/pathclean',
- vg_name='foo_group',
- lv_tags='')
- self.foo_volume = api.Volume(lv_name='foo', lv_path='/path',
- vg_name='foo_group',
- lv_tags='ceph.foo0=bar0,ceph.foo1=bar1,ceph.foo2=bar2')
-
- def test_set_tag(self, monkeypatch, capture):
- monkeypatch.setattr(process, 'run', capture)
- monkeypatch.setattr(process, 'call', capture)
- self.foo_volume_clean.set_tag('foo', 'bar')
- expected = ['lvchange', '--addtag', 'foo=bar', '/pathclean']
- assert capture.calls[0]['args'][0] == expected
- assert self.foo_volume_clean.tags == {'foo': 'bar'}
-
- def test_set_clear_tag(self, monkeypatch, capture):
- monkeypatch.setattr(process, 'run', capture)
- monkeypatch.setattr(process, 'call', capture)
- self.foo_volume_clean.set_tag('foo', 'bar')
- assert self.foo_volume_clean.tags == {'foo': 'bar'}
- self.foo_volume_clean.clear_tag('foo')
- expected = ['lvchange', '--deltag', 'foo=bar', '/pathclean']
- assert self.foo_volume_clean.tags == {}
- assert capture.calls[1]['args'][0] == expected
-
- def test_set_tags(self, monkeypatch, capture):
- monkeypatch.setattr(process, 'run', capture)
- monkeypatch.setattr(process, 'call', capture)
- tags = {'ceph.foo0': 'bar0', 'ceph.foo1': 'bar1', 'ceph.foo2': 'bar2'}
- assert self.foo_volume.tags == tags
-
- tags = {'ceph.foo0': 'bar0', 'ceph.foo1': 'baz1', 'ceph.foo2': 'baz2'}
- self.foo_volume.set_tags(tags)
- assert self.foo_volume.tags == tags
-
- self.foo_volume.set_tag('ceph.foo1', 'other1')
- tags['ceph.foo1'] = 'other1'
- assert self.foo_volume.tags == tags
-
- expected = [
- sorted(['lvchange', '--deltag', 'ceph.foo0=bar0', '--deltag',
- 'ceph.foo1=bar1', '--deltag', 'ceph.foo2=bar2', '/path']),
- sorted(['lvchange', '--deltag', 'ceph.foo1=baz1', '/path']),
- sorted(['lvchange', '--addtag', 'ceph.foo0=bar0', '--addtag',
- 'ceph.foo1=baz1', '--addtag', 'ceph.foo2=baz2', '/path']),
- sorted(['lvchange', '--addtag', 'ceph.foo1=other1', '/path']),
- ]
- # The order isn't guaranted
- for call in capture.calls:
- assert sorted(call['args'][0]) in expected
- assert len(capture.calls) == len(expected)
-
- def test_clear_tags(self, monkeypatch, capture):
- monkeypatch.setattr(process, 'run', capture)
- monkeypatch.setattr(process, 'call', capture)
- tags = {'ceph.foo0': 'bar0', 'ceph.foo1': 'bar1', 'ceph.foo2': 'bar2'}
-
- self.foo_volume_clean.set_tags(tags)
- assert self.foo_volume_clean.tags == tags
- self.foo_volume_clean.clear_tags()
- assert self.foo_volume_clean.tags == {}
-
- expected = [
- sorted(['lvchange', '--addtag', 'ceph.foo0=bar0', '--addtag',
- 'ceph.foo1=bar1', '--addtag', 'ceph.foo2=bar2',
- '/pathclean']),
- sorted(['lvchange', '--deltag', 'ceph.foo0=bar0', '--deltag',
- 'ceph.foo1=bar1', '--deltag', 'ceph.foo2=bar2',
- '/pathclean']),
- ]
- # The order isn't guaranted
- for call in capture.calls:
- assert sorted(call['args'][0]) in expected
- assert len(capture.calls) == len(expected)
-
-
-class TestExtendVG(object):
-
- def setup_method(self):
- self.foo_volume = api.VolumeGroup(vg_name='foo', lv_tags='')
-
- def test_uses_single_device_in_list(self, monkeypatch, fake_run):
- monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
- api.extend_vg(self.foo_volume, ['/dev/sda'])
- expected = ['vgextend', '--force', '--yes', 'foo', '/dev/sda']
- assert fake_run.calls[0]['args'][0] == expected
-
- def test_uses_single_device(self, monkeypatch, fake_run):
- monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
- api.extend_vg(self.foo_volume, '/dev/sda')
- expected = ['vgextend', '--force', '--yes', 'foo', '/dev/sda']
- assert fake_run.calls[0]['args'][0] == expected
-
- def test_uses_multiple_devices(self, monkeypatch, fake_run):
- monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
- api.extend_vg(self.foo_volume, ['/dev/sda', '/dev/sdb'])
- expected = ['vgextend', '--force', '--yes', 'foo', '/dev/sda', '/dev/sdb']
- assert fake_run.calls[0]['args'][0] == expected
-
-
-class TestReduceVG(object):
-
- def setup_method(self):
- self.foo_volume = api.VolumeGroup(vg_name='foo', lv_tags='')
-
- def test_uses_single_device_in_list(self, monkeypatch, fake_run):
- monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
- api.reduce_vg(self.foo_volume, ['/dev/sda'])
- expected = ['vgreduce', '--force', '--yes', 'foo', '/dev/sda']
- assert fake_run.calls[0]['args'][0] == expected
-
- def test_uses_single_device(self, monkeypatch, fake_run):
- monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
- api.reduce_vg(self.foo_volume, '/dev/sda')
- expected = ['vgreduce', '--force', '--yes', 'foo', '/dev/sda']
- assert fake_run.calls[0]['args'][0] == expected
-
- def test_uses_multiple_devices(self, monkeypatch, fake_run):
- monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
- api.reduce_vg(self.foo_volume, ['/dev/sda', '/dev/sdb'])
- expected = ['vgreduce', '--force', '--yes', 'foo', '/dev/sda', '/dev/sdb']
- assert fake_run.calls[0]['args'][0] == expected
-
-
-class TestCreateVG(object):
-
- def setup_method(self):
- self.foo_volume = api.VolumeGroup(vg_name='foo', lv_tags='')
-
- def test_no_name(self, monkeypatch, fake_run):
- monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
- api.create_vg('/dev/sda')
- result = fake_run.calls[0]['args'][0]
- assert '/dev/sda' in result
- assert result[-2].startswith('ceph-')
-
- def test_devices_list(self, monkeypatch, fake_run):
- monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
- api.create_vg(['/dev/sda', '/dev/sdb'], name='ceph')
- result = fake_run.calls[0]['args'][0]
- expected = ['vgcreate', '--force', '--yes', 'ceph', '/dev/sda', '/dev/sdb']
- assert result == expected
-
- def test_name_prefix(self, monkeypatch, fake_run):
- monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
- api.create_vg('/dev/sda', name_prefix='master')
- result = fake_run.calls[0]['args'][0]
- assert '/dev/sda' in result
- assert result[-2].startswith('master-')
-
- def test_specific_name(self, monkeypatch, fake_run):
- monkeypatch.setattr(api, 'get_single_vg', lambda **kw: True)
- api.create_vg('/dev/sda', name='master')
- result = fake_run.calls[0]['args'][0]
- assert '/dev/sda' in result
- assert result[-2] == 'master'
-
-#
-# The following tests are pretty gnarly. VDO detection is very convoluted and
-# involves correlating information from device mappers, realpaths, slaves of
-# those mappers, and parents or related mappers. This makes it very hard to
-# patch nicely or keep tests short and readable. These tests are trying to
-# ensure correctness, the better approach will be to do some functional testing
-# with VDO.
-#
-
-
-@pytest.fixture
-def disable_kvdo_path(monkeypatch):
- monkeypatch.setattr('os.path.isdir', lambda x, **kw: False)
-
-
-@pytest.fixture
-def enable_kvdo_path(monkeypatch):
- monkeypatch.setattr('os.path.isdir', lambda x, **kw: True)
-
-
-# Stub for os.listdir
-
-
-class ListDir(object):
-
- def __init__(self, paths):
- self.paths = paths
- self._normalize_paths()
- self.listdir = os.listdir
-
- def _normalize_paths(self):
- for k, v in self.paths.items():
- self.paths[k.rstrip('/')] = v.rstrip('/')
-
- def add(self, original, fake):
- self.paths[original.rstrip('/')] = fake.rstrip('/')
-
- def __call__(self, path):
- return self.listdir(self.paths[path.rstrip('/')])
-
-
-@pytest.fixture(scope='function')
-def listdir(monkeypatch):
- def apply(paths=None, stub=None):
- if not stub:
- stub = ListDir(paths)
- if paths:
- for original, fake in paths.items():
- stub.add(original, fake)
-
- monkeypatch.setattr('os.listdir', stub)
- return apply
-
-
-@pytest.fixture(scope='function')
-def makedirs(tmpdir):
- def create(directory):
- path = os.path.join(str(tmpdir), directory)
- os.makedirs(path)
- return path
- create.base = str(tmpdir)
- return create
-
-
-class TestIsVdo(object):
-
- def test_no_vdo_dir(self, disable_kvdo_path):
- assert api._is_vdo('/path') is False
-
- def test_exceptions_return_false(self, monkeypatch):
- def throw():
- raise Exception()
- monkeypatch.setattr('ceph_volume.api.lvm._is_vdo', throw)
- assert api.is_vdo('/path') == '0'
-
- def test_is_vdo_returns_a_string(self, monkeypatch):
- monkeypatch.setattr('ceph_volume.api.lvm._is_vdo', lambda x, **kw: True)
- assert api.is_vdo('/path') == '1'
-
- def test_kvdo_dir_no_devices(self, makedirs, enable_kvdo_path, listdir, monkeypatch):
- kvdo_path = makedirs('sys/kvdo')
- listdir(paths={'/sys/kvdo': kvdo_path})
- monkeypatch.setattr('ceph_volume.api.lvm._vdo_slaves', lambda x, **kw: [])
- monkeypatch.setattr('ceph_volume.api.lvm._vdo_parents', lambda x, **kw: [])
- assert api._is_vdo('/dev/mapper/vdo0') is False
-
- def test_vdo_slaves_found_and_matched(self, makedirs, enable_kvdo_path, listdir, monkeypatch):
- kvdo_path = makedirs('sys/kvdo')
- listdir(paths={'/sys/kvdo': kvdo_path})
- monkeypatch.setattr('ceph_volume.api.lvm._vdo_slaves', lambda x, **kw: ['/dev/dm-3'])
- monkeypatch.setattr('ceph_volume.api.lvm._vdo_parents', lambda x, **kw: [])
- assert api._is_vdo('/dev/dm-3') is True
-
- def test_vdo_parents_found_and_matched(self, makedirs, enable_kvdo_path, listdir, monkeypatch):
- kvdo_path = makedirs('sys/kvdo')
- listdir(paths={'/sys/kvdo': kvdo_path})
- monkeypatch.setattr('ceph_volume.api.lvm._vdo_slaves', lambda x, **kw: [])
- monkeypatch.setattr('ceph_volume.api.lvm._vdo_parents', lambda x, **kw: ['/dev/dm-4'])
- assert api._is_vdo('/dev/dm-4') is True
-
-
-class TestVdoSlaves(object):
-
- def test_slaves_are_not_found(self, makedirs, listdir, monkeypatch):
- slaves_path = makedirs('sys/block/vdo0/slaves')
- listdir(paths={'/sys/block/vdo0/slaves': slaves_path})
- monkeypatch.setattr('ceph_volume.api.lvm.os.path.exists', lambda x, **kw: True)
- result = sorted(api._vdo_slaves(['vdo0']))
- assert '/dev/mapper/vdo0' in result
- assert 'vdo0' in result
-
- def test_slaves_are_found(self, makedirs, listdir, monkeypatch):
- slaves_path = makedirs('sys/block/vdo0/slaves')
- makedirs('sys/block/vdo0/slaves/dm-4')
- makedirs('dev/mapper/vdo0')
- listdir(paths={'/sys/block/vdo0/slaves': slaves_path})
- monkeypatch.setattr('ceph_volume.api.lvm.os.path.exists', lambda x, **kw: True)
- result = sorted(api._vdo_slaves(['vdo0']))
- assert '/dev/dm-4' in result
- assert 'dm-4' in result
-
-
-class TestVDOParents(object):
-
- def test_parents_are_found(self, makedirs, listdir):
- block_path = makedirs('sys/block')
- slaves_path = makedirs('sys/block/dm-4/slaves')
- makedirs('sys/block/dm-4/slaves/dm-3')
- listdir(paths={
- '/sys/block/dm-4/slaves': slaves_path,
- '/sys/block': block_path})
- result = api._vdo_parents(['dm-3'])
- assert '/dev/dm-4' in result
- assert 'dm-4' in result
-
- def test_parents_are_not_found(self, makedirs, listdir):
- block_path = makedirs('sys/block')
- slaves_path = makedirs('sys/block/dm-4/slaves')
- makedirs('sys/block/dm-4/slaves/dm-5')
- listdir(paths={
- '/sys/block/dm-4/slaves': slaves_path,
- '/sys/block': block_path})
- result = api._vdo_parents(['dm-3'])
- assert result == []
-
-
-class TestSplitNameParser(object):
-
- def test_keys_are_parsed_without_prefix(self):
- line = ["DM_VG_NAME='/dev/mapper/vg';DM_LV_NAME='lv';DM_LV_LAYER=''"]
- result = api._splitname_parser(line)
- assert result['VG_NAME'] == 'vg'
- assert result['LV_NAME'] == 'lv'
- assert result['LV_LAYER'] == ''
-
- def test_vg_name_sans_mapper(self):
- line = ["DM_VG_NAME='/dev/mapper/vg';DM_LV_NAME='lv';DM_LV_LAYER=''"]
- result = api._splitname_parser(line)
- assert '/dev/mapper' not in result['VG_NAME']
-
-
-class TestGetDeviceVgs(object):
-
- @patch('ceph_volume.process.call')
- @patch('ceph_volume.api.lvm._output_parser')
- def test_get_device_vgs_with_empty_pv(self, patched_output_parser, pcall):
- patched_output_parser.return_value = [{'vg_name': ''}]
- pcall.return_value = ('', '', '')
- vgs = api.get_device_vgs('/dev/foo')
- assert vgs == []
-
-class TestGetDeviceLvs(object):
-
- @patch('ceph_volume.process.call')
- @patch('ceph_volume.api.lvm._output_parser')
- def test_get_device_lvs_with_empty_vg(self, patched_output_parser, pcall):
- patched_output_parser.return_value = [{'lv_name': ''}]
- pcall.return_value = ('', '', '')
- vgs = api.get_device_lvs('/dev/foo')
- assert vgs == []
-
-
-# NOTE: api.convert_filters_to_str() and api.convert_tags_to_str() should get
-# tested automatically while testing api.make_filters_lvmcmd_ready()
-class TestMakeFiltersLVMCMDReady(object):
-
- def test_with_no_filters_and_no_tags(self):
- retval = api.make_filters_lvmcmd_ready(None, None)
-
- assert isinstance(retval, str)
- assert retval == ''
-
- def test_with_filters_and_no_tags(self):
- filters = {'lv_name': 'lv1', 'lv_path': '/dev/sda'}
-
- retval = api.make_filters_lvmcmd_ready(filters, None)
-
- assert isinstance(retval, str)
- for k, v in filters.items():
- assert k in retval
- assert v in retval
-
- def test_with_no_filters_and_with_tags(self):
- tags = {'ceph.type': 'data', 'ceph.osd_id': '0'}
-
- retval = api.make_filters_lvmcmd_ready(None, tags)
-
- assert isinstance(retval, str)
- assert 'tags' in retval
- for k, v in tags.items():
- assert k in retval
- assert v in retval
- assert retval.find('tags') < retval.find(k) < retval.find(v)
-
- def test_with_filters_and_tags(self):
- filters = {'lv_name': 'lv1', 'lv_path': '/dev/sda'}
- tags = {'ceph.type': 'data', 'ceph.osd_id': '0'}
-
- retval = api.make_filters_lvmcmd_ready(filters, tags)
-
- assert isinstance(retval, str)
- for f, t in zip(filters.items(), tags.items()):
- assert f[0] in retval
- assert f[1] in retval
- assert t[0] in retval
- assert t[1] in retval
- assert retval.find(f[0]) < retval.find(f[1]) < \
- retval.find('tags') < retval.find(t[0]) < retval.find(t[1])
-
-
-class TestGetPVs(object):
-
- def test_get_pvs(self, monkeypatch):
- pv1 = api.PVolume(pv_name='/dev/sda', pv_uuid='0000', pv_tags={},
- vg_name='vg1')
- pv2 = api.PVolume(pv_name='/dev/sdb', pv_uuid='0001', pv_tags={},
- vg_name='vg2')
- pvs = [pv1, pv2]
- stdout = ['{};{};{};{};;'.format(pv1.pv_name, pv1.pv_tags, pv1.pv_uuid, pv1.vg_name),
- '{};{};{};{};;'.format(pv2.pv_name, pv2.pv_tags, pv2.pv_uuid, pv2.vg_name)]
- monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
-
- pvs_ = api.get_pvs()
- assert len(pvs_) == len(pvs)
- for pv, pv_ in zip(pvs, pvs_):
- assert pv_.pv_name == pv.pv_name
-
- def test_get_pvs_single_pv(self, monkeypatch):
- pv1 = api.PVolume(pv_name='/dev/sda', pv_uuid='0000', pv_tags={},
- vg_name='vg1')
- pvs = [pv1]
- stdout = ['{};;;;;;'.format(pv1.pv_name)]
- monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
-
- pvs_ = api.get_pvs()
- assert len(pvs_) == 1
- assert pvs_[0].pv_name == pvs[0].pv_name
-
- def test_get_pvs_empty(self, monkeypatch):
- monkeypatch.setattr(api.process, 'call', lambda x,**kw: ('', '', 0))
- assert api.get_pvs() == []
-
-
-class TestGetVGs(object):
-
- def test_get_vgs(self, monkeypatch):
- vg1 = api.VolumeGroup(vg_name='vg1')
- vg2 = api.VolumeGroup(vg_name='vg2')
- vgs = [vg1, vg2]
- stdout = ['{};;;;;;'.format(vg1.vg_name),
- '{};;;;;;'.format(vg2.vg_name)]
- monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
-
- vgs_ = api.get_vgs()
- assert len(vgs_) == len(vgs)
- for vg, vg_ in zip(vgs, vgs_):
- assert vg_.vg_name == vg.vg_name
-
- def test_get_vgs_single_vg(self, monkeypatch):
- vg1 = api.VolumeGroup(vg_name='vg'); vgs = [vg1]
- stdout = ['{};;;;;;'.format(vg1.vg_name)]
- monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
-
- vgs_ = api.get_vgs()
- assert len(vgs_) == 1
- assert vgs_[0].vg_name == vgs[0].vg_name
-
- def test_get_vgs_empty(self, monkeypatch):
- monkeypatch.setattr(api.process, 'call', lambda x,**kw: ('', '', 0))
- assert api.get_vgs() == []
-
-
-class TestGetLVs(object):
-
- def test_get_lvs(self, monkeypatch):
- lv1 = api.Volume(lv_tags='ceph.type=data', lv_path='/dev/vg1/lv1',
- lv_name='lv1', vg_name='vg1')
- lv2 = api.Volume(lv_tags='ceph.type=data', lv_path='/dev/vg2/lv2',
- lv_name='lv2', vg_name='vg2')
- lvs = [lv1, lv2]
- stdout = ['{};{};{};{}'.format(lv1.lv_tags, lv1.lv_path, lv1.lv_name,
- lv1.vg_name),
- '{};{};{};{}'.format(lv2.lv_tags, lv2.lv_path, lv2.lv_name,
- lv2.vg_name)]
- monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
-
- lvs_ = api.get_lvs()
- assert len(lvs_) == len(lvs)
- for lv, lv_ in zip(lvs, lvs_):
- assert lv.__dict__ == lv_.__dict__
-
- def test_get_lvs_single_lv(self, monkeypatch):
- stdout = ['ceph.type=data;/dev/vg/lv;lv;vg']
- monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
- lvs = []
- lvs.append((api.Volume(lv_tags='ceph.type=data',
- lv_path='/dev/vg/lv',
- lv_name='lv', vg_name='vg')))
-
- lvs_ = api.get_lvs()
- assert len(lvs_) == len(lvs)
- assert lvs[0].__dict__ == lvs_[0].__dict__
-
- def test_get_lvs_empty(self, monkeypatch):
- monkeypatch.setattr(api.process, 'call', lambda x,**kw: ('', '', 0))
- assert api.get_lvs() == []
-
-
-class TestGetSinglePV(object):
-
- @patch('ceph_volume.api.lvm.get_pvs')
- def test_get_single_pv_multiple_matches_raises_runtimeerror(self, m_get_pvs):
- fake_pvs = []
- fake_pvs.append(api.PVolume(pv_name='/dev/sda', pv_tags={}))
- fake_pvs.append(api.PVolume(pv_name='/dev/sdb', pv_tags={}))
-
- m_get_pvs.return_value = fake_pvs
-
- with pytest.raises(RuntimeError) as e:
- api.get_single_pv()
- assert "matched more than 1 PV present on this host." in str(e.value)
-
- @patch('ceph_volume.api.lvm.get_pvs')
- def test_get_single_pv_no_match_returns_none(self, m_get_pvs):
- m_get_pvs.return_value = []
-
- pv = api.get_single_pv()
- assert pv == None
-
- @patch('ceph_volume.api.lvm.get_pvs')
- def test_get_single_pv_one_match(self, m_get_pvs):
- fake_pvs = []
- fake_pvs.append(api.PVolume(pv_name='/dev/sda', pv_tags={}))
- m_get_pvs.return_value = fake_pvs
-
- pv = api.get_single_pv()
-
- assert isinstance(pv, api.PVolume)
- assert pv.name == '/dev/sda'
-
-
-class TestGetSingleVG(object):
-
- @patch('ceph_volume.api.lvm.get_vgs')
- def test_get_single_vg_multiple_matches_raises_runtimeerror(self, m_get_vgs):
- fake_vgs = []
- fake_vgs.append(api.VolumeGroup(vg_name='vg1'))
- fake_vgs.append(api.VolumeGroup(vg_name='vg2'))
-
- m_get_vgs.return_value = fake_vgs
-
- with pytest.raises(RuntimeError) as e:
- api.get_single_vg()
- assert "matched more than 1 VG present on this host." in str(e.value)
-
- @patch('ceph_volume.api.lvm.get_vgs')
- def test_get_single_vg_no_match_returns_none(self, m_get_vgs):
- m_get_vgs.return_value = []
-
- vg = api.get_single_vg()
- assert vg == None
-
- @patch('ceph_volume.api.lvm.get_vgs')
- def test_get_single_vg_one_match(self, m_get_vgs):
- fake_vgs = []
- fake_vgs.append(api.VolumeGroup(vg_name='vg1'))
- m_get_vgs.return_value = fake_vgs
-
- vg = api.get_single_vg()
-
- assert isinstance(vg, api.VolumeGroup)
- assert vg.name == 'vg1'
-
-class TestGetSingleLV(object):
-
- @patch('ceph_volume.api.lvm.get_lvs')
- def test_get_single_lv_multiple_matches_raises_runtimeerror(self, m_get_lvs):
- fake_lvs = []
- fake_lvs.append(api.Volume(lv_name='lv1',
- lv_path='/dev/vg1/lv1',
- vg_name='vg1',
- lv_tags='',
- lv_uuid='fake-uuid'))
- fake_lvs.append(api.Volume(lv_name='lv1',
- lv_path='/dev/vg2/lv1',
- vg_name='vg2',
- lv_tags='',
- lv_uuid='fake-uuid'))
- m_get_lvs.return_value = fake_lvs
-
- with pytest.raises(RuntimeError) as e:
- api.get_single_lv()
- assert "matched more than 1 LV present on this host" in str(e.value)
-
- @patch('ceph_volume.api.lvm.get_lvs')
- def test_get_single_lv_no_match_returns_none(self, m_get_lvs):
- m_get_lvs.return_value = []
-
- lv = api.get_single_lv()
- assert lv == None
-
- @patch('ceph_volume.api.lvm.get_lvs')
- def test_get_single_lv_one_match(self, m_get_lvs):
- fake_lvs = []
- fake_lvs.append(api.Volume(lv_name='lv1', lv_path='/dev/vg1/lv1', vg_name='vg1', lv_tags='', lv_uuid='fake-uuid'))
- m_get_lvs.return_value = fake_lvs
-
- lv_ = api.get_single_lv()
-
- assert isinstance(lv_, api.Volume)
- assert lv_.name == 'lv1'
from ceph_volume.util import disk
from ceph_volume.util import device
from ceph_volume.util.constants import ceph_disk_guids
-from ceph_volume import conf, configuration, objectstore
-from ceph_volume.objectstore.rawbluestore import RawBlueStore
+from ceph_volume import conf, configuration
+from ceph_volume.objectstore.baseobjectstore import BaseObjectStore
+from ceph_volume.objectstore.raw import Raw
from typing import Any, Dict, List, Optional, Callable
def factory() -> Callable[..., argparse.Namespace]:
return argparse.Namespace
-def objectstore_bluestore_factory(**kw):
- o = objectstore.bluestore.BlueStore([])
+def objectstore_factory(**kw):
+ o = BaseObjectStore([])
for k, v in kw.items():
setattr(o, k, v)
return o
@pytest.fixture
-def objectstore_bluestore():
- return objectstore_bluestore_factory
+def objectstore():
+ return objectstore_factory
@pytest.fixture
@pytest.fixture
def mock_lvm_direct_report(monkeypatch):
- monkeypatch.setattr('ceph_volume.objectstore.lvmbluestore.direct_report', lambda: lvm_direct_report_data)
+ monkeypatch.setattr('ceph_volume.objectstore.lvm.direct_report', lambda: lvm_direct_report_data)
@pytest.fixture
def mock_raw_direct_report(monkeypatch):
- monkeypatch.setattr('ceph_volume.objectstore.rawbluestore.direct_report', lambda x: raw_direct_report_data)
+ monkeypatch.setattr('ceph_volume.objectstore.raw.direct_report', lambda x: raw_direct_report_data)
@pytest.fixture
def fake_lsblk_all(monkeypatch: Any) -> Callable:
return apply
@pytest.fixture
-def rawbluestore(factory: type[Factory]) -> RawBlueStore:
+def rawbluestore(factory: type[Factory]) -> Raw:
args = factory(devices=['/dev/foo'])
- with patch('ceph_volume.objectstore.rawbluestore.prepare_utils.create_key', Mock(return_value=['AQCee6ZkzhOrJRAAZWSvNC3KdXOpC2w8ly4AZQ=='])):
- r = RawBlueStore(args) # type: ignore
+ with patch('ceph_volume.objectstore.raw.prepare_utils.create_key', Mock(return_value=['AQCee6ZkzhOrJRAAZWSvNC3KdXOpC2w8ly4AZQ=='])):
+ r = Raw(args) # type: ignore
return r
from ceph_volume.devices.lvm import activate
from ceph_volume.api import lvm as api
from ceph_volume.tests.conftest import Capture
-from ceph_volume import objectstore
+from ceph_volume.objectstore import lvm
#from ceph_volume.util.prepare import create_key
from unittest.mock import patch, call
from argparse import Namespace
volumes = []
volumes.append(FooVolume)
monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: volumes)
- monkeypatch.setattr(objectstore.lvmbluestore.LvmBlueStore,
+ monkeypatch.setattr(lvm.Lvm,
'_activate',
capture)
args = Args(osd_id=None, osd_fsid='1234', bluestore=True)
a = activate.Activate([])
- a.objectstore = objectstore.lvmbluestore.LvmBlueStore(args=args)
+ a.objectstore = lvm.Lvm(args=args)
a.objectstore.activate()
assert capture.calls[0]['args'][0] == [FooVolume]
def test_osd_id_no_osd_fsid(self, m_create_key, is_root):
args = Args(osd_id=42, osd_fsid=None)
a = activate.Activate([])
- a.objectstore = objectstore.lvmbluestore.LvmBlueStore(args=args)
+ a.objectstore = lvm.Lvm(args=args)
with pytest.raises(RuntimeError) as result:
a.objectstore.activate()
assert result.value.args[0] == 'could not activate osd.42, please provide the osd_fsid too'
def test_no_osd_id_no_osd_fsid(self, m_create_key, is_root):
args = Args(osd_id=None, osd_fsid=None)
a = activate.Activate([])
- a.objectstore = objectstore.lvmbluestore.LvmBlueStore(args=args)
+ a.objectstore = lvm.Lvm(args=args)
with pytest.raises(RuntimeError) as result:
a.objectstore.activate()
assert result.value.args[0] == 'Please provide both osd_id and osd_fsid'
monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda *a, **kw: True)
monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
- monkeypatch.setattr(objectstore.lvmbluestore.systemctl, 'enable_volume', fake_enable)
- monkeypatch.setattr(objectstore.lvmbluestore.systemctl, 'start_osd', fake_start_osd)
+ monkeypatch.setattr(lvm.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(lvm.systemctl, 'start_osd', fake_start_osd)
DataVolume = api.Volume(
lv_name='data',
lv_path='/dev/vg/data',
args = Args(osd_id=None, osd_fsid='1234', no_systemd=True, bluestore=True)
a = activate.Activate([])
- a.objectstore = objectstore.lvmbluestore.LvmBlueStore(args=args)
+ a.objectstore = lvm.Lvm(args=args)
a.objectstore.activate()
assert fake_enable.calls == []
assert fake_start_osd.calls == []
monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda *a, **kw: True)
monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
- monkeypatch.setattr(objectstore.lvmbluestore.systemctl, 'enable_volume', fake_enable)
- monkeypatch.setattr(objectstore.lvmbluestore.systemctl, 'start_osd', fake_start_osd)
+ monkeypatch.setattr(lvm.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(lvm.systemctl, 'start_osd', fake_start_osd)
DataVolume = api.Volume(
lv_name='data',
lv_path='/dev/vg/data',
args = Args(osd_id=None, osd_fsid='1234', no_systemd=False,
bluestore=True)
a = activate.Activate([])
- a.objectstore = objectstore.lvmbluestore.LvmBlueStore(args=args)
+ a.objectstore = lvm.Lvm(args=args)
a.objectstore.activate()
assert fake_enable.calls != []
assert fake_start_osd.calls != []
monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda *a, **kw: True)
monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
- monkeypatch.setattr(objectstore.lvmbluestore.systemctl, 'enable_volume', fake_enable)
- monkeypatch.setattr(objectstore.lvmbluestore.systemctl, 'start_osd', fake_start_osd)
+ monkeypatch.setattr(lvm.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(lvm.systemctl, 'start_osd', fake_start_osd)
DataVolume = api.Volume(
lv_name='data',
lv_path='/dev/vg/data',
args = Args(osd_id=None, osd_fsid='1234', no_systemd=True,
bluestore=True, auto_detect_objectstore=True)
a = activate.Activate([])
- a.objectstore = objectstore.lvmbluestore.LvmBlueStore(args=args)
+ a.objectstore = lvm.Lvm(args=args)
a.objectstore.activate()
assert fake_enable.calls == []
assert fake_start_osd.calls == []
monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw:
True)
monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
- monkeypatch.setattr(objectstore.lvmbluestore.systemctl, 'enable_volume', fake_enable)
- monkeypatch.setattr(objectstore.lvmbluestore.systemctl, 'start_osd', fake_start_osd)
+ monkeypatch.setattr(lvm.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(lvm.systemctl, 'start_osd', fake_start_osd)
DataVolume = api.Volume(
lv_name='data',
lv_path='/dev/vg/data',
args = Args(osd_id=None, osd_fsid='1234', no_systemd=False,
bluestore=True, auto_detect_objectstore=False)
a = activate.Activate([])
- a.objectstore = objectstore.lvmbluestore.LvmBlueStore(args=args)
+ a.objectstore = lvm.Lvm(args=args)
a.objectstore.activate()
assert fake_enable.calls != []
assert fake_start_osd.calls != []
@patch('ceph_volume.util.prepare.create_key', return_value='fake-secret')
-@patch('ceph_volume.objectstore.lvmbluestore.LvmBlueStore.activate_all')
-@patch('ceph_volume.objectstore.lvmbluestore.LvmBlueStore.activate')
+@patch('ceph_volume.objectstore.lvm.Lvm.activate_all')
+@patch('ceph_volume.objectstore.lvm.Lvm.activate')
class TestActivateFlags(object):
def test_default_objectstore(self, m_activate, m_activate_all, m_create_key, capture):
class TestActivateAll(object):
def test_does_not_detect_osds(self, m_create_key, capsys, is_root, monkeypatch):
- monkeypatch.setattr('ceph_volume.objectstore.lvmbluestore.direct_report', lambda: {})
+ monkeypatch.setattr('ceph_volume.objectstore.lvm.direct_report', lambda: {})
args = ['--all']
activation = activate.Activate(args)
activation.main()
assert 'Verify OSDs are present with ' in err
def test_detects_running_osds(self, m_create_key, capsys, is_root, capture, monkeypatch):
- monkeypatch.setattr('ceph_volume.objectstore.lvmbluestore.direct_report', lambda: direct_report)
- monkeypatch.setattr('ceph_volume.objectstore.lvmbluestore.systemctl.osd_is_active', lambda x: True)
+ monkeypatch.setattr('ceph_volume.objectstore.lvm.direct_report', lambda: direct_report)
+ monkeypatch.setattr('ceph_volume.objectstore.lvm.systemctl.osd_is_active', lambda x: True)
args = ['--all']
activation = activate.Activate(args)
activation.main()
assert 'a8789a96ce8b process is active. Skipping activation' in err
assert 'b8218eaa1634 process is active. Skipping activation' in err
- @patch('ceph_volume.objectstore.lvmbluestore.LvmBlueStore.activate')
+ @patch('ceph_volume.objectstore.lvm.Lvm.activate')
def test_detects_osds_to_activate_systemd(self, m_activate, m_create_key, is_root, monkeypatch):
- monkeypatch.setattr('ceph_volume.objectstore.lvmbluestore.direct_report', lambda: direct_report)
- monkeypatch.setattr('ceph_volume.objectstore.lvmbluestore.systemctl.osd_is_active', lambda x: False)
+ monkeypatch.setattr('ceph_volume.objectstore.lvm.direct_report', lambda: direct_report)
+ monkeypatch.setattr('ceph_volume.objectstore.lvm.systemctl.osd_is_active', lambda x: False)
args = ['--all', '--bluestore']
a = activate.Activate(args)
a.main()
]
m_activate.assert_has_calls(calls)
- @patch('ceph_volume.objectstore.lvmbluestore.LvmBlueStore.activate')
+ @patch('ceph_volume.objectstore.lvm.Lvm.activate')
def test_detects_osds_to_activate_no_systemd(self, m_activate, m_create_key, is_root, monkeypatch):
- monkeypatch.setattr('ceph_volume.objectstore.lvmbluestore.direct_report', lambda: direct_report)
+ monkeypatch.setattr('ceph_volume.objectstore.lvm.direct_report', lambda: direct_report)
args = ['--all', '--no-systemd', '--bluestore']
a = activate.Activate(args)
a.main()
import pytest
from ceph_volume.devices import raw
from unittest.mock import patch, MagicMock
-from ceph_volume import objectstore
+from ceph_volume.objectstore.raw import Raw
class TestRaw(object):
def _setup(self, **kw):
args = kw.get('args', [])
self.p = raw.prepare.Prepare([])
- self.p.objectstore = objectstore.rawbluestore.RawBlueStore(args=args)
+ self.p.objectstore = Raw(args=args)
for k, v in kw.items():
setattr(self.p.objectstore, k, v)
m_luks_format.assert_called_with(self.p.objectstore.dmcrypt_key, '/dev/wal-foo')
assert self.p.objectstore.__dict__['wal_device_path'] == '/dev/mapper/ceph-789-foo-wal-dmcrypt'
- @patch('ceph_volume.objectstore.rawbluestore.rollback_osd')
- @patch('ceph_volume.objectstore.rawbluestore.RawBlueStore.prepare')
+ @patch('ceph_volume.objectstore.raw.rollback_osd')
+ @patch('ceph_volume.objectstore.raw.Raw.prepare')
@patch('ceph_volume.util.arg_validators.ValidRawDevice.__call__')
def test_safe_prepare_exception_raised(self, m_valid_device, m_prepare, m_rollback_osd, m_create_key):
m_valid_device.return_value = '/dev/foo'
with pytest.raises(NotImplementedError):
BaseObjectStore([]).safe_prepare(args=None)
- def test_add_objectstore_opts(self):
- with pytest.raises(NotImplementedError):
- BaseObjectStore([]).add_objectstore_opts()
-
@patch('ceph_volume.util.prepare.create_osd_path')
@patch('ceph_volume.util.prepare.link_block')
@patch('ceph_volume.util.prepare.get_monmap')
'--setuser', 'ceph',
'--setgroup', 'ceph']
- def test_osd_mkfs_ok(self, monkeypatch, fake_call):
- bo = BaseObjectStore([])
+ def test_osd_mkfs_ok(self, monkeypatch, fake_call, objectstore):
+ args = objectstore(dmcrypt=False)
+ bo = BaseObjectStore(args)
bo.get_osd_path = lambda: '/var/lib/ceph/osd/ceph-123/'
bo.build_osd_mkfs_cmd = lambda: ['ceph-osd', '--mkfs', 'some', 'fake', 'args']
monkeypatch.setattr(system, 'chown', lambda path: 0)
@patch('time.sleep', Mock())
@patch('ceph_volume.process.call', return_value=([], [], 11))
- def test_osd_mkfs_fails_EWOULDBLOCK(self, m_call, monkeypatch):
- bo = BaseObjectStore([])
+ def test_osd_mkfs_fails_EWOULDBLOCK(self, m_call, monkeypatch, objectstore):
+ args = objectstore(dmcrypt=False)
+ bo = BaseObjectStore(args)
bo.get_osd_path = lambda: '/var/lib/ceph/osd/ceph-123/'
bo.build_osd_mkfs_cmd = lambda: ['ceph-osd', '--mkfs', 'some', 'fake', 'args']
monkeypatch.setattr(system, 'chown', lambda path: 0)
def test_activate(self):
with pytest.raises(NotImplementedError):
BaseObjectStore([]).activate()
+
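+ # The tests below exercise add_objectstore_opts(), which these changes move
+ # into BaseObjectStore (it was previously tested on the BlueStore class in
+ # the file deleted further down).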
+ @patch('ceph_volume.objectstore.baseobjectstore.prepare_utils.create_key', Mock(return_value=['AQCee6ZkzhOrJRAAZWSvNC3KdXOpC2w8ly4AZQ==']))
+ def setup_method(self, m_create_key):
+ self.b = BaseObjectStore([])
+ self.b.osd_mkfs_cmd = ['binary', 'arg1']
+
+ def test_add_objectstore_opts_wal_device_path(self, monkeypatch):
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda path: 0)
+ self.b.wal_device_path = '/dev/nvme0n1'
+ self.b.add_objectstore_opts()
+ assert self.b.osd_mkfs_cmd == ['binary', 'arg1', '--bluestore-block-wal-path', '/dev/nvme0n1']
+
+ def test_add_objectstore_opts_db_device_path(self, monkeypatch):
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda path: 0)
+ self.b.db_device_path = '/dev/ssd1'
+ self.b.add_objectstore_opts()
+ assert self.b.osd_mkfs_cmd == ['binary', 'arg1', '--bluestore-block-db-path', '/dev/ssd1']
+
+ def test_add_objectstore_opts_osdspec_affinity(self, monkeypatch):
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda path: 0)
+ self.b.get_osdspec_affinity = lambda: 'foo'
+ self.b.add_objectstore_opts()
+ assert self.b.osd_mkfs_cmd == ['binary', 'arg1', '--osdspec-affinity', 'foo']
\ No newline at end of file
+++ /dev/null
-from unittest.mock import patch, Mock
-from ceph_volume.objectstore.bluestore import BlueStore
-
-
-class TestBlueStore:
- @patch('ceph_volume.objectstore.baseobjectstore.prepare_utils.create_key', Mock(return_value=['AQCee6ZkzhOrJRAAZWSvNC3KdXOpC2w8ly4AZQ==']))
- def setup_method(self, m_create_key):
- self.b = BlueStore([])
- self.b.osd_mkfs_cmd = ['binary', 'arg1']
-
- def test_add_objectstore_opts_wal_device_path(self, monkeypatch):
- monkeypatch.setattr('ceph_volume.util.system.chown', lambda path: 0)
- self.b.wal_device_path = '/dev/nvme0n1'
- self.b.add_objectstore_opts()
- assert self.b.osd_mkfs_cmd == ['binary', 'arg1', '--bluestore-block-wal-path', '/dev/nvme0n1']
-
- def test_add_objectstore_opts_db_device_path(self, monkeypatch):
- monkeypatch.setattr('ceph_volume.util.system.chown', lambda path: 0)
- self.b.db_device_path = '/dev/ssd1'
- self.b.add_objectstore_opts()
- assert self.b.osd_mkfs_cmd == ['binary', 'arg1', '--bluestore-block-db-path', '/dev/ssd1']
-
- def test_add_objectstore_opts_osdspec_affinity(self, monkeypatch):
- monkeypatch.setattr('ceph_volume.util.system.chown', lambda path: 0)
- self.b.get_osdspec_affinity = lambda: 'foo'
- self.b.add_objectstore_opts()
- assert self.b.osd_mkfs_cmd == ['binary', 'arg1', '--osdspec-affinity', 'foo']
\ No newline at end of file
--- /dev/null
+import pytest
+from argparse import Namespace
+from unittest.mock import patch, Mock, MagicMock, call
+from ceph_volume.objectstore.lvm import Lvm
+from ceph_volume.api.lvm import Volume
+from ceph_volume.util import system, disk
+from typing import Callable
+
+
+class TestLvm:
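+ # prepare_utils.create_key is patched with a canned cephx key so Lvm([])
+ # can be instantiated in setup_method with a predictable key.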
+ @patch('ceph_volume.objectstore.lvm.prepare_utils.create_key', Mock(return_value=['AQCee6ZkzhOrJRAAZWSvNC3KdXOpC2w8ly4AZQ==']))
+ def setup_method(self, m_create_key):
+ self.lvm = Lvm([])
+
+ @patch('ceph_volume.conf.cluster', 'ceph')
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ @patch('ceph_volume.objectstore.lvm.prepare_utils.create_id', Mock(return_value='111'))
+ def test_pre_prepare_lv(self, m_get_single_lv, factory):
+ args = factory(objectstore='seastore',
+ cluster_fsid='abcd',
+ osd_fsid='abc123',
+ crush_device_class='ssd',
+ osd_id='111',
+ data='vg_foo/lv_foo')
+ m_get_single_lv.return_value = Volume(lv_name='lv_foo',
+ lv_path='/fake-path',
+ vg_name='vg_foo',
+ lv_tags='',
+ lv_uuid='fake-uuid')
+ self.lvm.encrypted = True
+ self.lvm.dmcrypt_key = 'fake-dmcrypt-key'
+ self.lvm.args = args
+ self.lvm.objectstore = 'seastore'
+ self.lvm.pre_prepare()
+ assert self.lvm.secrets['dmcrypt_key'] == 'fake-dmcrypt-key'
+ assert self.lvm.secrets['crush_device_class'] == 'ssd'
+ assert self.lvm.osd_id == '111'
+ assert self.lvm.block_device_path == '/fake-path'
+ assert self.lvm.tags == {'ceph.osd_fsid': 'abc123',
+ 'ceph.osd_id': '111',
+ 'ceph.cluster_fsid': 'abcd',
+ 'ceph.cluster_name': 'ceph',
+ 'ceph.crush_device_class': 'ssd',
+ 'ceph.osdspec_affinity': '',
+ 'ceph.block_device': '/fake-path',
+ 'ceph.block_uuid': 'fake-uuid',
+ 'ceph.cephx_lockbox_secret': '',
+ 'ceph.objectstore': 'seastore',
+ 'ceph.encrypted': True,
+ 'ceph.vdo': '0',
+ 'ceph.with_tpm': 0}
+
+ @patch('ceph_volume.conf.cluster', 'ceph')
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ @patch('ceph_volume.objectstore.lvm.prepare_utils.create_id', Mock(return_value='111'))
+ def test_pre_prepare_lv_with_dmcrypt_and_tpm(self, m_get_single_lv, factory):
+ args = factory(objectstore='seastore',
+ cluster_fsid='abcd',
+ osd_fsid='abc123',
+ crush_device_class='ssd',
+ osd_id='111',
+ data='vg_foo/lv_foo',
+ dmcrypt=True,
+ with_tpm=True)
+ m_get_single_lv.return_value = Volume(lv_name='lv_foo',
+ lv_path='/fake-path',
+ vg_name='vg_foo',
+ lv_tags='',
+ lv_uuid='fake-uuid')
+ self.lvm.encrypted = True
+ self.lvm.with_tpm = True
+ self.lvm.dmcrypt_key = 'fake-dmcrypt-key-tpm2'
+ self.lvm.args = args
+ self.lvm.objectstore = 'seastore'
+ self.lvm.pre_prepare()
+ assert 'dmcrypt_key' not in self.lvm.secrets.keys()
+ assert self.lvm.secrets['crush_device_class'] == 'ssd'
+ assert self.lvm.osd_id == '111'
+ assert self.lvm.block_device_path == '/fake-path'
+ assert self.lvm.tags == {'ceph.osd_fsid': 'abc123',
+ 'ceph.osd_id': '111',
+ 'ceph.cluster_fsid': 'abcd',
+ 'ceph.cluster_name': 'ceph',
+ 'ceph.crush_device_class': 'ssd',
+ 'ceph.osdspec_affinity': '',
+ 'ceph.block_device': '/fake-path',
+ 'ceph.block_uuid': 'fake-uuid',
+ 'ceph.cephx_lockbox_secret': '',
+ 'ceph.encrypted': True,
+ 'ceph.objectstore': 'seastore',
+ 'ceph.vdo': '0',
+ 'ceph.with_tpm': 1}
+
+ @patch('ceph_volume.conf.cluster', 'ceph')
+ @patch('ceph_volume.objectstore.lvm.prepare_utils.create_id', Mock(return_value='111'))
+ def test_pre_prepare_no_lv(self, factory):
+ args = factory(cluster_fsid='abcd',
+ objectstore='seastore',
+ osd_fsid='abc123',
+ crush_device_class='ssd',
+ osd_id='111',
+ data='/dev/foo',
+ dmcrypt_key='fake-dmcrypt-key')
+ self.lvm.prepare_data_device = lambda x, y: Volume(lv_name='lv_foo',
+ lv_path='/fake-path',
+ vg_name='vg_foo',
+ lv_tags='',
+ lv_uuid='fake-uuid')
+ self.lvm.encrypted = True
+ self.lvm.dmcrypt_key = 'fake-dmcrypt-key'
+ self.lvm.args = args
+ self.lvm.objectstore = 'seastore'
+ self.lvm.pre_prepare()
+ assert self.lvm.secrets['dmcrypt_key'] == 'fake-dmcrypt-key'
+ assert self.lvm.secrets['crush_device_class'] == 'ssd'
+ assert self.lvm.osd_id == '111'
+ assert self.lvm.block_device_path == '/fake-path'
+ assert self.lvm.tags == {'ceph.osd_fsid': 'abc123',
+ 'ceph.osd_id': '111',
+ 'ceph.cluster_fsid': 'abcd',
+ 'ceph.cluster_name': 'ceph',
+ 'ceph.crush_device_class': 'ssd',
+ 'ceph.osdspec_affinity': '',
+ 'ceph.block_device': '/fake-path',
+ 'ceph.block_uuid': 'fake-uuid',
+ 'ceph.cephx_lockbox_secret': '',
+ 'ceph.encrypted': True,
+ 'ceph.vdo': '0',
+ 'ceph.with_tpm': 0,
+ 'ceph.objectstore': 'seastore'}
+
+ @patch('ceph_volume.util.disk.is_partition', Mock(return_value=True))
+ @patch('ceph_volume.api.lvm.create_lv')
+ def test_prepare_data_device(self,
+ m_create_lv: MagicMock,
+ factory: Callable[..., Namespace]) -> None:
+ args = factory(data='/dev/foo1',
+ data_slots=1,
+ data_size=102400)
+ self.lvm.args = args
+ m_create_lv.return_value = Volume(lv_name='lv_foo',
+ lv_path='/fake-path',
+ vg_name='vg_foo',
+ lv_tags='',
+ lv_uuid='abcd')
+ assert self.lvm.prepare_data_device('block', 'abcd') == m_create_lv.return_value
+ assert self.lvm.args.data_size == 102400
+
+ @patch('ceph_volume.util.disk.is_device', Mock(return_value=False))
+ @patch('ceph_volume.util.disk.is_partition', Mock(return_value=False))
+ def test_prepare_data_device_fails(self, factory):
+ args = factory(data='/dev/foo')
+ self.lvm.args = args
+ with pytest.raises(RuntimeError) as error:
+ self.lvm.prepare_data_device('block', 'abcd')
+ assert ('Cannot use device (/dev/foo). '
+ 'A vg/lv path or an existing device is needed') == str(error.value)
+
+ @patch('ceph_volume.api.lvm.is_ceph_device', Mock(return_value=False))
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ def test_safe_prepare(self, m_get_single_lv, factory):
+ args = factory(data='vg_foo/lv_foo')
+ self.lvm.args = args
+ m_get_single_lv.return_value = Volume(lv_name='lv_foo',
+ lv_path='/fake-path',
+ vg_name='vg_foo',
+ lv_tags='',
+ lv_uuid='fake-uuid')
+ self.lvm.prepare = MagicMock()
+ self.lvm.safe_prepare()
+ assert self.lvm.prepare.called
+
+ @patch('ceph_volume.objectstore.lvm.Lvm.prepare', Mock(side_effect=Exception))
+ @patch('ceph_volume.api.lvm.is_ceph_device', Mock(return_value=False))
+ @patch('ceph_volume.objectstore.lvm.rollback_osd')
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ def test_safe_prepare_raises_exception(self, m_get_single_lv, m_rollback_osd, factory):
+ args = factory(data='/dev/foo')
+ self.lvm.args = args
+ self.lvm.osd_id = '111'
+ m_get_single_lv.return_value = Volume(lv_name='lv_foo',
+ lv_path='/fake-path',
+ vg_name='vg_foo',
+ lv_tags='',
+ lv_uuid='fake-uuid')
+ m_rollback_osd.return_value = MagicMock()
+ with pytest.raises(Exception):
+ self.lvm.safe_prepare()
+ assert m_rollback_osd.mock_calls == [call('111')]
+
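+ # test_prepare only checks the orchestration in prepare(): the helpers it
+ # calls are mocked out, so the assertions focus on the wal/db paths, the
+ # tags set on the block LV, and which helpers were (not) invoked.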
+ @patch('ceph_volume.objectstore.lvm.Lvm.pre_prepare', Mock(return_value=None))
+ @patch('ceph_volume.objectstore.lvm.Lvm.prepare_dmcrypt', MagicMock())
+ @patch('ceph_volume.objectstore.baseobjectstore.BaseObjectStore.prepare_osd_req', MagicMock())
+ @patch('ceph_volume.objectstore.baseobjectstore.BaseObjectStore.osd_mkfs', MagicMock())
+ @patch('ceph_volume.util.disk.is_partition', Mock(return_value=True))
+ @patch('ceph_volume.objectstore.baseobjectstore.BaseObjectStore.get_ptuuid', Mock(return_value='c6798f59-01'))
+ @patch('ceph_volume.api.lvm.Volume.set_tags', MagicMock())
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ def test_prepare(self,
+ m_get_single_lv: MagicMock,
+ is_root: Callable[..., None],
+ factory: Callable[..., Namespace]) -> None:
+ m_get_single_lv.return_value = Volume(lv_name='lv_foo',
+ lv_path='/fake-path',
+ vg_name='vg_foo',
+ lv_tags='',
+ lv_uuid='fake-uuid')
+ args = factory(data='vg_foo/lv_foo',
+ block_wal='/dev/foo1',
+ block_db='/dev/foo2',
+ block_wal_size=123,
+ block_db_size=123,
+ block_wal_slots=1,
+ block_db_slots=1,
+ with_tpm=False
+ )
+ self.lvm.args = args
+ self.lvm.block_lv = MagicMock()
+ self.lvm.secrets['dmcrypt_key'] = 'fake-secret'
+ self.lvm.prepare()
+ assert self.lvm.wal_device_path == '/dev/foo1'
+ assert self.lvm.db_device_path == '/dev/foo2'
+ assert self.lvm.block_lv.set_tags.mock_calls == [call({
+ 'ceph.type': 'block',
+ })]
+ assert not self.lvm.prepare_dmcrypt.called
+ assert self.lvm.osd_mkfs.called
+ assert self.lvm.prepare_osd_req.called
+
+ def test_prepare_dmcrypt(self):
+ self.lvm.secrets = {'dmcrypt_key': 'fake-secret'}
+ self.lvm.tags = {'ceph.block_uuid': 'block-uuid1',
+ 'ceph.db_uuid': 'db-uuid2',
+ 'ceph.wal_uuid': 'wal-uuid3',
+ 'ceph.with_tpm': 0}
+ self.lvm.block_device_path = '/dev/sdb'
+ self.lvm.db_device_path = '/dev/sdc'
+ self.lvm.wal_device_path = '/dev/sdb'
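+ # stub luks_format_and_open so it returns /dev/mapper/<uuid> taken from the
+ # tag matching the device type, without touching any real device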
+ self.lvm.luks_format_and_open = lambda *a: f'/dev/mapper/{a[2]["ceph."+a[1]+"_uuid"]}'
+ self.lvm.prepare_dmcrypt()
+ assert self.lvm.block_device_path == '/dev/mapper/block-uuid1'
+ assert self.lvm.db_device_path == '/dev/mapper/db-uuid2'
+ assert self.lvm.wal_device_path == '/dev/mapper/wal-uuid3'
+
+ @patch('ceph_volume.objectstore.lvm.encryption_utils.luks_open')
+ @patch('ceph_volume.objectstore.lvm.encryption_utils.luks_format')
+ def test_luks_format_and_open(self, m_luks_format, m_luks_open):
+ result = self.lvm.luks_format_and_open('/dev/foo',
+ 'block',
+ {'ceph.block_uuid': 'block-uuid1'})
+ assert result == '/dev/mapper/block-uuid1'
+
+ @patch('ceph_volume.objectstore.lvm.Lvm.enroll_tpm2', Mock(return_value=MagicMock()))
+ @patch('ceph_volume.objectstore.lvm.encryption_utils.luks_open')
+ @patch('ceph_volume.objectstore.lvm.encryption_utils.luks_format')
+ def test_luks_format_and_open_with_tpm(self, m_luks_format, m_luks_open):
+ self.lvm.with_tpm = True
+ result = self.lvm.luks_format_and_open('/dev/foo',
+ 'block',
+ {'ceph.block_uuid': 'block-uuid1'})
+ assert result == '/dev/mapper/block-uuid1'
+ self.lvm.enroll_tpm2.assert_called_once()
+
+ def test_luks_format_and_open_not_device(self):
+ result = self.lvm.luks_format_and_open('',
+ 'block',
+ {})
+ assert result == ''
+
+ @patch('ceph_volume.api.lvm.Volume.set_tags', return_value=MagicMock())
+ @patch('ceph_volume.util.system.generate_uuid',
+ Mock(return_value='d83fa1ca-bd68-4c75-bdc2-464da58e8abd'))
+ @patch('ceph_volume.api.lvm.create_lv')
+ @patch('ceph_volume.util.disk.is_device', Mock(return_value=True))
+ def test_setup_metadata_devices_is_device(self,
+ m_create_lv: MagicMock,
+ m_set_tags: MagicMock,
+ factory: Callable[..., Namespace]) -> None:
+ m_create_lv.return_value = Volume(lv_name='lv_foo',
+ lv_path='/fake-path',
+ vg_name='vg_foo',
+ lv_tags='',
+ lv_uuid='fake-uuid')
+ args = factory(cluster_fsid='abcd',
+ osd_fsid='abc123',
+ crush_device_class='ssd',
+ osd_id='111',
+ block_db='/dev/db',
+ block_db_size=disk.Size(gb=200),
+ block_db_slots=1,
+ block_wal=None,
+ block_wal_size='0',
+ block_wal_slots=None)
+ self.lvm.args = args
+ self.lvm.setup_metadata_devices()
+ assert m_create_lv.mock_calls == [call(name_prefix='osd-db',
+ uuid='d83fa1ca-bd68-4c75-bdc2-464da58e8abd',
+ vg=None,
+ device='/dev/db',
+ slots=1,
+ extents=None,
+ size=disk.Size(gb=200),
+ tags={'ceph.type': 'db',
+ 'ceph.vdo': '0',
+ 'ceph.db_device': '/fake-path',
+ 'ceph.db_uuid': 'fake-uuid'})]
+
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ @patch('ceph_volume.api.lvm.Volume.set_tags', return_value=MagicMock())
+ def test_setup_metadata_devices_is_lv(self,
+ m_set_tags: MagicMock,
+ m_get_single_lv: MagicMock,
+ factory: Callable[..., Namespace]) -> None:
+ m_get_single_lv.return_value = Volume(lv_name='lv_foo',
+ lv_path='/fake-path',
+ vg_name='vg_foo',
+ lv_tags='',
+ lv_uuid='fake-uuid')
+ args = factory(cluster_fsid='abcd',
+ osd_fsid='abc123',
+ crush_device_class='ssd',
+ osd_id='111',
+ block_db='vg1/lv1',
+ block_db_size=disk.Size(gb=200),
+ block_db_slots=1,
+ block_wal=None,
+ block_wal_size='0',
+ block_wal_slots=None)
+ self.lvm.args = args
+ self.lvm.setup_metadata_devices()
+ assert m_set_tags.mock_calls == [call({
+ 'ceph.type': 'db',
+ 'ceph.vdo': '0',
+ 'ceph.db_uuid': 'fake-uuid',
+ 'ceph.db_device': '/fake-path'
+ })]
+
+ @patch('ceph_volume.util.disk.is_partition', Mock(return_value=True))
+ @patch('ceph_volume.objectstore.baseobjectstore.BaseObjectStore.get_ptuuid', Mock(return_value='c6798f59-01'))
+ @patch('ceph_volume.api.lvm.Volume.set_tags', return_value=MagicMock())
+ @patch('ceph_volume.api.lvm.create_lv')
+ def test_setup_metadata_devices_partition(self,
+ m_create_lv: MagicMock,
+ m_set_tags: MagicMock,
+ factory: Callable[..., Namespace]) -> None:
+ args = factory(cluster_fsid='abcd',
+ osd_fsid='abc123',
+ crush_device_class='ssd',
+ osd_id='111',
+ block_db='/dev/foo1',
+ block_db_size=disk.Size(gb=200),
+ block_db_slots=1,
+ block_wal=None,
+ block_wal_size='0',
+ block_wal_slots=None)
+ self.lvm.args = args
+ self.lvm.setup_metadata_devices()
+ m_create_lv.assert_not_called()
+ m_set_tags.assert_not_called()
+
+ def test_get_osd_device_path_lv_block(self):
+ lvs = [Volume(lv_name='lv_foo',
+ lv_path='/fake-path',
+ vg_name='vg_foo',
+ lv_tags='ceph.type=block,ceph.block_uuid=fake-block-uuid',
+ lv_uuid='fake-block-uuid')]
+ assert self.lvm.get_osd_device_path(lvs, 'block') == '/fake-path'
+
+ @patch('ceph_volume.objectstore.lvm.encryption_utils.luks_open', MagicMock())
+ def test_get_osd_device_path_lv_block_encrypted(self):
+ lvs = [Volume(lv_name='lv_foo',
+ lv_path='/fake-path',
+ vg_name='vg_foo',
+ lv_tags='ceph.type=block,ceph.block_uuid=fake-block-uuid,ceph.encrypted=1',
+ lv_uuid='fake-block-uuid')]
+ assert self.lvm.get_osd_device_path(lvs, 'block') == '/dev/mapper/fake-block-uuid'
+
+ def test_get_osd_device_path_lv_db(self):
+ lvs = [Volume(lv_name='lv_foo-block',
+ lv_path='/fake-block-path',
+ vg_name='vg_foo',
+ lv_tags='ceph.type=block,ceph.block_uuid=fake-block-uuid,ceph.db_uuid=fake-db-uuid',
+ lv_uuid='fake-block-uuid'),
+ Volume(lv_name='lv_foo-db',
+ lv_path='/fake-db-path',
+ vg_name='vg_foo_db',
+ lv_tags='ceph.type=db,ceph.block_uuid=fake-block-uuid,ceph.db_uuid=fake-db-uuid',
+ lv_uuid='fake-db-uuid')]
+ assert self.lvm.get_osd_device_path(lvs, 'db') == '/fake-db-path'
+
+ def test_get_osd_device_path_no_device_uuid(self):
+ lvs = [Volume(lv_name='lv_foo-block',
+ lv_path='/fake-block-path',
+ vg_name='vg_foo',
+ lv_tags='ceph.type=block,ceph.block_uuid=fake-block-uuid',
+ lv_uuid='fake-block-uuid'),
+ Volume(lv_name='lv_foo-db',
+ lv_path='/fake-db-path',
+ vg_name='vg_foo_db',
+ lv_tags='ceph.type=db,ceph.block_uuid=fake-block-uuid',
+ lv_uuid='fake-db-uuid')]
+ assert not self.lvm.get_osd_device_path(lvs, 'db')
+
+ @patch('ceph_volume.util.disk.get_device_from_partuuid')
+ @patch('ceph_volume.objectstore.lvm.encryption_utils.luks_open', MagicMock())
+ def test_get_osd_device_path_phys_encrypted(self, m_get_device_from_partuuid):
+ m_get_device_from_partuuid.return_value = '/dev/sda1'
+ lvs = [Volume(lv_name='lv_foo-block',
+ lv_path='/fake-block-path',
+ vg_name='vg_foo',
+ lv_tags='ceph.type=block,ceph.block_uuid=fake-block-uuid,ceph.db_uuid=fake-db-uuid,ceph.osd_id=0,ceph.osd_fsid=abcd,ceph.cluster_name=ceph,ceph.encrypted=1',
+ lv_uuid='fake-block-uuid')]
+ assert self.lvm.get_osd_device_path(lvs, 'db') == '/dev/mapper/fake-db-uuid'
+
+ @patch('ceph_volume.util.disk.get_device_from_partuuid')
+ def test_get_osd_device_path_phys(self, m_get_device_from_partuuid):
+ m_get_device_from_partuuid.return_value = '/dev/sda1'
+ lvs = [Volume(lv_name='lv_foo-block',
+ lv_path='/fake-block-path',
+ vg_name='vg_foo',
+ lv_tags='ceph.type=block,ceph.block_uuid=fake-block-uuid,ceph.db_uuid=fake-db-uuid,ceph.osd_id=0,ceph.osd_fsid=abcd,ceph.cluster_name=ceph',
+ lv_uuid='fake-block-uuid')]
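+ # smoke test: only checks that resolving a physical (non-LV) db device
+ # does not raise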
+ self.lvm.get_osd_device_path(lvs, 'db')
+
+ @patch('ceph_volume.util.disk.get_device_from_partuuid')
+ def test_get_osd_device_path_phys_raises_exception(self, m_get_device_from_partuuid):
+ m_get_device_from_partuuid.return_value = ''
+ lvs = [Volume(lv_name='lv_foo-block',
+ lv_path='/fake-block-path',
+ vg_name='vg_foo',
+ lv_tags='ceph.type=block,ceph.block_uuid=fake-block-uuid,ceph.db_uuid=fake-db-uuid,ceph.osd_id=0,ceph.osd_fsid=abcd,ceph.cluster_name=ceph',
+ lv_uuid='fake-block-uuid')]
+ with pytest.raises(RuntimeError):
+ self.lvm.get_osd_device_path(lvs, 'db')
+
+ def test__activate_raises_exception(self):
+ lvs = [Volume(lv_name='lv_foo-db',
+ lv_path='/fake-path',
+ vg_name='vg_foo',
+ lv_tags='ceph.type=db,ceph.db_uuid=fake-db-uuid',
+ lv_uuid='fake-db-uuid')]
+ with pytest.raises(RuntimeError) as error:
+ self.lvm._activate(lvs)
+ assert str(error.value) == 'could not find a bluestore OSD to activate'
+
+ @patch('ceph_volume.objectstore.lvm.encryption_utils.write_lockbox_keyring', MagicMock())
+ @patch('ceph_volume.objectstore.lvm.encryption_utils.get_dmcrypt_key', MagicMock())
+ @patch('ceph_volume.objectstore.lvm.prepare_utils.create_osd_path')
+ @patch('ceph_volume.terminal.success')
+ @pytest.mark.parametrize("encrypted", ["ceph.encrypted=0", "ceph.encrypted=1"])
+ def test__activate(self,
+ m_success, m_create_osd_path,
+ monkeypatch, fake_run, fake_call, encrypted, conf_ceph_stub, patch_udevdata):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
+ monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda path: False)
+ m_create_osd_path.return_value = MagicMock()
+ m_success.return_value = MagicMock()
+ lvs = [Volume(lv_name='lv_foo-block',
+ lv_path='/fake-block-path',
+ vg_name='vg_foo',
+ lv_tags=f'ceph.type=block,ceph.db_uuid=fake-db-uuid,ceph.block_uuid=fake-block-uuid,ceph.wal_uuid=fake-wal-uuid,ceph.osd_id=0,ceph.osd_fsid=abcd,ceph.cluster_name=ceph,{encrypted},ceph.cephx_lockbox_secret=abcd',
+ lv_uuid='fake-block-uuid'),
+ Volume(lv_name='lv_foo-db',
+ lv_path='/fake-db-path',
+ vg_name='vg_foo_db',
+ lv_tags=f'ceph.type=db,ceph.db_uuid=fake-db-uuid,ceph.block_uuid=fake-block-uuid,ceph.wal_uuid=fake-wal-uuid,ceph.osd_id=0,ceph.osd_fsid=abcd,ceph.cluster_name=ceph,{encrypted},ceph.cephx_lockbox_secret=abcd',
+ lv_uuid='fake-db-uuid'),
+ Volume(lv_name='lv_foo-db',
+ lv_path='/fake-wal-path',
+ vg_name='vg_foo_wal',
+ lv_tags=f'ceph.type=wal,ceph.block_uuid=fake-block-uuid,ceph.wal_uuid=fake-wal-uuid,ceph.db_uuid=fake-db-uuid,ceph.osd_id=0,ceph.osd_fsid=abcd,ceph.cluster_name=ceph,{encrypted},ceph.cephx_lockbox_secret=abcd',
+ lv_uuid='fake-wal-uuid')]
+ self.lvm._activate(lvs)
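+ # the expected command sequence is identical in both cases except for the
+ # device paths: plain LV paths when unencrypted, /dev/mapper/* paths when
+ # dmcrypt is enabled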
+ if encrypted == "ceph.encrypted=0":
+ assert fake_run.calls == [{'args': (['ceph-bluestore-tool', '--cluster=ceph',
+ 'prime-osd-dir', '--dev', '/fake-block-path',
+ '--path', '/var/lib/ceph/osd/ceph-0', '--no-mon-config'],),
+ 'kwargs': {}},
+ {'args': (['ln', '-snf', '/fake-block-path',
+ '/var/lib/ceph/osd/ceph-0/block'],),
+ 'kwargs': {}},
+ {'args': (['ln', '-snf', '/fake-db-path',
+ '/var/lib/ceph/osd/ceph-0/block.db'],),
+ 'kwargs': {}},
+ {'args': (['ln', '-snf', '/fake-wal-path',
+ '/var/lib/ceph/osd/ceph-0/block.wal'],),
+ 'kwargs': {}},
+ {'args': (['systemctl', 'enable',
+ 'ceph-volume@lvm-0-abcd'],),
+ 'kwargs': {}},
+ {'args': (['systemctl', 'enable', '--runtime', 'ceph-osd@0'],),
+ 'kwargs': {}},
+ {'args': (['systemctl', 'start', 'ceph-osd@0'],),
+ 'kwargs': {}}]
+ else:
+ assert fake_run.calls == [{'args': (['ceph-bluestore-tool', '--cluster=ceph',
+ 'prime-osd-dir', '--dev', '/dev/mapper/fake-block-uuid',
+ '--path', '/var/lib/ceph/osd/ceph-0', '--no-mon-config'],),
+ 'kwargs': {}},
+ {'args': (['ln', '-snf', '/dev/mapper/fake-block-uuid',
+ '/var/lib/ceph/osd/ceph-0/block'],),
+ 'kwargs': {}},
+ {'args': (['ln', '-snf', '/dev/mapper/fake-db-uuid',
+ '/var/lib/ceph/osd/ceph-0/block.db'],),
+ 'kwargs': {}},
+ {'args': (['ln', '-snf', '/dev/mapper/fake-wal-uuid',
+ '/var/lib/ceph/osd/ceph-0/block.wal'],),
+ 'kwargs': {}},
+ {'args': (['systemctl', 'enable', 'ceph-volume@lvm-0-abcd'],),
+ 'kwargs': {}},
+ {'args': (['systemctl', 'enable', '--runtime', 'ceph-osd@0'],),
+ 'kwargs': {}},
+ {'args': (['systemctl', 'start', 'ceph-osd@0'],),
+ 'kwargs': {}}]
+ assert m_success.mock_calls == [call('ceph-volume lvm activate successful for osd ID: 0')]
+
+ @patch('ceph_volume.systemd.systemctl.osd_is_active', return_value=False)
+ def test_activate_all(self,
+ m_osd_is_active,
+ mock_lvm_direct_report,
+ is_root,
+ factory,
+ fake_run):
+ args = factory(no_systemd=True)
+ self.lvm.args = args
+ self.lvm.activate = MagicMock()
+ self.lvm.activate_all()
+ assert self.lvm.activate.mock_calls == [call(args,
+ osd_id='1',
+ osd_fsid='824f7edf-371f-4b75-9231-4ab62a32d5c0'),
+ call(args,
+ osd_id='0',
+ osd_fsid='a0e07c5b-bee1-4ea2-ae07-cb89deda9b27')]
+
+ @patch('ceph_volume.systemd.systemctl.osd_is_active', return_value=False)
+ def test_activate_all_no_osd_found(self,
+ m_osd_is_active,
+ is_root,
+ factory,
+ fake_run,
+ monkeypatch,
+ capsys):
+ monkeypatch.setattr('ceph_volume.objectstore.lvm.direct_report', lambda: {})
+ args = factory(no_systemd=True)
+ self.lvm.args = args
+ self.lvm.activate_all()
+ stdout, stderr = capsys.readouterr()
+ assert "Was unable to find any OSDs to activate" in stderr
+ assert "Verify OSDs are present with" in stderr
+
+ @patch('ceph_volume.api.lvm.process.call', Mock(return_value=('', '', 0)))
+ @patch('ceph_volume.systemd.systemctl.osd_is_active', return_value=True)
+ def test_activate_all_osd_is_active(self,
+ mock_lvm_direct_report,
+ is_root,
+ factory,
+ fake_run):
+ args = factory(no_systemd=False)
+ self.lvm.args = args
+ self.lvm.activate = MagicMock()
+ self.lvm.activate_all()
+ assert self.lvm.activate.mock_calls == []
+
+ @patch('ceph_volume.api.lvm.get_lvs')
+ def test_activate_osd_id_and_fsid(self,
+ m_get_lvs,
+ is_root,
+ factory):
+ args = factory(osd_id='1',
+ osd_fsid='824f7edf',
+ no_systemd=True)
+ lvs = [Volume(lv_name='lv_foo',
+ lv_path='/fake-path',
+ vg_name='vg_foo',
+ lv_tags=f'ceph.osd_id={args.osd_id},ceph.osd_fsid={args.osd_fsid}',
+ lv_uuid='fake-uuid')]
+ m_get_lvs.return_value = lvs
+ self.lvm.args = args
+ self.lvm._activate = MagicMock()
+ self.lvm.activate()
+ assert self.lvm._activate.mock_calls == [call(lvs, True, False)]
+ assert m_get_lvs.mock_calls == [call(tags={'ceph.osd_id': '1',
+ 'ceph.osd_fsid': '824f7edf'})]
+
+ @patch('ceph_volume.api.lvm.get_lvs')
+ def test_activate_not_osd_id_and_fsid(self,
+ m_get_lvs,
+ is_root,
+ factory):
+ args = factory(no_systemd=True,
+ osd_id=None,
+ osd_fsid='824f7edf')
+ lvs = [Volume(lv_name='lv_foo',
+ lv_path='/fake-path',
+ vg_name='vg_foo',
+ lv_tags='',
+ lv_uuid='fake-uuid')]
+ m_get_lvs.return_value = lvs
+ self.lvm.args = args
+ self.lvm._activate = MagicMock()
+ self.lvm.activate()
+ assert self.lvm._activate.mock_calls == [call(lvs, True, False)]
+ assert m_get_lvs.mock_calls == [call(tags={'ceph.osd_fsid': '824f7edf'})]
+
+ def test_activate_osd_id_and_not_fsid(self,
+ is_root,
+ factory):
+ args = factory(no_systemd=True,
+ osd_id='1',
+ osd_fsid=None)
+ self.lvm.args = args
+ self.lvm._activate = MagicMock()
+ with pytest.raises(RuntimeError) as error:
+ self.lvm.activate()
+ assert str(error.value) == 'could not activate osd.1, please provide the osd_fsid too'
+
+ def test_activate_not_osd_id_and_not_fsid(self,
+ is_root,
+ factory):
+ args = factory(no_systemd=True,
+ osd_id=None,
+ osd_fsid=None)
+ self.lvm.args = args
+ self.lvm._activate = MagicMock()
+ with pytest.raises(RuntimeError) as error:
+ self.lvm.activate()
+ assert str(error.value) == 'Please provide both osd_id and osd_fsid'
+
+ @patch('ceph_volume.api.lvm.get_lvs')
+ def test_activate_couldnt_find_osd(self,
+ m_get_lvs,
+ is_root,
+ factory):
+ args = factory(osd_id='1',
+ osd_fsid='824f7edf',
+ no_systemd=True)
+ lvs = []
+ m_get_lvs.return_value = lvs
+ self.lvm.args = args
+ self.lvm._activate = MagicMock()
+ with pytest.raises(RuntimeError) as error:
+ self.lvm.activate()
+ assert str(error.value) == 'could not find osd.1 with osd_fsid 824f7edf'
\ No newline at end of file
+++ /dev/null
-import pytest
-from argparse import Namespace
-from unittest.mock import patch, Mock, MagicMock, call
-from ceph_volume.objectstore.lvmbluestore import LvmBlueStore
-from ceph_volume.api.lvm import Volume
-from ceph_volume.util import system, disk
-from typing import Callable
-
-
-class TestLvmBlueStore:
- @patch('ceph_volume.objectstore.lvmbluestore.prepare_utils.create_key', Mock(return_value=['AQCee6ZkzhOrJRAAZWSvNC3KdXOpC2w8ly4AZQ==']))
- def setup_method(self, m_create_key):
- self.lvm_bs = LvmBlueStore([])
-
- @patch('ceph_volume.conf.cluster', 'ceph')
- @patch('ceph_volume.api.lvm.get_single_lv')
- @patch('ceph_volume.objectstore.lvmbluestore.prepare_utils.create_id', Mock(return_value='111'))
- def test_pre_prepare_lv(self, m_get_single_lv, factory):
- args = factory(cluster_fsid='abcd',
- osd_fsid='abc123',
- crush_device_class='ssd',
- osd_id='111',
- data='vg_foo/lv_foo')
- m_get_single_lv.return_value = Volume(lv_name='lv_foo',
- lv_path='/fake-path',
- vg_name='vg_foo',
- lv_tags='',
- lv_uuid='fake-uuid')
- self.lvm_bs.encrypted = True
- self.lvm_bs.dmcrypt_key = 'fake-dmcrypt-key'
- self.lvm_bs.args = args
- self.lvm_bs.pre_prepare()
- assert self.lvm_bs.secrets['dmcrypt_key'] == 'fake-dmcrypt-key'
- assert self.lvm_bs.secrets['crush_device_class'] == 'ssd'
- assert self.lvm_bs.osd_id == '111'
- assert self.lvm_bs.block_device_path == '/fake-path'
- assert self.lvm_bs.tags == {'ceph.osd_fsid': 'abc123',
- 'ceph.osd_id': '111',
- 'ceph.cluster_fsid': 'abcd',
- 'ceph.cluster_name': 'ceph',
- 'ceph.crush_device_class': 'ssd',
- 'ceph.osdspec_affinity': '',
- 'ceph.block_device': '/fake-path',
- 'ceph.block_uuid': 'fake-uuid',
- 'ceph.cephx_lockbox_secret': '',
- 'ceph.encrypted': True,
- 'ceph.vdo': '0',
- 'ceph.with_tpm': 0}
-
- @patch('ceph_volume.conf.cluster', 'ceph')
- @patch('ceph_volume.api.lvm.get_single_lv')
- @patch('ceph_volume.objectstore.lvmbluestore.prepare_utils.create_id', Mock(return_value='111'))
- def test_pre_prepare_lv_with_dmcrypt_and_tpm(self, m_get_single_lv, factory):
- args = factory(cluster_fsid='abcd',
- osd_fsid='abc123',
- crush_device_class='ssd',
- osd_id='111',
- data='vg_foo/lv_foo',
- dmcrypt=True,
- with_tpm=True)
- m_get_single_lv.return_value = Volume(lv_name='lv_foo',
- lv_path='/fake-path',
- vg_name='vg_foo',
- lv_tags='',
- lv_uuid='fake-uuid')
- self.lvm_bs.encrypted = True
- self.lvm_bs.with_tpm = True
- self.lvm_bs.dmcrypt_key = 'fake-dmcrypt-key-tpm2'
- self.lvm_bs.args = args
- self.lvm_bs.pre_prepare()
- assert 'dmcrypt_key' not in self.lvm_bs.secrets.keys()
- assert self.lvm_bs.secrets['crush_device_class'] == 'ssd'
- assert self.lvm_bs.osd_id == '111'
- assert self.lvm_bs.block_device_path == '/fake-path'
- assert self.lvm_bs.tags == {'ceph.osd_fsid': 'abc123',
- 'ceph.osd_id': '111',
- 'ceph.cluster_fsid': 'abcd',
- 'ceph.cluster_name': 'ceph',
- 'ceph.crush_device_class': 'ssd',
- 'ceph.osdspec_affinity': '',
- 'ceph.block_device': '/fake-path',
- 'ceph.block_uuid': 'fake-uuid',
- 'ceph.cephx_lockbox_secret': '',
- 'ceph.encrypted': True,
- 'ceph.vdo': '0',
- 'ceph.with_tpm': 1}
-
- @patch('ceph_volume.objectstore.lvmbluestore.prepare_utils.create_id', Mock(return_value='111'))
- def test_pre_prepare_no_lv(self, factory):
- args = factory(cluster_fsid='abcd',
- osd_fsid='abc123',
- crush_device_class='ssd',
- osd_id='111',
- data='/dev/foo',
- dmcrypt_key='fake-dmcrypt-key')
- self.lvm_bs.prepare_data_device = lambda x, y: Volume(lv_name='lv_foo',
- lv_path='/fake-path',
- vg_name='vg_foo',
- lv_tags='',
- lv_uuid='fake-uuid')
- self.lvm_bs.encrypted = True
- self.lvm_bs.dmcrypt_key = 'fake-dmcrypt-key'
- self.lvm_bs.args = args
- self.lvm_bs.pre_prepare()
- assert self.lvm_bs.secrets['dmcrypt_key'] == 'fake-dmcrypt-key'
- assert self.lvm_bs.secrets['crush_device_class'] == 'ssd'
- assert self.lvm_bs.osd_id == '111'
- assert self.lvm_bs.block_device_path == '/fake-path'
- assert self.lvm_bs.tags == {'ceph.osd_fsid': 'abc123',
- 'ceph.osd_id': '111',
- 'ceph.cluster_fsid': 'abcd',
- 'ceph.cluster_name': None,
- 'ceph.crush_device_class': 'ssd',
- 'ceph.osdspec_affinity': '',
- 'ceph.block_device': '/fake-path',
- 'ceph.block_uuid': 'fake-uuid',
- 'ceph.cephx_lockbox_secret': '',
- 'ceph.encrypted': True,
- 'ceph.vdo': '0',
- 'ceph.with_tpm': 0}
-
- @patch('ceph_volume.util.disk.is_partition', Mock(return_value=True))
- @patch('ceph_volume.api.lvm.create_lv')
- def test_prepare_data_device(self,
- m_create_lv: MagicMock,
- factory: Callable[..., Namespace]) -> None:
- args = factory(data='/dev/foo1',
- data_slots=1,
- data_size=102400)
- self.lvm_bs.args = args
- m_create_lv.return_value = Volume(lv_name='lv_foo',
- lv_path='/fake-path',
- vg_name='vg_foo',
- lv_tags='',
- lv_uuid='abcd')
- assert self.lvm_bs.prepare_data_device('block', 'abcd') == m_create_lv.return_value
- assert self.lvm_bs.args.data_size == 102400
-
- @patch('ceph_volume.util.disk.is_device', Mock(return_value=False))
- @patch('ceph_volume.util.disk.is_partition', Mock(return_value=False))
- def test_prepare_data_device_fails(self, factory):
- args = factory(data='/dev/foo')
- self.lvm_bs.args = args
- with pytest.raises(RuntimeError) as error:
- self.lvm_bs.prepare_data_device('block', 'abcd')
- assert ('Cannot use device (/dev/foo). '
- 'A vg/lv path or an existing device is needed') == str(error.value)
-
- @patch('ceph_volume.api.lvm.is_ceph_device', Mock(return_value=False))
- @patch('ceph_volume.api.lvm.get_single_lv')
- def test_safe_prepare(self, m_get_single_lv, factory):
- args = factory(data='vg_foo/lv_foo')
- self.lvm_bs.args = args
- m_get_single_lv.return_value = Volume(lv_name='lv_foo',
- lv_path='/fake-path',
- vg_name='vg_foo',
- lv_tags='',
- lv_uuid='fake-uuid')
- self.lvm_bs.prepare = MagicMock()
- self.lvm_bs.safe_prepare()
- assert self.lvm_bs.prepare.called
-
- @patch('ceph_volume.objectstore.lvmbluestore.LvmBlueStore.prepare', Mock(side_effect=Exception))
- @patch('ceph_volume.api.lvm.is_ceph_device', Mock(return_value=False))
- # @patch('ceph_volume.devices.lvm.common.rollback_osd')
- @patch('ceph_volume.objectstore.lvmbluestore.rollback_osd')
- @patch('ceph_volume.api.lvm.get_single_lv')
- def test_safe_prepare_raises_exception(self, m_get_single_lv, m_rollback_osd, factory):
- args = factory(data='/dev/foo')
- self.lvm_bs.args = args
- self.lvm_bs.osd_id = '111'
- m_get_single_lv.return_value = Volume(lv_name='lv_foo',
- lv_path='/fake-path',
- vg_name='vg_foo',
- lv_tags='',
- lv_uuid='fake-uuid')
- m_rollback_osd.return_value = MagicMock()
- with pytest.raises(Exception):
- self.lvm_bs.safe_prepare()
- assert m_rollback_osd.mock_calls == [call('111')]
-
- @patch('ceph_volume.objectstore.lvmbluestore.LvmBlueStore.pre_prepare', Mock(return_value=None))
- @patch('ceph_volume.objectstore.lvmbluestore.LvmBlueStore.prepare_dmcrypt', MagicMock())
- @patch('ceph_volume.objectstore.baseobjectstore.BaseObjectStore.prepare_osd_req', MagicMock())
- @patch('ceph_volume.objectstore.bluestore.BlueStore.osd_mkfs', MagicMock())
- @patch('ceph_volume.util.disk.is_partition', Mock(return_value=True))
- @patch('ceph_volume.objectstore.baseobjectstore.BaseObjectStore.get_ptuuid', Mock(return_value='c6798f59-01'))
- @patch('ceph_volume.api.lvm.Volume.set_tags', MagicMock())
- @patch('ceph_volume.api.lvm.get_single_lv')
- def test_prepare(self,
- m_get_single_lv: MagicMock,
- is_root: Callable[..., None],
- factory: Callable[..., Namespace]) -> None:
- m_get_single_lv.return_value = Volume(lv_name='lv_foo',
- lv_path='/fake-path',
- vg_name='vg_foo',
- lv_tags='',
- lv_uuid='fake-uuid')
- args = factory(data='vg_foo/lv_foo',
- block_wal='/dev/foo1',
- block_db='/dev/foo2',
- block_wal_size=123,
- block_db_size=123,
- block_wal_slots=1,
- block_db_slots=1,
- with_tpm=False
- )
- self.lvm_bs.args = args
- self.lvm_bs.block_lv = MagicMock()
- self.lvm_bs.secrets['dmcrypt_key'] = 'fake-secret'
- self.lvm_bs.prepare()
- assert self.lvm_bs.wal_device_path == '/dev/foo1'
- assert self.lvm_bs.db_device_path == '/dev/foo2'
- assert self.lvm_bs.block_lv.set_tags.mock_calls == [call({
- 'ceph.type': 'block',
- })]
- assert not self.lvm_bs.prepare_dmcrypt.called
- assert self.lvm_bs.osd_mkfs.called
- assert self.lvm_bs.prepare_osd_req.called
-
- def test_prepare_dmcrypt(self):
- self.lvm_bs.secrets = {'dmcrypt_key': 'fake-secret'}
- self.lvm_bs.tags = {'ceph.block_uuid': 'block-uuid1',
- 'ceph.db_uuid': 'db-uuid2',
- 'ceph.wal_uuid': 'wal-uuid3',
- 'ceph.with_tpm': 0}
- self.lvm_bs.block_device_path = '/dev/sdb'
- self.lvm_bs.db_device_path = '/dev/sdc'
- self.lvm_bs.wal_device_path = '/dev/sdb'
- self.lvm_bs.luks_format_and_open = lambda *a: f'/dev/mapper/{a[2]["ceph."+a[1]+"_uuid"]}'
- self.lvm_bs.prepare_dmcrypt()
- assert self.lvm_bs.block_device_path == '/dev/mapper/block-uuid1'
- assert self.lvm_bs.db_device_path == '/dev/mapper/db-uuid2'
- assert self.lvm_bs.wal_device_path == '/dev/mapper/wal-uuid3'
-
- @patch('ceph_volume.objectstore.lvmbluestore.encryption_utils.luks_open')
- @patch('ceph_volume.objectstore.lvmbluestore.encryption_utils.luks_format')
- def test_luks_format_and_open(self, m_luks_format, m_luks_open):
- result = self.lvm_bs.luks_format_and_open('/dev/foo',
- 'block',
- {'ceph.block_uuid': 'block-uuid1'})
- assert result == '/dev/mapper/block-uuid1'
-
- @patch('ceph_volume.objectstore.lvmbluestore.LvmBlueStore.enroll_tpm2', Mock(return_value=MagicMock()))
- @patch('ceph_volume.objectstore.lvmbluestore.encryption_utils.luks_open')
- @patch('ceph_volume.objectstore.lvmbluestore.encryption_utils.luks_format')
- def test_luks_format_and_open_with_tpm(self, m_luks_format, m_luks_open):
- self.lvm_bs.with_tpm = True
- result = self.lvm_bs.luks_format_and_open('/dev/foo',
- 'block',
- {'ceph.block_uuid': 'block-uuid1'})
- assert result == '/dev/mapper/block-uuid1'
- self.lvm_bs.enroll_tpm2.assert_called_once()
-
- def test_luks_format_and_open_not_device(self):
- result = self.lvm_bs.luks_format_and_open('',
- 'block',
- {})
- assert result == ''
-
- @patch('ceph_volume.api.lvm.Volume.set_tags', return_value=MagicMock())
- @patch('ceph_volume.util.system.generate_uuid',
- Mock(return_value='d83fa1ca-bd68-4c75-bdc2-464da58e8abd'))
- @patch('ceph_volume.api.lvm.create_lv')
- @patch('ceph_volume.util.disk.is_device', Mock(return_value=True))
- def test_setup_metadata_devices_is_device(self,
- m_create_lv: MagicMock,
- m_set_tags: MagicMock,
- factory: Callable[..., Namespace]) -> None:
- m_create_lv.return_value = Volume(lv_name='lv_foo',
- lv_path='/fake-path',
- vg_name='vg_foo',
- lv_tags='',
- lv_uuid='fake-uuid')
- args = factory(cluster_fsid='abcd',
- osd_fsid='abc123',
- crush_device_class='ssd',
- osd_id='111',
- block_db='/dev/db',
- block_db_size=disk.Size(gb=200),
- block_db_slots=1,
- block_wal=None,
- block_wal_size='0',
- block_wal_slots=None)
- self.lvm_bs.args = args
- self.lvm_bs.setup_metadata_devices()
- assert m_create_lv.mock_calls == [call(name_prefix='osd-db',
- uuid='d83fa1ca-bd68-4c75-bdc2-464da58e8abd',
- vg=None,
- device='/dev/db',
- slots=1,
- extents=None,
- size=disk.Size(gb=200),
- tags={'ceph.type': 'db',
- 'ceph.vdo': '0',
- 'ceph.db_device': '/fake-path',
- 'ceph.db_uuid': 'fake-uuid'})]
-
- @patch('ceph_volume.api.lvm.get_single_lv')
- @patch('ceph_volume.api.lvm.Volume.set_tags', return_value=MagicMock())
- def test_setup_metadata_devices_is_lv(self,
- m_set_tags: MagicMock,
- m_get_single_lv: MagicMock,
- factory: Callable[..., Namespace]) -> None:
- m_get_single_lv.return_value = Volume(lv_name='lv_foo',
- lv_path='/fake-path',
- vg_name='vg_foo',
- lv_tags='',
- lv_uuid='fake-uuid')
- args = factory(cluster_fsid='abcd',
- osd_fsid='abc123',
- crush_device_class='ssd',
- osd_id='111',
- block_db='vg1/lv1',
- block_db_size=disk.Size(gb=200),
- block_db_slots=1,
- block_wal=None,
- block_wal_size='0',
- block_wal_slots=None)
- self.lvm_bs.args = args
- self.lvm_bs.setup_metadata_devices()
- assert m_set_tags.mock_calls == [call({
- 'ceph.type': 'db',
- 'ceph.vdo': '0',
- 'ceph.db_uuid': 'fake-uuid',
- 'ceph.db_device': '/fake-path'
- })]
-
- @patch('ceph_volume.util.disk.is_partition', Mock(return_value=True))
- @patch('ceph_volume.objectstore.baseobjectstore.BaseObjectStore.get_ptuuid', Mock(return_value='c6798f59-01'))
- @patch('ceph_volume.api.lvm.Volume.set_tags', return_value=MagicMock())
- @patch('ceph_volume.api.lvm.create_lv')
- def test_setup_metadata_devices_partition(self,
- m_create_lv: MagicMock,
- m_set_tags: MagicMock,
- factory: Callable[..., Namespace]) -> None:
- args = factory(cluster_fsid='abcd',
- osd_fsid='abc123',
- crush_device_class='ssd',
- osd_id='111',
- block_db='/dev/foo1',
- block_db_size=disk.Size(gb=200),
- block_db_slots=1,
- block_wal=None,
- block_wal_size='0',
- block_wal_slots=None)
- self.lvm_bs.args = args
- self.lvm_bs.setup_metadata_devices()
- m_create_lv.assert_not_called()
- m_set_tags.assert_not_called()
-
- def test_get_osd_device_path_lv_block(self):
- lvs = [Volume(lv_name='lv_foo',
- lv_path='/fake-path',
- vg_name='vg_foo',
- lv_tags='ceph.type=block,ceph.block_uuid=fake-block-uuid',
- lv_uuid='fake-block-uuid')]
- assert self.lvm_bs.get_osd_device_path(lvs, 'block') == '/fake-path'
-
- @patch('ceph_volume.objectstore.lvmbluestore.encryption_utils.luks_open', MagicMock())
- def test_get_osd_device_path_lv_block_encrypted(self):
- lvs = [Volume(lv_name='lv_foo',
- lv_path='/fake-path',
- vg_name='vg_foo',
- lv_tags='ceph.type=block,ceph.block_uuid=fake-block-uuid,ceph.encrypted=1',
- lv_uuid='fake-block-uuid')]
- assert self.lvm_bs.get_osd_device_path(lvs, 'block') == '/dev/mapper/fake-block-uuid'
-
- def test_get_osd_device_path_lv_db(self):
- lvs = [Volume(lv_name='lv_foo-block',
- lv_path='/fake-block-path',
- vg_name='vg_foo',
- lv_tags='ceph.type=block,ceph.block_uuid=fake-block-uuid,ceph.db_uuid=fake-db-uuid',
- lv_uuid='fake-block-uuid'),
- Volume(lv_name='lv_foo-db',
- lv_path='/fake-db-path',
- vg_name='vg_foo_db',
- lv_tags='ceph.type=db,ceph.block_uuid=fake-block-uuid,ceph.db_uuid=fake-db-uuid',
- lv_uuid='fake-db-uuid')]
- assert self.lvm_bs.get_osd_device_path(lvs, 'db') == '/fake-db-path'
-
- def test_get_osd_device_path_no_device_uuid(self):
- lvs = [Volume(lv_name='lv_foo-block',
- lv_path='/fake-block-path',
- vg_name='vg_foo',
- lv_tags='ceph.type=block,ceph.block_uuid=fake-block-uuid',
- lv_uuid='fake-block-uuid'),
- Volume(lv_name='lv_foo-db',
- lv_path='/fake-db-path',
- vg_name='vg_foo_db',
- lv_tags='ceph.type=db,ceph.block_uuid=fake-block-uuid',
- lv_uuid='fake-db-uuid')]
- assert not self.lvm_bs.get_osd_device_path(lvs, 'db')
-
- @patch('ceph_volume.util.disk.get_device_from_partuuid')
- @patch('ceph_volume.objectstore.lvmbluestore.encryption_utils.luks_open', MagicMock())
- def test_get_osd_device_path_phys_encrypted(self, m_get_device_from_partuuid):
- m_get_device_from_partuuid.return_value = '/dev/sda1'
- lvs = [Volume(lv_name='lv_foo-block',
- lv_path='/fake-block-path',
- vg_name='vg_foo',
- lv_tags='ceph.type=block,ceph.block_uuid=fake-block-uuid,ceph.db_uuid=fake-db-uuid,ceph.osd_id=0,ceph.osd_fsid=abcd,ceph.cluster_name=ceph,ceph.encrypted=1',
- lv_uuid='fake-block-uuid')]
- assert self.lvm_bs.get_osd_device_path(lvs, 'db') == '/dev/mapper/fake-db-uuid'
-
- @patch('ceph_volume.util.disk.get_device_from_partuuid')
- def test_get_osd_device_path_phys(self, m_get_device_from_partuuid):
- m_get_device_from_partuuid.return_value = '/dev/sda1'
- lvs = [Volume(lv_name='lv_foo-block',
- lv_path='/fake-block-path',
- vg_name='vg_foo',
- lv_tags='ceph.type=block,ceph.block_uuid=fake-block-uuid,ceph.db_uuid=fake-db-uuid,ceph.osd_id=0,ceph.osd_fsid=abcd,ceph.cluster_name=ceph',
- lv_uuid='fake-block-uuid')]
- self.lvm_bs.get_osd_device_path(lvs, 'db')
-
- @patch('ceph_volume.util.disk.get_device_from_partuuid')
- def test_get_osd_device_path_phys_raises_exception(self, m_get_device_from_partuuid):
- m_get_device_from_partuuid.return_value = ''
- lvs = [Volume(lv_name='lv_foo-block',
- lv_path='/fake-block-path',
- vg_name='vg_foo',
- lv_tags='ceph.type=block,ceph.block_uuid=fake-block-uuid,ceph.db_uuid=fake-db-uuid,ceph.osd_id=0,ceph.osd_fsid=abcd,ceph.cluster_name=ceph',
- lv_uuid='fake-block-uuid')]
- with pytest.raises(RuntimeError):
- self.lvm_bs.get_osd_device_path(lvs, 'db')
-
- def test__activate_raises_exception(self):
- lvs = [Volume(lv_name='lv_foo-db',
- lv_path='/fake-path',
- vg_name='vg_foo',
- lv_tags='ceph.type=db,ceph.db_uuid=fake-db-uuid',
- lv_uuid='fake-db-uuid')]
- with pytest.raises(RuntimeError) as error:
- self.lvm_bs._activate(lvs)
- assert str(error.value) == 'could not find a bluestore OSD to activate'
-
- @patch('ceph_volume.objectstore.lvmbluestore.encryption_utils.write_lockbox_keyring', MagicMock())
- @patch('ceph_volume.objectstore.lvmbluestore.encryption_utils.get_dmcrypt_key', MagicMock())
- @patch('ceph_volume.objectstore.lvmbluestore.prepare_utils.create_osd_path')
- @patch('ceph_volume.terminal.success')
- @pytest.mark.parametrize("encrypted", ["ceph.encrypted=0", "ceph.encrypted=1"])
- def test__activate(self,
- m_success, m_create_osd_path,
- monkeypatch, fake_run, fake_call, encrypted, conf_ceph_stub, patch_udevdata):
- conf_ceph_stub('[global]\nfsid=asdf-lkjh')
- monkeypatch.setattr(system, 'chown', lambda path: 0)
- monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
- monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda path: False)
- m_create_osd_path.return_value = MagicMock()
- m_success.return_value = MagicMock()
- lvs = [Volume(lv_name='lv_foo-block',
- lv_path='/fake-block-path',
- vg_name='vg_foo',
- lv_tags=f'ceph.type=block,ceph.db_uuid=fake-db-uuid,ceph.block_uuid=fake-block-uuid,ceph.wal_uuid=fake-wal-uuid,ceph.osd_id=0,ceph.osd_fsid=abcd,ceph.cluster_name=ceph,{encrypted},ceph.cephx_lockbox_secret=abcd',
- lv_uuid='fake-block-uuid'),
- Volume(lv_name='lv_foo-db',
- lv_path='/fake-db-path',
- vg_name='vg_foo_db',
- lv_tags=f'ceph.type=db,ceph.db_uuid=fake-db-uuid,ceph.block_uuid=fake-block-uuid,ceph.wal_uuid=fake-wal-uuid,ceph.osd_id=0,ceph.osd_fsid=abcd,ceph.cluster_name=ceph,{encrypted},ceph.cephx_lockbox_secret=abcd',
- lv_uuid='fake-db-uuid'),
- Volume(lv_name='lv_foo-db',
- lv_path='/fake-wal-path',
- vg_name='vg_foo_wal',
- lv_tags=f'ceph.type=wal,ceph.block_uuid=fake-block-uuid,ceph.wal_uuid=fake-wal-uuid,ceph.db_uuid=fake-db-uuid,ceph.osd_id=0,ceph.osd_fsid=abcd,ceph.cluster_name=ceph,{encrypted},ceph.cephx_lockbox_secret=abcd',
- lv_uuid='fake-wal-uuid')]
- self.lvm_bs._activate(lvs)
- if encrypted == "ceph.encrypted=0":
- assert fake_run.calls == [{'args': (['ceph-bluestore-tool', '--cluster=ceph',
- 'prime-osd-dir', '--dev', '/fake-block-path',
- '--path', '/var/lib/ceph/osd/ceph-0', '--no-mon-config'],),
- 'kwargs': {}},
- {'args': (['ln', '-snf', '/fake-block-path',
- '/var/lib/ceph/osd/ceph-0/block'],),
- 'kwargs': {}},
- {'args': (['ln', '-snf', '/fake-db-path',
- '/var/lib/ceph/osd/ceph-0/block.db'],),
- 'kwargs': {}},
- {'args': (['ln', '-snf', '/fake-wal-path',
- '/var/lib/ceph/osd/ceph-0/block.wal'],),
- 'kwargs': {}},
- {'args': (['systemctl', 'enable',
- 'ceph-volume@lvm-0-abcd'],),
- 'kwargs': {}},
- {'args': (['systemctl', 'enable', '--runtime', 'ceph-osd@0'],),
- 'kwargs': {}},
- {'args': (['systemctl', 'start', 'ceph-osd@0'],),
- 'kwargs': {}}]
- else:
- assert fake_run.calls == [{'args': (['ceph-bluestore-tool', '--cluster=ceph',
- 'prime-osd-dir', '--dev', '/dev/mapper/fake-block-uuid',
- '--path', '/var/lib/ceph/osd/ceph-0', '--no-mon-config'],),
- 'kwargs': {}},
- {'args': (['ln', '-snf', '/dev/mapper/fake-block-uuid',
- '/var/lib/ceph/osd/ceph-0/block'],),
- 'kwargs': {}},
- {'args': (['ln', '-snf', '/dev/mapper/fake-db-uuid',
- '/var/lib/ceph/osd/ceph-0/block.db'],),
- 'kwargs': {}},
- {'args': (['ln', '-snf', '/dev/mapper/fake-wal-uuid',
- '/var/lib/ceph/osd/ceph-0/block.wal'],),
- 'kwargs': {}},
- {'args': (['systemctl', 'enable', 'ceph-volume@lvm-0-abcd'],),
- 'kwargs': {}},
- {'args': (['systemctl', 'enable', '--runtime', 'ceph-osd@0'],),
- 'kwargs': {}},
- {'args': (['systemctl', 'start', 'ceph-osd@0'],),
- 'kwargs': {}}]
- assert m_success.mock_calls == [call('ceph-volume lvm activate successful for osd ID: 0')]
-
- @patch('ceph_volume.systemd.systemctl.osd_is_active', return_value=False)
- def test_activate_all(self,
- m_create_key,
- mock_lvm_direct_report,
- is_root,
- factory,
- fake_run):
- args = factory(no_systemd=True)
- self.lvm_bs.args = args
- self.lvm_bs.activate = MagicMock()
- self.lvm_bs.activate_all()
- assert self.lvm_bs.activate.mock_calls == [call(args,
- osd_id='1',
- osd_fsid='824f7edf-371f-4b75-9231-4ab62a32d5c0'),
- call(args,
- osd_id='0',
- osd_fsid='a0e07c5b-bee1-4ea2-ae07-cb89deda9b27')]
-
- @patch('ceph_volume.systemd.systemctl.osd_is_active', return_value=False)
- def test_activate_all_no_osd_found(self,
- m_create_key,
- is_root,
- factory,
- fake_run,
- monkeypatch,
- capsys):
- monkeypatch.setattr('ceph_volume.objectstore.lvmbluestore.direct_report', lambda: {})
- args = factory(no_systemd=True)
- self.lvm_bs.args = args
- self.lvm_bs.activate_all()
- stdout, stderr = capsys.readouterr()
- assert "Was unable to find any OSDs to activate" in stderr
- assert "Verify OSDs are present with" in stderr
-
- @patch('ceph_volume.api.lvm.process.call', Mock(return_value=('', '', 0)))
- @patch('ceph_volume.systemd.systemctl.osd_is_active', return_value=True)
- def test_activate_all_osd_is_active(self,
- mock_lvm_direct_report,
- is_root,
- factory,
- fake_run):
- args = factory(no_systemd=False)
- self.lvm_bs.args = args
- self.lvm_bs.activate = MagicMock()
- self.lvm_bs.activate_all()
- assert self.lvm_bs.activate.mock_calls == []
-
- @patch('ceph_volume.api.lvm.get_lvs')
- def test_activate_osd_id_and_fsid(self,
- m_get_lvs,
- is_root,
- factory):
- args = factory(osd_id='1',
- osd_fsid='824f7edf',
- no_systemd=True)
- lvs = [Volume(lv_name='lv_foo',
- lv_path='/fake-path',
- vg_name='vg_foo',
- lv_tags=f'ceph.osd_id={args.osd_id},ceph.osd_fsid={args.osd_fsid}',
- lv_uuid='fake-uuid')]
- m_get_lvs.return_value = lvs
- self.lvm_bs.args = args
- self.lvm_bs._activate = MagicMock()
- self.lvm_bs.activate()
- assert self.lvm_bs._activate.mock_calls == [call(lvs, True, False)]
- assert m_get_lvs.mock_calls == [call(tags={'ceph.osd_id': '1',
- 'ceph.osd_fsid': '824f7edf'})]
-
- @patch('ceph_volume.api.lvm.get_lvs')
- def test_activate_not_osd_id_and_fsid(self,
- m_get_lvs,
- is_root,
- factory):
- args = factory(no_systemd=True,
- osd_id=None,
- osd_fsid='824f7edf')
- lvs = [Volume(lv_name='lv_foo',
- lv_path='/fake-path',
- vg_name='vg_foo',
- lv_tags='',
- lv_uuid='fake-uuid')]
- m_get_lvs.return_value = lvs
- self.lvm_bs.args = args
- self.lvm_bs._activate = MagicMock()
- self.lvm_bs.activate()
- assert self.lvm_bs._activate.mock_calls == [call(lvs, True, False)]
- assert m_get_lvs.mock_calls == [call(tags={'ceph.osd_fsid': '824f7edf'})]
-
- def test_activate_osd_id_and_not_fsid(self,
- is_root,
- factory):
- args = factory(no_systemd=True,
- osd_id='1',
- osd_fsid=None)
- self.lvm_bs.args = args
- self.lvm_bs._activate = MagicMock()
- with pytest.raises(RuntimeError) as error:
- self.lvm_bs.activate()
- assert str(error.value) == 'could not activate osd.1, please provide the osd_fsid too'
-
- def test_activate_not_osd_id_and_not_fsid(self,
- is_root,
- factory):
- args = factory(no_systemd=True,
- osd_id=None,
- osd_fsid=None)
- self.lvm_bs.args = args
- self.lvm_bs._activate = MagicMock()
- with pytest.raises(RuntimeError) as error:
- self.lvm_bs.activate()
- assert str(error.value) == 'Please provide both osd_id and osd_fsid'
-
- @patch('ceph_volume.api.lvm.get_lvs')
- def test_activate_couldnt_find_osd(self,
- m_get_lvs,
- is_root,
- factory):
- args = factory(osd_id='1',
- osd_fsid='824f7edf',
- no_systemd=True)
- lvs = []
- m_get_lvs.return_value = lvs
- self.lvm_bs.args = args
- self.lvm_bs._activate = MagicMock()
- with pytest.raises(RuntimeError) as error:
- self.lvm_bs.activate()
- assert str(error.value) == 'could not find osd.1 with osd_fsid 824f7edf'
\ No newline at end of file
--- /dev/null
+import pytest
+from unittest.mock import patch, Mock, MagicMock, call
+from ceph_volume.objectstore.raw import Raw
+from ceph_volume.util import system
+
+
+class TestRaw:
+ @patch('ceph_volume.objectstore.raw.prepare_utils.create_key', Mock(return_value=['AQCee6ZkzhOrJRAAZWSvNC3KdXOpC2w8ly4AZQ==']))
+ def setup_method(self, m_create_key):
+ self.raw_bs = Raw([])
+
+ def test_prepare_dmcrypt(self,
+ device_info,
+ fake_call,
+ key_size):
+ self.raw_bs.secrets = {'dmcrypt_key': 'foo'}
+ self.raw_bs.block_device_path = '/dev/foo0'
+ self.raw_bs.db_device_path = '/dev/foo1'
+ self.raw_bs.wal_device_path = '/dev/foo2'
+ lsblk = {"TYPE": "disk",
+ "NAME": "foo0",
+ 'KNAME': 'foo0'}
+ device_info(lsblk=lsblk)
+ self.raw_bs.prepare_dmcrypt()
+ assert self.raw_bs.block_device_path == "/dev/mapper/ceph--foo0-block-dmcrypt"
+ assert self.raw_bs.db_device_path == "/dev/mapper/ceph--foo0-db-dmcrypt"
+ assert self.raw_bs.wal_device_path == "/dev/mapper/ceph--foo0-wal-dmcrypt"
+
+ @patch('ceph_volume.objectstore.raw.Raw.enroll_tpm2', Mock(return_value=MagicMock()))
+ def test_prepare_dmcrypt_with_tpm(self,
+ device_info,
+ fake_call,
+ key_size):
+ self.raw_bs.block_device_path = '/dev/foo0'
+ self.raw_bs.db_device_path = '/dev/foo1'
+ self.raw_bs.wal_device_path = '/dev/foo2'
+ self.raw_bs.with_tpm = 1
+ lsblk = {"TYPE": "disk",
+ "NAME": "foo0",
+ 'KNAME': 'foo0'}
+ device_info(lsblk=lsblk)
+ self.raw_bs.prepare_dmcrypt()
+ assert 'dmcrypt_key' not in self.raw_bs.secrets.keys()
+ assert self.raw_bs.block_device_path == "/dev/mapper/ceph--foo0-block-dmcrypt"
+ assert self.raw_bs.db_device_path == "/dev/mapper/ceph--foo0-db-dmcrypt"
+ assert self.raw_bs.wal_device_path == "/dev/mapper/ceph--foo0-wal-dmcrypt"
+ assert self.raw_bs.enroll_tpm2.mock_calls == [call('/dev/foo0'), call('/dev/foo1'), call('/dev/foo2')]
+
+ @patch('ceph_volume.objectstore.raw.rollback_osd')
+ @patch('ceph_volume.objectstore.raw.Raw.prepare')
+ def test_safe_prepare_raises_exception(self,
+ m_prepare,
+ m_rollback_osd,
+ factory,
+ capsys):
+ m_prepare.side_effect = Exception
+ m_rollback_osd.return_value = MagicMock()
+ args = factory(osd_id='1')
+ self.raw_bs.args = args
+ self.raw_bs.osd_id = self.raw_bs.args.osd_id
+ with pytest.raises(Exception):
+ self.raw_bs.safe_prepare()
+ assert m_rollback_osd.mock_calls == [call('1')]
+
+ @patch('ceph_volume.objectstore.raw.Raw.prepare', MagicMock())
+ def test_safe_prepare(self,
+ factory,
+ capsys):
+ args = factory(dmcrypt=True,
+ data='/dev/foo')
+ self.raw_bs.safe_prepare(args)
+ _, stderr = capsys.readouterr()
+ assert "prepare successful for: /dev/foo" in stderr
+
+ @patch.dict('os.environ', {'CEPH_VOLUME_DMCRYPT_SECRET': 'dmcrypt-key'})
+ @patch('ceph_volume.objectstore.raw.prepare_utils.create_id')
+ @patch('ceph_volume.objectstore.raw.system.generate_uuid')
+ def test_prepare(self, m_generate_uuid, m_create_id, is_root, factory):
+ m_generate_uuid.return_value = 'fake-uuid'
+ m_create_id.return_value = MagicMock()
+ self.raw_bs.prepare_dmcrypt = MagicMock()
+ self.raw_bs.prepare_osd_req = MagicMock()
+ self.raw_bs.osd_mkfs = MagicMock()
+ args = factory(crush_device_class='foo',
+ no_tmpfs=False,
+ block_wal='/dev/foo1',
+ block_db='/dev/foo2',)
+ self.raw_bs.args = args
+ self.raw_bs.secrets = dict()
+ self.raw_bs.encrypted = True
+ self.raw_bs.prepare()
+ assert self.raw_bs.prepare_osd_req.mock_calls == [call(tmpfs=True)]
+ assert self.raw_bs.osd_mkfs.called
+ assert self.raw_bs.prepare_dmcrypt.called
+
+ @patch('ceph_volume.conf.cluster', 'ceph')
+ @patch('ceph_volume.objectstore.raw.prepare_utils.link_wal')
+ @patch('ceph_volume.objectstore.raw.prepare_utils.link_db')
+ @patch('ceph_volume.objectstore.raw.prepare_utils.link_block')
+ @patch('os.path.exists')
+ @patch('os.unlink')
+ @patch('ceph_volume.objectstore.raw.prepare_utils.create_osd_path')
+ @patch('ceph_volume.objectstore.raw.process.run')
+ def test__activate(self,
+ m_run,
+ m_create_osd_path,
+ m_unlink,
+ m_exists,
+ m_link_block,
+ m_link_db,
+ m_link_wal,
+ monkeypatch,
+ factory):
+ args = factory(no_tmpfs=False)
+ self.raw_bs.args = args
+ self.raw_bs.block_device_path = '/dev/sda'
+ self.raw_bs.db_device_path = '/dev/sdb'
+ self.raw_bs.wal_device_path = '/dev/sdc'
+ m_run.return_value = MagicMock()
+ m_exists.side_effect = lambda path: True
+ m_create_osd_path.return_value = MagicMock()
+ m_unlink.return_value = MagicMock()
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ monkeypatch.setattr(system, 'path_is_mounted', lambda path: 0)
+ self.raw_bs._activate('1', True)
+ calls = [call('/var/lib/ceph/osd/ceph-1/block'),
+ call('/var/lib/ceph/osd/ceph-1/block.db'),
+ call('/var/lib/ceph/osd/ceph-1/block.wal')]
+ assert m_run.mock_calls == [call(['ceph-bluestore-tool',
+ 'prime-osd-dir',
+ '--path', '/var/lib/ceph/osd/ceph-1',
+ '--no-mon-config', '--dev', '/dev/sda'])]
+ assert m_unlink.mock_calls == calls
+ assert m_exists.mock_calls == calls
+ assert m_create_osd_path.mock_calls == [call('1', tmpfs=True)]
+
+ def test_activate_raises_exception(self,
+ is_root,
+ mock_raw_direct_report):
+ with pytest.raises(RuntimeError) as error:
+ self.raw_bs.osd_id = '1'
+ self.raw_bs.activate()
+ assert str(error.value) == 'did not find any matching OSD to activate'
+
+ def test_activate_osd_id_and_fsid(self,
+ is_root,
+ mock_raw_direct_report):
+ self.raw_bs._activate = MagicMock()
+ self.raw_bs.osd_id = '8'
+ self.raw_bs.osd_fsid = '824f7edf-371f-4b75-9231-4ab62a32d5c0'
+ self.raw_bs.activate()
+ assert self.raw_bs._activate.mock_calls == [call({'ceph_fsid': '7dccab18-14cf-11ee-837b-5254008f8ca5',
+ 'device': '/dev/mapper/ceph--40bc7bd7--4aee--483e--ba95--89a64bc8a4fd-osd--block--824f7edf--371f--4b75--9231--4ab62a32d5c0',
+ 'device_db': '/dev/mapper/ceph--73d6d4db--6528--48f2--a4e2--1c82bc87a9ac-osd--db--b82d920d--be3c--4e4d--ba64--18f7e8445892',
+ 'osd_id': 8,
+ 'osd_uuid': '824f7edf-371f-4b75-9231-4ab62a32d5c0',
+ 'type': 'bluestore'},
+ tmpfs=True)]
+
+ @patch('ceph_volume.objectstore.raw.encryption_utils.rename_mapper', Mock(return_value=MagicMock()))
+ @patch('ceph_volume.util.disk.get_bluestore_header')
+ @patch('ceph_volume.objectstore.raw.encryption_utils.luks_close', Mock(return_value=MagicMock()))
+ @patch('ceph_volume.objectstore.raw.encryption_utils.luks_open', Mock(return_value=MagicMock()))
+ def test_activate_dmcrypt_tpm(self, m_bs_header, rawbluestore, fake_lsblk_all, mock_raw_direct_report, is_root) -> None:
+ m_bs_header.return_value = {
+ "/dev/mapper/activating-sdb": {
+ "osd_uuid": "db32a338-b640-4cbc-af17-f63808b1c36e",
+ "size": 20000572178432,
+ "btime": "2024-06-13T12:16:57.607442+0000",
+ "description": "main",
+ "bfm_blocks": "4882952192",
+ "bfm_blocks_per_key": "128",
+ "bfm_bytes_per_block": "4096",
+ "bfm_size": "20000572178432",
+ "bluefs": "1",
+ "ceph_fsid": "c301d0aa-288d-11ef-b535-c84bd6975560",
+ "ceph_version_when_created": "ceph version 19.0.0-4242-gf2f7cc60 (f2f7cc609cdbae767486cf2fe6872a4789adffb2) squid (dev)",
+ "created_at": "2024-06-13T12:17:20.122565Z",
+ "elastic_shared_blobs": "1",
+ "kv_backend": "rocksdb",
+ "magic": "ceph osd volume v026",
+ "mkfs_done": "yes",
+ "osd_key": "AQAk42pmt7tqFxAAHlaETFm33yFtEuoQAh/cpQ==",
+ "ready": "ready",
+ "whoami": "0"}
+ }
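+ # Four CephLuks2 mocks: TPM2-enrolled with the matching osd_fsid,
+ # ceph-encrypted but not TPM2-enrolled, not ceph-encrypted at all,
+ # and TPM2-enrolled but with a non-matching osd_fsid.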
+ mock_luks2_1 = Mock()
+ mock_luks2_1.is_ceph_encrypted = True
+ mock_luks2_1.is_tpm2_enrolled = True
+ mock_luks2_1.osd_fsid = 'db32a338-b640-4cbc-af17-f63808b1c36e'
+
+ mock_luks2_2 = Mock()
+ mock_luks2_2.is_ceph_encrypted = True
+ mock_luks2_2.is_tpm2_enrolled = False
+ mock_luks2_2.osd_fsid = 'db32a338-b640-4cbc-af17-f63808b1c36e'
+
+ mock_luks2_3 = Mock()
+ mock_luks2_3.is_ceph_encrypted = False
+ mock_luks2_3.is_tpm2_enrolled = False
+ mock_luks2_3.osd_fsid = ''
+
+ mock_luks2_4 = Mock()
+ mock_luks2_4.is_ceph_encrypted = True
+ mock_luks2_4.is_tpm2_enrolled = True
+ mock_luks2_4.osd_fsid = 'abcd'
+ with patch('ceph_volume.objectstore.raw.encryption_utils.CephLuks2', side_effect=[mock_luks2_1,
+ mock_luks2_2,
+ mock_luks2_3,
+ mock_luks2_4]):
+ fake_lsblk_all([{'NAME': '/dev/sdb', 'FSTYPE': 'crypto_LUKS'},
+ {'NAME': '/dev/sdc', 'FSTYPE': 'crypto_LUKS'},
+ {'NAME': '/dev/sdd', 'FSTYPE': ''}])
+ rawbluestore.osd_fsid = 'db32a338-b640-4cbc-af17-f63808b1c36e'
+ rawbluestore.osd_id = '0'
+ rawbluestore._activate = MagicMock()
+ rawbluestore.activate()
+ assert rawbluestore._activate.mock_calls == [call(0, 'db32a338-b640-4cbc-af17-f63808b1c36e')]
+ assert rawbluestore.block_device_path == '/dev/mapper/ceph-db32a338-b640-4cbc-af17-f63808b1c36e-sdb-block-dmcrypt'
+ assert rawbluestore.db_device_path == '/dev/mapper/ceph-db32a338-b640-4cbc-af17-f63808b1c36e-sdc-db-dmcrypt'
+++ /dev/null
-import pytest
-from unittest.mock import patch, Mock, MagicMock, call
-from ceph_volume.objectstore.rawbluestore import RawBlueStore
-from ceph_volume.util import system
-
-
-class TestRawBlueStore:
- @patch('ceph_volume.objectstore.rawbluestore.prepare_utils.create_key', Mock(return_value=['AQCee6ZkzhOrJRAAZWSvNC3KdXOpC2w8ly4AZQ==']))
- def setup_method(self, m_create_key):
- self.raw_bs = RawBlueStore([])
-
- def test_prepare_dmcrypt(self,
- device_info,
- fake_call,
- key_size):
- self.raw_bs.secrets = {'dmcrypt_key': 'foo'}
- self.raw_bs.block_device_path = '/dev/foo0'
- self.raw_bs.db_device_path = '/dev/foo1'
- self.raw_bs.wal_device_path = '/dev/foo2'
- lsblk = {"TYPE": "disk",
- "NAME": "foo0",
- 'KNAME': 'foo0'}
- device_info(lsblk=lsblk)
- self.raw_bs.prepare_dmcrypt()
- assert self.raw_bs.block_device_path == "/dev/mapper/ceph--foo0-block-dmcrypt"
- assert self.raw_bs.db_device_path == "/dev/mapper/ceph--foo0-db-dmcrypt"
- assert self.raw_bs.wal_device_path == "/dev/mapper/ceph--foo0-wal-dmcrypt"
-
- @patch('ceph_volume.objectstore.rawbluestore.RawBlueStore.enroll_tpm2', Mock(return_value=MagicMock()))
- def test_prepare_dmcrypt_with_tpm(self,
- device_info,
- fake_call,
- key_size):
- self.raw_bs.block_device_path = '/dev/foo0'
- self.raw_bs.db_device_path = '/dev/foo1'
- self.raw_bs.wal_device_path = '/dev/foo2'
- self.raw_bs.with_tpm = 1
- lsblk = {"TYPE": "disk",
- "NAME": "foo0",
- 'KNAME': 'foo0'}
- device_info(lsblk=lsblk)
- self.raw_bs.prepare_dmcrypt()
- assert 'dmcrypt_key' not in self.raw_bs.secrets.keys()
- assert self.raw_bs.block_device_path == "/dev/mapper/ceph--foo0-block-dmcrypt"
- assert self.raw_bs.db_device_path == "/dev/mapper/ceph--foo0-db-dmcrypt"
- assert self.raw_bs.wal_device_path == "/dev/mapper/ceph--foo0-wal-dmcrypt"
- assert self.raw_bs.enroll_tpm2.mock_calls == [call('/dev/foo0'), call('/dev/foo1'), call('/dev/foo2')]
-
- @patch('ceph_volume.objectstore.rawbluestore.rollback_osd')
- @patch('ceph_volume.objectstore.rawbluestore.RawBlueStore.prepare')
- def test_safe_prepare_raises_exception(self,
- m_prepare,
- m_rollback_osd,
- factory,
- capsys):
- m_prepare.side_effect = Exception
- m_rollback_osd.return_value = MagicMock()
- args = factory(osd_id='1')
- self.raw_bs.args = args
- self.raw_bs.osd_id = self.raw_bs.args.osd_id
- with pytest.raises(Exception):
- self.raw_bs.safe_prepare()
- assert m_rollback_osd.mock_calls == [call('1')]
-
- @patch('ceph_volume.objectstore.rawbluestore.RawBlueStore.prepare', MagicMock())
- def test_safe_prepare(self,
- factory,
- capsys):
- args = factory(dmcrypt=True,
- data='/dev/foo')
- self.raw_bs.safe_prepare(args)
- _, stderr = capsys.readouterr()
- assert "prepare successful for: /dev/foo" in stderr
-
- @patch.dict('os.environ', {'CEPH_VOLUME_DMCRYPT_SECRET': 'dmcrypt-key'})
- @patch('ceph_volume.objectstore.rawbluestore.prepare_utils.create_id')
- @patch('ceph_volume.objectstore.rawbluestore.system.generate_uuid')
- def test_prepare(self, m_generate_uuid, m_create_id, is_root, factory):
- m_generate_uuid.return_value = 'fake-uuid'
- m_create_id.return_value = MagicMock()
- self.raw_bs.prepare_dmcrypt = MagicMock()
- self.raw_bs.prepare_osd_req = MagicMock()
- self.raw_bs.osd_mkfs = MagicMock()
- args = factory(crush_device_class='foo',
- no_tmpfs=False,
- block_wal='/dev/foo1',
- block_db='/dev/foo2',)
- self.raw_bs.args = args
- self.raw_bs.secrets = dict()
- self.raw_bs.encrypted = True
- self.raw_bs.prepare()
- assert self.raw_bs.prepare_osd_req.mock_calls == [call(tmpfs=True)]
- assert self.raw_bs.osd_mkfs.called
- assert self.raw_bs.prepare_dmcrypt.called
-
- @patch('ceph_volume.conf.cluster', 'ceph')
- @patch('ceph_volume.objectstore.rawbluestore.prepare_utils.link_wal')
- @patch('ceph_volume.objectstore.rawbluestore.prepare_utils.link_db')
- @patch('ceph_volume.objectstore.rawbluestore.prepare_utils.link_block')
- @patch('os.path.exists')
- @patch('os.unlink')
- @patch('ceph_volume.objectstore.rawbluestore.prepare_utils.create_osd_path')
- @patch('ceph_volume.objectstore.rawbluestore.process.run')
- def test__activate(self,
- m_run,
- m_create_osd_path,
- m_unlink,
- m_exists,
- m_link_block,
- m_link_db,
- m_link_wal,
- monkeypatch,
- factory):
- args = factory(no_tmpfs=False)
- self.raw_bs.args = args
- self.raw_bs.block_device_path = '/dev/sda'
- self.raw_bs.db_device_path = '/dev/sdb'
- self.raw_bs.wal_device_path = '/dev/sdc'
- m_run.return_value = MagicMock()
- m_exists.side_effect = lambda path: True
- m_create_osd_path.return_value = MagicMock()
- m_unlink.return_value = MagicMock()
- monkeypatch.setattr(system, 'chown', lambda path: 0)
- monkeypatch.setattr(system, 'path_is_mounted', lambda path: 0)
- self.raw_bs._activate('1', True)
- calls = [call('/var/lib/ceph/osd/ceph-1/block'),
- call('/var/lib/ceph/osd/ceph-1/block.db'),
- call('/var/lib/ceph/osd/ceph-1/block.wal')]
- assert m_run.mock_calls == [call(['ceph-bluestore-tool',
- 'prime-osd-dir',
- '--path', '/var/lib/ceph/osd/ceph-1',
- '--no-mon-config', '--dev', '/dev/sda'])]
- assert m_unlink.mock_calls == calls
- assert m_exists.mock_calls == calls
- assert m_create_osd_path.mock_calls == [call('1', tmpfs=True)]
-
- def test_activate_raises_exception(self,
- is_root,
- mock_raw_direct_report):
- with pytest.raises(RuntimeError) as error:
- self.raw_bs.osd_id = '1'
- self.raw_bs.activate()
- assert str(error.value) == 'did not find any matching OSD to activate'
-
- def test_activate_osd_id_and_fsid(self,
- is_root,
- mock_raw_direct_report):
- self.raw_bs._activate = MagicMock()
- self.raw_bs.osd_id = '8'
- self.raw_bs.osd_fsid = '824f7edf-371f-4b75-9231-4ab62a32d5c0'
- self.raw_bs.activate()
- self.raw_bs._activate.mock_calls == [call({'ceph_fsid': '7dccab18-14cf-11ee-837b-5254008f8ca5',
- 'device': '/dev/mapper/ceph--40bc7bd7--4aee--483e--ba95--89a64bc8a4fd-osd--block--824f7edf--371f--4b75--9231--4ab62a32d5c0',
- 'device_db': '/dev/mapper/ceph--73d6d4db--6528--48f2--a4e2--1c82bc87a9ac-osd--db--b82d920d--be3c--4e4d--ba64--18f7e8445892',
- 'osd_id': 8,
- 'osd_uuid': '824f7edf-371f-4b75-9231-4ab62a32d5c0',
- 'type': 'bluestore'},
- tmpfs=True)]
-
- @patch('ceph_volume.objectstore.rawbluestore.encryption_utils.rename_mapper', Mock(return_value=MagicMock()))
- @patch('ceph_volume.util.disk.get_bluestore_header')
- @patch('ceph_volume.objectstore.rawbluestore.encryption_utils.luks_close', Mock(return_value=MagicMock()))
- @patch('ceph_volume.objectstore.rawbluestore.encryption_utils.luks_open', Mock(return_value=MagicMock()))
- def test_activate_dmcrypt_tpm(self, m_bs_header, rawbluestore, fake_lsblk_all, mock_raw_direct_report, is_root) -> None:
- m_bs_header.return_value = {
- "/dev/mapper/activating-sdb": {
- "osd_uuid": "db32a338-b640-4cbc-af17-f63808b1c36e",
- "size": 20000572178432,
- "btime": "2024-06-13T12:16:57.607442+0000",
- "description": "main",
- "bfm_blocks": "4882952192",
- "bfm_blocks_per_key": "128",
- "bfm_bytes_per_block": "4096",
- "bfm_size": "20000572178432",
- "bluefs": "1",
- "ceph_fsid": "c301d0aa-288d-11ef-b535-c84bd6975560",
- "ceph_version_when_created": "ceph version 19.0.0-4242-gf2f7cc60 (f2f7cc609cdbae767486cf2fe6872a4789adffb2) squid (dev)",
- "created_at": "2024-06-13T12:17:20.122565Z",
- "elastic_shared_blobs": "1",
- "kv_backend": "rocksdb",
- "magic": "ceph osd volume v026",
- "mkfs_done": "yes",
- "osd_key": "AQAk42pmt7tqFxAAHlaETFm33yFtEuoQAh/cpQ==",
- "ready": "ready",
- "whoami": "0"}
- }
- mock_luks2_1 = Mock()
- mock_luks2_1.is_ceph_encrypted = True
- mock_luks2_1.is_tpm2_enrolled = True
- mock_luks2_1.osd_fsid = 'db32a338-b640-4cbc-af17-f63808b1c36e'
-
- mock_luks2_2 = Mock()
- mock_luks2_2.is_ceph_encrypted = True
- mock_luks2_2.is_tpm2_enrolled = False
- mock_luks2_2.osd_fsid = 'db32a338-b640-4cbc-af17-f63808b1c36e'
-
- mock_luks2_3 = Mock()
- mock_luks2_3.is_ceph_encrypted = False
- mock_luks2_3.is_tpm2_enrolled = False
- mock_luks2_3.osd_fsid = ''
-
- mock_luks2_4 = Mock()
- mock_luks2_4.is_ceph_encrypted = True
- mock_luks2_4.is_tpm2_enrolled = True
- mock_luks2_4.osd_fsid = 'abcd'
- with patch('ceph_volume.objectstore.rawbluestore.encryption_utils.CephLuks2', side_effect=[mock_luks2_1,
- mock_luks2_2,
- mock_luks2_3,
- mock_luks2_4]):
- fake_lsblk_all([{'NAME': '/dev/sdb', 'FSTYPE': 'crypto_LUKS'},
- {'NAME': '/dev/sdc', 'FSTYPE': 'crypto_LUKS'},
- {'NAME': '/dev/sdd', 'FSTYPE': ''}])
- rawbluestore.osd_fsid = 'db32a338-b640-4cbc-af17-f63808b1c36e'
- rawbluestore.osd_id = '0'
- rawbluestore._activate = MagicMock()
- rawbluestore.activate()
- assert rawbluestore._activate.mock_calls == [call(0, 'db32a338-b640-4cbc-af17-f63808b1c36e')]
- assert rawbluestore.block_device_path == '/dev/mapper/ceph-db32a338-b640-4cbc-af17-f63808b1c36e-sdb-block-dmcrypt'
- assert rawbluestore.db_device_path == '/dev/mapper/ceph-db32a338-b640-4cbc-af17-f63808b1c36e-sdc-db-dmcrypt'
from ceph_volume.util.prepare import system
from ceph_volume import conf
from ceph_volume.tests.conftest import Factory
-from ceph_volume import objectstore
+from ceph_volume.objectstore.baseobjectstore import BaseObjectStore
from unittest.mock import patch
assert expected == fake_run.calls[0]['args'][0]
-class TestOsdMkfsBluestore(object):
+class TestOsdMkfs(object):
def setup_method(self):
conf.cluster = 'ceph'
- def test_keyring_is_added(self, fake_call, monkeypatch):
+ def test_keyring_is_added(self, fake_call, monkeypatch, factory):
+ args = factory(dmcrypt=False)
monkeypatch.setattr(system, 'chown', lambda path: True)
- o = objectstore.baseobjectstore.BaseObjectStore([])
+ o = BaseObjectStore(args)
o.osd_id = '1'
o.osd_fsid = 'asdf'
o.osd_mkfs()
def test_keyring_is_not_added(self, fake_call, monkeypatch, factory):
args = factory(dmcrypt=False)
monkeypatch.setattr(system, 'chown', lambda path: True)
- o = objectstore.bluestore.BlueStore([])
- o.args = args
+ o = BaseObjectStore(args)
o.osd_id = '1'
o.osd_fsid = 'asdf'
o.osd_mkfs()
assert '--keyfile' not in fake_call.calls[0]['args'][0]
- def test_wal_is_added(self, fake_call, monkeypatch, objectstore_bluestore, factory):
- args = factory(dmcrypt=False)
+ def test_wal_is_added(self, fake_call, monkeypatch, objectstore, factory):
+ args = factory(objectstore='bluestore',
+ osd_id='1',
+ osd_fsid='asdf',
+ wal_device_path='/dev/smm1',
+ cephx_secret='foo',
+ dmcrypt=False)
monkeypatch.setattr(system, 'chown', lambda path: True)
- bs = objectstore_bluestore(objecstore='bluestore',
- osd_id='1',
- osd_fid='asdf',
- wal_device_path='/dev/smm1',
- cephx_secret='foo',
- dmcrypt=False)
- bs.args = args
- bs.osd_mkfs()
+ o = BaseObjectStore(args)
+ o.wal_device_path = '/dev/smm1'
+ o.osd_mkfs()
assert '--bluestore-block-wal-path' in fake_call.calls[2]['args'][0]
assert '/dev/smm1' in fake_call.calls[2]['args'][0]
def test_db_is_added(self, fake_call, monkeypatch, factory):
args = factory(dmcrypt=False)
monkeypatch.setattr(system, 'chown', lambda path: True)
- bs = objectstore.bluestore.BlueStore([])
- bs.args = args
- bs.db_device_path = '/dev/smm2'
- bs.osd_mkfs()
+ o = BaseObjectStore(args)
+ o.args = args
+ o.db_device_path = '/dev/smm2'
+ o.osd_mkfs()
assert '--bluestore-block-db-path' in fake_call.calls[2]['args'][0]
assert '/dev/smm2' in fake_call.calls[2]['args'][0]
assert ','.join(result) == 'auto,discard,exec,rw'
@patch('ceph_volume.util.prepare.create_key', return_value='fake-secret')
-class TestMkfsBluestore(object):
+class TestMkfs(object):
- def test_non_zero_exit_status(self, m_create_key, stub_call, monkeypatch, objectstore_bluestore):
+ def test_non_zero_exit_status(self, m_create_key, stub_call, monkeypatch, objectstore):
conf.cluster = 'ceph'
monkeypatch.setattr('ceph_volume.util.prepare.system.chown', lambda x: True)
stub_call(([], [], 1))
- bs = objectstore_bluestore(osd_id='1',
- osd_fsid='asdf-1234',
- cephx_secret='keyring')
+ o = objectstore(osd_id='1',
+ osd_fsid='asdf-1234',
+ cephx_secret='keyring')
with pytest.raises(RuntimeError) as error:
- bs.osd_mkfs()
+ o.osd_mkfs()
assert "Command failed with exit code 1" in str(error.value)
- def test_non_zero_exit_formats_command_correctly(self, m_create_key, stub_call, monkeypatch, objectstore_bluestore):
+ def test_non_zero_exit_formats_command_correctly(self, m_create_key, stub_call, monkeypatch, objectstore):
conf.cluster = 'ceph'
monkeypatch.setattr('ceph_volume.util.prepare.system.chown', lambda x: True)
stub_call(([], [], 1))
- bs = objectstore_bluestore(osd_id='1',
- osd_fsid='asdf-1234',
- cephx_secret='keyring')
+ o = objectstore(osd_id='1',
+ objectstore='bluestore',
+ osd_fsid='asdf-1234',
+ cephx_secret='keyring')
with pytest.raises(RuntimeError) as error:
- bs.osd_mkfs()
+ o.osd_mkfs()
expected = ' '.join([
'ceph-osd',
'--cluster',
device_facts[diskname] = metadata
return device_facts
-def has_bluestore_label(device_path):
+def has_bluestore_label(device_path: str) -> bool:
isBluestore = False
bluestoreDiskSignature = 'bluestore block device' # 22 bytes long
return isBluestore
+def has_seastore_label(device_path: str) -> bool:
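+ """
+ Check whether the device at ``device_path`` starts with the seastore
+ on-disk signature. Returns True when the signature matches, False
+ otherwise (including when the path cannot be read).
+ """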
+ is_seastore = False
+ seastore_disk_signature = b'seastore block device\n' # 22 bytes including newline
+
+ try:
+ with open(device_path, "rb") as fd:
+ signature = fd.read(len(seastore_disk_signature))
+ if signature == seastore_disk_signature:
+ is_seastore = True
+ except IsADirectoryError:
+ print(f'{device_path} is a directory, skipping.')
+ except Exception as e:
+ print(f'Error reading {device_path}: {e}')
+
+ return is_seastore
+
def get_lvm_mappers(sys_block_path: str = '/sys/block') -> List[str]:
"""
Retrieve a list of Logical Volume Manager (LVM) device mappers.
if 'rgw_exit_timeout_secs' in config_json:
stop_timeout = config_json['rgw_exit_timeout_secs']
ctr.args = ctr.args + [f'--stop-timeout={stop_timeout}']
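+ # for OSDs, forward the objectstore selected in config_json to the
+ # container via --osd-objectstore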
+ if self.identity.daemon_type == 'osd' and config_json is not None:
+ if 'objectstore' in config_json:
+ objectstore = config_json['objectstore']
+ ctr.args = ctr.args + [f'--osd-objectstore={objectstore}']
return ctr
_uid_gid: Optional[Tuple[int, int]] = None
placement=PlacementSpec(host_pattern=host),
data_devices=DeviceSelection(paths=devices),
unmanaged=False,
- objectstore="bluestore"
+ objectstore=drive_group.objectstore
)
self.log.info(f"Creating OSDs with service ID: {drive_group.service_id} on {host}:{device_list}")
if not is_failed_deploy:
super().post_remove(daemon, is_failed_deploy=is_failed_deploy)
+ def generate_config(self, daemon_spec: CephadmDaemonDeploySpec) -> Tuple[Dict[str, Any], List[str]]:
+ config, parent_deps = super().generate_config(daemon_spec)
+ if daemon_spec.service_name in self.mgr.spec_store:
+ svc_spec = cast(DriveGroupSpec, self.mgr.spec_store[daemon_spec.service_name].spec)
+
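+ # propagate the objectstore requested in the OSD service spec into the
+ # daemon config so cephadm can pick it up from config_json at deploy time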
+ if hasattr(svc_spec, 'objectstore') and svc_spec.objectstore:
+ config['objectstore'] = svc_spec.objectstore
+ return config, parent_deps
+
class OsdIdClaims(object):
"""
_run_cephadm.assert_any_call(
'test', 'osd', 'ceph-volume',
['--config-json', '-', '--', 'lvm', 'batch',
- '--no-auto', '/dev/sdb', '--yes', '--no-systemd'],
+ '--no-auto', '/dev/sdb', '--objectstore', 'bluestore', '--yes', '--no-systemd'],
env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=foo'], error_ok=True,
stdin='{"config": "", "keyring": ""}')
_run_cephadm.assert_any_call(
'test', 'osd', 'ceph-volume',
['--config-json', '-', '--', 'lvm', 'batch',
'--no-auto', '/dev/sdb', '--db-devices', '/dev/sdc',
- '--wal-devices', '/dev/sdd', '--yes', '--no-systemd'],
+ '--wal-devices', '/dev/sdd', '--objectstore', 'bluestore', '--yes', '--no-systemd'],
env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=noncollocated'],
error_ok=True, stdin='{"config": "", "keyring": ""}',
)
"devices, preview, exp_commands",
[
# no preview and only one disk, prepare is used due the hack that is in place.
- (['/dev/sda'], False, ["lvm batch --no-auto /dev/sda --yes --no-systemd"]),
+ (['/dev/sda'], False, ["lvm batch --no-auto /dev/sda --objectstore bluestore --yes --no-systemd"]),
# no preview and multiple disks, uses batch
(['/dev/sda', '/dev/sdb'], False,
- ["CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd"]),
+ ["CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --objectstore bluestore --yes --no-systemd"]),
# preview and only one disk needs to use batch again to generate the preview
- (['/dev/sda'], True, ["lvm batch --no-auto /dev/sda --yes --no-systemd --report --format json"]),
+ (['/dev/sda'], True, ["lvm batch --no-auto /dev/sda --objectstore bluestore --yes --no-systemd --report --format json"]),
# preview and multiple disks work the same
(['/dev/sda', '/dev/sdb'], True,
- ["CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd --report --format json"]),
+ ["CEPH_VOLUME_OSDSPEC_AFFINITY=test.spec lvm batch --no-auto /dev/sda /dev/sdb --objectstore bluestore --yes --no-systemd --report --format json"]),
]
)
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
cmds.append(cmd)
dev_counter += 1
- elif self.spec.objectstore == 'bluestore':
+ elif self.spec.objectstore in ['bluestore', 'seastore']:
# for lvm batch we can just do all devices in one command
cmd = "lvm batch --no-auto {}".format(" ".join(data_devices))
if d != self.NO_CRUSH:
cmd += " --crush-device-class {}".format(d)
+
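+ # make the objectstore explicit in the generated ceph-volume command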
+ cmd += " --objectstore {}".format(self.spec.objectstore)
cmds.append(cmd)
for i in range(len(cmds)):
inventory = _mk_inventory(_mk_device()*2)
sel = drive_selection.DriveSelection(spec, inventory)
cmds = translate.to_ceph_volume(sel, []).run()
- assert all(cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --yes --no-systemd' for cmd in cmds), f'Expected {cmd} in {cmds}'
+ assert all(cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --objectstore bluestore --yes --no-systemd' for cmd in cmds), f'Expected {cmd} in {cmds}'
def test_ceph_volume_command_1():
sel = drive_selection.DriveSelection(spec, inventory)
cmds = translate.to_ceph_volume(sel, []).run()
assert all(cmd == ('lvm batch --no-auto /dev/sda /dev/sdb '
- '--db-devices /dev/sdc /dev/sdd --yes --no-systemd') for cmd in cmds), f'Expected {cmd} in {cmds}'
+ '--db-devices /dev/sdc /dev/sdd --objectstore bluestore --yes --no-systemd') for cmd in cmds), f'Expected {cmd} in {cmds}'
def test_ceph_volume_command_2():
sel = drive_selection.DriveSelection(spec, inventory)
cmds = translate.to_ceph_volume(sel, []).run()
assert all(cmd == ('lvm batch --no-auto /dev/sda /dev/sdb '
- '--db-devices /dev/sdc /dev/sdd --wal-devices /dev/sde /dev/sdf '
+ '--db-devices /dev/sdc /dev/sdd --wal-devices /dev/sde /dev/sdf --objectstore bluestore '
'--yes --no-systemd') for cmd in cmds), f'Expected {cmd} in {cmds}'
cmds = translate.to_ceph_volume(sel, []).run()
assert all(cmd == ('lvm batch --no-auto /dev/sda /dev/sdb '
'--db-devices /dev/sdc /dev/sdd '
- '--wal-devices /dev/sde /dev/sdf --dmcrypt '
+ '--wal-devices /dev/sde /dev/sdf --objectstore bluestore --dmcrypt '
'--yes --no-systemd') for cmd in cmds), f'Expected {cmd} in {cmds}'
cmds = translate.to_ceph_volume(sel, []).run()
assert all(cmd == ('lvm batch --no-auto /dev/sda /dev/sdb '
'--db-devices /dev/sdc /dev/sdd --wal-devices /dev/sde /dev/sdf '
- '--block-wal-size 500M --block-db-size 500M --dmcrypt '
+ '--block-wal-size 500M --block-db-size 500M --objectstore bluestore --dmcrypt '
'--osds-per-device 3 --yes --no-systemd') for cmd in cmds), f'Expected {cmd} in {cmds}'
inventory = _mk_inventory(_mk_device(rotational=True)*2)
sel = drive_selection.DriveSelection(spec, inventory)
cmds = translate.to_ceph_volume(sel, ['0', '1']).run()
- assert all(cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --osd-ids 0 1 --yes --no-systemd' for cmd in cmds), f'Expected {cmd} in {cmds}'
+ assert all(cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --objectstore bluestore --osd-ids 0 1 --yes --no-systemd' for cmd in cmds), f'Expected {cmd} in {cmds}'
def test_ceph_volume_command_8():
)
sel = drive_selection.DriveSelection(spec, inventory)
cmds = translate.to_ceph_volume(sel, []).run()
- assert all(cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --db-devices /dev/sdc --yes --no-systemd' for cmd in cmds), f'Expected {cmd} in {cmds}'
+ assert all(cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --db-devices /dev/sdc --objectstore bluestore --yes --no-systemd' for cmd in cmds), f'Expected {cmd} in {cmds}'
def test_ceph_volume_command_9():
inventory = _mk_inventory(_mk_device()*2)
sel = drive_selection.DriveSelection(spec, inventory)
cmds = translate.to_ceph_volume(sel, []).run()
- assert all(cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --data-allocate-fraction 0.8 --yes --no-systemd' for cmd in cmds), f'Expected {cmd} in {cmds}'
+ assert all(cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --objectstore bluestore --data-allocate-fraction 0.8 --yes --no-systemd' for cmd in cmds), f'Expected {cmd} in {cmds}'
@pytest.mark.parametrize("test_input_base",
drive = drive_selection.DriveSelection(spec, spec.data_devices.paths)
cmds = translate.to_ceph_volume(drive, []).run()
- assert all(cmd == 'lvm batch --no-auto /dev/sda --crush-device-class ssd --yes --no-systemd' for cmd in cmds), f'Expected {cmd} in {cmds}'
+ assert all(cmd == 'lvm batch --no-auto /dev/sda --crush-device-class ssd --objectstore bluestore --yes --no-systemd' for cmd in cmds), f'Expected {cmd} in {cmds}'
@pytest.mark.parametrize("test_input1",
spec.validate()
drive = drive_selection.DriveSelection(spec, spec.data_devices.paths)
cmds = translate.to_ceph_volume(drive, []).run()
-
- assert all(cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --crush-device-class hdd --yes --no-systemd' for cmd in cmds), f'Expected {cmd} in {cmds}'
+ assert all(cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --crush-device-class hdd --objectstore bluestore --yes --no-systemd' for cmd in cmds), f'Expected {cmd} in {cmds}'
@pytest.mark.parametrize("test_input2",
cmds = translate.to_ceph_volume(drive, []).run()
expected_cmds = [
- 'lvm batch --no-auto /dev/sdb --crush-device-class ssd --yes --no-systemd',
- 'lvm batch --no-auto /dev/sda --crush-device-class hdd --yes --no-systemd',
+ 'lvm batch --no-auto /dev/sdb --crush-device-class ssd --objectstore bluestore --yes --no-systemd',
+ 'lvm batch --no-auto /dev/sda --crush-device-class hdd --objectstore bluestore --yes --no-systemd',
]
assert len(cmds) == len(expected_cmds), f"Expected {expected_cmds} got {cmds}"
assert all(cmd in cmds for cmd in expected_cmds), f'Expected {expected_cmds} got {cmds}'
cmds = translate.to_ceph_volume(drive, []).run()
expected_cmds = [
- 'lvm batch --no-auto /dev/sdb --yes --no-systemd',
- 'lvm batch --no-auto /dev/sda --crush-device-class hdd --yes --no-systemd',
+ 'lvm batch --no-auto /dev/sdb --objectstore bluestore --yes --no-systemd',
+ 'lvm batch --no-auto /dev/sda --crush-device-class hdd --objectstore bluestore --yes --no-systemd',
]
assert len(cmds) == len(expected_cmds), f"Expected {expected_cmds} got {cmds}"
assert all(cmd in cmds for cmd in expected_cmds), f'Expected {expected_cmds} got {cmds}'