# be so this function will set it after creation using the mapping
# XXX add CEPH_VOLUME_LVM_DEBUG to enable -vvvv on lv operations
type_path_tag = {
- 'journal': 'ceph.journal_device',
'data': 'ceph.data_device',
'block': 'ceph.block_device',
'wal': 'ceph.wal_device',
logger = logging.getLogger(__name__)
-def activate_filestore(osd_lvs, no_systemd=False):
- # find the osd
- for osd_lv in osd_lvs:
- if osd_lv.tags.get('ceph.type') == 'data':
- data_lv = osd_lv
- break
- else:
- raise RuntimeError('Unable to find a data LV for filestore activation')
-
- is_encrypted = data_lv.tags.get('ceph.encrypted', '0') == '1'
- is_vdo = data_lv.tags.get('ceph.vdo', '0')
-
- osd_id = data_lv.tags['ceph.osd_id']
- configuration.load_ceph_conf_path(data_lv.tags['ceph.cluster_name'])
- configuration.load()
- # it may have a volume with a journal
- for osd_lv in osd_lvs:
- if osd_lv.tags.get('ceph.type') == 'journal':
- osd_journal_lv = osd_lv
- break
- else:
- osd_journal_lv = None
-
- # TODO: add sensible error reporting if this is ever the case
- # blow up with a KeyError if this doesn't exist
- osd_fsid = data_lv.tags['ceph.osd_fsid']
- if not osd_journal_lv:
- # must be a disk partition, by querying blkid by the uuid we are ensuring that the
- # device path is always correct
- journal_uuid = data_lv.tags['ceph.journal_uuid']
- osd_journal = disk.get_device_from_partuuid(journal_uuid)
- else:
- journal_uuid = osd_journal_lv.lv_uuid
- osd_journal = data_lv.tags['ceph.journal_device']
-
- if not osd_journal:
- raise RuntimeError('unable to detect an lv or device journal for OSD %s' % osd_id)
-
- # this is done here, so that previous checks that ensure path availability
- # and correctness can still be enforced, and report if any issues are found
- if is_encrypted:
- lockbox_secret = data_lv.tags['ceph.cephx_lockbox_secret']
- # this keyring writing is idempotent
- encryption_utils.write_lockbox_keyring(osd_id, osd_fsid, lockbox_secret)
- dmcrypt_secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid)
- encryption_utils.luks_open(dmcrypt_secret, data_lv.lv_path, data_lv.lv_uuid)
- encryption_utils.luks_open(dmcrypt_secret, osd_journal, journal_uuid)
-
- osd_journal = '/dev/mapper/%s' % journal_uuid
- source = '/dev/mapper/%s' % data_lv.lv_uuid
- else:
- source = data_lv.lv_path
-
- # mount the osd
- destination = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
- if not system.device_is_mounted(source, destination=destination):
- prepare_utils.mount_osd(source, osd_id, is_vdo=is_vdo)
-
- # ensure that the OSD destination is always chowned properly
- system.chown(destination)
-
- # always re-do the symlink regardless if it exists, so that the journal
- # device path that may have changed can be mapped correctly every time
- destination = '/var/lib/ceph/osd/%s-%s/journal' % (conf.cluster, osd_id)
- process.run(['ln', '-snf', osd_journal, destination])
-
- # make sure that the journal has proper permissions
- system.chown(osd_journal)
-
- if no_systemd is False:
- # enable the ceph-volume unit for this OSD
- systemctl.enable_volume(osd_id, osd_fsid, 'lvm')
-
- # enable the OSD
- systemctl.enable_osd(osd_id)
-
- # start the OSD
- systemctl.start_osd(osd_id)
- terminal.success("ceph-volume lvm activate successful for osd ID: %s" % osd_id)
-
def get_osd_device_path(osd_lvs, device_type, dmcrypt_secret=None):
"""
# This argument is only available when passed in directly or via
# systemd, not when ``create`` is being used
+ # placeholder for when support for a new objectstore is added
if getattr(args, 'auto_detect_objectstore', False):
logger.info('auto detecting objectstore')
- # may get multiple lvs, so can't do get_the_lvs() calls here
- for lv in lvs:
- has_journal = lv.tags.get('ceph.journal_uuid')
- if has_journal:
- logger.info('found a journal associated with the OSD, '
- 'assuming filestore')
- return activate_filestore(lvs, args.no_systemd)
-
- logger.info('unable to find a journal associated with the OSD, '
- 'assuming bluestore')
-
return activate_bluestore(lvs, args.no_systemd)
- # explicit filestore/bluestore flags take precedence
+ # explicit 'objectstore' flags take precedence
if getattr(args, 'bluestore', False):
activate_bluestore(lvs, args.no_systemd, getattr(args, 'no_tmpfs', False))
- elif getattr(args, 'filestore', False):
- activate_filestore(lvs, args.no_systemd)
elif any('ceph.block_device' in lv.tags for lv in lvs):
activate_bluestore(lvs, args.no_systemd, getattr(args, 'no_tmpfs', False))
- elif any('ceph.data_device' in lv.tags for lv in lvs):
- activate_filestore(lvs, args.no_systemd)
def main(self):
sub_command_help = dedent("""
action='store_true',
help='force bluestore objectstore activation',
)
- parser.add_argument(
- '--filestore',
- action='store_true',
- help='force filestore objectstore activation',
- )
parser.add_argument(
'--all',
dest='activate_all',
return ''.join(lines)
-def ensure_disjoint_device_lists(data, db=[], wal=[], journal=[]):
+def ensure_disjoint_device_lists(data, db=[], wal=[]):
# check that all device lists are disjoint with each other
if not all([set(data).isdisjoint(set(db)),
set(data).isdisjoint(set(wal)),
- set(data).isdisjoint(set(journal)),
set(db).isdisjoint(set(wal))]):
raise Exception('Device lists are not disjoint')
default=[],
help='Devices to provision OSDs wal volumes',
)
- parser.add_argument(
- '--journal-devices',
- nargs='*',
- type=arg_validators.ValidBatchDevice(),
- default=[],
- help='Devices to provision OSDs journal volumes',
- )
parser.add_argument(
'--auto',
action='store_true',
action='store_true',
help='bluestore objectstore (default)',
)
- parser.add_argument(
- '--filestore',
- action='store_true',
- help='filestore objectstore',
- )
parser.add_argument(
'--report',
action='store_true',
type=int,
help='Provision slots on WAL device, can remain unoccupied'
)
- def journal_size_in_mb_hack(size):
- # TODO give user time to adjust, then remove this
- if size and size[-1].isdigit():
- mlogger.warning('DEPRECATION NOTICE')
- mlogger.warning('--journal-size as integer is parsed as megabytes')
- mlogger.warning('A future release will parse integers as bytes')
- mlogger.warning('Add a "M" to explicitly pass a megabyte size')
- size += 'M'
- return disk.Size.parse(size)
- parser.add_argument(
- '--journal-size',
- type=journal_size_in_mb_hack,
- help='Override the "osd_journal_size" value, in megabytes'
- )
- parser.add_argument(
- '--journal-slots',
- type=int,
- help='Provision slots on journal device, can remain unoccupied'
- )
parser.add_argument(
'--prepare',
action='store_true',
)
self.args = parser.parse_args(argv)
self.parser = parser
- for dev_list in ['', 'db_', 'wal_', 'journal_']:
+ for dev_list in ['', 'db_', 'wal_']:
setattr(self, '{}usable'.format(dev_list), [])
def report(self, plan):
'''
Helper for legacy auto behaviour.
Sorts drives into rotating and non-rotating, the latter being used for
- db or journal.
+ db.
'''
mlogger.warning('DEPRECATION NOTICE')
mlogger.warning('You are using the legacy automatic disk sorting behavior')
# no need for additional sorting, we'll only deploy standalone on ssds
return
self.args.devices = rotating
- if self.args.filestore:
- self.args.journal_devices = ssd
- else:
- self.args.db_devices = ssd
+ self.args.db_devices = ssd
@decorators.needs_root
def main(self):
# Default to bluestore here since defaulting it in add_argument may
# cause both to be True
- if not self.args.bluestore and not self.args.filestore:
+ if not self.args.bluestore:
self.args.bluestore = True
if (self.args.auto and not self.args.db_devices and not
- self.args.wal_devices and not self.args.journal_devices):
+ self.args.wal_devices):
self._sort_rotational_disks()
self._check_slot_args()
ensure_disjoint_device_lists(self.args.devices,
self.args.db_devices,
- self.args.wal_devices,
- self.args.journal_devices)
+ self.args.wal_devices)
plan = self.get_plan(self.args)
defaults = common.get_default_args()
global_args = [
'bluestore',
- 'filestore',
'dmcrypt',
'crush_device_class',
'no_systemd',
if args.bluestore:
plan = self.get_deployment_layout(args, args.devices, args.db_devices,
args.wal_devices)
- elif args.filestore:
- plan = self.get_deployment_layout(args, args.devices, args.journal_devices)
return plan
def get_deployment_layout(self, args, devices, fast_devices=[],
return plan
requested_osds = args.osds_per_device * len(phys_devs) + len(lvm_devs)
- fast_type = 'block_db' if args.bluestore else 'journal'
+ if args.bluestore:
+ fast_type = 'block_db'
fast_allocations = self.fast_allocations(fast_devices,
requested_osds,
num_osds,
},
}
-filestore_args = {
- '--filestore': {
- 'action': 'store_true',
- 'help': 'Use the filestore objectstore',
- },
- '--journal': {
- 'help': 'A logical volume (vg_name/lv_name), or path to a device',
- 'type': arg_validators.ValidDevice(as_string=True),
- },
- '--journal-size': {
- 'help': 'Size of journal LV in case a raw block device was passed in --journal',
- 'default': '0',
- 'type': disk.Size.parse
- },
- '--journal-slots': {
- 'help': ('Intended number of slots on journal device. The new OSD gets one'
- 'of those slots or 1/nth of the available capacity'),
- 'type': int,
- 'default': 1,
- },
-}
def get_default_args():
defaults = {}
def format_name(name):
return name.strip('-').replace('-', '_').replace('.', '_')
- for argset in (common_args, filestore_args, bluestore_args):
+ for argset in (common_args, bluestore_args):
defaults.update({format_name(name): val.get('default', None) for name, val in argset.items()})
return defaults
description=description,
)
- filestore_group = parser.add_argument_group('filestore')
bluestore_group = parser.add_argument_group('bluestore')
for name, kwargs in common_args.items():
for name, kwargs in bluestore_args.items():
bluestore_group.add_argument(name, **kwargs)
- for name, kwargs in filestore_args.items():
- filestore_group.add_argument(name, **kwargs)
-
# Do not parse args, so that consumers can do something before the args get
# parsed triggering argparse behavior
return parser
if len(self.argv) == 0:
print(sub_command_help)
return
- exclude_group_options(parser, groups=['filestore', 'bluestore'], argv=self.argv)
+ exclude_group_options(parser, groups=['bluestore'], argv=self.argv)
args = parser.parse_args(self.argv)
# Default to bluestore here since defaulting it in add_argument may
# cause both to be True
- if not args.bluestore and not args.filestore:
+ if not args.bluestore:
args.bluestore = True
self.create(args)
def prepare_dmcrypt(key, device, device_type, tags):
"""
Helper for devices that are encrypted. The operations needed for
- block, db, wal, or data/journal devices are all the same
+ block, db, wal devices are all the same
"""
if not device:
return ''
return '/dev/mapper/%s' % uuid
-def prepare_filestore(device, journal, secrets, tags, osd_id, fsid):
- """
- :param device: The name of the logical volume to work with
- :param journal: similar to device but can also be a regular/plain disk
- :param secrets: A dict with the secrets needed to create the osd (e.g. cephx)
- :param id_: The OSD id
- :param fsid: The OSD fsid, also known as the OSD UUID
- """
- cephx_secret = secrets.get('cephx_secret', prepare_utils.create_key())
-
- # encryption-only operations
- if secrets.get('dmcrypt_key'):
- # format and open ('decrypt' devices) and re-assign the device and journal
- # variables so that the rest of the process can use the mapper paths
- key = secrets['dmcrypt_key']
- device = prepare_dmcrypt(key, device, 'data', tags)
- journal = prepare_dmcrypt(key, journal, 'journal', tags)
-
- # vdo detection
- is_vdo = api.is_vdo(device)
- # create the directory
- prepare_utils.create_osd_path(osd_id)
- # format the device
- prepare_utils.format_device(device)
- # mount the data device
- prepare_utils.mount_osd(device, osd_id, is_vdo=is_vdo)
- # symlink the journal
- prepare_utils.link_journal(journal, osd_id)
- # get the latest monmap
- prepare_utils.get_monmap(osd_id)
- # prepare the osd filesystem
- prepare_utils.osd_mkfs_filestore(osd_id, fsid, cephx_secret)
- # write the OSD keyring if it doesn't exist already
- prepare_utils.write_keyring(osd_id, cephx_secret)
- if secrets.get('dmcrypt_key'):
- # if the device is going to get activated right away, this can be done
- # here, otherwise it will be recreated
- encryption_utils.write_lockbox_keyring(
- osd_id,
- fsid,
- tags['ceph.cephx_lockbox_secret']
- )
-
-
def prepare_bluestore(block, wal, db, secrets, tags, osd_id, fsid):
"""
:param block: The name of the logical volume for the bluestore data
a device or partition will result in error.
:param arg: The value of ``--data`` when parsing args
- :param device_type: Usually, either ``data`` or ``block`` (filestore vs. bluestore)
+ :param device_type: Usually ``block``
:param osd_uuid: The OSD uuid
"""
device = self.args.data
'ceph.crush_device_class': crush_device_class,
'ceph.osdspec_affinity': prepare_utils.get_osdspec_affinity()
}
- if self.args.filestore:
- if not self.args.journal:
- logger.info(('no journal was specifed, creating journal lv '
- 'on {}').format(self.args.data))
- self.args.journal = self.args.data
- self.args.journal_size = disk.Size(g=5)
- # need to adjust data size/slots for colocated journal
- if self.args.data_size:
- self.args.data_size -= self.args.journal_size
- if self.args.data_slots == 1:
- self.args.data_slots = 0
- else:
- raise RuntimeError('Can\'t handle multiple filestore OSDs '
- 'with colocated journals yet. Please '
- 'create journal LVs manually')
- tags['ceph.cephx_lockbox_secret'] = cephx_lockbox_secret
- tags['ceph.encrypted'] = encrypted
-
- journal_device, journal_uuid, tags = self.setup_device(
- 'journal',
- self.args.journal,
- tags,
- self.args.journal_size,
- self.args.journal_slots)
-
- try:
- vg_name, lv_name = self.args.data.split('/')
- data_lv = api.get_single_lv(filters={'lv_name': lv_name,
- 'vg_name': vg_name})
- except ValueError:
- data_lv = None
-
- if not data_lv:
- data_lv = self.prepare_data_device('data', osd_fsid)
-
- tags['ceph.data_device'] = data_lv.lv_path
- tags['ceph.data_uuid'] = data_lv.lv_uuid
- tags['ceph.vdo'] = api.is_vdo(data_lv.lv_path)
- tags['ceph.type'] = 'data'
- data_lv.set_tags(tags)
- if not journal_device.startswith('/'):
- # we got a journal lv, set rest of the tags
- api.get_single_lv(filters={'lv_name': lv_name,
- 'vg_name': vg_name}).set_tags(tags)
-
- prepare_filestore(
- data_lv.lv_path,
- journal_device,
- secrets,
- tags,
- self.osd_id,
- osd_fsid,
- )
- elif self.args.bluestore:
+ if self.args.bluestore:
try:
vg_name, lv_name = self.args.data.split('/')
block_lv = api.get_single_lv(filters={'lv_name': lv_name,
if len(self.argv) == 0:
print(sub_command_help)
return
- exclude_group_options(parser, argv=self.argv, groups=['filestore', 'bluestore'])
+ exclude_group_options(parser, argv=self.argv, groups=['bluestore'])
self.args = parser.parse_args(self.argv)
- # the unfortunate mix of one superset for both filestore and bluestore
- # makes this validation cumbersome
- if self.args.filestore:
- if not self.args.journal:
- raise SystemExit('--journal is required when using --filestore')
# Default to bluestore here since defaulting it in add_argument may
# cause both to be True
- if not self.args.bluestore and not self.args.filestore:
+ if not self.args.bluestore:
self.args.bluestore = True
self.safe_prepare()
# leaving many journals with osd.1 - usually, only a single LV will be
# returned
- journal_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'journal'}))
db_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'db'}))
wal_lvs = api.get_lvs(tags=merge_dict(lv_tags, {'ceph.type': 'wal'}))
- backing_devices = [(journal_lvs, 'journal'), (db_lvs, 'db'),
+ backing_devices = [(db_lvs, 'db'),
(wal_lvs, 'wal')]
verified_devices = []
def prepare_dmcrypt(key, device, device_type, fsid):
"""
Helper for devices that are encrypted. The operations needed for
- block, db, wal, or data/journal devices are all the same
+ block, db, wal devices are all the same
"""
if not device:
return ''
from ceph_volume import process, decorators, terminal, conf
from ceph_volume.util import system, disk
from ceph_volume.util import encryption as encryption_utils
-from ceph_volume.util import prepare as prepare_utils
from ceph_volume.systemd import systemctl
try:
objectstore = json_config['type']
except KeyError:
- if {'data', 'journal'}.issubset(set(devices)):
- logger.warning(
- '"type" key not found, assuming "filestore" since journal key is present'
- )
- objectstore = 'filestore'
- else:
- logger.warning(
- '"type" key not found, assuming "bluestore" since journal key is not present'
- )
- objectstore = 'bluestore'
+ logger.warning(
+ '"type" key not found, assuming "bluestore" since journal key is not present'
+ )
+ objectstore = 'bluestore'
# Go through all the device combinations that are absolutely required,
# raise an error describing what was expected and what was found
# otherwise.
- if objectstore == 'filestore':
- if {'data', 'journal'}.issubset(set(devices)):
- return True
- else:
- found = [i for i in devices if i in ['data', 'journal']]
- mlogger.error("Required devices (data, and journal) not present for filestore")
- mlogger.error('filestore devices found: %s', found)
- raise RuntimeError('Unable to activate filestore OSD due to missing devices')
- else:
+ if objectstore == 'bluestore':
# This is a bit tricky, with newer bluestore we don't need data, older implementations
# do (e.g. with ceph-disk). ceph-volume just uses a tmpfs that doesn't require data.
if {'block', 'data'}.issubset(set(devices)):
"be skipped, consider cleaning legacy "
"json file {}".format(osd_metadata['fsid'], args.json_config))
- journal_device = self.get_device(osd_metadata.get('journal', {}).get('uuid'))
block_device = self.get_device(osd_metadata.get('block', {}).get('uuid'))
block_db_device = self.get_device(osd_metadata.get('block.db', {}).get('uuid'))
block_wal_device = self.get_device(osd_metadata.get('block.wal', {}).get('uuid'))
if not system.device_is_mounted(data_device, destination=osd_dir):
- if osd_metadata.get('type') == 'filestore':
- prepare_utils.mount_osd(data_device, osd_id)
- else:
- process.run(['mount', '-v', data_device, osd_dir])
+ process.run(['mount', '-v', data_device, osd_dir])
device_map = {
- 'journal': journal_device,
'block': block_device,
'block.db': block_db_device,
'block.wal': block_wal_device
def __init__(self, **kw):
# default flags
self.bluestore = False
- self.filestore = False
self.no_systemd = False
self.auto_detect_objectstore = None
for k, v in kw.items():
# test the negative side effect with an actual functional run, so we must
# setup a perfect scenario for this test to check it can really work
# with/without osd_id
- def test_no_osd_id_matches_fsid(self, is_root, monkeypatch, capture):
- FooVolume = api.Volume(lv_name='foo', lv_path='/dev/vg/foo',
- lv_tags="ceph.osd_fsid=1234")
- volumes = []
- volumes.append(FooVolume)
- monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: volumes)
- monkeypatch.setattr(activate, 'activate_filestore', capture)
- args = Args(osd_id=None, osd_fsid='1234', filestore=True)
- activate.Activate([]).activate(args)
- assert capture.calls[0]['args'][0] == [FooVolume]
-
def test_no_osd_id_matches_fsid_bluestore(self, is_root, monkeypatch, capture):
FooVolume = api.Volume(lv_name='foo', lv_path='/dev/vg/foo',
lv_tags="ceph.osd_fsid=1234")
activate.Activate([]).activate(args)
assert capture.calls[0]['args'][0] == [FooVolume]
- def test_no_osd_id_no_matching_fsid(self, is_root, monkeypatch, capture):
- FooVolume = api.Volume(lv_name='foo', lv_path='/dev/vg/foo',
- lv_tags="ceph.osd_fsid=1111")
- volumes = []
- volumes.append(FooVolume)
- monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: [])
- monkeypatch.setattr(api, 'get_single_lv', lambda **kwargs: [])
- monkeypatch.setattr(activate, 'activate_filestore', capture)
-
- args = Args(osd_id=None, osd_fsid='2222')
- with pytest.raises(RuntimeError):
- activate.Activate([]).activate(args)
-
def test_osd_id_no_osd_fsid(self, is_root):
args = Args(osd_id=42, osd_fsid=None)
with pytest.raises(RuntimeError) as result:
activate.Activate([]).activate(args)
assert result.value.args[0] == 'Please provide both osd_id and osd_fsid'
- def test_filestore_no_systemd(self, is_root, monkeypatch, capture):
- monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
- fake_enable = Capture()
- fake_start_osd = Capture()
- monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True)
- monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
- monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
- monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
- monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
- JournalVolume = api.Volume(
- lv_name='journal',
- lv_path='/dev/vg/journal',
- lv_uuid='000',
- lv_tags=','.join([
- "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal",
- "ceph.journal_uuid=000", "ceph.type=journal",
- "ceph.osd_id=0", "ceph.osd_fsid=1234"])
- )
- DataVolume = api.Volume(
- lv_name='data',
- lv_path='/dev/vg/data',
- lv_uuid='001',
- lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/" + \
- "journal,ceph.journal_uuid=000,ceph.type=data," + \
- "ceph.osd_id=0,ceph.osd_fsid=1234")
- volumes = []
- volumes.append(DataVolume)
- volumes.append(JournalVolume)
- monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
-
- args = Args(osd_id=None, osd_fsid='1234', no_systemd=True, filestore=True)
- activate.Activate([]).activate(args)
- assert fake_enable.calls == []
- assert fake_start_osd.calls == []
-
- def test_filestore_no_systemd_autodetect(self, is_root, monkeypatch, capture):
- monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
- fake_enable = Capture()
- fake_start_osd = Capture()
- monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True)
- monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
- monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
- monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
- monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
- JournalVolume = api.Volume(
- lv_name='journal',
- lv_path='/dev/vg/journal',
- lv_uuid='000',
- lv_tags=','.join([
- "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal",
- "ceph.journal_uuid=000", "ceph.type=journal",
- "ceph.osd_id=0", "ceph.osd_fsid=1234"])
- )
- DataVolume = api.Volume(
- lv_name='data',
- lv_path='/dev/vg/data',
- lv_uuid='001',
- lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/" + \
- "journal,ceph.journal_uuid=000,ceph.type=data," + \
- "ceph.osd_id=0,ceph.osd_fsid=1234")
- volumes = []
- volumes.append(DataVolume)
- volumes.append(JournalVolume)
- monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
-
- args = Args(osd_id=None, osd_fsid='1234', no_systemd=True,
- filestore=True, auto_detect_objectstore=True)
- activate.Activate([]).activate(args)
- assert fake_enable.calls == []
- assert fake_start_osd.calls == []
-
- def test_filestore_systemd_autodetect(self, is_root, monkeypatch, capture):
- fake_enable = Capture()
- fake_start_osd = Capture()
- monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
- monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True)
- monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
- monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
- monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
- monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
- JournalVolume = api.Volume(
- lv_name='journal',
- lv_path='/dev/vg/journal',
- lv_uuid='000',
- lv_tags=','.join([
- "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal",
- "ceph.journal_uuid=000", "ceph.type=journal",
- "ceph.osd_id=0","ceph.osd_fsid=1234"])
- )
- DataVolume = api.Volume(
- lv_name='data',
- lv_path='/dev/vg/data',
- lv_uuid='001',
- lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/" + \
- "journal,ceph.journal_uuid=000,ceph.type=data," + \
- "ceph.osd_id=0,ceph.osd_fsid=1234")
- volumes = []
- volumes.append(DataVolume)
- volumes.append(JournalVolume)
- monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
-
- args = Args(osd_id=None, osd_fsid='1234', no_systemd=False,
- filestore=True, auto_detect_objectstore=False)
- activate.Activate([]).activate(args)
- assert fake_enable.calls != []
- assert fake_start_osd.calls != []
-
- def test_filestore_systemd(self, is_root, monkeypatch, capture):
- fake_enable = Capture()
- fake_start_osd = Capture()
- monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
- monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True)
- monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
- monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
- monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
- monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
- JournalVolume = api.Volume(
- lv_name='journal',
- lv_path='/dev/vg/journal',
- lv_uuid='000',
- lv_tags=','.join([
- "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal",
- "ceph.journal_uuid=000", "ceph.type=journal",
- "ceph.osd_id=0","ceph.osd_fsid=1234"])
- )
- DataVolume = api.Volume(
- lv_name='data',
- lv_path='/dev/vg/data',
- lv_uuid='001',
- lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/" + \
- "journal,ceph.journal_uuid=000,ceph.type=data," + \
- "ceph.osd_id=0,ceph.osd_fsid=1234")
- volumes = []
- volumes.append(DataVolume)
- volumes.append(JournalVolume)
- monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
-
- args = Args(osd_id=None, osd_fsid='1234', no_systemd=False,
- filestore=True)
- activate.Activate([]).activate(args)
- assert fake_enable.calls != []
- assert fake_start_osd.calls != []
-
def test_bluestore_no_systemd(self, is_root, monkeypatch, capture):
monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
fake_enable = Capture()
activation.activate = capture
activation.main()
parsed_args = capture.calls[0]['args'][0]
- assert parsed_args.filestore is False
- assert parsed_args.bluestore is False
-
- def test_uses_filestore(self, capture):
- args = ['--filestore', '0', 'asdf-ljh-asdf']
- activation = activate.Activate(args)
- activation.activate = capture
- activation.main()
- parsed_args = capture.calls[0]['args'][0]
- assert parsed_args.filestore is True
assert parsed_args.bluestore is False
def test_uses_bluestore(self, capture):
activation.activate = capture
activation.main()
parsed_args = capture.calls[0]['args'][0]
- assert parsed_args.filestore is False
assert parsed_args.bluestore is True
devices = [device1, device2, device3]
args = factory(report=True,
devices=devices,
- filestore=False,
)
b = batch.Batch([])
b.args = args
b._sort_rotational_disks()
assert len(b.args.devices) == 3
- @pytest.mark.parametrize('objectstore', ['bluestore', 'filestore'])
+ @pytest.mark.parametrize('objectstore', ['bluestore'])
def test_batch_sort_mixed(self, factory, objectstore):
device1 = factory(used_by_ceph=False, available=True, rotational=1, abspath="/dev/sda")
device2 = factory(used_by_ceph=False, available=True, rotational=1, abspath="/dev/sdb")
devices = [device1, device2, device3]
args = factory(report=True,
devices=devices,
- filestore=False if objectstore == 'bluestore' else True,
)
b = batch.Batch([])
b.args = args
b._sort_rotational_disks()
assert len(b.args.devices) == 2
- if objectstore == 'bluestore':
- assert len(b.args.db_devices) == 1
- else:
- assert len(b.args.journal_devices) == 1
+ assert len(b.args.db_devices) == 1
def test_get_physical_osds_return_len(self, factory,
mock_devices_available,
import pytest
from ceph_volume.devices import lvm
-from mock import patch
class TestCreate(object):
with pytest.raises(SystemExit):
lvm.create.Create(argv=['--help']).main()
stdout, stderr = capsys.readouterr()
- assert 'Use the filestore objectstore' in stdout
assert 'Use the bluestore objectstore' in stdout
assert 'A physical device or logical' in stdout
- @patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
- def test_excludes_filestore_bluestore_flags(self, m_has_bs_label, fake_call, capsys, device_info):
- device_info()
- with pytest.raises(SystemExit):
- lvm.create.Create(argv=['--data', '/dev/sdfoo', '--filestore', '--bluestore']).main()
- stdout, stderr = capsys.readouterr()
- expected = 'Cannot use --filestore (filestore) with --bluestore (bluestore)'
- assert expected in stderr
-
- @patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
- def test_excludes_other_filestore_bluestore_flags(self, m_has_bs_label, fake_call, capsys, device_info):
- device_info()
- with pytest.raises(SystemExit):
- lvm.create.Create(argv=[
- '--bluestore', '--data', '/dev/sdfoo',
- '--journal', '/dev/sf14',
- ]).main()
- stdout, stderr = capsys.readouterr()
- expected = 'Cannot use --bluestore (bluestore) with --journal (filestore)'
- assert expected in stderr
-
- @patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
- def test_excludes_block_and_journal_flags(self, m_has_bs_label, fake_call, capsys, device_info):
- device_info()
- with pytest.raises(SystemExit):
- lvm.create.Create(argv=[
- '--bluestore', '--data', '/dev/sdfoo', '--block.db', 'vg/ceph1',
- '--journal', '/dev/sf14',
- ]).main()
- stdout, stderr = capsys.readouterr()
- expected = 'Cannot use --block.db (bluestore) with --journal (filestore)'
- assert expected in stderr
import pytest
from ceph_volume.devices import lvm
from ceph_volume.api import lvm as api
-from mock.mock import patch, Mock, MagicMock
+from mock.mock import patch, Mock
class TestLVM(object):
with pytest.raises(SystemExit):
lvm.prepare.Prepare(argv=['--help']).main()
stdout, stderr = capsys.readouterr()
- assert 'Use the filestore objectstore' in stdout
assert 'Use the bluestore objectstore' in stdout
assert 'A physical device or logical' in stdout
-
- @patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
- def test_excludes_filestore_bluestore_flags(self, m_has_bs_label, fake_call, capsys, device_info):
- device_info()
- with pytest.raises(SystemExit):
- lvm.prepare.Prepare(argv=['--data', '/dev/sdfoo', '--filestore', '--bluestore']).main()
- stdout, stderr = capsys.readouterr()
- expected = 'Cannot use --filestore (filestore) with --bluestore (bluestore)'
- assert expected in stderr
-
-
- @patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
- def test_excludes_other_filestore_bluestore_flags(self, m_has_bs_label, fake_call, capsys, device_info):
- device_info()
- with pytest.raises(SystemExit):
- lvm.prepare.Prepare(argv=[
- '--bluestore', '--data', '/dev/sdfoo',
- '--journal', '/dev/sf14',
- ]).main()
- stdout, stderr = capsys.readouterr()
- expected = 'Cannot use --bluestore (bluestore) with --journal (filestore)'
- assert expected in stderr
-
- @patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
- def test_excludes_block_and_journal_flags(self, m_has_bs_label, fake_call, capsys, device_info):
- device_info()
- with pytest.raises(SystemExit):
- lvm.prepare.Prepare(argv=[
- '--bluestore', '--data', '/dev/sdfoo', '--block.db', 'vg/ceph1',
- '--journal', '/dev/sf14',
- ]).main()
- stdout, stderr = capsys.readouterr()
- expected = 'Cannot use --block.db (bluestore) with --journal (filestore)'
- assert expected in stderr
-
- @patch('ceph_volume.util.arg_validators.Device')
- @patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
- def test_journal_is_required_with_filestore(self, m_has_bs_label, m_device, is_root, monkeypatch, device_info):
- m_device.return_value = MagicMock(exists=True,
- has_fs=False,
- used_by_ceph=False,
- has_partitions=False,
- has_gpt_headers=False)
- monkeypatch.setattr("os.path.exists", lambda path: True)
- with pytest.raises(SystemExit) as error:
- lvm.prepare.Prepare(argv=['--filestore', '--data', '/dev/sdfoo']).main()
- expected = '--journal is required when using --filestore'
- assert expected in str(error.value)
-
@patch('ceph_volume.devices.lvm.prepare.api.is_ceph_device')
def test_safe_prepare_osd_already_created(self, m_is_ceph_device):
m_is_ceph_device.return_value = True
class TestValidateDevices(object):
- def test_filestore_missing_journal(self):
- activation = activate.Activate([])
- with pytest.raises(RuntimeError) as error:
- activation.validate_devices({'type': 'filestore', 'data': {}})
- assert 'Unable to activate filestore OSD due to missing devices' in str(error.value)
-
- def test_filestore_missing_data(self):
- activation = activate.Activate([])
- with pytest.raises(RuntimeError) as error:
- activation.validate_devices({'type': 'filestore', 'journal': {}})
- assert 'Unable to activate filestore OSD due to missing devices' in str(error.value)
-
- def test_filestore_journal_device_found(self, capsys):
- activation = activate.Activate([])
- with pytest.raises(RuntimeError):
- activation.validate_devices({'type': 'filestore', 'journal': {}})
- stdout, stderr = capsys.readouterr()
- assert "devices found: ['journal']" in stderr
-
- def test_filestore_data_device_found(self, capsys):
- activation = activate.Activate([])
- with pytest.raises(RuntimeError):
- activation.validate_devices({'type': 'filestore', 'data': {}})
- stdout, stderr = capsys.readouterr()
- assert "devices found: ['data']" in stderr
-
- def test_filestore_with_all_devices(self):
- activation = activate.Activate([])
- result = activation.validate_devices({'type': 'filestore', 'journal': {}, 'data': {}})
- assert result is True
-
- def test_filestore_without_type(self):
- activation = activate.Activate([])
- result = activation.validate_devices({'journal': {}, 'data': {}})
- assert result is True
-
def test_bluestore_with_all_devices(self):
activation = activate.Activate([])
result = activation.validate_devices({'type': 'bluestore', 'data': {}, 'block': {}})
[tox]
-envlist = centos8-{bluestore,filestore}-{single_type,single_type_dmcrypt},centos8-{bluestore,filestore}-{mixed_type,mixed_type_dmcrypt,mixed_type_explicit, mixed_type_dmcrypt_explicit}
+envlist = centos8-bluestore-{single_type,single_type_dmcrypt,mixed_type,mixed_type_dmcrypt,mixed_type_explicit,mixed_type_dmcrypt_explicit}
skipsdist = True
[testenv]
CEPH_VOLUME_DEBUG = 1
DEBIAN_FRONTEND=noninteractive
changedir=
- centos8-filestore-single_type: {toxinidir}/centos8/filestore/single-type
- centos8-filestore-single_type_dmcrypt: {toxinidir}/centos8/filestore/single-type-dmcrypt
- centos8-filestore-mixed_type: {toxinidir}/centos8/filestore/mixed-type
- centos8-filestore-mixed_type_dmcrypt: {toxinidir}/centos8/filestore/mixed-type-dmcrypt
- centos8-filestore-mixed_type_explicit: {toxinidir}/centos8/filestore/mixed-type-explicit
- centos8-filestore-mixed_type_dmcrypt_explicit: {toxinidir}/centos8/filestore/mixed-type-dmcrypt-explicit
centos8-bluestore-single_type: {toxinidir}/centos8/bluestore/single-type
centos8-bluestore-single_type_dmcrypt: {toxinidir}/centos8/bluestore/single-type-dmcrypt
centos8-bluestore-mixed_type: {toxinidir}/centos8/bluestore/mixed-type
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
-../../../../../group_vars/filestore_lvm
\ No newline at end of file
+++ /dev/null
-[mons]
-mon0
-
-[osds]
-osd0
-
-[mgrs]
-mon0
+++ /dev/null
-../../../playbooks/setup_partitions.yml
\ No newline at end of file
+++ /dev/null
-../../../playbooks/test_filestore.yml
\ No newline at end of file
+++ /dev/null
-../../../../vagrant_variables.yml
\ No newline at end of file
+++ /dev/null
-../../../../Vagrantfile
\ No newline at end of file
+++ /dev/null
-../../../../../group_vars/filestore_lvm_dmcrypt
\ No newline at end of file
+++ /dev/null
-[mons]
-mon0
-
-[osds]
-osd0
-
-[mgrs]
-mon0
+++ /dev/null
-../../../playbooks/setup_partitions.yml
\ No newline at end of file
+++ /dev/null
-
-- hosts: osds
- become: yes
- tasks:
-
- - name: stop ceph-osd@2 daemon
- service:
- name: ceph-osd@2
- state: stopped
-
- - name: stop ceph-osd@0 daemon
- service:
- name: ceph-osd@0
- state: stopped
-
-
-- hosts: mons
- become: yes
- tasks:
- - name: mark osds down
- command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
- with_items:
- - 0
- - 2
-
- - name: destroy osd.2
- command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
- register: result
- retries: 30
- delay: 1
- until: result is succeeded
-
- - name: destroy osd.0
- command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
- register: result
- retries: 30
- delay: 1
- until: result is succeeded
-
-- hosts: osds
- become: yes
- tasks:
-
- # osd.2 device
- - name: zap /dev/vdd1
- command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: zap /dev/vdd2
- command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd2 --destroy"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- # partitions have been completely removed, so re-create them again
- - name: re-create partition /dev/vdd for lvm data usage
- parted:
- device: /dev/vdd
- number: 1
- part_start: 0%
- part_end: 50%
- unit: '%'
- label: gpt
- state: present
-
- - name: re-create partition /dev/vdd lvm journals
- parted:
- device: /dev/vdd
- number: 2
- part_start: 50%
- part_end: 100%
- unit: '%'
- state: present
- label: gpt
-
- - name: redeploy osd.2 using /dev/vdd1
- command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/vdd1 --journal /dev/vdd2 --osd-id 2"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- # osd.0 lv
- - name: zap test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: zap /dev/vdc1
- command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdc1 --destroy"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: re-create partition /dev/vdc1
- parted:
- device: /dev/vdc
- number: 1
- part_start: 0%
- part_end: 50%
- unit: '%'
- state: present
- label: gpt
-
- - name: prepare osd.0 again using test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/vdc1 --osd-id 0"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: activate all to start the previously prepared osd.0
- command: "ceph-volume lvm activate --filestore --all"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: node inventory
- command: "ceph-volume inventory"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: list all OSDs
- command: "ceph-volume lvm list"
- environment:
- CEPH_VOLUME_DEBUG: 1
+++ /dev/null
-../../../../vagrant_variables.yml
\ No newline at end of file
+++ /dev/null
-
-- hosts: osds
- become: yes
- tasks:
-
- - name: stop ceph-osd@2 daemon
- service:
- name: ceph-osd@2
- state: stopped
-
- - name: stop ceph-osd@0 daemon
- service:
- name: ceph-osd@0
- state: stopped
-
-
-- hosts: mons
- become: yes
- tasks:
- - name: mark osds down
- command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
- with_items:
- - 0
- - 2
-
- - name: destroy osd.2
- command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
- register: result
- retries: 30
- delay: 1
- until: result is succeeded
-
- - name: destroy osd.0
- command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
- register: result
- retries: 30
- delay: 1
- until: result is succeeded
-
-- hosts: osds
- become: yes
- tasks:
-
- # osd.2 device
- - name: zap /dev/vdd1
- command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- # osd.2 journal
- - name: zap /dev/vdd2
- command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd2 --destroy"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- # partitions have been completely removed, so re-create them again
- - name: re-create partition /dev/vdd for lvm data usage
- parted:
- device: /dev/vdd
- number: 1
- part_start: 0%
- part_end: 50%
- unit: '%'
- label: gpt
- state: present
-
- - name: re-create partition /dev/vdd lvm journals
- parted:
- device: /dev/vdd
- number: 2
- part_start: 50%
- part_end: 100%
- unit: '%'
- state: present
- label: gpt
-
- - name: redeploy osd.2 using /dev/vdd1
- command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/vdd1 --journal /dev/vdd2 --osd-id 2"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- # osd.0 data lv
- # note: we don't use --destroy here to test this works without that flag.
- # --destroy is used in the bluestore tests
- - name: zap test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- # osd.0 journal device
- - name: zap /dev/vdc1
- command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy /dev/vdc1"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: re-create partition /dev/vdc1
- parted:
- device: /dev/vdc
- number: 1
- part_start: 0%
- part_end: 50%
- unit: '%'
- state: present
- label: gpt
-
- - name: prepare osd.0 again using test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/vdc1 --osd-id 0"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: find all OSD paths
- find:
- paths: /var/lib/ceph/osd
- recurse: no
- file_type: directory
- register: osd_paths
-
- # set all OSD paths to root:rootto ensure that the OSD will be able to
- # activate regardless
- - name: mangle permissions to root
- file:
- path: "{{ item.path }}"
- owner: root
- group: root
- recurse: yes
- with_items:
- - "{{ osd_paths.files }}"
-
- - name: stop ceph-osd@2 daemon
- service:
- name: ceph-osd@2
- state: stopped
-
- - name: stop ceph-osd@1 daemon
- service:
- name: ceph-osd@1
- state: stopped
-
- - name: activate all to start the previously prepared osd.0
- command: "ceph-volume lvm activate --filestore --all"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: node inventory
- command: "ceph-volume inventory"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: list all OSDs
- command: "ceph-volume lvm list"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: create temporary directory
- tempfile:
- state: directory
- suffix: sparse
- register: tmpdir
-
- - name: create a 1GB sparse file
- command: fallocate -l 1G {{ tmpdir.path }}/sparse.file
-
- - name: find an empty loop device
- command: losetup -f
- register: losetup_list
-
- - name: setup loop device with sparse file
- command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file
-
- - name: create volume group
- command: vgcreate test_zap {{ losetup_list.stdout }}
- failed_when: false
-
- - name: create logical volume 1
- command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap
- failed_when: false
-
- - name: create logical volume 2
- command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap
- failed_when: false
-
- # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed
- - name: zap test_zap/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: zap test_zap/data-lv2
- command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2"
- environment:
- CEPH_VOLUME_DEBUG: 1
[tox]
-envlist = centos8-{filestore,bluestore}-{create,prepare_activate,dmcrypt}
+envlist = centos8-bluestore-{create,prepare_activate,dmcrypt}
skipsdist = True
[testenv]
DEBIAN_FRONTEND=noninteractive
changedir=
# plain/unencrypted
- centos8-filestore-create: {toxinidir}/centos8/filestore/create
centos8-bluestore-create: {toxinidir}/centos8/bluestore/create
# dmcrypt
- centos8-filestore-dmcrypt: {toxinidir}/centos8/filestore/dmcrypt
centos8-bluestore-dmcrypt: {toxinidir}/centos8/bluestore/dmcrypt
# TODO: these are placeholders for now, eventually we want to
# test the prepare/activate workflow of ceph-volume as well
- centos8-filestore-prepare_activate: {toxinidir}/xenial/filestore/prepare_activate
centos8-bluestore-prepare_activate: {toxinidir}/xenial/bluestore/prepare_activate
commands=
git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch {env:CEPH_ANSIBLE_CLONE:"https://github.com/ceph/ceph-ansible.git"} {envdir}/tmp/ceph-ansible
[tox]
-envlist = centos7-{filestore,bluestore}-{activate,dmcrypt_plain,dmcrypt_luks}
+envlist = centos7-bluestore-{activate,dmcrypt_plain,dmcrypt_luks}
skipsdist = True
[testenv]
CEPH_VOLUME_DEBUG = 1
DEBIAN_FRONTEND=noninteractive
changedir=
- centos7-filestore-activate: {toxinidir}/centos7/filestore/activate
centos7-bluestore-activate: {toxinidir}/centos7/bluestore/activate
centos7-bluestore-dmcrypt_plain: {toxinidir}/centos7/bluestore/dmcrypt-plain
centos7-bluestore-dmcrypt_luks: {toxinidir}/centos7/bluestore/dmcrypt-luks
- centos7-filestore-dmcrypt_plain: {toxinidir}/centos7/filestore/dmcrypt-plain
- centos7-filestore-dmcrypt_luks: {toxinidir}/centos7/filestore/dmcrypt-luks
commands=
git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
assert expected == fake_run.calls[0]['args'][0]
-mkfs_filestore_flags = [
- 'ceph-osd',
- '--cluster',
- '--osd-objectstore', 'filestore',
- '--mkfs',
- '-i',
- '--monmap',
- '--keyfile', '-', # goes through stdin
- '--osd-data',
- '--osd-journal',
- '--osd-uuid',
- '--setuser', 'ceph',
- '--setgroup', 'ceph'
-]
-
-
-class TestOsdMkfsFilestore(object):
-
- @pytest.mark.parametrize('flag', mkfs_filestore_flags)
- def test_keyring_is_used(self, fake_call, monkeypatch, flag):
- monkeypatch.setattr(system, 'chown', lambda path: True)
- prepare.osd_mkfs_filestore(1, 'asdf', keyring='secret')
- assert flag in fake_call.calls[0]['args'][0]
-
-
class TestOsdMkfsBluestore(object):
def test_keyring_is_added(self, fake_call, monkeypatch):
assert ','.join(result) == 'auto,discard,exec,rw'
-class TestMkfsFilestore(object):
-
- def test_non_zero_exit_status(self, stub_call, monkeypatch):
- conf.cluster = 'ceph'
- monkeypatch.setattr('ceph_volume.util.prepare.system.chown', lambda x: True)
- stub_call(([], [], 1))
- with pytest.raises(RuntimeError) as error:
- prepare.osd_mkfs_filestore('1', 'asdf-1234', 'keyring')
- assert "Command failed with exit code 1" in str(error.value)
-
- def test_non_zero_exit_formats_command_correctly(self, stub_call, monkeypatch):
- conf.cluster = 'ceph'
- monkeypatch.setattr('ceph_volume.util.prepare.system.chown', lambda x: True)
- stub_call(([], [], 1))
- with pytest.raises(RuntimeError) as error:
- prepare.osd_mkfs_filestore('1', 'asdf-1234', 'keyring')
- expected = ' '.join([
- 'ceph-osd',
- '--cluster',
- 'ceph',
- '--osd-objectstore', 'filestore', '--mkfs',
- '-i', '1', '--monmap', '/var/lib/ceph/osd/ceph-1/activate.monmap',
- '--keyfile', '-', '--osd-data', '/var/lib/ceph/osd/ceph-1/',
- '--osd-journal', '/var/lib/ceph/osd/ceph-1/journal',
- '--osd-uuid', 'asdf-1234',
- '--setuser', 'ceph', '--setgroup', 'ceph'])
- assert expected in str(error.value)
-
-
class TestMkfsBluestore(object):
def test_non_zero_exit_status(self, stub_call, monkeypatch):
'--osd-uuid', 'asdf-1234',
'--setuser', 'ceph', '--setgroup', 'ceph'])
assert expected in str(error.value)
-
-
-class TestGetJournalSize(object):
-
- def test_undefined_size_fallbacks_formatted(self, conf_ceph_stub):
- conf_ceph_stub(dedent("""
- [global]
- fsid = a25d19a6-7d57-4eda-b006-78e35d2c4d9f
- """))
- result = prepare.get_journal_size()
- assert result == '5G'
-
- def test_undefined_size_fallbacks_unformatted(self, conf_ceph_stub):
- conf_ceph_stub(dedent("""
- [global]
- fsid = a25d19a6-7d57-4eda-b006-78e35d2c4d9f
- """))
- result = prepare.get_journal_size(lv_format=False)
- assert result.gb.as_int() == 5
-
- def test_defined_size_unformatted(self, conf_ceph_stub):
- conf_ceph_stub(dedent("""
- [global]
- fsid = a25d19a6-7d57-4eda-b006-78e35d2c4d9f
-
- [osd]
- osd journal size = 10240
- """))
- result = prepare.get_journal_size(lv_format=False)
- assert result.gb.as_int() == 10
-
- def test_defined_size_formatted(self, conf_ceph_stub):
- conf_ceph_stub(dedent("""
- [global]
- fsid = a25d19a6-7d57-4eda-b006-78e35d2c4d9f
-
- [osd]
- osd journal size = 10240
- """))
- result = prepare.get_journal_size()
- assert result == '10G'
-
- def test_refuse_tiny_journals(self, conf_ceph_stub):
- conf_ceph_stub(dedent("""
- [global]
- fsid = a25d19a6-7d57-4eda-b006-78e35d2c4d9f
-
- [osd]
- osd journal size = 1024
- """))
- with pytest.raises(RuntimeError) as error:
- prepare.get_journal_size()
- assert 'journal sizes must be larger' in str(error.value)
- assert 'detected: 1024.00 MB' in str(error.value)
# leave the validation to Batch.get_deployment_layout()
# This way the idempotency isn't broken (especially when using --osds-per-device)
for lv in self._device.lvs:
- if lv.tags.get('ceph.type') in ['db', 'wal', 'journal']:
+ if lv.tags.get('ceph.type') in ['db', 'wal']:
return self._device
if self._device.used_by_ceph:
return self._device
def write_lockbox_keyring(osd_id, osd_fsid, secret):
"""
Helper to write the lockbox keyring. This is needed because the bluestore OSD will
- not persist the keyring, and it can't be stored in the data device for filestore because
- at the time this is needed, the device is encrypted.
+ not persist the keyring.
For bluestore: A tmpfs filesystem is mounted, so the path can get written
to, but the files are ephemeral, which requires this file to be created
every time it is activated.
- For filestore: The path for the OSD would exist at this point even if no
- OSD data device is mounted, so the keyring is written to fetch the key, and
- then the data device is mounted on that directory, making the keyring
- "disappear".
"""
if os.path.exists('/var/lib/ceph/osd/%s-%s/lockbox.keyring' % (conf.cluster, osd_id)):
return
system.chown(osd_keyring)
-def get_journal_size(lv_format=True):
- """
- Helper to retrieve the size (defined in megabytes in ceph.conf) to create
- the journal logical volume, it "translates" the string into a float value,
- then converts that into gigabytes, and finally (optionally) it formats it
- back as a string so that it can be used for creating the LV.
-
- :param lv_format: Return a string to be used for ``lv_create``. A 5 GB size
- would result in '5G', otherwise it will return a ``Size`` object.
- """
- conf_journal_size = conf.ceph.get_safe('osd', 'osd_journal_size', '5120')
- logger.debug('osd_journal_size set to %s' % conf_journal_size)
- journal_size = disk.Size(mb=str_to_int(conf_journal_size))
-
- if journal_size < disk.Size(gb=2):
- mlogger.error('Refusing to continue with configured size for journal')
- raise RuntimeError('journal sizes must be larger than 2GB, detected: %s' % journal_size)
- if lv_format:
- return '%sG' % journal_size.gb.as_int()
- return journal_size
-
-
def get_block_db_size(lv_format=True):
"""
Helper to retrieve the size (defined in megabytes in ceph.conf) to create
terminal.error('device %s is used by another osd %s as %s, should be %s'% (device, current_osd_uuid, current_device_type, osd_uuid))
raise SystemExit(1)
-def link_journal(journal_device, osd_id):
- _link_device(journal_device, 'journal', osd_id)
-
def link_block(block_device, osd_id):
_link_device(block_device, 'block', osd_id)
else:
raise RuntimeError('Command failed with exit code %s: %s' % (returncode, ' '.join(command)))
-
-def osd_mkfs_filestore(osd_id, fsid, keyring):
- """
- Create the files for the OSD to function. A normal call will look like:
-
- ceph-osd --cluster ceph --mkfs --mkkey -i 0 \
- --monmap /var/lib/ceph/osd/ceph-0/activate.monmap \
- --osd-data /var/lib/ceph/osd/ceph-0 \
- --osd-journal /var/lib/ceph/osd/ceph-0/journal \
- --osd-uuid 8d208665-89ae-4733-8888-5d3bfbeeec6c \
- --keyring /var/lib/ceph/osd/ceph-0/keyring \
- --setuser ceph --setgroup ceph
-
- """
- path = '/var/lib/ceph/osd/%s-%s/' % (conf.cluster, osd_id)
- monmap = os.path.join(path, 'activate.monmap')
- journal = os.path.join(path, 'journal')
-
- system.chown(journal)
- system.chown(path)
-
- command = [
- 'ceph-osd',
- '--cluster', conf.cluster,
- '--osd-objectstore', 'filestore',
- '--mkfs',
- '-i', osd_id,
- '--monmap', monmap,
- ]
-
- if get_osdspec_affinity():
- command.extend(['--osdspec-affinity', get_osdspec_affinity()])
-
- command.extend([
- '--keyfile', '-',
- '--osd-data', path,
- '--osd-journal', journal,
- '--osd-uuid', fsid,
- '--setuser', 'ceph',
- '--setgroup', 'ceph'
- ])
-
- _, _, returncode = process.call(
- command, stdin=keyring, terminal_verbose=True, show_command=True
- )
- if returncode != 0:
- raise RuntimeError('Command failed with exit code %s: %s' % (returncode, ' '.join(command)))