'--osd-uuid',
help='OSD UUID to activate'
)
- parser.add_argument(
- '--bluestore',
- action='store_true',
- help='force bluestore objectstore activation',
- )
- parser.add_argument(
- '--seastore',
- action='store_true',
- help='force seastore objectstore activation',
- )
parser.add_argument(
'--no-systemd',
dest='no_systemd',
argparse.Namespace(
osd_id=self.args.osd_id,
osd_fsid=self.args.osd_uuid,
- seastore=self.args.seastore,
- bluestore=self.args.bluestore,
no_tmpfs=self.args.no_tmpfs,
no_systemd=self.args.no_systemd,
)
systemctl.start_osd(osd_id)
terminal.success("ceph-volume lvm activate successful for osd ID: %s" % osd_id)
-def activate_seastore(osd_lvs, no_systemd=False, no_tmpfs=False):
- for lv in osd_lvs:
- if lv.tags.get('ceph.type') == 'block':
- osd_block_lv = lv
- break
- else:
- raise RuntimeError('could not find a seastore OSD to activate')
-
- is_encrypted = osd_block_lv.tags.get('ceph.encrypted', '0') == '1'
- dmcrypt_secret = None
- osd_id = osd_block_lv.tags['ceph.osd_id']
- conf.cluster = osd_block_lv.tags['ceph.cluster_name']
- osd_fsid = osd_block_lv.tags['ceph.osd_fsid']
- configuration.load_ceph_conf_path(osd_block_lv.tags['ceph.cluster_name'])
- configuration.load()
-
- # mount on tmpfs the osd directory
- osd_path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
- if not system.path_is_mounted(osd_path):
- # mkdir -p and mount as tmpfs
- prepare_utils.create_osd_path(osd_id, tmpfs=not no_tmpfs)
- # encryption is handled here, before priming the OSD dir
- if is_encrypted:
- osd_lv_path = '/dev/mapper/%s' % osd_block_lv.lv_uuid
- lockbox_secret = osd_block_lv.tags['ceph.cephx_lockbox_secret']
- encryption_utils.write_lockbox_keyring(osd_id, osd_fsid, lockbox_secret)
- dmcrypt_secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid)
- encryption_utils.luks_open(dmcrypt_secret, osd_block_lv.lv_path, osd_block_lv.lv_uuid)
- else:
- osd_lv_path = osd_block_lv.lv_path
-
- # always re-do the symlink regardless if it exists, so that the block
- # devices that may have changed can be mapped correctly every time
- process.run(['ln', '-snf', osd_lv_path, os.path.join(osd_path, 'block')])
- system.chown(os.path.join(osd_path, 'block'))
- if no_systemd is False:
- # enable the ceph-volume unit for this OSD
- systemctl.enable_volume(osd_id, osd_fsid, 'lvm')
-
- # enable the OSD
- systemctl.enable_osd(osd_id)
-
- # start the OSD
- systemctl.start_osd(osd_id)
- terminal.success("ceph-volume lvm activate successful for osd ID: %s" % osd_id)
class Activate(object):
# explicit filestore/bluestore flags take precedence
if getattr(args, 'bluestore', False):
activate_bluestore(lvs, args.no_systemd, getattr(args, 'no_tmpfs', False))
- elif getattr(args, 'seastore', False):
- activate_seastore(lvs, args.no_systemd, getattr(args, 'no_tmpfs', False))
elif getattr(args, 'filestore', False):
activate_filestore(lvs, args.no_systemd)
elif any('ceph.block_device' in lv.tags for lv in lvs):
action='store_true',
help='force bluestore objectstore activation',
)
- parser.add_argument(
- '--seastore',
- action='store_true',
- help='force seastore objectstore activation',
- )
parser.add_argument(
'--filestore',
action='store_true',
action='store_true',
help='bluestore objectstore (default)',
)
- parser.add_argument(
- '--seastore',
- action='store_true',
- help='seastore objectstore (defualt)',
- )
parser.add_argument(
'--filestore',
action='store_true',
# Default to bluestore here since defaulting it in add_argument may
# cause both to be True
- if not self.args.bluestore and not self.args.filestore and not self.args.seastore:
+ if not self.args.bluestore and not self.args.filestore:
self.args.bluestore = True
if (self.args.auto and not self.args.db_devices and not
defaults = common.get_default_args()
global_args = [
'bluestore',
- 'seastore',
'filestore',
'dmcrypt',
'crush_device_class',
if args.bluestore:
plan = self.get_deployment_layout(args, args.devices, args.db_devices,
args.wal_devices)
- elif args.seastore:
- plan = self.get_deployment_layout(args, args.devices)
elif args.filestore:
plan = self.get_deployment_layout(args, args.devices, args.journal_devices)
return plan
},
}
-seastore_args = {
- '--seastore': {
- 'action': 'store_true',
- 'help': 'Use the seastore objectstore',
- },
-}
-
filestore_args = {
'--filestore': {
'action': 'store_true',
defaults = {}
def format_name(name):
return name.strip('-').replace('-', '_').replace('.', '_')
- for argset in (common_args, filestore_args, bluestore_args, seastore_args):
+ for argset in (common_args, filestore_args, bluestore_args):
defaults.update({format_name(name): val.get('default', None) for name, val in argset.items()})
return defaults
filestore_group = parser.add_argument_group('filestore')
bluestore_group = parser.add_argument_group('bluestore')
- seastore_group = parser.add_argument_group('seastore')
for name, kwargs in common_args.items():
parser.add_argument(name, **kwargs)
for name, kwargs in filestore_args.items():
filestore_group.add_argument(name, **kwargs)
- for name, kwargs in seastore_args.items():
- seastore_group.add_argument(name, **kwargs)
-
# Do not parse args, so that consumers can do something before the args get
# parsed triggering argparse behavior
return parser
if len(self.argv) == 0:
print(sub_command_help)
return
- exclude_group_options(parser, groups=['filestore', 'bluestore', 'seastore'], argv=self.argv)
+ exclude_group_options(parser, groups=['filestore', 'bluestore'], argv=self.argv)
args = parser.parse_args(self.argv)
# Default to bluestore here since defaulting it in add_argument may
# cause both to be True
db=db
)
-def prepare_seastore(block, secrets, tags, osd_id, fsid):
- """
- :param block: The name of the logical volume for the seastore data
- :param secrets: A dict with the secrets needed to create the osd (e.g. cephx)
- :param id_: The OSD id
- :param fsid: The OSD fsid, also known as the OSD UUID
- """
- cephx_secret = secrets.get('cephx_secret', prepare_utils.create_key())
- # encryption-only operations
- if secrets.get('dmcrypt_key'):
- key = secrets['dmcrypt_key']
- block = prepare_dmcrypt(key, block, 'block', tags)
-
- # create the directory
- prepare_utils.create_osd_path(osd_id, tmpfs=True)
- # symlink the block
- prepare_utils.link_block(block, osd_id)
- # get the latest monmap
- prepare_utils.get_monmap(osd_id)
- # write the OSD keyring if it doesn't exist already
- prepare_utils.write_keyring(osd_id, cephx_secret)
- # prepare the osd filesystem
- prepare_utils.osd_mkfs_seastore(
- osd_id, fsid,
- keyring=cephx_secret,
- )
-
class Prepare(object):
self.osd_id,
osd_fsid,
)
- elif self.args.seastore:
- try:
- vg_name, lv_name = self.args.data.split('/')
- block_lv = api.get_single_lv(filters={'lv_name': lv_name,
- 'vg_name': vg_name})
- except ValueError:
- block_lv = None
-
- if not block_lv:
- block_lv = self.prepare_data_device('block', osd_fsid)
-
- tags['ceph.block_device'] = block_lv.lv_path
- tags['ceph.block_uuid'] = block_lv.lv_uuid
- tags['ceph.cephx_lockbox_secret'] = cephx_lockbox_secret
- tags['ceph.encrypted'] = encrypted
- tags['ceph.vdo'] = api.is_vdo(block_lv.lv_path)
- tags['ceph.type'] = 'block'
- block_lv.set_tags(tags)
-
- prepare_seastore(
- block_lv.lv_path,
- secrets,
- tags,
- self.osd_id,
- osd_fsid,
- )
def main(self):
sub_command_help = dedent("""
if len(self.argv) == 0:
print(sub_command_help)
return
- exclude_group_options(parser, argv=self.argv, groups=['filestore', 'bluestore', 'seastore'])
+ exclude_group_options(parser, argv=self.argv, groups=['filestore', 'bluestore'])
self.args = parser.parse_args(self.argv)
# the unfortunate mix of one superset for both filestore and bluestore
# makes this validation cumbersome
raise SystemExit('--journal is required when using --filestore')
# Default to bluestore here since defaulting it in add_argument may
# cause both to be True
- if not self.args.bluestore and not self.args.filestore and not self.args.seastore:
+ if not self.args.bluestore and not self.args.filestore:
self.args.bluestore = True
self.safe_prepare()
raise RuntimeError('Command failed with exit code %s: %s' % (returncode, ' '.join(command)))
-def osd_mkfs_seastore(osd_id, fsid, keyring=None):
- """
- Create the files for the OSD to function. A normal call will look like:
-
- ceph-osd --cluster ceph --mkfs --mkkey -i 0 \
- --monmap /var/lib/ceph/osd/ceph-0/activate.monmap \
- --osd-data /var/lib/ceph/osd/ceph-0 \
- --osd-uuid 8d208665-89ae-4733-8888-5d3bfbeeec6c \
- --keyring /var/lib/ceph/osd/ceph-0/keyring \
- --setuser ceph --setgroup ceph
-
- In some cases it is required to use the keyring, when it is passed in as
- a keyword argument it is used as part of the ceph-osd command
- """
- path = '/var/lib/ceph/osd/%s-%s/' % (conf.cluster, osd_id)
- monmap = os.path.join(path, 'activate.monmap')
-
- system.chown(path)
-
- base_command = [
- 'ceph-osd',
- '--cluster', conf.cluster,
- '--osd-objectstore', 'seastore',
- '--mkfs',
- '--key', "/var/lib/ceph/osd/ceph-%s/keyring" % osd_id,
- '-i', osd_id,
- '--monmap', monmap,
- ]
-
- supplementary_command = [
- '--osd-data', path,
- '--osd-uuid', fsid,
- '--setuser', 'ceph',
- '--setgroup', 'ceph'
- ]
-
- if keyring is not None:
- base_command.extend(['--keyfile', '-'])
-
- if get_osdspec_affinity():
- base_command.extend(['--osdspec-affinity', get_osdspec_affinity()])
-
- command = base_command + supplementary_command
-
- """
- When running in containers the --mkfs on raw device sometimes fails
- to acquire a lock through flock() on the device because systemd-udevd holds one temporarily.
- See KernelDevice.cc and _lock() to understand how ceph-osd acquires the lock.
- Because this is really transient, we retry up to 5 times and wait for 1 sec in-between
- """
- for retry in range(5):
- _, _, returncode = process.call(command, stdin=keyring, terminal_verbose=True, show_command=True)
- if returncode == 0:
- break
- else:
- if returncode == errno.EWOULDBLOCK:
- time.sleep(1)
- logger.info('disk is held by another process, trying to mkfs again... (%s/5 attempt)' % retry)
- continue
- else:
- raise RuntimeError('Command failed with exit code %s: %s' % (returncode, ' '.join(command)))
-
-
def osd_mkfs_filestore(osd_id, fsid, keyring):
"""
Create the files for the OSD to function. A normal call will look like:
'--no-systemd',
]
else:
- osd_type = 'bluestore'
- if 'objectstore' in ctx and ctx.objectstore:
- osd_type = ctx.objectstore
-
cmd = [
'activate',
- '--' + osd_type,
'--osd-id', str(daemon_id),
'--osd-uuid', osd_fsid,
'--no-systemd',
'--meta-json',
help='JSON dict of additional metadata'
)
- parser_deploy.add_argument(
- '--objectstore',
- help='Set object store'
- )
parser_deploy.add_argument(
'--extra-container-args',
action='append',
ports: List[int] = daemon_spec.ports if daemon_spec.ports else []
if daemon_spec.daemon_type == 'container':
- custom_container_spec = cast(CustomContainerSpec,
- self.mgr.spec_store[daemon_spec.service_name].spec)
- image = custom_container_spec.image
- if custom_container_spec.ports:
- ports.extend(custom_container_spec.ports)
+ spec = cast(CustomContainerSpec,
+ self.mgr.spec_store[daemon_spec.service_name].spec)
+ image = spec.image
+ if spec.ports:
+ ports.extend(spec.ports)
# TCP port to open in the host firewall
if len(ports) > 0:
if not osd_uuid:
raise OrchestratorError('osd.%s not in osdmap' % daemon_spec.daemon_id)
daemon_spec.extra_args.extend(['--osd-fsid', osd_uuid])
- if daemon_spec.service_name in self.mgr.spec_store:
- osd_spec = cast(DriveGroupSpec, self.mgr.spec_store[daemon_spec.service_name].spec)
- objectstore = osd_spec.objectstore
- if objectstore:
- daemon_spec.extra_args.extend(['--objectstore', objectstore])
- final_conf = daemon_spec.final_config['config']
- objectstore_str = '\n\tosd_objectstore = ' + objectstore
- index = 0
- if final_conf.find("[osd]") != -1:
- index = final_conf.index("[osd]") + 5
- elif final_conf.find("[global]") != -1:
- index = final_conf.index("[global]") + 8
- if index != 0:
- final_conf = final_conf[:index] + objectstore_str + final_conf[index:]
- daemon_spec.final_config['config'] = final_conf
- else:
- daemon_spec.extra_args.extend(['--objectstore', 'bluestore'])
if reconfig:
daemon_spec.extra_args.append('--reconfig')
['--', 'lvm', 'list', '--format', 'json'], no_fsid=False, error_ok=False, image='', log_output=True),
mock.call(host, f'osd.{osd_id}', 'deploy',
['--name', f'osd.{osd_id}', '--meta-json', mock.ANY,
- '--config-json', '-', '--osd-fsid', 'uuid', '--objectstore', 'bluestore'],
+ '--config-json', '-', '--osd-fsid', 'uuid'],
stdin=mock.ANY, image=''),
mock.call(host, 'osd', 'ceph-volume',
['--', 'raw', 'list', '--format', 'json'], no_fsid=False, error_ok=False, image='', log_output=True),
self.service_id,
"`all` is only allowed for data_devices")
- if self.objectstore not in ['bluestore', 'seastore']:
+    if self.objectstore not in ('bluestore',):
raise DriveGroupValidationError(self.service_id,
f"{self.objectstore} is not supported. Must be "
- f"one of bluestore, seastore")
+                                        f"one of ('bluestore',)")
if self.block_wal_size is not None and type(self.block_wal_size) not in [int, str]:
raise DriveGroupValidationError(
cmd += " --crush-device-class {}".format(d)
cmds.append(cmd)
- elif self.spec.objectstore == 'seastore':
- cmd = "lvm batch --no-auto {}".format(" ".join(data_devices))
- cmd += " --seastore"
- cmds.append(cmd)
-
for i in range(len(cmds)):
if self.spec.encrypted:
cmds[i] += " --dmcrypt"