            # these do not search for their keyrings in a data directory
            mounts[data_dir + '/keyring'] = '/etc/ceph/ceph.client.%s.%s.keyring' % (daemon_type, daemon_id)
-    if daemon_type in ['mon', 'osd']:
+    if daemon_type in ['mon', 'osd', 'clusterless-ceph-volume']:
        mounts['/dev'] = '/dev'  # FIXME: narrow this down?
        mounts['/run/udev'] = '/run/udev'
-    if daemon_type == 'osd':
+    if daemon_type in ['osd', 'clusterless-ceph-volume']:
        mounts['/sys'] = '/sys'  # for numa.cc, pick_address, cgroups, ...
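+        # ceph-volume needs LVM's run and lock dirs to manage (and zap) volumes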
+        mounts['/run/lvm'] = '/run/lvm'
+        mounts['/run/lock/lvm'] = '/run/lock/lvm'
+    if daemon_type == 'osd':
        # selinux-policy in the container may not match the host.
        if HostFacts(ctx).selinux_enabled:
            selinux_folder = '/var/lib/ceph/%s/selinux' % fsid
            if not os.path.exists(selinux_folder):
                os.makedirs(selinux_folder, mode=0o755)
            mounts[selinux_folder] = '/sys/fs/selinux:ro'
-        mounts['/run/lvm'] = '/run/lvm'
-        mounts['/run/lock/lvm'] = '/run/lock/lvm'
    try:
        if ctx.shared_ceph_folder:  # make easy manager modules/ceph-volume development
##################################
+def _zap(ctx, what):
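+    # type: (CephadmContext, str) -> None
+    # run `ceph-volume lvm zap --destroy` in a one-off container to wipe
+    # the given device or logical volume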
+    mounts = get_container_mounts(ctx, ctx.fsid, 'clusterless-ceph-volume', None)
+    c = CephContainer(
+        ctx,
+        image=ctx.image,
+        entrypoint='/usr/sbin/ceph-volume',
+        envs=ctx.env,
+        args=['lvm', 'zap', '--destroy', what],
+        privileged=True,
+        volume_mounts=mounts,
+    )
+    logger.info(f'Zapping {what}...')
+    out, err, code = call_throws(ctx, c.run_cmd())
+
+
+@infer_image
+def _zap_osds(ctx):
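+    # type: (CephadmContext) -> None
+    # enumerate devices via `ceph-volume inventory`, then zap every device
+    # whose LVs all belong to this cluster's fsid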
+    # assume fsid lock already held
+
+    # list the host's devices and the LVs on them
+    mounts = get_container_mounts(ctx, ctx.fsid, 'clusterless-ceph-volume', None)
+    c = CephContainer(
+        ctx,
+        image=ctx.image,
+        entrypoint='/usr/sbin/ceph-volume',
+        envs=ctx.env,
+        args=['inventory', '--format', 'json'],
+        privileged=True,
+        volume_mounts=mounts,
+    )
+    out, err, code = call_throws(ctx, c.run_cmd())
+    if code:
+        raise Error('failed to list osd inventory')
+    try:
+        ls = json.loads(out)
+    except ValueError as e:
+        raise Error(f'Invalid JSON in ceph-volume inventory: {e}')
+
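+    # each inventory entry is assumed to look roughly like (abridged):
+    #   {"path": "/dev/sdb",
+    #    "lvs": [{"name": "osd-block-...", "cluster_fsid": "<fsid>", ...}],
+    #    ...}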
+    for i in ls:
+        matches = [lv.get('cluster_fsid') == ctx.fsid for lv in i.get('lvs', [])]
+        # only zap a device if *every* LV on it belongs to this cluster
+        if any(matches) and all(matches):
+            _zap(ctx, i.get('path'))
+        elif any(matches):
+            lv_names = [lv['name'] for lv in i.get('lvs', [])]
+            # TODO: we need to map the lv_names back to device paths (the vg
+            # id isn't part of the output here!)
+            logger.warning(f'Not zapping LVs (not implemented): {lv_names}')
+
+
+def command_zap_osds(ctx):
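+    # type: (CephadmContext) -> None
+    # entry point for `cephadm zap-osds --fsid <fsid> --force`; refuses to run
+    # without --force and takes the per-fsid lock before zapping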
+    if not ctx.force:
+        raise Error('must pass --force to proceed: '
+                    'this command may destroy precious data!')
+
+    lock = FileLock(ctx, ctx.fsid)
+    lock.acquire()
+
+    _zap_osds(ctx)
+
+##################################
+
+
def command_rm_cluster(ctx):
    # type: (CephadmContext) -> None
    if not ctx.force:
            if os.path.exists(files[n]):
                os.remove(files[n])
+
##################################
        'command', nargs=argparse.REMAINDER,
        help='command')
+    parser_zap_osds = subparsers.add_parser(
+        'zap-osds', help='zap all OSDs associated with a particular fsid')
+    parser_zap_osds.set_defaults(func=command_zap_osds)
+    parser_zap_osds.add_argument(
+        '--fsid',
+        required=True,
+        help='cluster FSID')
+    parser_zap_osds.add_argument(
+        '--force',
+        action='store_true',
+        help='proceed, even though this may destroy valuable data')
+
    parser_unit = subparsers.add_parser(
        'unit', help="operate on the daemon's systemd unit")
    parser_unit.set_defaults(func=command_unit)