            daemon_type = ctx.name
            daemon_id = None
    else:
-       daemon_type = 'osd'  # get the most mounts
+       daemon_type = 'shell'  # get limited set of mounts
        daemon_id = None
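        # 'shell' matches none of the privileged daemon types handled in
        # get_container_mounts_for_type() below, so a bare `cephadm shell`
        # now gets only the generic ceph mounts instead of the full OSD set.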
    if ctx.fsid and daemon_type in ceph_daemons():
        lock.acquire()
    (uid, gid) = (0, 0)  # ceph-volume runs as root
-   mounts = get_container_mounts_for_type(ctx, ctx.fsid, 'osd')
+   mounts = get_container_mounts_for_type(ctx, ctx.fsid, 'ceph-volume')
    tmp_config = None
    tmp_keyring = None
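    # Passing 'ceph-volume' selects a dedicated mount profile in
    # get_container_mounts_for_type() rather than reusing the broader
    # 'osd' profile.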
"""
    mounts = dict()
-   if daemon_type in ceph_daemons():
+   if daemon_type in ceph_daemons() or daemon_type in [
+       'ceph-volume',
+       'shell',
+   ]:
        if fsid:
            run_path = os.path.join('/var/run/ceph', fsid)
            if os.path.exists(run_path):
                mounts[run_path] = '/var/run/ceph:z'
            log_dir = os.path.join(ctx.log_dir, fsid)
+           if not os.path.exists(log_dir):
+               os.mkdir(log_dir)
            mounts[log_dir] = '/var/log/ceph:z'
            crash_dir = '/var/lib/ceph/%s/crash' % fsid
            if os.path.exists(crash_dir):
                mounts[crash_dir] = '/var/lib/ceph/crash:z'
        journald_sock_dir = '/run/systemd/journal'
        mounts[journald_sock_dir] = journald_sock_dir
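    # The ':z' suffix asks podman/docker to relabel the bind source for
    # shared SELinux access. The log dir is now created up front (the
    # os.mkdir addition above), presumably so the bind source exists with
    # the expected ownership before the container runtime would otherwise
    # create it itself.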
-   if daemon_type in ['mon', 'osd', 'clusterless-ceph-volume']:
+   if daemon_type in [
+       'mon',
+       'osd',
+       'ceph-volume',
+       'clusterless-ceph-volume',
+   ]:
        mounts['/dev'] = '/dev'  # FIXME: narrow this down?
        mounts['/run/udev'] = '/run/udev'
-   if daemon_type in ['osd', 'clusterless-ceph-volume']:
+   if daemon_type in ['osd', 'ceph-volume', 'clusterless-ceph-volume']:
        mounts['/sys'] = '/sys'  # for numa.cc, pick_address, cgroups, ...
        mounts['/run/lvm'] = '/run/lvm'
        mounts['/run/lock/lvm'] = '/run/lock/lvm'
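    # ceph-volume drives LVM on the host, so it needs the same device and
    # LVM runtime state as an OSD: /dev and /run/udev to enumerate devices,
    # /sys for device metadata, and /run/lvm plus /run/lock/lvm so lvm
    # commands inside the container coordinate with the host's locking.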
-   if daemon_type == 'osd':
+   if daemon_type in ['osd', 'ceph-volume']:
        # selinux-policy in the container may not match the host.
        if HostFacts(ctx).selinux_enabled:
            cluster_dir = f'{ctx.data_dir}/{fsid}'
            selinux_folder = f'{cluster_dir}/selinux'
            if os.path.exists(cluster_dir):
                if not os.path.exists(selinux_folder):
                    os.makedirs(selinux_folder, mode=0o755)
                mounts[selinux_folder] = '/sys/fs/selinux:ro'
            else:
                logger.error(
                    f'Cluster directory {cluster_dir} does not exist.'
                )
+       if daemon_type == 'osd':
            mounts['/'] = '/rootfs'
+       elif daemon_type == 'ceph-volume':
+           mounts['/'] = '/rootfs:rslave'
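
# Each entry in the dict built above maps a host path to a container path,
# optionally suffixed with mount options: ':z' has the runtime relabel the
# source for shared SELinux access, ':ro' is read-only, and ':rslave' sets
# recursive-slave propagation, so mount events on the host become visible
# inside the container but not the other way around. Below is a minimal
# standalone sketch of how such a table becomes container-engine arguments;
# render_volume_args is hypothetical, cephadm's own container wrapper does
# the equivalent when it assembles the podman/docker command line.
from typing import Dict, List


def render_volume_args(mounts: Dict[str, str]) -> List[str]:
    """Turn {host_path: 'container_path[:options]'} into -v flags."""
    args: List[str] = []
    for host_path, container_spec in mounts.items():
        args.extend(['-v', f'{host_path}:{container_spec}'])
    return args


if __name__ == '__main__':
    # Roughly the extra mounts a 'ceph-volume' run picks up under the
    # change above (illustrative values only).
    mounts = {
        '/dev': '/dev',
        '/run/udev': '/run/udev',
        '/sys': '/sys',
        '/run/lvm': '/run/lvm',
        '/run/lock/lvm': '/run/lock/lvm',
        '/': '/rootfs:rslave',
    }
    print(' '.join(render_volume_args(mounts)))
    # -v /dev:/dev -v /run/udev:/run/udev ... -v /:/rootfs:rslave
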
    try:
        if (