Previously, we always passed --cgroups=split as long as the user's
podman was of a high enough version, but a user has run into an
issue where their daemons fail to deploy with

Error: could not find cgroup mount in "/proc/self/cgroup"

despite having a new enough podman.
Fixes: https://tracker.ceph.com/issues/57096
Signed-off-by: Adam King <adking@redhat.com>
(cherry picked from commit c57cb0b236e27a1d475aa8eb397ef7b880024bf5)
Conflicts:
src/pybind/mgr/cephadm/module.py
'--cidfile',
runtime_dir + '/ceph-%s@%s.%s.service-cid' % (fsid, daemon_type, daemon_id),
])
- if ctx.container_engine.version >= CGROUPS_SPLIT_PODMAN_VERSION:
+ if ctx.container_engine.version >= CGROUPS_SPLIT_PODMAN_VERSION and not ctx.no_cgroups_split:
container_args.append('--cgroups=split')
return CephContainer.for_daemon(
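For clarity, a minimal standalone sketch of the gating logic in the hunk
above (the version constant's value is assumed for illustration; the real
cephadm code reads the version from the container engine context):

# --cgroups=split is now only appended when podman is new enough AND the
# operator has not opted out via --no-cgroups-split.
CGROUPS_SPLIT_PODMAN_VERSION = (2, 1, 0)  # assumed value, for illustration

def cgroup_args(podman_version, no_cgroups_split):
    args = []
    if podman_version >= CGROUPS_SPLIT_PODMAN_VERSION and not no_cgroups_split:
        args.append('--cgroups=split')
    return args

print(cgroup_args((4, 2, 0), False))  # ['--cgroups=split']
print(cgroup_args((4, 2, 0), True))   # [] -> workaround for the error above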
action='store_true',
default=not CONTAINER_INIT,
help='Do not run podman/docker with `--init`')
+ parser.add_argument(
+ '--no-cgroups-split',
+ action='store_true',
+ default=False,
+ help='Do not run containers with --cgroups=split (currently only relevant when using podman)')
subparsers = parser.add_subparsers(help='sub-command')
default=10,
desc='max number of osds that will be drained simultaneously when osds are removed'
),
+ Option(
+ 'cgroups_split',
+ type='bool',
+ default=True,
+ desc='Pass --cgroups=split when cephadm creates containers (currently podman only)'
+ ),
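Since the option defaults to True, existing deployments keep passing
--cgroups=split unless an operator explicitly disables it (typically via
something like `ceph config set mgr mgr/cephadm/cgroups_split false`).
A hypothetical standalone sketch of the declared-option -> attribute
pattern (not the real ceph Option class or CephadmOrchestrator):

MODULE_OPTIONS = [
    {'name': 'cgroups_split', 'type': 'bool', 'default': True,
     'desc': 'Pass --cgroups=split when cephadm creates containers'},
]

class FakeModule:
    def __init__(self):
        # defaults applied up front; config_notify() later overrides them
        # with whatever the operator has set in the config database
        for opt in MODULE_OPTIONS:
            setattr(self, opt['name'], opt['default'])

print(FakeModule().cgroups_split)  # True -> behavior unchanged by default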
]
def __init__(self, *args: Any, **kwargs: Any):
self.apply_spec_fails: List[Tuple[str, str]] = []
self.max_osd_draining_count = 10
self.device_enhanced_scan = False
+ self.cgroups_split = True
self.notify(NotifyType.mon_map, None)
self.config_notify()
if not self.mgr.container_init:
final_args += ['--no-container-init']
+ if not self.mgr.cgroups_split:
+ final_args += ['--no-cgroups-split']
+
# subcommand
final_args.append(command)
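A rough sketch (function name assumed, not part of this patch) of how the
mgr-side setting ends up as an extra flag on the cephadm invocation built
above:

def extra_flags(container_init: bool, cgroups_split: bool) -> list:
    final_args = []
    if not container_init:
        final_args.append('--no-container-init')
    if not cgroups_split:
        final_args.append('--no-cgroups-split')
    return final_args

# with cgroups_split disabled on the mgr side, cephadm is invoked with
# --no-cgroups-split, which in turn suppresses podman's --cgroups=split
print(extra_flags(True, False))  # ['--no-cgroups-split']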