From: Adam King <adking@redhat.com>
Date: Tue, 16 Aug 2022 14:43:39 +0000 (-0400)
Subject: mgr/cephadm: make setting --cgroups=split configurable
X-Git-Tag: v17.2.6~98^2~23^2
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=a2bbeda437a857c44d58f609cd7ae77da42c89f4;p=ceph.git

mgr/cephadm: make setting --cgroups=split configurable

Previously, we were just always setting this as long as users were
using podman with a high enough version, but it seems a user has run
into an issue where their daemons are failing to deploy with

    Error: could not find cgroup mount in "/proc/self/cgroup"

despite having a podman with a high enough version.

Fixes: https://tracker.ceph.com/issues/57096

Signed-off-by: Adam King <adking@redhat.com>
(cherry picked from commit c57cb0b236e27a1d475aa8eb397ef7b880024bf5)

Conflicts:
	src/pybind/mgr/cephadm/module.py
---

diff --git a/src/cephadm/cephadm b/src/cephadm/cephadm
index 9c66f8a409980..79c8cae9f5ee7 100755
--- a/src/cephadm/cephadm
+++ b/src/cephadm/cephadm
@@ -3088,7 +3088,7 @@ def get_container(ctx: CephadmContext,
             '--cidfile',
             runtime_dir + '/ceph-%s@%s.%s.service-cid' % (fsid, daemon_type, daemon_id),
         ])
-        if ctx.container_engine.version >= CGROUPS_SPLIT_PODMAN_VERSION:
+        if ctx.container_engine.version >= CGROUPS_SPLIT_PODMAN_VERSION and not ctx.no_cgroups_split:
             container_args.append('--cgroups=split')
 
     return CephContainer.for_daemon(
@@ -8764,6 +8764,11 @@ def _get_parser():
         action='store_true',
         default=not CONTAINER_INIT,
         help='Do not run podman/docker with `--init`')
+    parser.add_argument(
+        '--no-cgroups-split',
+        action='store_true',
+        default=False,
+        help='Do not run containers with --cgroups=split (currently only relevant when using podman)')
 
     subparsers = parser.add_subparsers(help='sub-command')
 
diff --git a/src/pybind/mgr/cephadm/module.py b/src/pybind/mgr/cephadm/module.py
index b2a48801cc2d7..14cf05b76e9f1 100644
--- a/src/pybind/mgr/cephadm/module.py
+++ b/src/pybind/mgr/cephadm/module.py
@@ -395,6 +395,12 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
             default=10,
             desc='max number of osds that will be drained simultaneously when osds are removed'
         ),
+        Option(
+            'cgroups_split',
+            type='bool',
+            default=True,
+            desc='Pass --cgroups=split when cephadm creates containers (currently podman only)'
+        ),
     ]
 
     def __init__(self, *args: Any, **kwargs: Any):
@@ -465,6 +471,7 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
         self.apply_spec_fails: List[Tuple[str, str]] = []
         self.max_osd_draining_count = 10
         self.device_enhanced_scan = False
+        self.cgroups_split = True
 
         self.notify(NotifyType.mon_map, None)
         self.config_notify()
diff --git a/src/pybind/mgr/cephadm/serve.py b/src/pybind/mgr/cephadm/serve.py
index 6ca94393ed236..74215a238b2f2 100644
--- a/src/pybind/mgr/cephadm/serve.py
+++ b/src/pybind/mgr/cephadm/serve.py
@@ -1320,6 +1320,9 @@ class CephadmServe:
         if not self.mgr.container_init:
             final_args += ['--no-container-init']
 
+        if not self.mgr.cgroups_split:
+            final_args += ['--no-cgroups-split']
+
         # subcommand
         final_args.append(command)