mgr/cephadm: make setting --cgroups=split configurable 48075/head
author    Adam King <adking@redhat.com>
Tue, 16 Aug 2022 14:43:39 +0000 (10:43 -0400)
committer Adam King <adking@redhat.com>
Tue, 13 Sep 2022 19:01:30 +0000 (15:01 -0400)
Previously, we always set --cgroups=split as long as
the user was running podman with a high enough version,
but a user has run into an issue where their daemons
fail to deploy with

Error: could not find cgroup mount in "/proc/self/cgroup"

despite having a podman with a high enough version.

Fixes: https://tracker.ceph.com/issues/57096
Signed-off-by: Adam King <adking@redhat.com>
(cherry picked from commit c57cb0b236e27a1d475aa8eb397ef7b880024bf5)

Conflicts:
	src/pybind/mgr/cephadm/module.py

src/cephadm/cephadm
src/pybind/mgr/cephadm/module.py
src/pybind/mgr/cephadm/serve.py

diff --git a/src/cephadm/cephadm b/src/cephadm/cephadm
index 9c66f8a409980bbf7c2cad83dcc08f21ba9b7dee..79c8cae9f5ee77796c961b31971e67b7d8357559 100755 (executable)
@@ -3088,7 +3088,7 @@ def get_container(ctx: CephadmContext,
             '--cidfile',
             runtime_dir + '/ceph-%s@%s.%s.service-cid' % (fsid, daemon_type, daemon_id),
         ])
-        if ctx.container_engine.version >= CGROUPS_SPLIT_PODMAN_VERSION:
+        if ctx.container_engine.version >= CGROUPS_SPLIT_PODMAN_VERSION and not ctx.no_cgroups_split:
             container_args.append('--cgroups=split')
 
     return CephContainer.for_daemon(
@@ -8764,6 +8764,11 @@ def _get_parser():
         action='store_true',
         default=not CONTAINER_INIT,
         help='Do not run podman/docker with `--init`')
+    parser.add_argument(
+        '--no-cgroups-split',
+        action='store_true',
+        default=False,
+        help='Do not run containers with --cgroups=split (currently only relevant when using podman)')
 
     subparsers = parser.add_subparsers(help='sub-command')
 
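Taken together, the two hunks above mean --cgroups=split is only passed when the podman version check succeeds and the operator has not opted out. Below is a minimal, self-contained sketch of that gating logic; the version value, build_parser and container_args are simplified stand-ins for illustration, not cephadm's actual code or constants.

    import argparse
    from typing import List, Tuple

    # Placeholder for cephadm's minimum podman version for --cgroups=split.
    CGROUPS_SPLIT_PODMAN_VERSION: Tuple[int, ...] = (2, 1, 0)

    def build_parser() -> argparse.ArgumentParser:
        parser = argparse.ArgumentParser()
        # Mirrors the new flag added in _get_parser(): off by default,
        # so behavior is unchanged unless the operator opts out.
        parser.add_argument(
            '--no-cgroups-split',
            action='store_true',
            default=False,
            help='Do not run containers with --cgroups=split')
        return parser

    def container_args(podman_version: Tuple[int, ...],
                       no_cgroups_split: bool) -> List[str]:
        args: List[str] = []
        # The flag is added only when podman is new enough
        # AND the operator has not disabled it.
        if podman_version >= CGROUPS_SPLIT_PODMAN_VERSION and not no_cgroups_split:
            args.append('--cgroups=split')
        return args

    ctx = build_parser().parse_args(['--no-cgroups-split'])
    print(container_args((4, 2, 0), ctx.no_cgroups_split))  # [] -> flag suppressed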
diff --git a/src/pybind/mgr/cephadm/module.py b/src/pybind/mgr/cephadm/module.py
index b2a48801cc2d779a1faf603718b53de5fed78169..14cf05b76e9f132c6bff81a93a0aa14b4eadf0a1 100644 (file)
@@ -395,6 +395,12 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
             default=10,
             desc='max number of osds that will be drained simultaneously when osds are removed'
         ),
+        Option(
+            'cgroups_split',
+            type='bool',
+            default=True,
+            desc='Pass --cgroups=split when cephadm creates containers (currently podman only)'
+        ),
     ]
 
     def __init__(self, *args: Any, **kwargs: Any):
@@ -465,6 +471,7 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
             self.apply_spec_fails: List[Tuple[str, str]] = []
             self.max_osd_draining_count = 10
             self.device_enhanced_scan = False
+            self.cgroups_split = True
 
         self.notify(NotifyType.mon_map, None)
         self.config_notify()
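On the mgr side, the new entry in MODULE_OPTIONS is mirrored onto an instance attribute, so operators would toggle it through the usual module-option mechanism (e.g. `ceph config set mgr mgr/cephadm/cgroups_split false`). The following is an illustrative sketch of how a declared option typically ends up as an attribute; FakeOrchestrator and its get_module_option stub stand in for the real MgrModule plumbing and are not part of cephadm.

    from typing import Any, Dict, List, Optional

    MODULE_OPTIONS: List[Dict[str, Any]] = [
        {
            'name': 'cgroups_split',
            'type': 'bool',
            'default': True,
            'desc': 'Pass --cgroups=split when cephadm creates containers',
        },
    ]

    class FakeOrchestrator:
        """Stub that mimics how declared options are mirrored onto attributes."""

        def __init__(self, stored_config: Optional[Dict[str, Any]] = None) -> None:
            self._stored = stored_config or {}
            # Hard-coded fallback, as in CephadmOrchestrator.__init__ above.
            self.cgroups_split = True
            self.config_notify()

        def get_module_option(self, name: str) -> Any:
            # Stand-in for MgrModule.get_module_option(): stored value or default.
            default = next(o['default'] for o in MODULE_OPTIONS if o['name'] == name)
            return self._stored.get(name, default)

        def config_notify(self) -> None:
            # Refresh every declared option into a same-named attribute.
            for opt in MODULE_OPTIONS:
                setattr(self, opt['name'], self.get_module_option(opt['name']))

    # e.g. after `ceph config set mgr mgr/cephadm/cgroups_split false`
    mgr = FakeOrchestrator({'cgroups_split': False})
    print(mgr.cgroups_split)  # False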
diff --git a/src/pybind/mgr/cephadm/serve.py b/src/pybind/mgr/cephadm/serve.py
index 6ca94393ed2360b8cc58fb1a5cc4d705700352fe..74215a238b2f244a6f82e446ff5ba2fdd517b351 100644 (file)
@@ -1320,6 +1320,9 @@ class CephadmServe:
         if not self.mgr.container_init:
             final_args += ['--no-container-init']
 
+        if not self.mgr.cgroups_split:
+            final_args += ['--no-cgroups-split']
+
         # subcommand
         final_args.append(command)
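With all three pieces in place, turning the mgr option off propagates down to the flag the cephadm binary understands. A rough sketch of that propagation; final_cephadm_args is an illustrative helper, not the real CephadmServe method.

    from typing import List

    def final_cephadm_args(cgroups_split: bool, command: str) -> List[str]:
        """Illustrative only: mirrors how the opt-out flag is appended above."""
        args: List[str] = []
        if not cgroups_split:
            args += ['--no-cgroups-split']
        args.append(command)
        return args

    print(final_cephadm_args(True, 'deploy'))   # ['deploy']
    print(final_cephadm_args(False, 'deploy'))  # ['--no-cgroups-split', 'deploy']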