cephadm: pass `CEPH_VOLUME_SKIP_RESTORECON=yes` (44248/head)
author     Guillaume Abrioux <gabrioux@redhat.com>
           Thu, 25 Nov 2021 10:16:40 +0000 (11:16 +0100)
committer  Guillaume Abrioux <gabrioux@redhat.com>
           Wed, 8 Dec 2021 14:22:49 +0000 (15:22 +0100)
In containerized deployments, ceph-volume should not attempt to call
restorecon.

Fixes: https://tracker.ceph.com/issues/53397
Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit f072f81f9edfca0316d2192680ee603658f9a1fe)
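
For context, this variable is a simple opt-out switch that ceph-volume can
consult before touching SELinux labels. A minimal sketch of the pattern,
using a hypothetical maybe_restorecon helper rather than the real
ceph-volume code:

    import os
    import subprocess

    def maybe_restorecon(path: str) -> None:
        # Inside a container the host SELinux policy is not usable, so the
        # caller opts out by exporting CEPH_VOLUME_SKIP_RESTORECON=yes.
        if os.environ.get('CEPH_VOLUME_SKIP_RESTORECON', '') == 'yes':
            return
        # On a bare-metal host, restore the default SELinux context.
        subprocess.run(['restorecon', '-R', path], check=False)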

src/cephadm/cephadm

index 9fe5fb3763b382935b509e31a04712884b56c338..246b9300bb65473cbb49fb04d8c2b1f52a260f66 100755 (executable)
@@ -2462,6 +2462,33 @@ def get_container_mounts(ctx, fsid, daemon_type, daemon_id,
     return mounts
 
 
+def get_ceph_volume_container(ctx: CephadmContext,
+                              privileged: bool = True,
+                              cname: str = '',
+                              volume_mounts: Dict[str, str] = {},
+                              bind_mounts: Optional[List[List[str]]] = None,
+                              args: List[str] = [],
+                              envs: Optional[List[str]] = None) -> 'CephContainer':
+    if envs is None:
+        envs = []
+    envs.append('CEPH_VOLUME_SKIP_RESTORECON=yes')
+    envs.append('CEPH_VOLUME_DEBUG=1')
+
+    return CephContainer(
+        ctx,
+        image=ctx.image,
+        entrypoint='/usr/sbin/ceph-volume',
+        args=args,
+        volume_mounts=volume_mounts,
+        bind_mounts=bind_mounts,
+        envs=envs,
+        privileged=privileged,
+        cname=cname,
+        memory_request=ctx.memory_request,
+        memory_limit=ctx.memory_limit,
+    )
+
+
 def get_container(ctx: CephadmContext,
                   fsid: str, daemon_type: str, daemon_id: Union[int, str],
                   privileged: bool = False,
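
The new helper centralizes what every ceph-volume invocation previously
repeated: the image, the /usr/sbin/ceph-volume entrypoint, the privileged
flag, the memory settings, and the two CEPH_VOLUME_* environment variables.
A hypothetical call site, for illustration only (the real call sites follow
in the hunks below):

    # Callers now pass only what varies; image, entrypoint, privileged
    # mode and the CEPH_VOLUME_* env vars come from the helper.
    c = get_ceph_volume_container(
        ctx,
        args=['inventory', '--format', 'json'],
        volume_mounts=get_container_mounts(ctx, fsid, 'osd', daemon_id),
    )
    out, err, code = call_throws(ctx, c.run_cmd())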
@@ -2792,21 +2819,40 @@ def deploy_daemon_units(
                     p = os.path.join(data_dir, n)
                     f.write('[ ! -L {p} ] || chown {uid}:{gid} {p}\n'.format(p=p, uid=uid, gid=gid))
             else:
-                prestart = CephContainer(
+                # If this ceph-volume lacks the top-level 'activate'
+                # subcommand, fall back to 'ceph-volume lvm activate'.
+                test_cv = get_ceph_volume_container(
                     ctx,
-                    image=ctx.image,
-                    entrypoint='/usr/sbin/ceph-volume',
-                    args=[
+                    args=['activate', '--bad-option'],
+                    volume_mounts=get_container_mounts(ctx, fsid, daemon_type, daemon_id),
+                    bind_mounts=get_container_binds(ctx, fsid, daemon_type, daemon_id),
+                    cname='ceph-%s-%s.%s-activate-test' % (fsid, daemon_type, daemon_id),
+                )
+                out, err, ret = call(ctx, test_cv.run_cmd(), verbosity=CallVerbosity.SILENT)
+                #  bad: ceph-volume: error: unrecognized arguments: activate --bad-option
+                # good: ceph-volume: error: unrecognized arguments: --bad-option
+                if 'unrecognized arguments: activate' in err:
+                    # older ceph-volume without top-level activate or --no-tmpfs
+                    cmd = [
                         'lvm', 'activate',
                         str(daemon_id), osd_fsid,
-                        '--no-systemd'
-                    ],
-                    privileged=True,
+                        '--no-systemd',
+                    ]
+                else:
+                    cmd = [
+                        'activate',
+                        '--osd-id', str(daemon_id),
+                        '--osd-uuid', osd_fsid,
+                        '--no-systemd',
+                        '--no-tmpfs',
+                    ]
+
+                prestart = get_ceph_volume_container(
+                    ctx,
+                    args=cmd,
                     volume_mounts=get_container_mounts(ctx, fsid, daemon_type, daemon_id),
                     bind_mounts=get_container_binds(ctx, fsid, daemon_type, daemon_id),
                     cname='ceph-%s-%s.%s-activate' % (fsid, daemon_type, daemon_id),
-                    memory_request=ctx.memory_request,
-                    memory_limit=ctx.memory_limit,
                 )
                 _write_container_cmd_to_bash(ctx, f, prestart, 'LVM OSDs use ceph-volume lvm activate')
         elif daemon_type == CephIscsi.daemon_type:
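
The activate-test container above is a capability probe: it runs the new
top-level subcommand with a flag that no release accepts, then tells old and
new ceph-volume apart by the error text, as the bad/good comments show. The
same idea in isolation, as a generic sketch that assumes the parser reports
unknown tokens the way ceph-volume's does:

    import subprocess

    def supports_subcommand(binary: str, subcommand: str) -> bool:
        # An older binary that lacks the subcommand rejects both tokens
        # ("unrecognized arguments: <subcommand> --bad-option"); a newer
        # one that knows the subcommand rejects only the flag.
        proc = subprocess.run([binary, subcommand, '--bad-option'],
                              capture_output=True, text=True)
        return f'unrecognized arguments: {subcommand}' not in proc.stderr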
@@ -2840,15 +2886,12 @@ def deploy_daemon_units(
     with open(data_dir + '/unit.poststop.new', 'w') as f:
         if daemon_type == 'osd':
             assert osd_fsid
-            poststop = CephContainer(
+            poststop = get_ceph_volume_container(
                 ctx,
-                image=ctx.image,
-                entrypoint='/usr/sbin/ceph-volume',
                 args=[
                     'lvm', 'deactivate',
                     str(daemon_id), osd_fsid,
                 ],
-                privileged=True,
                 volume_mounts=get_container_mounts(ctx, fsid, daemon_type, daemon_id),
                 bind_mounts=get_container_binds(ctx, fsid, daemon_type, daemon_id),
                 cname='ceph-%s-%s.%s-deactivate' % (fsid, daemon_type,
@@ -4809,13 +4852,10 @@ def command_ceph_volume(ctx):
         tmp_keyring = write_tmp(keyring, uid, gid)
         mounts[tmp_keyring.name] = '/var/lib/ceph/bootstrap-osd/ceph.keyring:z'
 
-    c = CephContainer(
+    c = get_ceph_volume_container(
         ctx,
-        image=ctx.image,
-        entrypoint='/usr/sbin/ceph-volume',
         envs=ctx.env,
         args=ctx.command,
-        privileged=True,
         volume_mounts=mounts,
     )
 
@@ -5316,12 +5356,9 @@ class AdoptOsd(object):
         # type: () -> Tuple[Optional[str], Optional[str]]
         osd_fsid, osd_type = None, None
 
-        c = CephContainer(
+        c = get_ceph_volume_container(
             self.ctx,
-            image=self.ctx.image,
-            entrypoint='/usr/sbin/ceph-volume',
             args=['lvm', 'list', '--format=json'],
-            privileged=True
         )
         out, err, code = call_throws(self.ctx, c.run_cmd())
         if not code:
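
Per the @@ header this hunk sits in AdoptOsd's fsid/type lookup, which reads
the JSON that 'ceph-volume lvm list --format=json' prints. A hedged sketch of
that parsing, with the tag names (ceph.osd_fsid, ceph.type) assumed from
ceph-volume's output format rather than taken from cephadm:

    import json
    from typing import Optional, Tuple

    def parse_lvm_list(out: str, osd_id: str) -> Tuple[Optional[str], Optional[str]]:
        # The JSON is keyed by OSD id; each value is a list of LVs whose
        # "tags" dict carries the ceph.* metadata.
        for lv in json.loads(out).get(osd_id, []):
            tags = lv.get('tags', {})
            if 'ceph.osd_fsid' in tags:
                return tags['ceph.osd_fsid'], tags.get('ceph.type')
        return None, None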
@@ -5676,15 +5713,10 @@ def command_rm_daemon(ctx):
 
 def _zap(ctx: CephadmContext, what: str) -> None:
     mounts = get_container_mounts(ctx, ctx.fsid, 'clusterless-ceph-volume', None)
-    c = CephContainer(
-        ctx,
-        image=ctx.image,
-        entrypoint='/usr/sbin/ceph-volume',
-        envs=ctx.env,
-        args=['lvm', 'zap', '--destroy', what],
-        privileged=True,
-        volume_mounts=mounts,
-    )
+    c = get_ceph_volume_container(ctx,
+                                  args=['lvm', 'zap', '--destroy', what],
+                                  volume_mounts=mounts,
+                                  envs=ctx.env)
     logger.info(f'Zapping {what}...')
     out, err, code = call_throws(ctx, c.run_cmd())
 
@@ -5695,15 +5727,10 @@ def _zap_osds(ctx: CephadmContext) -> None:
 
     # list
     mounts = get_container_mounts(ctx, ctx.fsid, 'clusterless-ceph-volume', None)
-    c = CephContainer(
-        ctx,
-        image=ctx.image,
-        entrypoint='/usr/sbin/ceph-volume',
-        envs=ctx.env,
-        args=['inventory', '--format', 'json'],
-        privileged=True,
-        volume_mounts=mounts,
-    )
+    c = get_ceph_volume_container(ctx,
+                                  args=['inventory', '--format', 'json'],
+                                  volume_mounts=mounts,
+                                  envs=ctx.env)
     out, err, code = call_throws(ctx, c.run_cmd())
     if code:
         raise Error('failed to list osd inventory')
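
The inventory JSON then drives the zap loop that follows this excerpt: one
record per device, each listing the LVs it carries. A hedged sketch of
selecting this cluster's devices (the path, lvs and cluster_fsid field names
are assumptions about ceph-volume's inventory format):

    import json
    from typing import Iterator

    def devices_for_cluster(out: str, fsid: str) -> Iterator[str]:
        # Yield the path of every device holding an LV tagged with this
        # cluster's fsid; such devices are candidates for zapping.
        for dev in json.loads(out):
            if any(lv.get('cluster_fsid') == fsid for lv in dev.get('lvs', [])):
                yield dev['path']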