From be1d98f4254e976e7d8bfd14fee7e075004c00a4 Mon Sep 17 00:00:00 2001
From: Dimitri Savineau
Date: Tue, 6 Oct 2020 18:09:17 -0400
Subject: [PATCH] ceph-osd: add missing container_binary

90f3f61 introduced the docker-to-podman.yml playbook but the
ceph-osd-run.sh.j2 template still has some docker hardcoded instead of
using the container_binary variable.

Signed-off-by: Dimitri Savineau
---
 roles/ceph-osd/templates/ceph-osd-run.sh.j2 | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/roles/ceph-osd/templates/ceph-osd-run.sh.j2 b/roles/ceph-osd/templates/ceph-osd-run.sh.j2
index 3cd1a9b80..734bfc1ed 100644
--- a/roles/ceph-osd/templates/ceph-osd-run.sh.j2
+++ b/roles/ceph-osd/templates/ceph-osd-run.sh.j2
@@ -16,7 +16,7 @@ function id_to_device () {
 {% if dmcrypt | bool %}
   {{ container_binary }} run --rm --net=host --ulimit nofile=1024:4096 --ipc=host --pid=host --privileged=true -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph/:/var/lib/ceph/:z -v /dev:/dev -v /etc/localtime:/etc/localtime:ro -e DEBUG=verbose -e CLUSTER={{ cluster }} {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} osd_ceph_disk_dmcrypt_data_map
 {% endif %}
-  DATA_PART=$(docker run --rm --ulimit nofile=1024:4096 --privileged=true -v /dev/:/dev/ -v /etc/ceph:/etc/ceph:z --entrypoint ceph-disk {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} list | grep ", osd\.${1}," | awk '{ print $1 }')
+  DATA_PART=$({{ container_binary }} run --rm --ulimit nofile=1024:4096 --privileged=true -v /dev/:/dev/ -v /etc/ceph:/etc/ceph:z --entrypoint ceph-disk {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} list | grep ", osd\.${1}," | awk '{ print $1 }')
   if [ -z "${DATA_PART}" ]; then
     echo "No data partition found for OSD ${i}"
     exit 1
@@ -29,7 +29,7 @@ function id_to_device () {
 }

 function expose_partitions () {
-  DOCKER_ENV=$(docker run --rm --net=host --privileged=true -v /dev/:/dev/ -v /etc/ceph:/etc/ceph:z -e CLUSTER={{ cluster }} -e OSD_DEVICE=${1} {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} disk_list)
+  DOCKER_ENV=$({{ container_binary }} run --rm --net=host --privileged=true -v /dev/:/dev/ -v /etc/ceph:/etc/ceph:z -e CLUSTER={{ cluster }} -e OSD_DEVICE=${1} {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} disk_list)
 }
 {% else -%}
 # NOTE(leseb): maintains backwards compatibility with old ceph-docker Jewel images
@@ -58,7 +58,7 @@ function expose_partitions {
       # NOTE(leseb): if we arrive here this probably means we just switched from non-containers to containers.
       # This is tricky as we don't have any info on the type of OSD, this is 'only' a problem for non-collocated scenarios
       # We can't assume that the 'ceph' is still present so calling Docker exec instead
-      part=$(docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} list /dev/${1} | awk '/journal / {print $1}')
+      part=$({{ container_binary }} run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} list /dev/${1} | awk '/journal / {print $1}')
       DOCKER_ENV="-e OSD_JOURNAL=$part"
     fi
   # if empty, the previous command didn't find anything so we fail
--
2.39.5
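
As a rough sketch of the effect of this change: on a host where container_binary resolves to podman, the DATA_PART assignment above would render roughly as follows once Ansible substitutes the variables (the registry, image and tag shown here are placeholder values, not taken from the patch):

  # hypothetical rendered output with container_binary=podman and a placeholder image reference
  DATA_PART=$(podman run --rm --ulimit nofile=1024:4096 --privileged=true -v /dev/:/dev/ -v /etc/ceph:/etc/ceph:z --entrypoint ceph-disk docker.io/ceph/daemon:latest-nautilus list | grep ", osd\.${1}," | awk '{ print $1 }')

Before the patch, this line always invoked docker even when the deployment had been switched to podman, which is the breakage the variable substitution fixes.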