Test with podman instead of docker, and support Python 3 only.
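
The container binary (docker or podman) is resolved once as an Ansible fact
and reused by every role, template and handler; a minimal sketch of the
selection logic introduced here:

    - name: set_fact container_binary
      set_fact:
        container_binary: "{{ 'podman' if is_atomic and is_podman else 'docker' }}"

ceph-volume calls receive the same choice through the CEPH_CONTAINER_BINARY
environment variable.
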
Signed-off-by: Sébastien Han <seb@redhat.com>
'''
-Build the docker CLI to run a command inside a container
+Build the container CLI (docker or podman) to run a command inside a container
'''
-
- command_exec = ['docker', 'run', '--rm', '--privileged', '--net=host',
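+ # the calling task exports CEPH_CONTAINER_BINARY ('docker' or 'podman'); default to docker when unset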
+ container_binary = os.getenv('CEPH_CONTAINER_BINARY', 'docker')
+ command_exec = [container_binary, 'run',
+ '--rm', '--privileged', '--net=host',
'-v', '/run/lock/lvm:/run/lock/lvm:z',
'-v', '/var/run/udev/:/var/run/udev/:z',
'-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z',
except ValueError:
strategy_change = "strategy changed" in out
if strategy_change:
- out = json.dumps({"changed": False, "stdout": out.rstrip("\r\n")})
+ out = json.dumps(
+ {"changed": False, "stdout": out.rstrip("\r\n")})
rc = 0
changed = False
else:
out = out.rstrip("\r\n")
result = dict(
cmd=cmd,
- stdout=out,
- stderr=err.rstrip("\r\n"),
+ stdout=out.rstrip('\r\n'),
+ stderr=err.rstrip('\r\n'),
rc=rc,
changed=changed,
)
end=str(endd),
delta=str(delta),
rc=rc,
- stdout=out.rstrip(b'\r\n'),
- stderr=err.rstrip(b'\r\n'),
+ stdout=out.rstrip('\r\n'),
+ stderr=err.rstrip('\r\n'),
changed=changed,
)
result = ceph_volume.get_wal("wal-lv", "wal-vg")
assert result == "wal-vg/wal-lv"
- def test_container_exec(sefl):
+ def test_container_exec(self):
fake_binary = "ceph-volume"
fake_container_image = "docker.io/ceph/daemon:latest-luminous"
expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', # noqa E501
- name: run a dummy container (sleep 300) from where we can create pool(s)/key(s)
command: >
- docker run \
+ {{ container_binary }} run \
--rm \
-d \
-v {{ ceph_conf_key_directory }}:{{ ceph_conf_key_directory }}:z \
- name: set_fact docker_exec_cmd
set_fact:
- docker_exec_cmd: "docker exec {% if groups.get(mon_group_name, []) | length > 0 -%} ceph-mon-{{ hostvars[delegated_node]['ansible_hostname'] }} {% else %} ceph-create-keys {% endif %}"
+ docker_exec_cmd: "{{ container_binary }} exec {% if groups.get(mon_group_name, []) | length > 0 -%} ceph-mon-{{ hostvars[delegated_node]['ansible_hostname'] }} {% else %} ceph-create-keys {% endif %}"
when:
- containerized_deployment
---
# NOTE (leseb): we must check each inventory group so this will work with collocated daemons
- name: inspect ceph mon container
- command: "docker inspect {{ ceph_mon_container_stat.stdout }}"
+ command: "{{ container_binary }} inspect {{ ceph_mon_container_stat.stdout }}"
changed_when: false
register: ceph_mon_inspect
when:
- ceph_mon_container_stat.get('stdout_lines', [])|length != 0
- name: inspect ceph osd container
- command: "docker inspect {{ ceph_osd_container_stat.stdout }}"
+ command: "{{ container_binary }} inspect {{ ceph_osd_container_stat.stdout }}"
changed_when: false
register: ceph_osd_inspect
when:
- ceph_osd_container_stat.get('stdout_lines', [])|length != 0
- name: inspect ceph mds container
- command: "docker inspect {{ ceph_mds_container_stat.stdout }}"
+ command: "{{ container_binary }} inspect {{ ceph_mds_container_stat.stdout }}"
changed_when: false
register: ceph_mds_inspect
when:
- ceph_mds_container_stat.get('stdout_lines', [])|length != 0
- name: inspect ceph rgw container
- command: "docker inspect {{ ceph_rgw_container_stat.stdout }}"
+ command: "{{ container_binary }} inspect {{ ceph_rgw_container_stat.stdout }}"
changed_when: false
register: ceph_rgw_inspect
when:
- ceph_rgw_container_stat.get('stdout_lines', [])|length != 0
- name: inspect ceph mgr container
- command: "docker inspect {{ ceph_mgr_container_stat.stdout }}"
+ command: "{{ container_binary }} inspect {{ ceph_mgr_container_stat.stdout }}"
changed_when: false
register: ceph_mgr_inspect
when:
- ceph_mgr_container_stat.get('stdout_lines', [])|length != 0
- name: inspect ceph rbd mirror container
- command: "docker inspect {{ ceph_rbd_mirror_container_stat.stdout }}"
+ command: "{{ container_binary }} inspect {{ ceph_rbd_mirror_container_stat.stdout }}"
changed_when: false
register: ceph_rbd_mirror_inspect
when:
- ceph_rbd_mirror_container_stat.get('stdout_lines', [])|length != 0
- name: inspect ceph nfs container
- command: "docker inspect {{ ceph_nfs_container_stat.stdout }}"
+ command: "{{ container_binary }} inspect {{ ceph_nfs_container_stat.stdout }}"
changed_when: false
register: ceph_nfs_inspect
when:
# NOTE(leseb): using failed_when to handle the case when the image is not present yet
- name: "inspecting ceph mon container image before pulling"
- command: "docker inspect {{ (ceph_mon_inspect.stdout | from_json)[0].Image }}"
+ command: "{{ container_binary }} inspect {{ (ceph_mon_inspect.stdout | from_json)[0].Image }}"
changed_when: false
failed_when: false
register: ceph_mon_container_inspect_before_pull
- ceph_mon_inspect.get('rc') == 0
- name: "inspecting ceph osd container image before pulling"
- command: "docker inspect {{ (ceph_osd_inspect.stdout | from_json)[0].Image }}"
+ command: "{{ container_binary }} inspect {{ (ceph_osd_inspect.stdout | from_json)[0].Image }}"
changed_when: false
failed_when: false
register: ceph_osd_container_inspect_before_pull
- ceph_osd_inspect.get('rc') == 0
- name: "inspecting ceph rgw container image before pulling"
- command: "docker inspect {{ (ceph_rgw_inspect.stdout | from_json)[0].Image }}"
+ command: "{{ container_binary }} inspect {{ (ceph_rgw_inspect.stdout | from_json)[0].Image }}"
changed_when: false
failed_when: false
register: ceph_rgw_container_inspect_before_pull
- ceph_rgw_inspect.get('rc') == 0
- name: "inspecting ceph mds container image before pulling"
- command: "docker inspect {{ (ceph_mds_inspect.stdout | from_json)[0].Image }}"
+ command: "{{ container_binary }} inspect {{ (ceph_mds_inspect.stdout | from_json)[0].Image }}"
changed_when: false
failed_when: false
register: ceph_mds_container_inspect_before_pull
- ceph_mds_inspect.get('rc') == 0
- name: "inspecting ceph mgr container image before pulling"
- command: "docker inspect {{ (ceph_mgr_inspect.stdout | from_json)[0].Image }}"
+ command: "{{ container_binary }} inspect {{ (ceph_mgr_inspect.stdout | from_json)[0].Image }}"
changed_when: false
failed_when: false
register: ceph_mgr_container_inspect_before_pull
- ceph_mgr_inspect.get('rc') == 0
- name: "inspecting ceph rbd mirror container image before pulling"
- command: "docker inspect {{ (ceph_rbd_mirror_inspect.stdout | from_json)[0].Image }}"
+ command: "{{ container_binary }} inspect {{ (ceph_rbd_mirror_inspect.stdout | from_json)[0].Image }}"
changed_when: false
failed_when: false
register: ceph_rbd_mirror_container_inspect_before_pull
- ceph_rbd_mirror_inspect.get('rc') == 0
- name: "inspecting ceph nfs container image before pulling"
- command: "docker inspect {{ (ceph_nfs_inspect.stdout | from_json)[0].Image }}"
+ command: "{{ container_binary }} inspect {{ (ceph_nfs_inspect.stdout | from_json)[0].Image }}"
changed_when: false
failed_when: false
register: ceph_nfs_container_inspect_before_pull
- nfs_group_name in group_names
- ceph_nfs_container_inspect_before_pull.get('rc') == 0
-- name: "pulling {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image"
- command: "timeout {{ docker_pull_timeout }} docker pull {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+- name: "pulling {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image"
+ command: "timeout {{ docker_pull_timeout }} {{ container_binary }} pull {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
changed_when: false
register: docker_image
until: docker_image.rc == 0
when:
- (ceph_docker_dev_image is undefined or not ceph_docker_dev_image)
-- name: "inspecting {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image after pulling"
- command: "docker inspect {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+- name: "inspecting {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image after pulling"
+ command: "{{ container_binary }} inspect {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
changed_when: false
failed_when: false
register: image_inspect_after_pull
- name: export local ceph dev image
command: >
- docker save -o "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
+ {{ container_binary }} save -o "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
"{{ ceph_docker_username }}/{{ ceph_docker_imagename }}:{{ ceph_docker_image_tag }}"
delegate_to: localhost
when:
- (ceph_docker_dev_image is defined and ceph_docker_dev_image)
- name: load ceph dev image
- command: "docker load -i /tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
+ command: "{{ container_binary }} load -i /tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
when:
- (ceph_docker_dev_image is defined and ceph_docker_dev_image)
changed_when: false
check_mode: no
register: ceph_docker_version
+ when: not is_podman
- name: set_fact ceph_docker_version ceph_docker_version.stdout.split
set_fact:
ceph_docker_version: "{{ ceph_docker_version.stdout.split(' ')[2] }}"
+ when: not is_podman
- name: include checks.yml
include_tasks: checks.yml
- name: get ceph version
command: >
- docker run --rm --entrypoint /usr/bin/ceph
+ {{ container_binary }} run --rm --entrypoint /usr/bin/ceph
{{ ceph_client_docker_registry }}/{{ ceph_client_docker_image }}:{{ ceph_client_docker_image_tag }}
--version
changed_when: false
set_fact:
is_atomic: "{{ stat_ostree.stat.exists }}"
+- name: check if podman binary is present
+ stat:
+ path: /usr/bin/podman
+ register: podman_binary
+
+- name: set_fact is_podman
+ set_fact:
+ is_podman: "{{ is_atomic and podman_binary.stat.exists }}"
+
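+# select the container binary: prefer podman on atomic hosts that ship it, fall back to docker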
+- name: set_fact container_binary
+ set_fact:
+ container_binary: "{{ 'podman' if is_atomic and is_podman else 'docker' }}"
+ when: containerized_deployment
+
- name: set_fact monitor_name ansible_hostname
set_fact:
monitor_name: "{{ ansible_hostname }}"
- name: set_fact docker_exec_cmd
set_fact:
- docker_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+ docker_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- containerized_deployment
---
- name: check for a mon container
- command: "docker ps -q --filter='name=ceph-mon-{{ ansible_hostname }}'"
+ command: "{{ container_binary }} ps -q --filter='name=ceph-mon-{{ ansible_hostname }}'"
register: ceph_mon_container_stat
changed_when: false
failed_when: false
- inventory_hostname in groups.get(mon_group_name, [])
- name: check for an osd container
- command: "docker ps -q --filter='name=ceph-osd'"
+ command: "{{ container_binary }} ps -q --filter='name=ceph-osd'"
register: ceph_osd_container_stat
changed_when: false
failed_when: false
- inventory_hostname in groups.get(osd_group_name, [])
- name: check for a mds container
- command: "docker ps -q --filter='name=ceph-mds-{{ ansible_hostname }}'"
+ command: "{{ container_binary }} ps -q --filter='name=ceph-mds-{{ ansible_hostname }}'"
register: ceph_mds_container_stat
changed_when: false
failed_when: false
- inventory_hostname in groups.get(mds_group_name, [])
- name: check for a rgw container
- command: "docker ps -q --filter='name=ceph-rgw-{{ ansible_hostname }}'"
+ command: "{{ container_binary }} ps -q --filter='name=ceph-rgw-{{ ansible_hostname }}'"
register: ceph_rgw_container_stat
changed_when: false
failed_when: false
- inventory_hostname in groups.get(rgw_group_name, [])
- name: check for a mgr container
- command: "docker ps -q --filter='name=ceph-mgr-{{ ansible_hostname }}'"
+ command: "{{ container_binary }} ps -q --filter='name=ceph-mgr-{{ ansible_hostname }}'"
register: ceph_mgr_container_stat
changed_when: false
failed_when: false
- inventory_hostname in groups.get(mgr_group_name, [])
- name: check for a rbd mirror container
- command: "docker ps -q --filter='name=ceph-rbd-mirror-{{ ansible_hostname }}'"
+ command: "{{ container_binary }} ps -q --filter='name=ceph-rbd-mirror-{{ ansible_hostname }}'"
register: ceph_rbd_mirror_container_stat
changed_when: false
failed_when: false
- inventory_hostname in groups.get(rbdmirror_group_name, [])
- name: check for a nfs container
- command: "docker ps -q --filter='name=ceph-nfs-{{ ansible_hostname }}'"
+ command: "{{ container_binary }} ps -q --filter='name=ceph-nfs-{{ ansible_hostname }}'"
register: ceph_nfs_container_stat
changed_when: false
failed_when: false
- inventory_hostname in groups.get(nfs_group_name, [])
- name: check for a tcmu-runner container
- command: "docker ps -q --filter='name=tcmu-runner'"
+ command: "{{ container_binary }} ps -q --filter='name=tcmu-runner'"
register: ceph_tcmu_runner_stat
changed_when: false
failed_when: false
- inventory_hostname in groups.get(iscsi_gw_group_name, [])
- name: check for a rbd-target-api container
- command: "docker ps -q --filter='name=rbd-target-api'"
+ command: "{{ container_binary }} ps -q --filter='name=rbd-target-api'"
register: ceph_rbd_target_api_stat
changed_when: false
failed_when: false
- inventory_hostname in groups.get(iscsi_gw_group_name, [])
- name: check for a rbd-target-gw container
- command: "docker ps -q --filter='name=rbd-target-gw'"
+ command: "{{ container_binary }} ps -q --filter='name=rbd-target-gw'"
register: ceph_rbd_target_gw_stat
changed_when: false
failed_when: false
DELAY="{{ handler_health_mds_check_delay }}"
MDS_NAME="{{ mds_name }}"
{% if containerized_deployment %}
-DOCKER_EXEC="docker exec ceph-mds-{{ ansible_hostname }}"
+DOCKER_EXEC="{{ container_binary }} exec ceph-mds-{{ ansible_hostname }}"
{% endif %}
# Backward compatibility
DELAY="{{ handler_health_mgr_check_delay }}"
MGR_NAME="{{ ansible_hostname }}"
{% if containerized_deployment %}
-DOCKER_EXEC="docker exec ceph-mgr-{{ ansible_hostname }}"
+DOCKER_EXEC="{{ container_binary }} exec ceph-mgr-{{ ansible_hostname }}"
{% endif %}
# Backward compatibility
DELAY="{{ handler_health_mon_check_delay }}"
MONITOR_NAME="{{ monitor_name }}"
{% if containerized_deployment %}
-DOCKER_EXEC="docker exec ceph-mon-{{ ansible_hostname }}"
+DOCKER_EXEC="{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
{% endif %}
# Backward compatibility
NFS_NAME="ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_hostname) }}"
PID=/var/run/ganesha.pid
{% if containerized_deployment %}
-DOCKER_EXEC="docker exec ceph-nfs-{{ ansible_hostname }}"
+DOCKER_EXEC="{{ container_binary }} exec ceph-nfs-{{ ansible_hostname }}"
{% endif %}
# First, restart the daemon
CEPH_CLI="--name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring --cluster {{ cluster }}"
check_pgs() {
- num_pgs=$($docker_exec ceph $CEPH_CLI -s -f json|python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')
+ num_pgs=$($container_exec ceph $CEPH_CLI -s -f json|python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')
if [[ "$num_pgs" == "0" ]]; then
return 0
fi
while [ $RETRIES -ne 0 ]; do
- test "$($docker_exec ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')" -eq "$($docker_exec ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print sum ( [ i["count"] for i in json.load(sys.stdin)["pgmap"]["pgs_by_state"] if "active+clean" in i["state_name"]])')"
+ test "$($container_exec ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')" -eq "$($container_exec ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print sum ( [ i["count"] for i in json.load(sys.stdin)["pgmap"]["pgs_by_state"] if "active+clean" in i["state_name"]])')"
RET=$?
test $RET -eq 0 && return 0
sleep $DELAY
echo "Error while running 'ceph $CEPH_CLI -s', PGs were not reported as active+clean"
echo "It is possible that the cluster has less OSDs than the replica configuration"
echo "Will refuse to continue"
- $docker_exec ceph $CEPH_CLI -s
- $docker_exec ceph $CEPH_CLI osd dump
- $docker_exec ceph $CEPH_CLI osd tree
- $docker_exec ceph $CEPH_CLI osd crush rule dump
+ $container_exec ceph $CEPH_CLI -s
+ $container_exec ceph $CEPH_CLI osd dump
+ $container_exec ceph $CEPH_CLI osd tree
+ $container_exec ceph $CEPH_CLI osd crush rule dump
exit 1
}
-wait_for_socket_in_docker() {
- osd_mount_point=$(docker exec "$1" df --output=target | grep '/var/lib/ceph/osd/')
- whoami=$(docker exec "$1" cat $osd_mount_point/whoami)
- if ! docker exec "$1" timeout 10 bash -c "while [ ! -e /var/run/ceph/*.asok ]; do sleep 1 ; done"; then
+wait_for_socket_in_container() {
+ osd_mount_point=$({{ container_binary }} exec "$1" df --output=target | grep '/var/lib/ceph/osd/')
+ whoami=$({{ container_binary }} exec "$1" cat $osd_mount_point/whoami)
+ if ! {{ container_binary }} exec "$1" timeout 10 bash -c "while [ ! -e /var/run/ceph/*.asok ]; do sleep 1 ; done"; then
echo "Timed out while trying to look for a Ceph OSD socket."
echo "Abort mission!"
exit 1
echo $1 | sed -r 's/ceph-osd@([a-z]{1,4})\.service/\1/'
}
-get_docker_id_from_dev_name() {
+get_container_id_from_dev_name() {
local id
local count
count=10
while [ $count -ne 0 ]; do
- id=$(docker ps -q -f "name=$1")
+ id=$({{ container_binary }} ps -q -f "name=$1")
test "$id" != "" && break
sleep $DELAY
let count=count-1
echo "$id"
}
-get_docker_osd_id() {
- wait_for_socket_in_docker $1
- docker exec "$1" ls /var/run/ceph | cut -d'.' -f2
+get_container_osd_id() {
+ wait_for_socket_in_container $1
+ {{ container_binary }} exec "$1" ls /var/run/ceph | cut -d'.' -f2
}
# For containerized deployments, the unit file looks like: ceph-osd@sda.service
# Wait and ensure the socket exists after restarting the daemon
{% if containerized_deployment and osd_scenario != 'lvm' -%}
id=$(get_dev_name "$unit")
- container_id=$(get_docker_id_from_dev_name "$id")
- wait_for_socket_in_docker "$container_id"
+ container_id=$(get_container_id_from_dev_name "$id")
+ wait_for_socket_in_container "$container_id"
osd_id=$whoami
- docker_exec="docker exec $container_id"
+ container_exec="{{ container_binary }} exec $container_id"
{% elif containerized_deployment and osd_scenario == 'lvm' %}
osd_id=$(echo ${unit#ceph-osd@} | grep -oE '[0-9]+')
- container_id=$(get_docker_id_from_dev_name "ceph-osd-${osd_id}")
- docker_exec="docker exec $container_id"
+ container_id=$(get_container_id_from_dev_name "ceph-osd-${osd_id}")
+ container_exec="{{ container_binary }} exec $container_id"
{% else %}
osd_id=$(echo ${unit#ceph-osd@} | grep -oE '[0-9]+')
{% endif %}
SOCKET=/var/run/ceph/{{ cluster }}-osd.${osd_id}.asok
while [ $COUNT -ne 0 ]; do
RETRIES="{{ handler_health_osd_check_retries }}"
- $docker_exec test -S "$SOCKET" && check_pgs && continue 2
+ $container_exec test -S "$SOCKET" && check_pgs && continue 2
sleep $DELAY
let COUNT=COUNT-1
done
DELAY="{{ handler_health_rbd_mirror_check_delay }}"
RBD_MIRROR_NAME="{{ ansible_hostname }}"
{% if containerized_deployment %}
-DOCKER_EXEC="docker exec ceph-rbd-mirror-{{ ansible_hostname }}"
+DOCKER_EXEC="{{ container_binary }} exec ceph-rbd-mirror-{{ ansible_hostname }}"
{% endif %}
# Backward compatibility
RGW_NAME="{{ ansible_hostname }}"
RGW_PORT="{{ radosgw_frontend_port }}"
{% if containerized_deployment %}
-DOCKER_EXEC="docker exec ceph-rgw-{{ ansible_hostname }}"
+DOCKER_EXEC="{{ container_binary }} exec ceph-rgw-{{ ansible_hostname }}"
{% endif %}
# Backward compatibility
$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rgw.{{ ansible_fqdn }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rgw.{{ ansible_fqdn }}.asok
- name: set_fact docker_exec_cmd
set_fact:
- docker_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+ docker_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when: containerized_deployment
[Service]
EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker stop rbd-target-api
-ExecStartPre=-/usr/bin/docker rm rbd-target-api
-ExecStart=/usr/bin/docker run --rm \
+ExecStartPre=-/usr/bin/{{ container_binary }} stop rbd-target-api
+ExecStartPre=-/usr/bin/{{ container_binary }} rm rbd-target-api
+ExecStart=/usr/bin/{{ container_binary }} run --rm \
--memory={{ ceph_rbd_target_api_docker_memory_limit }} \
- {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
+ {% if is_podman or (ceph_docker_version is defined and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) -%}
--cpus={{ ceph_rbd_target_api_docker_cpu_limit }} \
{% else -%}
--cpu-quota={{ ceph_rbd_target_api_docker_cpu_limit * 100000 }} \
-e CEPH_DAEMON=RBD_TARGET_API \
--name=rbd-target-api \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop rbd-target-api
+ExecStopPost=-/usr/bin/{{ container_binary }} stop rbd-target-api
Restart=always
RestartSec=10s
TimeoutStartSec=120
[Service]
EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker stop rbd-target-gw
-ExecStartPre=-/usr/bin/docker rm rbd-target-gw
-ExecStart=/usr/bin/docker run --rm \
+ExecStartPre=-/usr/bin/{{ container_binary }} stop rbd-target-gw
+ExecStartPre=-/usr/bin/{{ container_binary }} rm rbd-target-gw
+ExecStart=/usr/bin/{{ container_binary }} run --rm \
--memory={{ ceph_rbd_target_gw_docker_memory_limit }} \
- {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
+ {% if is_podman or (ceph_docker_version is defined and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) -%}
--cpus={{ ceph_rbd_target_gw_docker_cpu_limit }} \
{% else -%}
--cpu-quota={{ ceph_rbd_target_gw_docker_cpu_limit * 100000 }} \
-e CEPH_DAEMON=RBD_TARGET_GW \
--name=rbd-target-gw \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop rbd-target-gw
+ExecStopPost=-/usr/bin/{{ container_binary }} stop rbd-target-gw
Restart=always
RestartSec=10s
TimeoutStartSec=120
[Service]
EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker stop tcmu-runner
-ExecStartPre=-/usr/bin/docker rm tcmu-runner
-ExecStart=/usr/bin/docker run --rm \
+ExecStartPre=-/usr/bin/{{ container_binary }} stop tcmu-runner
+ExecStartPre=-/usr/bin/{{ container_binary }} rm tcmu-runner
+ExecStart=/usr/bin/{{ container_binary }} run --rm \
--memory={{ ceph_tcmu_runner_docker_memory_limit }} \
- {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
+ {% if is_podman or (ceph_docker_version is defined and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) -%}
--cpus={{ ceph_tcmu_runner_docker_cpu_limit }} \
{% else -%}
--cpu-quota={{ ceph_tcmu_runner_docker_cpu_limit * 100000 }} \
-e CEPH_DAEMON=TCMU_RUNNER \
--name=tcmu-runner \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop tcmu-runner
+ExecStopPost=-/usr/bin/{{ container_binary }} stop tcmu-runner
Restart=always
RestartSec=10s
TimeoutStartSec=120
---
- name: set_fact docker_exec_cmd mds
set_fact:
- docker_exec_cmd: "docker exec ceph-mds-{{ ansible_hostname }}"
+ docker_exec_cmd: "{{ container_binary }} exec ceph-mds-{{ ansible_hostname }}"
- name: set_fact admin_keyring
set_fact:
- name: set_fact docker_exec_cmd
set_fact:
- docker_exec_cmd: "docker exec ceph-mds-{{ ansible_hostname }}"
+ docker_exec_cmd: "{{ container_binary }} exec ceph-mds-{{ ansible_hostname }}"
when:
- containerized_deployment
[Service]
EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker stop ceph-mds-{{ ansible_hostname }}
-ExecStartPre=-/usr/bin/docker rm ceph-mds-{{ ansible_hostname }}
-ExecStart=/usr/bin/docker run --rm --net=host \
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mds-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mds-{{ ansible_hostname }}
+ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
--memory={{ ceph_mds_docker_memory_limit }} \
- {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
+ {% if is_podman or (ceph_docker_version is defined and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) -%}
--cpus={{ ceph_mds_docker_cpu_limit }} \
{% else -%}
--cpu-quota={{ ceph_mds_docker_cpu_limit * 100000 }} \
{{ ceph_mds_docker_extra_env }} \
--name=ceph-mds-{{ ansible_hostname }} \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop ceph-mds-{{ ansible_hostname }}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-mds-{{ ansible_hostname }}
Restart=always
RestartSec=10s
TimeoutStartSec=120
---
- name: set_fact docker_exec_cmd
set_fact:
- docker_exec_cmd_mgr: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+ docker_exec_cmd_mgr: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when:
- containerized_deployment
[Service]
EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker stop ceph-mgr-{{ ansible_hostname }}
-ExecStartPre=-/usr/bin/docker rm ceph-mgr-{{ ansible_hostname }}
-ExecStart=/usr/bin/docker run --rm --net=host \
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mgr-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mgr-{{ ansible_hostname }}
+ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
--memory={{ ceph_mgr_docker_memory_limit }} \
- {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
+ {% if is_podman or (ceph_docker_version is defined and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) -%}
--cpus={{ ceph_mgr_docker_cpu_limit }} \
{% else -%}
--cpu-quota={{ ceph_mgr_docker_cpu_limit * 100000 }} \
{{ ceph_mgr_docker_extra_env }} \
--name=ceph-mgr-{{ ansible_hostname }} \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop ceph-mgr-{{ ansible_hostname }}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-mgr-{{ ansible_hostname }}
Restart=always
RestartSec=10s
TimeoutStartSec=120
---
- name: set_fact docker_exec_cmd
set_fact:
- docker_exec_cmd: "docker exec ceph-mon-{{ ansible_hostname }}"
+ docker_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
when:
- containerized_deployment
[Service]
EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker rm ceph-mon-%i
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mon-%i
ExecStartPre=/bin/sh -c '"$(command -v mkdir)" -p /etc/ceph /var/lib/ceph/mon'
-ExecStart=/usr/bin/docker run --rm --name ceph-mon-%i \
+ExecStart=/usr/bin/{{ container_binary }} run --rm --name ceph-mon-%i \
--memory={{ ceph_mon_docker_memory_limit }} \
-{% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
+{% if is_podman or (ceph_docker_version is defined and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) -%}
--cpus={{ ceph_mon_docker_cpu_limit }} \
{% else -%}
--cpu-quota={{ ceph_mon_docker_cpu_limit * 100000 }} \
-e CEPH_DAEMON=MON \
{{ ceph_mon_docker_extra_env }} \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStop=-/usr/bin/docker stop ceph-mon-%i
+ExecStop=-/usr/bin/{{ container_binary }} stop ceph-mon-%i
ExecStopPost=-/bin/rm -f /var/run/ceph/{{ cluster }}-mon.{{ monitor_name }}.asok
Restart=always
RestartSec=10s
---
- name: set_fact docker_exec_cmd_nfs
set_fact:
- docker_exec_cmd_nfs: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+ docker_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when:
- containerized_deployment
---
- name: set_fact docker_exec_cmd
set_fact:
- docker_exec_cmd: "docker exec ceph-nfs-{{ ansible_hostname }}"
+ docker_exec_cmd: "{{ container_binary }} exec ceph-nfs-{{ ansible_hostname }}"
when:
- containerized_deployment
---
- name: set_fact docker_exec_cmd_nfs
set_fact:
- docker_exec_cmd_nfs: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+ docker_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when:
- containerized_deployment
[Service]
EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker rm ceph-nfs-%i
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-nfs-%i
ExecStartPre=/usr/bin/mkdir -p /etc/ceph /etc/ganesha /var/lib/nfs/ganesha
-ExecStart=/usr/bin/docker run --rm --net=host \
+ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
{% if not containerized_deployment_with_kv -%}
-v /var/lib/ceph:/var/lib/ceph:z \
-v /etc/ceph:/etc/ceph:z \
{{ ceph_nfs_docker_extra_env }} \
--name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_hostname) }} \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop ceph-nfs-%i
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-nfs-%i
Restart=always
RestartSec=10s
TimeoutStartSec=120
# starting the next task
- name: prepare ceph containerized osd disk collocated
command: |
- docker run --net=host \
+ {{ container_binary }} run --net=host \
--pid=host \
--privileged=true \
--name=ceph-osd-prepare-{{ ansible_hostname }}-{{ item.1 | regex_replace('/dev/', '') }} \
- name: automatic prepare ceph containerized osd disk collocated
command: |
- docker run --net=host \
+ {{ container_binary }} run --net=host \
--pid=host \
--privileged=true \
--name=ceph-osd-prepare-{{ ansible_hostname }}-{{ item.split('/')[-1] }} \
environment:
CEPH_VOLUME_DEBUG: 1
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
with_items: "{{ lvm_volumes }}"
tags: prepare_osd
\ No newline at end of file
# starting the next task
- name: prepare ceph "{{ osd_objectstore }}" containerized osd disk(s) non-collocated
command: |
- docker run --net=host \
+ {{ container_binary }} run --net=host \
--pid=host \
--privileged=true \
--name=ceph-osd-prepare-{{ ansible_hostname }}-{{ item.1 | regex_replace('/dev/', '') }} \
- name: prepare ceph "{{ osd_objectstore }}" containerized osd disk(s) non-collocated with a dedicated device for db and wal
command: |
- docker run --net=host \
+ {{ container_binary }} run --net=host \
--pid=host \
--privileged=true \
--name=ceph-osd-prepare-{{ ansible_hostname }}-{{ item.1 | regex_replace('/dev/', '') }} \
owner: "root"
group: "root"
mode: "0744"
+ setype: "bin_t"
notify:
- restart ceph osds
when:
- name: set_fact docker_exec_start_osd
set_fact:
- docker_exec_start_osd: "{{ 'docker run --rm --privileged=true -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket -v /var/run/udev/:/var/run/udev/:z -v /etc/ceph:/etc/ceph:z -v /dev:/dev --entrypoint=ceph-volume ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else 'ceph-volume' }}"
+ docker_exec_start_osd: "{{ container_binary + ' run --rm --privileged=true -v /var/run/udev/:/var/run/udev/:z -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket -v /etc/ceph:/etc/ceph:z -v /dev:/dev --entrypoint=ceph-volume ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else 'ceph-volume' }}"
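+ # containerized: ceph-volume runs in a one-shot privileged container; otherwise the host binary is used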
- name: collect osd ids
shell: >
# FUNCTIONS #
#############
function expose_partitions () {
-DOCKER_ENV=$(docker run --rm --net=host --name expose_partitions_${1} --privileged=true -v /dev/:/dev/ -v /etc/ceph:/etc/ceph:z -e CLUSTER={{ cluster }} -e OSD_DEVICE=/dev/${1} {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} disk_list)
+DOCKER_ENV=$({{ container_binary }} run --rm --net=host --name expose_partitions_${1} --privileged=true -v /dev/:/dev/ -v /etc/ceph:/etc/ceph:z -e CLUSTER={{ cluster }} -e OSD_DEVICE=/dev/${1} {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} disk_list)
}
{% else -%}
# NOTE(leseb): maintains backwards compatibility with old ceph-docker Jewel images
# Jewel images prior to https://github.com/ceph/ceph-docker/pull/797
REGEX="[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
function expose_partitions {
- if docker ps -a | grep -sq ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}; then
+ if {{ container_binary }} ps -a | grep -sq ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}; then
if [[ ! -f {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log ]]; then
- docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} &> {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log
+ {{ container_binary }} logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} &> {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log
fi
fi
- if docker ps -a | grep -sq ceph-osd-prepare-{{ ansible_hostname }}-${1}; then
+ if {{ container_binary }} ps -a | grep -sq ceph-osd-prepare-{{ ansible_hostname }}-${1}; then
if [[ ! -f {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-${1}.log ]]; then
- docker logs ceph-osd-prepare-{{ ansible_hostname }}-${1} &> {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-${1}.log
+ {{ container_binary }} logs ceph-osd-prepare-{{ ansible_hostname }}-${1} &> {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-${1}.log
fi
fi
if [[ -f {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log ]]; then
# NOTE(leseb): if we arrive here this probably means we just switched from non-containers to containers.
# This is tricky as we don't have any info on the type of OSD, this is 'only' a problem for non-collocated scenarios
# We can't assume that the 'ceph' is still present so calling Docker exec instead
- part=$(docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} list /dev/${1} | awk '/journal / {print $1}')
+ part=$({{ container_binary }} run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} list /dev/${1} | awk '/journal / {print $1}')
DOCKER_ENV="-e OSD_JOURNAL=$part"
fi
# if empty, the previous command didn't find anything so we fail
# MAIN #
########
-/usr/bin/docker run \
+/usr/bin/{{ container_binary }} run \
--rm \
--net=host \
--privileged=true \
--pid=host \
--memory={{ ceph_osd_docker_memory_limit }} \
- {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
+ {% if is_podman or (ceph_docker_version is defined and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) -%}
--cpus={{ ceph_osd_docker_cpu_limit }} \
{% else -%}
--cpu-quota={{ ceph_osd_docker_cpu_limit * 100000 }} \
[Service]
EnvironmentFile=-/etc/environment
{% if osd_scenario == 'lvm' -%}
-ExecStartPre=-/usr/bin/docker stop ceph-osd-%i
-ExecStartPre=-/usr/bin/docker rm -f ceph-osd-%i
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-osd-%i
+ExecStartPre=-/usr/bin/{{ container_binary }} rm -f ceph-osd-%i
{% else %}
-ExecStartPre=-/usr/bin/docker stop ceph-osd-{{ ansible_hostname }}-%i
-ExecStartPre=-/usr/bin/docker rm -f ceph-osd-{{ ansible_hostname }}-%i
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-osd-{{ ansible_hostname }}-%i
+ExecStartPre=-/usr/bin/{{ container_binary }} rm -f ceph-osd-{{ ansible_hostname }}-%i
{% endif -%}
ExecStart={{ ceph_osd_docker_run_script_path }}/ceph-osd-run.sh %i
{% if osd_scenario == 'lvm' -%}
-ExecStop=-/usr/bin/docker stop ceph-osd-%i
+ExecStop=-/usr/bin/{{ container_binary }} stop ceph-osd-%i
{% else %}
-ExecStop=-/usr/bin/docker stop ceph-osd-{{ ansible_hostname }}-%i
+ExecStop=-/usr/bin/{{ container_binary }} stop ceph-osd-{{ ansible_hostname }}-%i
{% endif -%}
Restart=always
RestartSec=10s
---
- name: set_fact docker_exec_cmd
set_fact:
- docker_exec_cmd: "docker exec ceph-rbd-mirror-{{ ansible_hostname }}"
+ docker_exec_cmd: "{{ container_binary }} exec ceph-rbd-mirror-{{ ansible_hostname }}"
when:
- containerized_deployment
[Service]
EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker stop ceph-rbd-mirror-{{ ansible_hostname }}
-ExecStartPre=-/usr/bin/docker rm ceph-rbd-mirror-{{ ansible_hostname }}
-ExecStart=/usr/bin/docker run --rm --net=host \
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rbd-mirror-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rbd-mirror-{{ ansible_hostname }}
+ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
--memory={{ ceph_rbd_mirror_docker_memory_limit }} \
- {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
+ {% if is_podman or (ceph_docker_version is defined and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) -%}
--cpus={{ ceph_rbd_mirror_docker_cpu_limit }} \
{% else -%}
--cpu-quota={{ ceph_rbd_mirror_docker_cpu_limit * 100000 }} \
--name=ceph-rbd-mirror-{{ ansible_hostname }} \
{{ ceph_rbd_mirror_docker_extra_env }} \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop ceph-rbd-mirror-{{ ansible_hostname }}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rbd-mirror-{{ ansible_hostname }}
Restart=always
RestartSec=10s
TimeoutStartSec=120
[Service]
EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker stop ceph-rgw-{{ ansible_hostname }}
-ExecStartPre=-/usr/bin/docker rm ceph-rgw-{{ ansible_hostname }}
-ExecStart=/usr/bin/docker run --rm --net=host \
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rgw-{{ ansible_hostname }}
+ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
--memory={{ ceph_rgw_docker_memory_limit }} \
- {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
+ {% if is_podman or (ceph_docker_version is defined and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) -%}
--cpus={{ ceph_rgw_docker_cpu_limit }} \
{% else -%}
--cpu-quota={{ ceph_rgw_docker_cpu_limit * 100000 }} \
--name=ceph-rgw-{{ ansible_hostname }} \
{{ ceph_rgw_docker_extra_env }} \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop ceph-rgw-{{ ansible_hostname }}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_hostname }}
Restart=always
RestartSec=10s
TimeoutStartSec=120
block:
- name: fail on unsupported distribution for iscsi gateways
fail:
- msg: "iSCSI gateways can only be deployed on Red Hat Enterprise Linux or CentOS"
+ msg: "iSCSI gateways can only be deployed on Red Hat Enterprise Linux, CentOS or Fedora"
when:
- - ansible_distribution not in ['RedHat', 'CentOS']
+ - ansible_distribution not in ['RedHat', 'CentOS', 'Fedora']
- name: fail on unsupported distribution version for iscsi gateways
fail:
+++ /dev/null
-site-container.yml.sample
\ No newline at end of file
--- /dev/null
+---
+# Defines deployment design and assigns role to server groups
+
+- hosts:
+ - mons
+ - agents
+ - osds
+ - mdss
+ - rgws
+ - nfss
+ - rbdmirrors
+ - clients
+ - iscsigws
+ - iscsi-gws # for backward compatibility only!
+ - mgrs
+
+ gather_facts: false
+ become: True
+ any_errors_fatal: true
+
+ vars:
+ delegate_facts_host: True
+
+ tasks:
+ # pre-tasks for following import -
+ - name: gather facts
+ setup:
+ when:
+ - not delegate_facts_host | bool
+
+ - name: gather and delegate facts
+ setup:
+ delegate_to: "{{ item }}"
+ delegate_facts: True
+ with_items: "{{ groups['all'] }}"
+ run_once: true
+ when:
+ - delegate_facts_host | bool
+
+ - name: check if it is atomic host
+ stat:
+ path: /run/ostree-booted
+ register: stat_ostree
+ tags:
+ - always
+
+ - name: set_fact is_atomic
+ set_fact:
+ is_atomic: '{{ stat_ostree.stat.exists }}'
+ tags:
+ - always
+
+ - name: check if podman binary is present
+ stat:
+ path: /usr/bin/podman
+ register: podman_binary
+ tags:
+ - always
+
+ - name: set_fact is_podman
+ set_fact:
+ is_podman: "{{ is_atomic and podman_binary.stat.exists }}"
+ tags:
+ - always
+
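+ # select the container binary: prefer podman on atomic hosts that ship it, fall back to docker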
+ - name: set_fact container_binary
+ set_fact:
+ container_binary: "{{ 'podman' if is_atomic and is_podman else 'docker' }}"
+ tags:
+ - always
+
+ - import_role:
+ name: ceph-defaults
+ private: false
+ tags: [with_pkg, fetch_container_image]
+ - import_role:
+ name: ceph-validate
+ private: false
+ - import_role:
+ name: ceph-infra
+ private: false
+ - import_role:
+ name: ceph-handler
+ private: false
+ - import_role:
+ name: ceph-container-common
+ private: false
+ tags: [with_pkg, fetch_container_image]
+ when:
+ - not (is_atomic | bool)
+ - (not (inventory_hostname in groups.get('clients', [])) or (inventory_hostname == groups.get('clients', [''])|first))
+
+
+ # post-tasks for upcoming import -
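+ # ceph-container-common is skipped on atomic hosts, so fetch the container image explicitly here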
+ - name: "pull {{ ceph_docker_image }} image"
+ command: "{{ container_binary }} pull {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+ changed_when: false
+ when:
+ - is_atomic
+ - (ceph_docker_dev_image is undefined or not ceph_docker_dev_image)
+ - (not (inventory_hostname in groups.get('clients', [])) or (inventory_hostname == groups.get('clients', [''])|first))
+
+- hosts: mons
+ tasks:
+ - name: set ceph monitor install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_mon:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: mons
+ become: True
+ gather_facts: false
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ private: false
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ private: false
+ - import_role:
+ name: ceph-container-common
+ private: false
+ - import_role:
+ name: ceph-config
+ private: false
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-mon
+ private: false
+ serial: 1 # MUST be '1' WHEN DEPLOYING MONITORS IN CONTAINERS
+
+- hosts: mons
+ tasks:
+ - name: set ceph monitor install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_mon:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: mgrs
+ become: True
+ gather_facts: false
+ tasks:
+ # pre-tasks for following imports -
+ - name: set ceph manager install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_mgr:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ - import_role:
+ name: ceph-defaults
+ private: false
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ private: false
+ - import_role:
+ name: ceph-container-common
+ private: false
+ - import_role:
+ name: ceph-config
+ private: false
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-mgr
+ private: false
+
+ # post-tasks for preceding imports -
+ - name: set ceph manager install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_mgr:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: osds
+ become: True
+ gather_facts: false
+ tasks:
+ # pre-tasks for upcoming imports -
+ - name: set ceph osd install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_osd:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ - import_role:
+ name: ceph-defaults
+ private: false
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ private: false
+ - import_role:
+ name: ceph-container-common
+ private: false
+ - import_role:
+ name: ceph-config
+ private: false
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-osd
+ private: false
+
+ # post-tasks for preceding imports -
+ - name: set ceph osd install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_osd:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: mdss
+ become: True
+ gather_facts: false
+ tasks:
+ # pre-tasks for following imports -
+ - name: set ceph mds install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_mds:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ - import_role:
+ name: ceph-defaults
+ private: false
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ private: false
+ - import_role:
+ name: ceph-container-common
+ private: false
+ - import_role:
+ name: ceph-config
+ private: false
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-mds
+ private: false
+
+ # post-tasks for preceding imports -
+ - name: set ceph mds install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_mds:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: rgws
+ become: True
+ gather_facts: false
+ tasks:
+ # pre-tasks for following imports -
+ - name: set ceph rgw install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_rgw:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ - import_role:
+ name: ceph-defaults
+ private: false
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ private: false
+ - import_role:
+ name: ceph-container-common
+ private: false
+ - import_role:
+ name: ceph-config
+ private: false
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-rgw
+ private: false
+
+ # post-tasks for preceding imports -
+ - name: set ceph rgw install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_rgw:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: nfss
+ become: True
+ gather_facts: false
+ tasks:
+ # pre-tasks for following imports -
+ - name: set ceph nfs install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_nfs:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ - import_role:
+ name: ceph-defaults
+ private: false
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ private: false
+ - import_role:
+ name: ceph-container-common
+ private: false
+ - import_role:
+ name: ceph-config
+ private: false
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-nfs
+ private: false
+
+ # post-tasks for preceding imports -
+ - name: set ceph nfs install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_nfs:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: rbdmirrors
+ become: True
+ gather_facts: false
+ tasks:
+ # pre-tasks for following imports -
+ - name: set ceph rbd mirror install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_rbdmirror:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ - import_role:
+ name: ceph-defaults
+ private: false
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ private: false
+ - import_role:
+ name: ceph-container-common
+ private: false
+ - import_role:
+ name: ceph-config
+ private: false
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-rbd-mirror
+ private: false
+
+ # post-tasks for preceding imports -
+ - name: set ceph rbd mirror install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_rbdmirror:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: clients
+ become: True
+ gather_facts: false
+ tasks:
+ # pre-tasks for following imports -
+ - name: set ceph client install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_client:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ - import_role:
+ name: ceph-defaults
+ private: false
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ private: false
+ - import_role:
+ name: ceph-container-common
+ private: false
+ when:
+ - inventory_hostname == groups.get('clients', ['']) | first
+ - import_role:
+ name: ceph-config
+ private: false
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-client
+ private: false
+
+ # post-tasks for preceding imports -
+ - name: set ceph client install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_client:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts:
+ - iscsigws
+ - iscsi-gws # for backward compatibility only!
+ gather_facts: false
+ become: True
+ tasks:
+ # pre-tasks for following imports -
+ - name: set ceph iscsi gateway install 'In Progress'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_iscsi_gw:
+ status: "In Progress"
+ start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+ - import_role:
+ name: ceph-defaults
+ private: false
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-handler
+ private: false
+ - import_role:
+ name: ceph-container-common
+ private: false
+ - import_role:
+ name: ceph-config
+ private: false
+ tags: ['ceph_update_config']
+ - import_role:
+ name: ceph-iscsi-gw
+ private: false
+
+ # post-tasks for preceding imports -
+ post_tasks:
+ - name: set ceph iscsi gw install 'Complete'
+ run_once: true
+ set_stats:
+ data:
+ installer_phase_ceph_iscsi_gw:
+ status: "Complete"
+ end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: mons
+ gather_facts: false
+ become: True
+ tasks:
+ - name: get ceph status from the first monitor
+ command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups['mons'][0]]['ansible_hostname'] }} ceph --cluster {{ cluster | default ('ceph') }} -s"
+ register: ceph_status
+ changed_when: false
+ delegate_to: "{{ groups['mons'][0] }}"
+ run_once: true
+ ignore_errors: true # we skip the error if mon_group_name is different than 'mons'
+
+ - name: "show ceph status for cluster {{ cluster | default ('ceph') }}"
+ debug:
+ msg: "{{ ceph_status.stdout_lines }}"
+ delegate_to: "{{ groups['mons'][0] }}"
+ run_once: true
+ when: not ceph_status.failed
--- /dev/null
+../../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+../../../centos/7/cluster/ceph-override.json
\ No newline at end of file
--- /dev/null
+---
+# this is only here to let the CI tests know
+# that this scenario is containerized
+docker: True
+
+containerized_deployment: True
+monitor_interface: eth1
+radosgw_interface: eth1
+ceph_mon_docker_subnet: "{{ public_network }}"
+ceph_docker_on_openstack: False
+public_network: "192.168.30.0/24"
+cluster_network: "192.168.31.0/24"
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
+ceph_conf_overrides:
+ global:
+ osd_pool_default_size: 1
+openstack_config: True
+openstack_glance_pool:
+ name: "images"
+ pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ rule_name: "HDD"
+ type: 1
+ erasure_profile: ""
+ expected_num_objects: ""
+ size: ""
+openstack_cinder_pool:
+ name: "volumes"
+ pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ rule_name: "HDD"
+ type: 1
+ erasure_profile: ""
+ expected_num_objects: ""
+ size: ""
+openstack_pools:
+ - "{{ openstack_glance_pool }}"
+ - "{{ openstack_cinder_pool }}"
--- /dev/null
+---
+user_config: True
+copy_admin_key: True
+test:
+ name: "test"
+ pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ rule_name: "HDD"
+ type: 1
+ erasure_profile: ""
+ expected_num_objects: ""
+test2:
+ name: "test2"
+ pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ rule_name: "HDD"
+ type: 1
+ erasure_profile: ""
+ expected_num_objects: ""
+pools:
+ - "{{ test }}"
+ - "{{ test2 }}"
--- /dev/null
+---
+gateway_ip_list: 192.168.1.90
+generate_crt: True
\ No newline at end of file
--- /dev/null
+---
+create_crush_tree: True
+crush_rule_config: True
+crush_rule_hdd:
+ name: HDD
+ root: HDD
+ type: host
+ default: true
+crush_rules:
+ - "{{ crush_rule_hdd }}"
--- /dev/null
+---
+ceph_osd_docker_run_script_path: /var/tmp
+osd_objectstore: "bluestore"
+osd_scenario: lvm
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
\ No newline at end of file
--- /dev/null
+---
+copy_admin_key: True
+rgw_create_pools:
+ foo:
+ pg_num: 17
+ bar:
+ pg_num: 19
--- /dev/null
+[mons]
+mon0
+mon1
+mon2
+
+[mgrs]
+mgr0
+
+[osds]
+osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
+osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
+
+[mdss]
+mds0
+
+[rgws]
+rgw0
+
+[nfss]
+nfs0
+
+[clients]
+client0
+client1
+
+[rbdmirrors]
+rbd-mirror0
+
+[iscsigws]
+iscsi-gw0
+
+[all:vars]
+ansible_python_interpreter=/usr/bin/python3
\ No newline at end of file
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: True
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 3
+osd_vms: 2
+mds_vms: 1
+rgw_vms: 1
+nfs_vms: 1
+rbd_mirror_vms: 1
+client_vms: 2
+iscsi_gw_vms: 1
+mgr_vms: 1
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.30
+cluster_subnet: 192.168.31
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+vagrant_box: fedora/29-atomic-host
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for CentOS/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
--- /dev/null
+- hosts: all
+ gather_facts: true
+ become: yes
+ tasks:
+ - name: unlock /usr
+ command: ostree admin unlock --hotfix
+ changed_when: false
+ when: ansible_distribution == 'Fedora'
+
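+ # RHEL 8 ships podman without docker, so removing the docker binary makes this scenario exercise podman only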
+ - name: remove docker binary on fedora to simulate rhel8
+ file:
+ path: /usr/bin/docker
+ state: absent
+ when: ansible_distribution == 'Fedora'
\ No newline at end of file
[tox]
-envlist = {dev,luminous,mimic,rhcs}-{xenial_cluster,centos7_cluster,cluster,docker_cluster,update_cluster,update_docker_cluster,switch_to_containers,ooo_collocation,bluestore_lvm_osds,bluestore_lvm_osds_container,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,shrink_mon_container,shrink_osd_container,docker_cluster_collocation,lvm_batch,lvm_osds_container,lvm_batch_container,infra_lv_create,add_osds,add_osds_container,rgw_multisite,rgw_multisite_container}
+envlist = {dev,luminous,mimic,rhcs}-{xenial_cluster,centos7_cluster,cluster,docker_cluster,update_cluster,update_docker_cluster,switch_to_containers,ooo_collocation,bluestore_lvm_osds,bluestore_lvm_osds_container,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,shrink_mon_container,shrink_osd_container,docker_cluster_collocation,lvm_batch,lvm_osds_container,lvm_batch_container,infra_lv_create,add_osds,add_osds_container,rgw_multisite,rgw_multisite_container,container_podman}
infra_lv_create
skipsdist = True
shrink_osd_container: PLAYBOOK = site-docker.yml.sample
shrink_osd_container: COPY_ADMIN_KEY = True
shrink_osd: COPY_ADMIN_KEY = True
+ container_podman: PLAYBOOK = site-docker.yml.sample
rhcs: CEPH_STABLE_RELEASE = luminous
luminous: CEPH_STABLE_RELEASE = luminous
add_osds_container: {toxinidir}/tests/functional/centos/7/add-osds-container
rgw_multisite: {toxinidir}/tests/functional/centos/7/rgw-multisite
rgw_multisite_container: {toxinidir}/tests/functional/centos/7/rgw-multisite-container
+ container_podman: {toxinidir}/tests/functional/fedora/29/container-podman
commands=
rhcs: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "change_dir={changedir}" --tags "vagrant_setup"
purge_cluster_non_container: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
purge_cluster_container: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
switch_to_containers: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
+ container_podman: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
+ container_podman: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/simulate_rhel8.yml
rhcs: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} repo_url={env:REPO_URL:} rhel7_repo_url={env:RHEL7_REPO_URL:}" --skip-tags "vagrant_setup"