Add new container scenario
author Sébastien Han <seb@redhat.com>
Thu, 8 Nov 2018 09:02:37 +0000 (10:02 +0100)
committer mergify[bot] <mergify[bot]@users.noreply.github.com>
Tue, 27 Nov 2018 16:47:40 +0000 (16:47 +0000)
Test with podman instead of docker, and add support for Python 3 only.

Signed-off-by: Sébastien Han <seb@redhat.com>
52 files changed:
library/ceph_volume.py
library/test_ceph_volume.py
roles/ceph-client/tasks/create_users_keys.yml
roles/ceph-container-common/tasks/fetch_image.yml
roles/ceph-container-common/tasks/main.yml
roles/ceph-defaults/tasks/facts.yml
roles/ceph-handler/tasks/check_running_containers.yml
roles/ceph-handler/templates/restart_mds_daemon.sh.j2
roles/ceph-handler/templates/restart_mgr_daemon.sh.j2
roles/ceph-handler/templates/restart_mon_daemon.sh.j2
roles/ceph-handler/templates/restart_nfs_daemon.sh.j2
roles/ceph-handler/templates/restart_osd_daemon.sh.j2
roles/ceph-handler/templates/restart_rbd_mirror_daemon.sh.j2
roles/ceph-handler/templates/restart_rgw_daemon.sh.j2
roles/ceph-iscsi-gw/tasks/common.yml
roles/ceph-iscsi-gw/templates/rbd-target-api.service.j2
roles/ceph-iscsi-gw/templates/rbd-target-gw.service.j2
roles/ceph-iscsi-gw/templates/tcmu-runner.service.j2
roles/ceph-mds/tasks/containerized.yml
roles/ceph-mds/tasks/main.yml
roles/ceph-mds/templates/ceph-mds.service.j2
roles/ceph-mgr/tasks/main.yml
roles/ceph-mgr/templates/ceph-mgr.service.j2
roles/ceph-mon/tasks/main.yml
roles/ceph-mon/templates/ceph-mon.service.j2
roles/ceph-nfs/tasks/create_rgw_nfs_user.yml
roles/ceph-nfs/tasks/main.yml
roles/ceph-nfs/tasks/start_nfs.yml
roles/ceph-nfs/templates/ceph-nfs.service.j2
roles/ceph-osd/tasks/scenarios/collocated.yml
roles/ceph-osd/tasks/scenarios/lvm.yml
roles/ceph-osd/tasks/scenarios/non-collocated.yml
roles/ceph-osd/tasks/start_osds.yml
roles/ceph-osd/templates/ceph-osd-run.sh.j2
roles/ceph-osd/templates/ceph-osd.service.j2
roles/ceph-rbd-mirror/tasks/main.yml
roles/ceph-rbd-mirror/templates/ceph-rbd-mirror.service.j2
roles/ceph-rgw/templates/ceph-radosgw.service.j2
roles/ceph-validate/tasks/check_system.yml
site-docker.yml.sample [changed from symlink to file mode: 0644]
tests/functional/fedora/29/container-podman/Vagrantfile [new symlink]
tests/functional/fedora/29/container-podman/ceph-override.json [new symlink]
tests/functional/fedora/29/container-podman/group_vars/all [new file with mode: 0644]
tests/functional/fedora/29/container-podman/group_vars/clients [new file with mode: 0644]
tests/functional/fedora/29/container-podman/group_vars/iscsigws [new file with mode: 0644]
tests/functional/fedora/29/container-podman/group_vars/mons [new file with mode: 0644]
tests/functional/fedora/29/container-podman/group_vars/osds [new file with mode: 0644]
tests/functional/fedora/29/container-podman/group_vars/rgws [new file with mode: 0644]
tests/functional/fedora/29/container-podman/hosts [new file with mode: 0644]
tests/functional/fedora/29/container-podman/vagrant_variables.yml [new file with mode: 0644]
tests/functional/simulate_rhel8.yml [new file with mode: 0644]
tox.ini

index f20abb4abfb71e6914afdf06e5a6531b1335d66a..75a293285cd9fead08ce3f9c246ccaedf725fb81 100644 (file)
@@ -178,8 +178,9 @@ def container_exec(binary, container_image):
     '''
     Build the docker CLI to run a command inside a container
     '''
-
-    command_exec = ['docker', 'run', '--rm', '--privileged', '--net=host',
+    container_binary = os.getenv('CEPH_CONTAINER_BINARY')
+    command_exec = [container_binary, 'run',
+                    '--rm', '--privileged', '--net=host',
                     '-v', '/run/lock/lvm:/run/lock/lvm:z',
                     '-v', '/var/run/udev/:/var/run/udev/:z',
                     '-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z',
@@ -581,15 +582,16 @@ def run_module():
         except ValueError:
             strategy_change = "strategy changed" in out
             if strategy_change:
-                out = json.dumps({"changed": False, "stdout": out.rstrip("\r\n")})
+                out = json.dumps(
+                    {"changed": False, "stdout": out.rstrip("\r\n")})
                 rc = 0
                 changed = False
             else:
                 out = out.rstrip("\r\n")
             result = dict(
                 cmd=cmd,
-                stdout=out,
-                stderr=err.rstrip("\r\n"),
+                stdout=out.rstrip('\r\n'),
+                stderr=err.rstrip('\r\n'),
                 rc=rc,
                 changed=changed,
             )
@@ -620,8 +622,8 @@ def run_module():
         end=str(endd),
         delta=str(delta),
         rc=rc,
-        stdout=out.rstrip(b'\r\n'),
-        stderr=err.rstrip(b'\r\n'),
+        stdout=out.rstrip('\r\n'),
+        stderr=err.rstrip('\r\n'),
         changed=changed,
     )
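Note: os.getenv('CEPH_CONTAINER_BINARY') returns None when the variable is unset, so the command list would start with None outside a playbook run that exports it. A minimal defensive sketch (an assumption, not part of this change):

    import os

    # Hypothetical fallback: keep the module usable when the playbook
    # has not exported CEPH_CONTAINER_BINARY (os.getenv() returns None).
    container_binary = os.getenv('CEPH_CONTAINER_BINARY') or 'docker'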
 
index 5863b56982129fbfa60830e690e62b7f73fe4aba..401c3e925d340b2036c85e5460225c33df08a55e 100644 (file)
@@ -36,7 +36,7 @@ class TestCephVolumeModule(object):
         result = ceph_volume.get_wal("wal-lv", "wal-vg")
         assert result == "wal-vg/wal-lv"
 
-    def test_container_exec(sefl):
+    def test_container_exec(self):
         fake_binary = "ceph-volume"
         fake_container_image = "docker.io/ceph/daemon:latest-luminous"
         expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host',  # noqa E501
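Note: expected_command_list still begins with 'docker', so this test only passes when CEPH_CONTAINER_BINARY resolves to 'docker'. A hypothetical pytest sketch that pins the variable (not part of this change):

    import ceph_volume

    def test_container_exec_with_env(monkeypatch):
        # Pin the env var so container_exec() resolves to 'docker'
        # and the hard-coded expectation above holds.
        monkeypatch.setenv('CEPH_CONTAINER_BINARY', 'docker')
        cmd = ceph_volume.container_exec(
            'ceph-volume', 'docker.io/ceph/daemon:latest-luminous')
        assert cmd[0] == 'docker'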
index f74c21e16e3adbb415da9ca66a7aa07826b322cb..99602205f6e539572260c211f8fa8c62344d77a5 100644 (file)
@@ -27,7 +27,7 @@
 
 - name: run a dummy container (sleep 300) from where we can create pool(s)/key(s)
   command: >
-    docker run \
+    {{ container_binary }} run \
     --rm \
     -d \
     -v {{ ceph_conf_key_directory }}:{{ ceph_conf_key_directory }}:z \
@@ -50,7 +50,7 @@
 
 - name: set_fact docker_exec_cmd
   set_fact:
-    docker_exec_cmd: "docker exec {% if groups.get(mon_group_name, []) | length > 0 -%} ceph-mon-{{ hostvars[delegated_node]['ansible_hostname'] }} {% else %} ceph-create-keys {% endif %}"
+    docker_exec_cmd: "{{ container_binary }} exec {% if groups.get(mon_group_name, []) | length > 0 -%} ceph-mon-{{ hostvars[delegated_node]['ansible_hostname'] }} {% else %} ceph-create-keys {% endif %}"
   when:
     - containerized_deployment
 
index 1b634745790495d41ec89cd78123785f02f05b1f..8a29117123cb419d5e5bf212ee476ad5a587fc89 100644 (file)
@@ -1,7 +1,7 @@
 ---
 # NOTE (leseb): we must check each inventory group so this will work with collocated daemons
 - name: inspect ceph mon container
-  command: "docker inspect {{ ceph_mon_container_stat.stdout }}"
+  command: "{{ container_binary }} inspect {{ ceph_mon_container_stat.stdout }}"
   changed_when: false
   register: ceph_mon_inspect
   when:
@@ -10,7 +10,7 @@
     - ceph_mon_container_stat.get('stdout_lines', [])|length != 0
 
 - name: inspect ceph osd container
-  command: "docker inspect {{ ceph_osd_container_stat.stdout }}"
+  command: "{{ container_binary }} inspect {{ ceph_osd_container_stat.stdout }}"
   changed_when: false
   register: ceph_osd_inspect
   when:
@@ -19,7 +19,7 @@
     - ceph_osd_container_stat.get('stdout_lines', [])|length != 0
 
 - name: inspect ceph mds container
-  command: "docker inspect {{ ceph_mds_container_stat.stdout }}"
+  command: "{{ container_binary }} inspect {{ ceph_mds_container_stat.stdout }}"
   changed_when: false
   register: ceph_mds_inspect
   when:
@@ -28,7 +28,7 @@
     - ceph_mds_container_stat.get('stdout_lines', [])|length != 0
 
 - name: inspect ceph rgw container
-  command: "docker inspect {{ ceph_rgw_container_stat.stdout }}"
+  command: "{{ container_binary }} inspect {{ ceph_rgw_container_stat.stdout }}"
   changed_when: false
   register: ceph_rgw_inspect
   when:
@@ -37,7 +37,7 @@
     - ceph_rgw_container_stat.get('stdout_lines', [])|length != 0
 
 - name: inspect ceph mgr container
-  command: "docker inspect {{ ceph_mgr_container_stat.stdout }}"
+  command: "{{ container_binary }} inspect {{ ceph_mgr_container_stat.stdout }}"
   changed_when: false
   register: ceph_mgr_inspect
   when:
@@ -46,7 +46,7 @@
     - ceph_mgr_container_stat.get('stdout_lines', [])|length != 0
 
 - name: inspect ceph rbd mirror container
-  command: "docker inspect {{ ceph_rbd_mirror_container_stat.stdout }}"
+  command: "{{ container_binary }} inspect {{ ceph_rbd_mirror_container_stat.stdout }}"
   changed_when: false
   register: ceph_rbd_mirror_inspect
   when:
@@ -55,7 +55,7 @@
     - ceph_rbd_mirror_container_stat.get('stdout_lines', [])|length != 0
 
 - name: inspect ceph nfs container
-  command: "docker inspect {{ ceph_nfs_container_stat.stdout }}"
+  command: "{{ container_binary }} inspect {{ ceph_nfs_container_stat.stdout }}"
   changed_when: false
   register: ceph_nfs_inspect
   when:
@@ -65,7 +65,7 @@
 
 # NOTE(leseb): using failed_when to handle the case when the image is not present yet
 - name: "inspecting ceph mon container image before pulling"
-  command: "docker inspect {{ (ceph_mon_inspect.stdout | from_json)[0].Image }}"
+  command: "{{ container_binary }} inspect {{ (ceph_mon_inspect.stdout | from_json)[0].Image }}"
   changed_when: false
   failed_when: false
   register: ceph_mon_container_inspect_before_pull
@@ -74,7 +74,7 @@
     - ceph_mon_inspect.get('rc') == 0
 
 - name: "inspecting ceph osd container image before pulling"
-  command: "docker inspect {{ (ceph_osd_inspect.stdout | from_json)[0].Image }}"
+  command: "{{ container_binary }} inspect {{ (ceph_osd_inspect.stdout | from_json)[0].Image }}"
   changed_when: false
   failed_when: false
   register: ceph_osd_container_inspect_before_pull
@@ -83,7 +83,7 @@
     - ceph_osd_inspect.get('rc') == 0
 
 - name: "inspecting ceph rgw container image before pulling"
-  command: "docker inspect {{ (ceph_rgw_inspect.stdout | from_json)[0].Image }}"
+  command: "{{ container_binary }} inspect {{ (ceph_rgw_inspect.stdout | from_json)[0].Image }}"
   changed_when: false
   failed_when: false
   register: ceph_rgw_container_inspect_before_pull
@@ -92,7 +92,7 @@
     - ceph_rgw_inspect.get('rc') == 0
 
 - name: "inspecting ceph mds container image before pulling"
-  command: "docker inspect {{ (ceph_mds_inspect.stdout | from_json)[0].Image }}"
+  command: "{{ container_binary }} inspect {{ (ceph_mds_inspect.stdout | from_json)[0].Image }}"
   changed_when: false
   failed_when: false
   register: ceph_mds_container_inspect_before_pull
     - ceph_mds_inspect.get('rc') == 0
 
 - name: "inspecting ceph mgr container image before pulling"
-  command: "docker inspect {{ (ceph_mgr_inspect.stdout | from_json)[0].Image }}"
+  command: "{{ container_binary }} inspect {{ (ceph_mgr_inspect.stdout | from_json)[0].Image }}"
   changed_when: false
   failed_when: false
   register: ceph_mgr_container_inspect_before_pull
     - ceph_mgr_inspect.get('rc') == 0
 
 - name: "inspecting ceph rbd mirror container image before pulling"
-  command: "docker inspect {{ (ceph_rbd_mirror_inspect.stdout | from_json)[0].Image }}"
+  command: "{{ container_binary }} inspect {{ (ceph_rbd_mirror_inspect.stdout | from_json)[0].Image }}"
   changed_when: false
   failed_when: false
   register: ceph_rbd_mirror_container_inspect_before_pull
     - ceph_rbd_mirror_inspect.get('rc') == 0
 
 - name: "inspecting ceph nfs container image before pulling"
-  command: "docker inspect {{ (ceph_nfs_inspect.stdout | from_json)[0].Image }}"
+  command: "{{ container_binary }} inspect {{ (ceph_nfs_inspect.stdout | from_json)[0].Image }}"
   changed_when: false
   failed_when: false
   register: ceph_nfs_container_inspect_before_pull
     - nfs_group_name in group_names
     - ceph_nfs_container_inspect_before_pull.get('rc') == 0
 
-- name: "pulling {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image"
-  command: "timeout {{ docker_pull_timeout }} docker pull {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+- name: "pulling {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image"
+  command: "timeout {{ docker_pull_timeout }} {{ container_binary }} pull {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
   changed_when: false
   register: docker_image
   until: docker_image.rc == 0
   when:
     - (ceph_docker_dev_image is undefined or not ceph_docker_dev_image)
 
-- name: "inspecting {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image after pulling"
-  command: "docker inspect {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+- name: "inspecting {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image after pulling"
+  command: "{{ container_binary }} inspect {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
   changed_when: false
   failed_when: false
   register: image_inspect_after_pull
 
 - name: export local ceph dev image
   command: >
-    docker save -o "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
+    {{ container_binary }} save -o "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
     "{{ ceph_docker_username }}/{{ ceph_docker_imagename }}:{{ ceph_docker_image_tag }}"
   delegate_to: localhost
   when:
     - (ceph_docker_dev_image is defined and ceph_docker_dev_image)
 
 - name: load ceph dev image
-  command: "docker load -i /tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
+  command: "{{ container_binary }} load -i /tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
   when:
     - (ceph_docker_dev_image is defined and ceph_docker_dev_image)
 
index aa585161e88045aac7bf61b9a639f3011c40f3e7..533fa9e1b737b8273eedf098b443b638d5f9d5d8 100644 (file)
@@ -9,10 +9,12 @@
   changed_when: false
   check_mode: no
   register: ceph_docker_version
+  when: not is_podman
 
 - name: set_fact ceph_docker_version ceph_docker_version.stdout.split
   set_fact:
     ceph_docker_version: "{{ ceph_docker_version.stdout.split(' ')[2] }}"
+  when: not is_podman
 
 - name: include checks.yml
   include_tasks: checks.yml
@@ -31,7 +33,7 @@
 
 - name: get ceph version
   command: >
-    docker run --rm --entrypoint /usr/bin/ceph
+    {{ container_binary }} run --rm --entrypoint /usr/bin/ceph
     {{ ceph_client_docker_registry }}/{{ ceph_client_docker_image }}:{{ ceph_client_docker_image_tag }}
     --version
   changed_when: false
index b96d0574fbf334a7ffe6d76957797845a4b1f8a0..713359286e825a8ad25a7087fcf2bac9669dd15b 100644 (file)
@@ -8,6 +8,21 @@
   set_fact:
     is_atomic: "{{ stat_ostree.stat.exists }}"
 
+- name: check if podman binary is present
+  stat:
+    path: /usr/bin/podman
+  register: podman_binary
+
+- name: set_fact is_podman
+  set_fact:
+    is_podman: "{{ podman_binary.stat.exists }}"
+  when: is_atomic
+
+- name: set_fact container_binary
+  set_fact:
+    container_binary: "{{ 'podman' if is_atomic and is_podman else 'docker' }}"
+  when: containerized_deployment
+
 - name: set_fact monitor_name ansible_hostname
   set_fact:
     monitor_name: "{{ ansible_hostname }}"
@@ -22,7 +37,7 @@
 
 - name: set_fact docker_exec_cmd
   set_fact:
-    docker_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+    docker_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   when:
     - containerized_deployment
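Note: is_podman is only set on atomic hosts, but the container_binary expression stays safe because Jinja's 'and' short-circuits before touching the undefined fact. An illustrative Python mirror of the selection logic (not shipped code):

    def pick_container_binary(is_atomic, is_podman=False):
        # podman is preferred only on atomic hosts that ship it;
        # everything else keeps using docker.
        return 'podman' if (is_atomic and is_podman) else 'docker'

    assert pick_container_binary(True, True) == 'podman'
    assert pick_container_binary(True, False) == 'docker'
    assert pick_container_binary(False) == 'docker'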
index 15acc5df293afa1b0c2d0f3de1b34a224f670a1e..c0590fe419d7090bab437ca16754df53f4d87ff6 100644 (file)
@@ -1,6 +1,6 @@
 ---
 - name: check for a mon container
-  command: "docker ps -q --filter='name=ceph-mon-{{ ansible_hostname }}'"
+  command: "{{ container_binary }} ps -q --filter='name=ceph-mon-{{ ansible_hostname }}'"
   register: ceph_mon_container_stat
   changed_when: false
   failed_when: false
@@ -9,7 +9,7 @@
     - inventory_hostname in groups.get(mon_group_name, [])
 
 - name: check for an osd container
-  command: "docker ps -q --filter='name=ceph-osd'"
+  command: "{{ container_binary }} ps -q --filter='name=ceph-osd'"
   register: ceph_osd_container_stat
   changed_when: false
   failed_when: false
@@ -18,7 +18,7 @@
     - inventory_hostname in groups.get(osd_group_name, [])
 
 - name: check for a mds container
-  command: "docker ps -q --filter='name=ceph-mds-{{ ansible_hostname }}'"
+  command: "{{ container_binary }} ps -q --filter='name=ceph-mds-{{ ansible_hostname }}'"
   register: ceph_mds_container_stat
   changed_when: false
   failed_when: false
@@ -27,7 +27,7 @@
     - inventory_hostname in groups.get(mds_group_name, [])
 
 - name: check for a rgw container
-  command: "docker ps -q --filter='name=ceph-rgw-{{ ansible_hostname }}'"
+  command: "{{ container_binary }} ps -q --filter='name=ceph-rgw-{{ ansible_hostname }}'"
   register: ceph_rgw_container_stat
   changed_when: false
   failed_when: false
@@ -36,7 +36,7 @@
     - inventory_hostname in groups.get(rgw_group_name, [])
 
 - name: check for a mgr container
-  command: "docker ps -q --filter='name=ceph-mgr-{{ ansible_hostname }}'"
+  command: "{{ container_binary }} ps -q --filter='name=ceph-mgr-{{ ansible_hostname }}'"
   register: ceph_mgr_container_stat
   changed_when: false
   failed_when: false
@@ -45,7 +45,7 @@
     - inventory_hostname in groups.get(mgr_group_name, [])
 
 - name: check for a rbd mirror container
-  command: "docker ps -q --filter='name=ceph-rbd-mirror-{{ ansible_hostname }}'"
+  command: "{{ container_binary }} ps -q --filter='name=ceph-rbd-mirror-{{ ansible_hostname }}'"
   register: ceph_rbd_mirror_container_stat
   changed_when: false
   failed_when: false
@@ -54,7 +54,7 @@
     - inventory_hostname in groups.get(rbdmirror_group_name, [])
 
 - name: check for a nfs container
-  command: "docker ps -q --filter='name=ceph-nfs-{{ ansible_hostname }}'"
+  command: "{{ container_binary }} ps -q --filter='name=ceph-nfs-{{ ansible_hostname }}'"
   register: ceph_nfs_container_stat
   changed_when: false
   failed_when: false
@@ -63,7 +63,7 @@
     - inventory_hostname in groups.get(nfs_group_name, [])
 
 - name: check for a tcmu-runner container
-  command: "docker ps -q --filter='name=tcmu-runner'"
+  command: "{{ container_binary }} ps -q --filter='name=tcmu-runner'"
   register: ceph_tcmu_runner_stat
   changed_when: false
   failed_when: false
@@ -72,7 +72,7 @@
     - inventory_hostname in groups.get(iscsi_gw_group_name, [])
 
 - name: check for a rbd-target-api container
-  command: "docker ps -q --filter='name=rbd-target-api'"
+  command: "{{ container_binary }} ps -q --filter='name=rbd-target-api'"
   register: ceph_rbd_target_api_stat
   changed_when: false
   failed_when: false
@@ -81,7 +81,7 @@
     - inventory_hostname in groups.get(iscsi_gw_group_name, [])
 
 - name: check for a rbd-target-gw container
-  command: "docker ps -q --filter='name=rbd-target-gw'"
+  command: "{{ container_binary }} ps -q --filter='name=rbd-target-gw'"
   register: ceph_rbd_target_gw_stat
   changed_when: false
   failed_when: false
index 2300386e0edb904ad7d0880f29199ae28d445ab5..b40d579b27b8caf8d4112929a5878549d0b37712 100644 (file)
@@ -4,7 +4,7 @@ RETRIES="{{ handler_health_mds_check_retries }}"
 DELAY="{{ handler_health_mds_check_delay }}"
 MDS_NAME="{{ mds_name }}"
 {% if containerized_deployment %}
-DOCKER_EXEC="docker exec ceph-mds-{{ ansible_hostname }}"
+DOCKER_EXEC="{{ container_binary }} exec ceph-mds-{{ ansible_hostname }}"
 {% endif %}
 
 # Backward compatibility
index a9bbc9f966c738dadd073f99dfbfa398478c3889..c9f3554003e7041cca2baf239acff13fef7dabf8 100644 (file)
@@ -4,7 +4,7 @@ RETRIES="{{ handler_health_mgr_check_retries }}"
 DELAY="{{ handler_health_mgr_check_delay }}"
 MGR_NAME="{{ ansible_hostname }}"
 {% if containerized_deployment %}
-DOCKER_EXEC="docker exec ceph-mgr-{{ ansible_hostname }}"
+DOCKER_EXEC="{{ container_binary }} exec ceph-mgr-{{ ansible_hostname }}"
 {% endif %}
 
 # Backward compatibility
index 0e8318c46007cd767b1ab379f526e6ac779837d2..36d50ac9fc68149aba881796d62ff43970adb5b5 100644 (file)
@@ -4,7 +4,7 @@ RETRIES="{{ handler_health_mon_check_retries }}"
 DELAY="{{ handler_health_mon_check_delay }}"
 MONITOR_NAME="{{ monitor_name }}"
 {% if containerized_deployment %}
-DOCKER_EXEC="docker exec ceph-mon-{{ ansible_hostname }}"
+DOCKER_EXEC="{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
 {% endif %}
 
 # Backward compatibility
index 6567ed3a9c0b0ba3e58448c475b07b7a2e12a6a5..39f304bfb95fa59f1a2bf1f7c7f69ec5ab69e25f 100644 (file)
@@ -5,7 +5,7 @@ DELAY="{{ handler_health_nfs_check_delay }}"
 NFS_NAME="ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_hostname) }}"
 PID=/var/run/ganesha.pid
 {% if containerized_deployment %}
-DOCKER_EXEC="docker exec ceph-nfs-{{ ansible_hostname }}"
+DOCKER_EXEC="{{ container_binary }} exec ceph-nfs-{{ ansible_hostname }}"
 {% endif %}
 
 # First, restart the daemon
index 7559271c587b493c8ccaa008e5e1af7629dc2c7f..253d2371d907d4af0156b27f779db2a2b2418b04 100644 (file)
@@ -4,12 +4,12 @@ DELAY="{{ handler_health_osd_check_delay }}"
 CEPH_CLI="--name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring --cluster {{ cluster }}"
 
 check_pgs() {
-  num_pgs=$($docker_exec ceph $CEPH_CLI -s -f json|python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')
+  num_pgs=$($container_exec ceph $CEPH_CLI -s -f json|python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')
   if [[ "$num_pgs" == "0" ]]; then
     return 0
   fi
   while [ $RETRIES -ne 0 ]; do
-    test "$($docker_exec ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')" -eq "$($docker_exec ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print sum ( [ i["count"] for i in json.load(sys.stdin)["pgmap"]["pgs_by_state"] if "active+clean" in i["state_name"]])')"
+    test "$($container_exec ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')" -eq "$($container_exec ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print sum ( [ i["count"] for i in json.load(sys.stdin)["pgmap"]["pgs_by_state"] if "active+clean" in i["state_name"]])')"
     RET=$?
     test $RET -eq 0 && return 0
     sleep $DELAY
@@ -19,17 +19,17 @@ check_pgs() {
   echo "Error while running 'ceph $CEPH_CLI -s', PGs were not reported as active+clean"
   echo "It is possible that the cluster has less OSDs than the replica configuration"
   echo "Will refuse to continue"
-  $docker_exec ceph $CEPH_CLI -s
-  $docker_exec ceph $CEPH_CLI osd dump
-  $docker_exec ceph $CEPH_CLI osd tree
-  $docker_exec ceph $CEPH_CLI osd crush rule dump
+  $container_exec ceph $CEPH_CLI -s
+  $container_exec ceph $CEPH_CLI osd dump
+  $container_exec ceph $CEPH_CLI osd tree
+  $container_exec ceph $CEPH_CLI osd crush rule dump
   exit 1
 }
 
-wait_for_socket_in_docker() {
-  osd_mount_point=$(docker exec "$1" df --output=target | grep '/var/lib/ceph/osd/')
-  whoami=$(docker exec "$1" cat $osd_mount_point/whoami)
-  if ! docker exec "$1" timeout 10 bash -c "while [ ! -e /var/run/ceph/*.asok ]; do sleep 1 ; done"; then
+wait_for_socket_in_container() {
+  osd_mount_point=$({{ container_binary }} exec "$1" df --output=target | grep '/var/lib/ceph/osd/')
+  whoami=$({{ container_binary }} exec "$1" cat $osd_mount_point/whoami)
+  if ! {{ container_binary }} exec "$1" timeout 10 bash -c "while [ ! -e /var/run/ceph/*.asok ]; do sleep 1 ; done"; then
     echo "Timed out while trying to look for a Ceph OSD socket."
     echo "Abort mission!"
     exit 1
@@ -40,12 +40,12 @@ get_dev_name() {
   echo $1 | sed -r 's/ceph-osd@([a-z]{1,4})\.service/\1/'
 }
 
-get_docker_id_from_dev_name() {
+get_container_id_from_dev_name() {
   local id
   local count
   count=10
   while [ $count -ne 0 ]; do
-    id=$(docker ps -q -f "name=$1")
+    id=$({{ container_binary }} ps -q -f "name=$1")
     test "$id" != "" && break
     sleep $DELAY
     let count=count-1
@@ -53,9 +53,9 @@ get_docker_id_from_dev_name() {
   echo "$id"
 }
 
-get_docker_osd_id() {
-  wait_for_socket_in_docker $1
-  docker exec "$1" ls /var/run/ceph | cut -d'.' -f2
+get_container_osd_id() {
+  wait_for_socket_in_container $1
+  {{ container_binary }} exec "$1" ls /var/run/ceph | cut -d'.' -f2
 }
 
 # For containerized deployments, the unit file looks like: ceph-osd@sda.service
@@ -68,21 +68,21 @@ for unit in $(systemctl list-units | grep -E "loaded * active" | grep -oE "ceph-
   # Wait and ensure the socket exists after restarting the daemon
   {% if containerized_deployment and osd_scenario != 'lvm' -%}
   id=$(get_dev_name "$unit")
-  container_id=$(get_docker_id_from_dev_name "$id")
-  wait_for_socket_in_docker "$container_id"
+  container_id=$(get_container_id_from_dev_name "$id")
+  wait_for_socket_in_container "$container_id"
   osd_id=$whoami
-  docker_exec="docker exec $container_id"
+  container_exec="{{ container_binary }} exec $container_id"
   {% elif containerized_deployment and osd_scenario == 'lvm' %}
   osd_id=$(echo ${unit#ceph-osd@} | grep -oE '[0-9]+')
-  container_id=$(get_docker_id_from_dev_name "ceph-osd-${osd_id}")
-  docker_exec="docker exec $container_id"
+  container_id=$(get_container_id_from_dev_name "ceph-osd-${osd_id}")
+  container_exec="{{ container_binary }} exec $container_id"
   {% else %}
   osd_id=$(echo ${unit#ceph-osd@} | grep -oE '[0-9]+')
   {% endif %}
   SOCKET=/var/run/ceph/{{ cluster }}-osd.${osd_id}.asok
   while [ $COUNT -ne 0 ]; do
     RETRIES="{{ handler_health_osd_check_retries }}"
-    $docker_exec test -S "$SOCKET" && check_pgs && continue 2
+    $container_exec test -S "$SOCKET" && check_pgs && continue 2
     sleep $DELAY
     let COUNT=COUNT-1
   done
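Note: the embedded one-liners parse 'ceph -s -f json' twice per retry; a standalone Python equivalent of the active+clean comparison (illustrative only, assuming the same pgmap JSON layout):

    import json
    import sys

    # Compare the total PG count with the number of PGs reporting
    # active+clean, mirroring the shell test in check_pgs() above.
    status = json.load(sys.stdin)
    num_pgs = status["pgmap"]["num_pgs"]
    clean = sum(i["count"] for i in status["pgmap"]["pgs_by_state"]
                if "active+clean" in i["state_name"])
    sys.exit(0 if num_pgs == clean else 1)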
index bb6b58da96b7e323658c8a8ccb3fef5eb9479f7b..5fb23454a861488caa0d0ffd3552259d38fb401f 100644 (file)
@@ -4,7 +4,7 @@ RETRIES="{{ handler_health_rbd_mirror_check_retries }}"
 DELAY="{{ handler_health_rbd_mirror_check_delay }}"
 RBD_MIRROR_NAME="{{ ansible_hostname }}"
 {% if containerized_deployment %}
-DOCKER_EXEC="docker exec ceph-rbd-mirror-{{ ansible_hostname }}"
+DOCKER_EXEC="{{ container_binary }} exec ceph-rbd-mirror-{{ ansible_hostname }}"
 {% endif %}
 
 # Backward compatibility
index 976b6cc3b81c7573760b5fe26abedfc88dc1ab21..4d970ffa8cde1ab16bf896fbbe42d43bf15324f9 100644 (file)
@@ -5,7 +5,7 @@ DELAY="{{ handler_health_rgw_check_delay }}"
 RGW_NAME="{{ ansible_hostname }}"
 RGW_PORT="{{ radosgw_frontend_port }}"
 {% if containerized_deployment %}
-DOCKER_EXEC="docker exec ceph-rgw-{{ ansible_hostname }}"
+DOCKER_EXEC="{{ container_binary }} exec ceph-rgw-{{ ansible_hostname }}"
 {% endif %}
 # Backward compatibility
 $DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rgw.{{ ansible_fqdn }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rgw.{{ ansible_fqdn }}.asok
index 3d4a556fd41523ff1dfc34dd8c0dbca8cce2a2ad..2c278b30950bcf2fdc7ede3dfb2d5f5d9f61f648 100644 (file)
@@ -22,7 +22,7 @@
 
 - name: set_fact docker_exec_cmd
   set_fact:
-    docker_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+    docker_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   when: containerized_deployment
 
index 2cc19231f4a05dff0fff83b42acca08a95e6ff67..a0fca974015f4d3aefbacbf76502138cb7159ced 100644 (file)
@@ -4,11 +4,11 @@ After=docker.service
 
 [Service]
 EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker stop rbd-target-api
-ExecStartPre=-/usr/bin/docker rm rbd-target-api
-ExecStart=/usr/bin/docker run --rm \
+ExecStartPre=-/usr/bin/{{ container_binary }} stop rbd-target-api
+ExecStartPre=-/usr/bin/{{ container_binary }} rm rbd-target-api
+ExecStart=/usr/bin/{{ container_binary }} run --rm \
   --memory={{ ceph_rbd_target_api_docker_memory_limit }} \
-  {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
+  {% if (ceph_docker_version is defined and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or is_podman -%}
   --cpus={{ ceph_rbd_target_api_docker_cpu_limit }} \
   {% else -%}
   --cpu-quota={{ ceph_rbd_target_api_docker_cpu_limit * 100000 }} \
@@ -24,7 +24,7 @@ ExecStart=/usr/bin/docker run --rm \
   -e CEPH_DAEMON=RBD_TARGET_API \
   --name=rbd-target-api \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop rbd-target-api
+ExecStopPost=-/usr/bin/{{ container_binary }} stop rbd-target-api
 Restart=always
 RestartSec=10s
 TimeoutStartSec=120
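Note: the guard now tolerates ceph_docker_version being undefined (its detection tasks are skipped on podman hosts), and podman always takes the --cpus branch. Older docker without --cpus falls back to --cpu-quota, which converts a CPU count into a CFS quota against the default 100000-microsecond period; a sketch of that arithmetic (illustrative only):

    # docker without --cpus takes --cpu-quota, where the quota is the
    # CPU count multiplied by the default 100000-microsecond CFS period.
    DEFAULT_CFS_PERIOD_US = 100000

    def cpu_quota(cpus):
        return cpus * DEFAULT_CFS_PERIOD_US

    assert cpu_quota(1) == 100000  # matches '--cpu-quota={{ limit * 100000 }}'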
index 6729884989110033675a35aa474363e099ecd82d..f95e915573f73fe519619a6be18624a821803493 100644 (file)
@@ -4,11 +4,11 @@ After=docker.service
 
 [Service]
 EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker stop rbd-target-gw
-ExecStartPre=-/usr/bin/docker rm rbd-target-gw
-ExecStart=/usr/bin/docker run --rm \
+ExecStartPre=-/usr/bin/{{ container_binary }} stop rbd-target-gw
+ExecStartPre=-/usr/bin/{{ container_binary }} rm rbd-target-gw
+ExecStart=/usr/bin/{{ container_binary }} run --rm \
   --memory={{ ceph_rbd_target_gw_docker_memory_limit }} \
-  {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
+  {% if (ceph_docker_version is defined and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or is_podman -%}
   --cpus={{ ceph_rbd_target_gw_docker_cpu_limit }} \
   {% else -%}
   --cpu-quota={{ ceph_rbd_target_gw_docker_cpu_limit * 100000 }} \
@@ -24,7 +24,7 @@ ExecStart=/usr/bin/docker run --rm \
   -e CEPH_DAEMON=RBD_TARGET_GW \
   --name=rbd-target-gw \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop rbd-target-gw
+ExecStopPost=-/usr/bin/{{ container_binary }} stop rbd-target-gw
 Restart=always
 RestartSec=10s
 TimeoutStartSec=120
index bbaf58ee798bc63ff8d268963ed4b43235858684..42441836de2d1441ee49c6f1ec9fef682e9f16c7 100644 (file)
@@ -4,11 +4,11 @@ After=docker.service
 
 [Service]
 EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker stop tcmu-runner
-ExecStartPre=-/usr/bin/docker rm tcmu-runner
-ExecStart=/usr/bin/docker run --rm \
+ExecStartPre=-/usr/bin/{{ container_binary }} stop tcmu-runner
+ExecStartPre=-/usr/bin/{{ container_binary }} rm tcmu-runner
+ExecStart=/usr/bin/{{ container_binary }} run --rm \
   --memory={{ ceph_tcmu_runner_docker_memory_limit }} \
-  {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
+  {% if (ceph_docker_version is defined and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or is_podman -%}
   --cpus={{ ceph_tcmu_runner_docker_cpu_limit }} \
   {% else -%}
   --cpu-quota={{ ceph_tcmu_runner_docker_cpu_limit * 100000 }} \
@@ -24,7 +24,7 @@ ExecStart=/usr/bin/docker run --rm \
   -e CEPH_DAEMON=TCMU_RUNNER \
   --name=tcmu-runner \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop tcmu-runner
+ExecStopPost=-/usr/bin/{{ container_binary }} stop tcmu-runner
 Restart=always
 RestartSec=10s
 TimeoutStartSec=120
index f990013685994fee336d1bff8e98816ba6e1fff8..944010bca57438f377598516ec59d68136db6648 100644 (file)
@@ -1,7 +1,7 @@
 ---
 - name: set_fact docker_exec_cmd mds
   set_fact:
-    docker_exec_cmd: "docker exec ceph-mds-{{ ansible_hostname }}"
+    docker_exec_cmd: "{{ container_binary }} exec ceph-mds-{{ ansible_hostname }}"
 
 - name: set_fact admin_keyring
   set_fact:
index a63297199eff842a15d692933993804de8f7aa00..9a6056695c112b4aded08c2d04072a762f22d5e4 100644 (file)
@@ -6,7 +6,7 @@
 
 - name: set_fact docker_exec_cmd
   set_fact:
-    docker_exec_cmd: "docker exec ceph-mds-{{ ansible_hostname }}"
+    docker_exec_cmd: "{{ container_binary }} exec ceph-mds-{{ ansible_hostname }}"
   when:
     - containerized_deployment
 
index 9e3dcfb5bf0c4bd018fbe92146abaf458ad570c4..c3cd5485c0901893579ec545ab15d9b768d1b5aa 100644 (file)
@@ -4,11 +4,11 @@ After=docker.service
 
 [Service]
 EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker stop ceph-mds-{{ ansible_hostname }}
-ExecStartPre=-/usr/bin/docker rm ceph-mds-{{ ansible_hostname }}
-ExecStart=/usr/bin/docker run --rm --net=host \
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mds-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mds-{{ ansible_hostname }}
+ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
   --memory={{ ceph_mds_docker_memory_limit }} \
-  {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
+  {% if (ceph_docker_version is defined and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or is_podman -%}
   --cpus={{ ceph_mds_docker_cpu_limit }} \
   {% else -%}
   --cpu-quota={{ ceph_mds_docker_cpu_limit * 100000 }} \
@@ -28,7 +28,7 @@ ExecStart=/usr/bin/docker run --rm --net=host \
   {{ ceph_mds_docker_extra_env }} \
   --name=ceph-mds-{{ ansible_hostname }} \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop ceph-mds-{{ ansible_hostname }}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-mds-{{ ansible_hostname }}
 Restart=always
 RestartSec=10s
 TimeoutStartSec=120
index 214a8efeff65442a27808a99b787e73185ddf6f7..80e269d9e7f2d3a3443ab0f26cce05ab718f5718 100644 (file)
@@ -1,7 +1,7 @@
 ---
 - name: set_fact docker_exec_cmd
   set_fact:
-    docker_exec_cmd_mgr: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+    docker_exec_cmd_mgr: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
   when:
     - containerized_deployment
 
index 766290e2567ee0674eae091eb5f70080bfaf6ed3..78a1426277a6fd97e161a34040fb2879e2f42ccb 100644 (file)
@@ -4,11 +4,11 @@ After=docker.service
 
 [Service]
 EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker stop ceph-mgr-{{ ansible_hostname }}
-ExecStartPre=-/usr/bin/docker rm ceph-mgr-{{ ansible_hostname }}
-ExecStart=/usr/bin/docker run --rm --net=host \
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mgr-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mgr-{{ ansible_hostname }}
+ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
   --memory={{ ceph_mgr_docker_memory_limit }} \
-  {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
+  {% if (ceph_docker_version is defined and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or is_podman -%}
   --cpus={{ ceph_mgr_docker_cpu_limit }} \
   {% else -%}
   --cpu-quota={{ ceph_mgr_docker_cpu_limit * 100000 }} \
@@ -28,7 +28,7 @@ ExecStart=/usr/bin/docker run --rm --net=host \
   {{ ceph_mgr_docker_extra_env }} \
   --name=ceph-mgr-{{ ansible_hostname }} \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop ceph-mgr-{{ ansible_hostname }}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-mgr-{{ ansible_hostname }}
 Restart=always
 RestartSec=10s
 TimeoutStartSec=120
index 4aa0b27391674fc62719b0542cbfeb996885527e..f25ff84590a992d1ba8b63a377e315327982a9d9 100644 (file)
@@ -1,7 +1,7 @@
 ---
 - name: set_fact docker_exec_cmd
   set_fact:
-    docker_exec_cmd: "docker exec ceph-mon-{{ ansible_hostname }}"
+    docker_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
   when:
     - containerized_deployment
 
index 608c0b70f7d889addf2ed55ced38ad2504849127..de28fdabbfcee8302cb644f580b9d396919297c0 100644 (file)
@@ -4,11 +4,11 @@ After=docker.service
 
 [Service]
 EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker rm ceph-mon-%i
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mon-%i
 ExecStartPre=/bin/sh -c '"$(command -v mkdir)" -p /etc/ceph /var/lib/ceph/mon'
-ExecStart=/usr/bin/docker run --rm --name ceph-mon-%i \
+ExecStart=/usr/bin/{{ container_binary }} run --rm --name ceph-mon-%i \
   --memory={{ ceph_mon_docker_memory_limit }} \
-{% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
+{% if (ceph_docker_version is defined and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or is_podman -%}
   --cpus={{ ceph_mon_docker_cpu_limit }} \
 {% else -%}
   --cpu-quota={{ ceph_mon_docker_cpu_limit * 100000 }} \
@@ -37,7 +37,7 @@ ExecStart=/usr/bin/docker run --rm --name ceph-mon-%i \
   -e CEPH_DAEMON=MON \
   {{ ceph_mon_docker_extra_env }} \
   {{ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStop=-/usr/bin/docker stop ceph-mon-%i
+ExecStop=-/usr/bin/{{ container_binary }} stop ceph-mon-%i
 ExecStopPost=-/bin/rm -f /var/run/ceph/{{ cluster }}-mon.{{ monitor_name }}.asok
 Restart=always
 RestartSec=10s
index 2df8479085b582ca4e96613c7420b93f56d668fc..6f48c411f3dc7d5568b0c451bcd4eca1dd21c262 100644 (file)
@@ -1,7 +1,7 @@
 ---
 - name: set_fact docker_exec_cmd_nfs
   set_fact:
-    docker_exec_cmd_nfs: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+    docker_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
   when:
     - containerized_deployment
 
index 83482d35e5d385fe6ece37106e93d1e34ca4c6d9..b68eb26a1f71cf4d65316efb1d630248c266f300 100644 (file)
@@ -1,7 +1,7 @@
 ---
 - name: set_fact docker_exec_cmd
   set_fact:
-    docker_exec_cmd: "docker exec ceph-nfs-{{ ansible_hostname }}"
+    docker_exec_cmd: "{{ container_binary }} exec ceph-nfs-{{ ansible_hostname }}"
   when:
     - containerized_deployment
 
index 11ec5e76359159b0e35ce3e414d6e33af75baf7f..8a0a7d507e4b400d8b45f41b32ac1c493391a5c4 100644 (file)
@@ -1,7 +1,7 @@
 ---
 - name: set_fact docker_exec_cmd_nfs
   set_fact:
-    docker_exec_cmd_nfs: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+    docker_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
   when:
     - containerized_deployment
 
index 77acbee90135517197d06adf0a9d72d017088e11..b77ab47109ba482ccdce570868e248247d87f8bb 100644 (file)
@@ -5,9 +5,9 @@ After=docker.service
 
 [Service]
 EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker rm ceph-nfs-%i
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-nfs-%i
 ExecStartPre=/usr/bin/mkdir -p /etc/ceph /etc/ganesha /var/lib/nfs/ganesha
-ExecStart=/usr/bin/docker run --rm --net=host \
+ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
   {% if not containerized_deployment_with_kv -%}
   -v /var/lib/ceph:/var/lib/ceph:z \
   -v /etc/ceph:/etc/ceph:z \
@@ -29,7 +29,7 @@ ExecStart=/usr/bin/docker run --rm --net=host \
   {{ ceph_nfs_docker_extra_env }} \
   --name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_hostname) }} \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop ceph-nfs-%i
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-nfs-%i
 Restart=always
 RestartSec=10s
 TimeoutStartSec=120
index 5c99309cff4b382f867a25205e35ef5e5b71c819..e1a79133691e64cea4b7d9cc63576629658744a7 100644 (file)
@@ -4,7 +4,7 @@
 # starting the next task
 - name: prepare ceph containerized osd disk collocated
   command: |
-    docker run --net=host \
+    {{ container_binary }} run --net=host \
     --pid=host \
     --privileged=true \
     --name=ceph-osd-prepare-{{ ansible_hostname }}-{{ item.1 | regex_replace('/dev/', '') }} \
@@ -29,7 +29,7 @@
 
 - name: automatic prepare ceph containerized osd disk collocated
   command: |
-    docker run --net=host \
+    {{ container_binary }} run --net=host \
     --pid=host \
     --privileged=true \
     --name=ceph-osd-prepare-{{ ansible_hostname }}-{{ item.split('/')[-1] }} \
index 3e759ce532edf2b2c9575f284222f1e8c4c93109..07f5878409bade9b1b9daf82a95dadce7354133b 100644 (file)
@@ -17,5 +17,6 @@
   environment:
     CEPH_VOLUME_DEBUG: 1
     CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
+    CEPH_CONTAINER_BINARY: "{{ container_binary }}"
   with_items: "{{ lvm_volumes }}"
   tags: prepare_osd
\ No newline at end of file
index aa9a862beea316cc779dc93c9cbe34312adfd650..303a5558ea2f975d4da387524e098c514f857b2a 100644 (file)
@@ -4,7 +4,7 @@
 # starting the next task
 - name: prepare ceph "{{ osd_objectstore }}" containerized osd disk(s) non-collocated
   command: |
-    docker run --net=host \
+    {{ container_binary }} run --net=host \
     --pid=host \
     --privileged=true \
     --name=ceph-osd-prepare-{{ ansible_hostname }}-{{ item.1 | regex_replace('/dev/', '') }} \
@@ -31,7 +31,7 @@
 
 - name: prepare ceph "{{ osd_objectstore }}" containerized osd disk(s) non-collocated with a dedicated device for db and wal
   command: |
-    docker run --net=host \
+    {{ container_binary }} run --net=host \
     --pid=host \
     --privileged=true \
     --name=ceph-osd-prepare-{{ ansible_hostname }}-{{ item.1 | regex_replace('/dev/', '') }} \
index 7ff0fdb4919850f6c3d778daa15b09fbdfb8a6d1..393fe2043da0cb37bba4c32ebabc148439651958 100644 (file)
@@ -19,6 +19,7 @@
       owner: "root"
       group: "root"
       mode: "0744"
+      setype: "bin_t"
     notify:
       - restart ceph osds
   when:
@@ -26,7 +27,7 @@
 
 - name: set_fact docker_exec_start_osd
   set_fact:
-    docker_exec_start_osd: "{{ 'docker run --rm --privileged=true -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket -v /var/run/udev/:/var/run/udev/:z -v /etc/ceph:/etc/ceph:z -v /dev:/dev --entrypoint=ceph-volume ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else 'ceph-volume' }}"
+    docker_exec_start_osd: "{{ '{{ container_binary }} run --rm --privileged=true -v /var/run/udev/:/var/run/udev/:z -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket -v /etc/ceph:/etc/ceph:z -v /dev:/dev --entrypoint=ceph-volume ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else 'ceph-volume' }}"
 
 - name: collect osd ids
   shell: >
index ad55ea38cfb68b8ee733140adcfad2120fcacff4..e95eefb66455fd0156100eb05442eab27bb47f26 100644 (file)
@@ -13,21 +13,21 @@ DOCKER_ENV=""
 # FUNCTIONS #
 #############
 function expose_partitions () {
-DOCKER_ENV=$(docker run --rm --net=host --name expose_partitions_${1} --privileged=true -v /dev/:/dev/ -v /etc/ceph:/etc/ceph:z -e CLUSTER={{ cluster }} -e OSD_DEVICE=/dev/${1} {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} disk_list)
+DOCKER_ENV=$({{ container_binary }} run --rm --net=host --name expose_partitions_${1} --privileged=true -v /dev/:/dev/ -v /etc/ceph:/etc/ceph:z -e CLUSTER={{ cluster }} -e OSD_DEVICE=/dev/${1} {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} disk_list)
 }
 {% else -%}
 # NOTE(leseb): maintains backwards compatibility with old ceph-docker Jewel images
 # Jewel images prior to https://github.com/ceph/ceph-docker/pull/797
 REGEX="[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
 function expose_partitions {
-  if docker ps -a | grep -sq ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}; then
+  if {{ container_binary }} ps -a | grep -sq ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}; then
     if [[ ! -f {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log ]]; then
-      docker logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} &> {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log
+      {{ container_binary }} logs ceph-osd-prepare-{{ ansible_hostname }}-devdev${1} &> {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log
     fi
   fi
-  if docker ps -a | grep -sq ceph-osd-prepare-{{ ansible_hostname }}-${1}; then
+  if {{ container_binary }} ps -a | grep -sq ceph-osd-prepare-{{ ansible_hostname }}-${1}; then
     if [[ ! -f {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-${1}.log ]]; then
-      docker logs ceph-osd-prepare-{{ ansible_hostname }}-${1} &> {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-${1}.log
+      {{ container_binary }} logs ceph-osd-prepare-{{ ansible_hostname }}-${1} &> {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-${1}.log
     fi
   fi
   if [[ -f {{ ceph_osd_docker_run_script_path }}/ceph-osd-prepare-{{ ansible_hostname }}-devdev${1}.log ]]; then
@@ -42,7 +42,7 @@ function expose_partitions {
     # NOTE(leseb): if we arrive here this probably means we just switched from non-containers to containers.
     # This is tricky as we don't have any info on the type of OSD, this is 'only' a problem for non-collocated scenarios
     # We can't assume that the 'ceph' is still present so calling Docker exec instead
-    part=$(docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} list /dev/${1} | awk '/journal / {print $1}')
+    part=$({{ container_binary }} run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} list /dev/${1} | awk '/journal / {print $1}')
     DOCKER_ENV="-e OSD_JOURNAL=$part"
   fi
   # if empty, the previous command didn't find anything so we fail
@@ -62,13 +62,13 @@ expose_partitions "$1"
 # MAIN #
 ########
 
-/usr/bin/docker run \
+/usr/bin/{{ container_binary }} run \
   --rm \
   --net=host \
   --privileged=true \
   --pid=host \
   --memory={{ ceph_osd_docker_memory_limit }} \
-  {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
+  {% if (ceph_docker_version is defined and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or is_podman -%}
   --cpus={{ ceph_osd_docker_cpu_limit }} \
   {% else -%}
   --cpu-quota={{ ceph_osd_docker_cpu_limit * 100000 }} \
index d7297e0e805e0cd3160cdd7e52086b93ac78ed2d..fc46baf11d966beb95660faef6b1141eb6b11dca 100644 (file)
@@ -6,17 +6,17 @@ After=docker.service
 [Service]
 EnvironmentFile=-/etc/environment
 {% if osd_scenario == 'lvm' -%}
-ExecStartPre=-/usr/bin/docker stop ceph-osd-%i
-ExecStartPre=-/usr/bin/docker rm -f ceph-osd-%i
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-osd-%i
+ExecStartPre=-/usr/bin/{{ container_binary }} rm -f ceph-osd-%i
 {% else %}
-ExecStartPre=-/usr/bin/docker stop ceph-osd-{{ ansible_hostname }}-%i
-ExecStartPre=-/usr/bin/docker rm -f ceph-osd-{{ ansible_hostname }}-%i
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-osd-{{ ansible_hostname }}-%i
+ExecStartPre=-/usr/bin/{{ container_binary }} rm -f ceph-osd-{{ ansible_hostname }}-%i
 {% endif -%}
 ExecStart={{ ceph_osd_docker_run_script_path }}/ceph-osd-run.sh %i
 {% if osd_scenario == 'lvm' -%}
-ExecStop=-/usr/bin/docker stop ceph-osd-%i
+ExecStop=-/usr/bin/{{ container_binary }} stop ceph-osd-%i
 {% else %}
-ExecStop=-/usr/bin/docker stop ceph-osd-{{ ansible_hostname }}-%i
+ExecStop=-/usr/bin/{{ container_binary }} stop ceph-osd-{{ ansible_hostname }}-%i
 {% endif -%}
 Restart=always
 RestartSec=10s
index 82a33975ddbc1405ee9aa4de6baf9b08af8ab2fe..ccd401571180dd15a9f781cf9fc03c5e22aea15c 100644 (file)
@@ -1,7 +1,7 @@
 ---
 - name: set_fact docker_exec_cmd
   set_fact:
-    docker_exec_cmd: "docker exec ceph-rbd-mirror-{{ ansible_hostname }}"
+    docker_exec_cmd: "{{ container_binary }} exec ceph-rbd-mirror-{{ ansible_hostname }}"
   when:
     - containerized_deployment
 
index cd853f77557f70d48cab086e7d82ed7bee736c45..8bdf1e3241c531b34e9d975bece259c78ce5125e 100644 (file)
@@ -4,11 +4,11 @@ After=docker.service
 
 [Service]
 EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker stop ceph-rbd-mirror-{{ ansible_hostname }}
-ExecStartPre=-/usr/bin/docker rm ceph-rbd-mirror-{{ ansible_hostname }}
-ExecStart=/usr/bin/docker run --rm --net=host \
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rbd-mirror-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rbd-mirror-{{ ansible_hostname }}
+ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
   --memory={{ ceph_rbd_mirror_docker_memory_limit }} \
-  {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
+  {% if (ceph_docker_version is defined and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or is_podman -%}
   --cpus={{ ceph_rbd_mirror_docker_cpu_limit }} \
   {% else -%}
   --cpu-quota={{ ceph_rbd_mirror_docker_cpu_limit * 100000 }} \
@@ -28,7 +28,7 @@ ExecStart=/usr/bin/docker run --rm --net=host \
   --name=ceph-rbd-mirror-{{ ansible_hostname }} \
   {{ ceph_rbd_mirror_docker_extra_env }} \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop ceph-rbd-mirror-{{ ansible_hostname }}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rbd-mirror-{{ ansible_hostname }}
 Restart=always
 RestartSec=10s
 TimeoutStartSec=120
index 98b6344d3f661b4bfef92226fd8e10e0ade0c6e5..dfd4d4425ad308d0c229123b94b412fac3633e2c 100644 (file)
@@ -4,11 +4,11 @@ After=docker.service
 
 [Service]
 EnvironmentFile=-/etc/environment
-ExecStartPre=-/usr/bin/docker stop ceph-rgw-{{ ansible_hostname }}
-ExecStartPre=-/usr/bin/docker rm ceph-rgw-{{ ansible_hostname }}
-ExecStart=/usr/bin/docker run --rm --net=host \
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rgw-{{ ansible_hostname }}
+ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
   --memory={{ ceph_rgw_docker_memory_limit }} \
-  {% if ceph_docker_version.split('.')[0] is version_compare('13', '>=') -%}
+  {% if (ceph_docker_version is defined and ceph_docker_version.split('.')[0] is version_compare('13', '>=')) or is_podman -%}
   --cpus={{ ceph_rgw_docker_cpu_limit }} \
   {% else -%}
   --cpu-quota={{ ceph_rgw_docker_cpu_limit * 100000 }} \
@@ -28,7 +28,7 @@ ExecStart=/usr/bin/docker run --rm --net=host \
   --name=ceph-rgw-{{ ansible_hostname }} \
   {{ ceph_rgw_docker_extra_env }} \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
-ExecStopPost=-/usr/bin/docker stop ceph-rgw-{{ ansible_hostname }}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_hostname }}
 Restart=always
 RestartSec=10s
 TimeoutStartSec=120
index bcce73501ed43ca15d034c907439169b90ac6497..704cb3e6d11d60416ef5db0ea28aa3fc8d17cd40 100644 (file)
@@ -82,9 +82,9 @@
   block:
     - name: fail on unsupported distribution for iscsi gateways
       fail:
-        msg: "iSCSI gateways can only be deployed on Red Hat Enterprise Linux or CentOS"
+        msg: "iSCSI gateways can only be deployed on Red Hat Enterprise Linux, CentOS or Fedora"
       when:
-        - ansible_distribution not in ['RedHat', 'CentOS']
+        - ansible_distribution not in ['RedHat', 'CentOS', 'Fedora']
 
     - name: fail on unsupported distribution version for iscsi gateways
       fail:
deleted file mode 120000 (symlink)
index 16aae02fc2f1cd990e31d5b0e6fd5ffa578eff0e..0000000000000000000000000000000000000000
+++ /dev/null
@@ -1 +0,0 @@
-site-container.yml.sample
\ No newline at end of file
new file mode 100644 (file)
index 0000000000000000000000000000000000000000..a4e2102816a74ca15dc45c878763b5e599ddb05e
--- /dev/null
@@ -0,0 +1,489 @@
+---
+# Defines deployment design and assigns role to server groups
+
+- hosts:
+  - mons
+  - agents
+  - osds
+  - mdss
+  - rgws
+  - nfss
+  - rbdmirrors
+  - clients
+  - iscsigws
+  - iscsi-gws # for backward compatibility only!
+  - mgrs
+
+  gather_facts: false
+  become: True
+  any_errors_fatal: true
+
+  vars:
+    delegate_facts_host: True
+
+  tasks:
+    # pre-tasks for following import -
+    - name: gather facts
+      setup:
+      when:
+        - not delegate_facts_host | bool
+
+    - name: gather and delegate facts
+      setup:
+      delegate_to: "{{ item }}"
+      delegate_facts: True
+      with_items: "{{ groups['all'] }}"
+      run_once: true
+      when:
+        - delegate_facts_host | bool
+
+    - name: check if it is atomic host
+      stat:
+        path: /run/ostree-booted
+      register: stat_ostree
+      tags:
+        - always
+
+    - name: set_fact is_atomic
+      set_fact:
+        is_atomic: '{{ stat_ostree.stat.exists }}'
+      tags:
+        - always
+
+    - name: check if podman binary is present
+      stat:
+        path: /usr/bin/podman
+      register: podman_binary
+      tags:
+        - always
+
+    - name: set_fact is_podman
+      set_fact:
+        is_podman: "{{ podman_binary.stat.exists }}"
+      when: is_atomic
+      tags:
+        - always
+
+    - name: set_fact container_binary
+      set_fact:
+        container_binary: "{{ 'podman' if is_atomic and is_podman else 'docker' }}"
+      tags:
+        - always
+
+    - import_role:
+        name: ceph-defaults
+        private: false
+      tags: [with_pkg, fetch_container_image]
+    - import_role:
+        name: ceph-validate
+        private: false
+    - import_role:
+        name: ceph-infra
+        private: false
+    - import_role:
+        name: ceph-handler
+        private: false
+    - import_role:
+        name: ceph-container-common
+        private: false
+      tags: [with_pkg, fetch_container_image]
+      when:
+        - not (is_atomic | bool)
+        - (not (inventory_hostname in groups.get('clients', [])) or (inventory_hostname == groups.get('clients', [''])|first))
+
+
+    # post-tasks for upcoming import -
+    - name: "pull {{ ceph_docker_image }} image"
+      command: "{{ container_binary }} pull {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+      changed_when: false
+      when:
+        - is_atomic
+        - (ceph_docker_dev_image is undefined or not ceph_docker_dev_image)
+        - (not (inventory_hostname in groups.get('clients', [])) or (inventory_hostname == groups.get('clients', [''])|first))
+
+- hosts: mons
+  tasks:
+    - name: set ceph monitor install 'In Progress'
+      run_once: true
+      set_stats:
+        data:
+          installer_phase_ceph_mon:
+            status: "In Progress"
+            start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: mons
+  become: True
+  gather_facts: false
+  tasks:
+    - import_role:
+        name: ceph-defaults
+        private: false
+      tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-handler
+        private: false
+    - import_role:
+        name: ceph-container-common
+        private: false
+    - import_role:
+        name: ceph-config
+        private: false
+      tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-mon
+        private: false
+  serial: 1 # MUST be '1' WHEN DEPLOYING MONITORS ON DOCKER CONTAINERS
+
+- hosts: mons
+  tasks:
+    - name: set ceph monitor install 'Complete'
+      run_once: true
+      set_stats:
+        data:
+          installer_phase_ceph_mon:
+            status: "Complete"
+            end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: mgrs
+  become: True
+  gather_facts: false
+  tasks:
+    # pre-tasks for following imports -
+    - name: set ceph manager install 'In Progress'
+      run_once: true
+      set_stats:
+        data:
+          installer_phase_ceph_mgr:
+            status: "In Progress"
+            start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+    - import_role:
+        name: ceph-defaults
+        private: false
+      tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-handler
+        private: false
+    - import_role:
+        name: ceph-container-common
+        private: false
+    - import_role:
+        name: ceph-config
+        private: false
+      tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-mgr
+        private: false
+
+    # post-tasks for upcoming imports -
+    - name: set ceph manager install 'Complete'
+      run_once: true
+      set_stats:
+        data:
+          installer_phase_ceph_mgr:
+            status: "Complete"
+            end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: osds
+  become: True
+  gather_facts: false
+  tasks:
+    # pre-tasks for upcoming imports -
+    - name: set ceph osd install 'In Progress'
+      run_once: true
+      set_stats:
+        data:
+          installer_phase_ceph_osd:
+            status: "In Progress"
+            start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+    - import_role:
+        name: ceph-defaults
+        private: false
+      tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-handler
+        private: false
+    - import_role:
+        name: ceph-container-common
+        private: false
+    - import_role:
+        name: ceph-config
+        private: false
+      tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-osd
+        private: false
+
+    # post-tasks for preceding imports -
+    - name: set ceph osd install 'Complete'
+      run_once: true
+      set_stats:
+        data:
+          installer_phase_ceph_osd:
+            status: "Complete"
+            end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: mdss
+  become: True
+  gather_facts: false
+  tasks:
+    # pre-tasks for following imports -
+    - name: set ceph mds install 'In Progress'
+      run_once: true
+      set_stats:
+        data:
+          installer_phase_ceph_mds:
+            status: "In Progress"
+            start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+    - import_role:
+        name: ceph-defaults
+        private: false
+      tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-handler
+        private: false
+    - import_role:
+        name: ceph-container-common
+        private: false
+    - import_role:
+        name: ceph-config
+        private: false
+      tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-mds
+        private: false
+
+    # post-tasks for preceding imports -
+    - name: set ceph mds install 'Complete'
+      run_once: true
+      set_stats:
+        data:
+          installer_phase_ceph_mds:
+            status: "Complete"
+            end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: rgws
+  become: True
+  gather_facts: false
+  tasks:
+    # pre-tasks for following imports -
+    - name: set ceph rgw install 'In Progress'
+      run_once: true
+      set_stats:
+        data:
+          installer_phase_ceph_rgw:
+            status: "In Progress"
+            start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+    - import_role:
+        name: ceph-defaults
+        private: false
+      tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-handler
+        private: false
+    - import_role:
+        name: ceph-container-common
+        private: false
+    - import_role:
+        name: ceph-config
+        private: false
+      tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-rgw
+        private: false
+
+    # post-tasks for preceding imports -
+    - name: set ceph rgw install 'Complete'
+      run_once: true
+      set_stats:
+        data:
+          installer_phase_ceph_rgw:
+            status: "Complete"
+            end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: nfss
+  become: True
+  gather_facts: false
+  tasks:
+    # pre-tasks for following imports -
+    - name: set ceph nfs install 'In Progress'
+      run_once: true
+      set_stats:
+        data:
+          installer_phase_ceph_nfs:
+            status: "In Progress"
+            start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+    - import_role:
+        name: ceph-defaults
+        private: false
+      tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-handler
+        private: false
+    - import_role:
+        name: ceph-container-common
+        private: false
+    - import_role:
+        name: ceph-config
+        private: false
+      tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-nfs
+        private: false
+
+    # post-tasks for preceding imports -
+    - name: set ceph nfs install 'Complete'
+      run_once: true
+      set_stats:
+        data:
+          installer_phase_ceph_nfs:
+            status: "Complete"
+            end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: rbdmirrors
+  become: True
+  gather_facts: false
+  tasks:
+    # pre-tasks for following imports -
+    - name: set ceph rbd mirror install 'In Progress'
+      run_once: true
+      set_stats:
+        data:
+          installer_phase_ceph_rbdmirror:
+            status: "In Progress"
+            start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+    - import_role:
+        name: ceph-defaults
+        private: false
+      tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-handler
+        private: false
+    - import_role:
+        name: ceph-container-common
+        private: false
+    - import_role:
+        name: ceph-config
+        private: false
+      tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-rbd-mirror
+        private: false
+
+    # post-tasks for preceding imports -
+    - name: set ceph rbd mirror install 'Complete'
+      run_once: true
+      set_stats:
+        data:
+          installer_phase_ceph_rbdmirror:
+            status: "Complete"
+            end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: clients
+  become: True
+  gather_facts: false
+  tasks:
+    # pre-tasks for following imports -
+    - name: set ceph client install 'In Progress'
+      run_once: true
+      set_stats:
+        data:
+          installer_phase_ceph_client:
+            status: "In Progress"
+            start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+    - import_role:
+        name: ceph-defaults
+        private: false
+      tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-handler
+        private: false
+    - import_role:
+        name: ceph-container-common
+        private: false
+      when:
+        - inventory_hostname == groups.get('clients', ['']) | first
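+      # fetching the container image once on the first client is enough; the
+      # remaining clients only consume the keys it generates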
+    - import_role:
+        name: ceph-config
+        private: false
+      tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-client
+        private: false
+
+    # post-tasks for preceding imports -
+    - name: set ceph client install 'Complete'
+      run_once: true
+      set_stats:
+        data:
+          installer_phase_ceph_client:
+            status: "Complete"
+            end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts:
+    - iscsigws
+    - iscsi-gws # for backward compatibility only!
+  gather_facts: false
+  become: True
+  tasks:
+    # pre-tasks for following imports -
+    - name: set ceph iscsi gateway install 'In Progress'
+      run_once: true
+      set_stats:
+        data:
+          installer_phase_ceph_iscsi_gw:
+            status: "In Progress"
+            start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+    - import_role:
+        name: ceph-defaults
+        private: false
+      tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-handler
+        private: false
+    - import_role:
+        name: ceph-container-common
+        private: false
+    - import_role:
+        name: ceph-config
+        private: false
+      tags: ['ceph_update_config']
+    - import_role:
+        name: ceph-iscsi-gw
+        private: false
+
+    # post-tasks for preceding imports -
+    - name: set ceph iscsi gw install 'Complete'
+      run_once: true
+      set_stats:
+        data:
+          installer_phase_ceph_iscsi_gw:
+            status: "Complete"
+            end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- hosts: mons
+  gather_facts: false
+  become: True
+  tasks:
+    - name: get ceph status from the first monitor
+      command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups['mons'][0]]['ansible_hostname'] }} ceph --cluster {{ cluster | default ('ceph') }} -s"
+      register: ceph_status
+      changed_when: false
+      delegate_to: "{{ groups['mons'][0] }}"
+      run_once: true
+      ignore_errors: true # we skip the error if mon_group_name is different than 'mons'
+
+    - name: "show ceph status for cluster {{ cluster | default ('ceph') }}"
+      debug:
+        msg: "{{ ceph_status.stdout_lines }}"
+      delegate_to: "{{ groups['mons'][0] }}"
+      run_once: true
+      when: not ceph_status.failed
diff --git a/tests/functional/fedora/29/container-podman/Vagrantfile b/tests/functional/fedora/29/container-podman/Vagrantfile
new file mode 120000 (symlink)
index 0000000..dfd7436
--- /dev/null
@@ -0,0 +1 @@
+../../../../../Vagrantfile
\ No newline at end of file
diff --git a/tests/functional/fedora/29/container-podman/ceph-override.json b/tests/functional/fedora/29/container-podman/ceph-override.json
new file mode 120000 (symlink)
index 0000000..0c30b3f
--- /dev/null
@@ -0,0 +1 @@
+../../../centos/7/cluster/ceph-override.json
\ No newline at end of file
diff --git a/tests/functional/fedora/29/container-podman/group_vars/all b/tests/functional/fedora/29/container-podman/group_vars/all
new file mode 100644 (file)
index 0000000..ec741bf
--- /dev/null
@@ -0,0 +1,42 @@
+---
+# this is only here to let the CI tests know that this
+# scenario is containerized; the flag name is historical,
+# the scenario itself uses podman
+docker: True
+
+containerized_deployment: True
+monitor_interface: eth1
+radosgw_interface: eth1
+ceph_mon_docker_subnet: "{{ public_network }}"
+ceph_docker_on_openstack: False
+public_network: "192.168.30.0/24"
+cluster_network: "192.168.31.0/24"
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
+ceph_conf_overrides:
+  global:
+    osd_pool_default_size: 1
+openstack_config: True
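+# pg counts below are read back from the first monitor, where the
+# osd_pool_default_pg_num fact gets computed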
+openstack_glance_pool:
+  name: "images"
+  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  rule_name: "HDD"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+  size: ""
+openstack_cinder_pool:
+  name: "volumes"
+  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  rule_name: "HDD"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+  size: ""
+openstack_pools:
+  - "{{ openstack_glance_pool }}"
+  - "{{ openstack_cinder_pool }}"
diff --git a/tests/functional/fedora/29/container-podman/group_vars/clients b/tests/functional/fedora/29/container-podman/group_vars/clients
new file mode 100644 (file)
index 0000000..cbd665c
--- /dev/null
@@ -0,0 +1,22 @@
+---
+user_config: True
+copy_admin_key: True
+test:
+  name: "test"
+  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  rule_name: "HDD"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+test2:
+  name: "test2"
+  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  rule_name: "HDD"
+  type: 1
+  erasure_profile: ""
+  expected_num_objects: ""
+pools:
+  - "{{ test }}"
+  - "{{ test2 }}"
diff --git a/tests/functional/fedora/29/container-podman/group_vars/iscsigws b/tests/functional/fedora/29/container-podman/group_vars/iscsigws
new file mode 100644 (file)
index 0000000..401805e
--- /dev/null
@@ -0,0 +1,3 @@
+---
+gateway_ip_list: 192.168.1.90
+generate_crt: True
\ No newline at end of file
diff --git a/tests/functional/fedora/29/container-podman/group_vars/mons b/tests/functional/fedora/29/container-podman/group_vars/mons
new file mode 100644 (file)
index 0000000..4b54059
--- /dev/null
@@ -0,0 +1,10 @@
+---
+create_crush_tree: True
+crush_rule_config: True
+crush_rule_hdd:
+  name: HDD
+  root: HDD
+  type: host
+  default: true
+crush_rules:
+  - "{{ crush_rule_hdd }}"
diff --git a/tests/functional/fedora/29/container-podman/group_vars/osds b/tests/functional/fedora/29/container-podman/group_vars/osds
new file mode 100644 (file)
index 0000000..672a0f9
--- /dev/null
@@ -0,0 +1,13 @@
+---
+ceph_osd_docker_run_script_path: /var/tmp
+osd_objectstore: "bluestore"
+osd_scenario: lvm
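+# each entry below becomes one OSD: a data LV, optionally with a
+# separate bluestore db LV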
+lvm_volumes:
+  - data: data-lv1
+    data_vg: test_group
+  - data: data-lv2
+    data_vg: test_group
+    db: journal1
+    db_vg: journals
\ No newline at end of file
diff --git a/tests/functional/fedora/29/container-podman/group_vars/rgws b/tests/functional/fedora/29/container-podman/group_vars/rgws
new file mode 100644 (file)
index 0000000..8f2a9a3
--- /dev/null
@@ -0,0 +1,7 @@
+---
+copy_admin_key: True
+rgw_create_pools:
+  foo:
+    pg_num: 17
+  bar:
+    pg_num: 19
diff --git a/tests/functional/fedora/29/container-podman/hosts b/tests/functional/fedora/29/container-podman/hosts
new file mode 100644 (file)
index 0000000..ed6bf60
--- /dev/null
@@ -0,0 +1,34 @@
+[mons]
+mon0
+mon1
+mon2
+
+[mgrs]
+mgr0
+
+[osds]
+osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
+osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
+
+[mdss]
+mds0
+
+[rgws]
+rgw0
+
+[nfss]
+nfs0
+
+[clients]
+client0
+client1
+
+[rbdmirrors]
+rbd-mirror0
+
+[iscsigws]
+iscsi-gw0
+
+[all:vars]
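+# Fedora 29 Atomic ships python3 only, hence the explicit interpreter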
+ansible_python_interpreter=/usr/bin/python3
\ No newline at end of file
diff --git a/tests/functional/fedora/29/container-podman/vagrant_variables.yml b/tests/functional/fedora/29/container-podman/vagrant_variables.yml
new file mode 100644 (file)
index 0000000..3952026
--- /dev/null
@@ -0,0 +1,32 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: True
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 3
+osd_vms: 2
+mds_vms: 1
+rgw_vms: 1
+nfs_vms: 1
+rbd_mirror_vms: 1
+client_vms: 2
+iscsi_gw_vms: 1
+mgr_vms: 1
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.30
+cluster_subnet: 192.168.31
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+vagrant_box: fedora/29-atomic-host
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for CentOS/7, /home/{ user }/vagrant for OpenStack; defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+vagrant_sync_dir: /vagrant
+# Disables synced folder creation. Not needed for testing; skips mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
diff --git a/tests/functional/simulate_rhel8.yml b/tests/functional/simulate_rhel8.yml
new file mode 100644 (file)
index 0000000..05f029a
--- /dev/null
@@ -0,0 +1,14 @@
+- hosts: all
+  gather_facts: true
+  become: yes
+  tasks:
+    - name: unlock /usr
+      command: ostree admin unlock --hotfix
+      changed_when: false
+      when: ansible_distribution == 'Fedora'
+
+    - name: remove docker binary on fedora to simulate rhel8
+      file:
+        path: /usr/bin/docker
+        state: absent
+      when: ansible_distribution == 'Fedora'
\ No newline at end of file
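
The two tasks above emulate a RHEL 8 host on the Fedora 29 Atomic image:
"ostree admin unlock --hotfix" makes the normally read-only /usr writable,
and deleting /usr/bin/docker forces the container-binary detection to fall
back to podman, which is what a docker-less RHEL 8 looks like. The detection
itself lives in roles/ceph-defaults/tasks/facts.yml (changed by this commit
but not shown here); a minimal, hypothetical sketch of how such a fallback
could be derived, assuming a simple stat-based check (task names and the
exact condition are assumptions, not the literal code from this commit):

    - hosts: all
      gather_facts: false
      become: true
      tasks:
        - name: check whether a podman binary is present
          stat:
            path: /usr/bin/podman
          register: podman_binary

        - name: pick the container binary accordingly (hypothetical sketch)
          set_fact:
            container_binary: "{{ 'podman' if podman_binary.stat.exists else 'docker' }}"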
diff --git a/tox.ini b/tox.ini
index 3b8208274f2473474b3118c5f59b553b39457c0b..640cdb889ed6fdc32e793623f0ca895213d4a951 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
 [tox]
-envlist = {dev,luminous,mimic,rhcs}-{xenial_cluster,centos7_cluster,cluster,docker_cluster,update_cluster,update_docker_cluster,switch_to_containers,ooo_collocation,bluestore_lvm_osds,bluestore_lvm_osds_container,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,shrink_mon_container,shrink_osd_container,docker_cluster_collocation,lvm_batch,lvm_osds_container,lvm_batch_container,infra_lv_create,add_osds,add_osds_container,rgw_multisite,rgw_multisite_container}
+envlist = {dev,luminous,mimic,rhcs}-{xenial_cluster,centos7_cluster,cluster,docker_cluster,update_cluster,update_docker_cluster,switch_to_containers,ooo_collocation,bluestore_lvm_osds,bluestore_lvm_osds_container,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,shrink_mon_container,shrink_osd_container,docker_cluster_collocation,lvm_batch,lvm_osds_container,lvm_batch_container,infra_lv_create,add_osds,add_osds_container,rgw_multisite,rgw_multisite_container,container_podman}
   infra_lv_create
 
 skipsdist = True
@@ -213,6 +213,7 @@ setenv=
   shrink_osd_container: PLAYBOOK = site-docker.yml.sample
   shrink_osd_container: COPY_ADMIN_KEY = True
   shrink_osd: COPY_ADMIN_KEY = True
+  container_podman: PLAYBOOK = site-docker.yml.sample
 
   rhcs: CEPH_STABLE_RELEASE = luminous
   luminous: CEPH_STABLE_RELEASE = luminous
@@ -273,6 +274,7 @@ changedir=
   add_osds_container: {toxinidir}/tests/functional/centos/7/add-osds-container
   rgw_multisite: {toxinidir}/tests/functional/centos/7/rgw-multisite
   rgw_multisite_container: {toxinidir}/tests/functional/centos/7/rgw-multisite-container
+  container_podman: {toxinidir}/tests/functional/fedora/29/container-podman
 
 commands=
   rhcs: ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "change_dir={changedir}" --tags "vagrant_setup"
@@ -299,6 +301,8 @@ commands=
   purge_cluster_non_container: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
   purge_cluster_container: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
   switch_to_containers: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
+  container_podman: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/lvm_setup.yml
+  container_podman: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/simulate_rhel8.yml
 
   rhcs: ansible-playbook -vv -i {changedir}/hosts {toxinidir}/tests/functional/rhcs_setup.yml --extra-vars "ceph_docker_registry={env:CEPH_DOCKER_REGISTRY:docker.io} repo_url={env:REPO_URL:} rhel7_repo_url={env:RHEL7_REPO_URL:}" --skip-tags "vagrant_setup"
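
With the tox changes above, env names are built from the envlist factors, so
a local run of the new scenario should look something like this (the dev
factor is just an example; the exact setup depends on your Vagrant
environment and provider):

    tox -e dev-container_podman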