--- /dev/null
+---
+# Restart mon containers one at a time: the task is evaluated once (run_once)
+# and delegated to every mon host via with_items/delegate_to.
+- name: restart ceph container mon daemon(s) because new image
+ command: /usr/bin/env bash /tmp/restart_mon_daemon.sh
+ listen: "restart ceph mons container"
+ when:
+ # We do not want to run these checks on initial deployment:
+ # `ceph_mon_container_stat.rc == 0` with non-empty stdout_lines means a
+ # mon container is already running on this host.
+ - mon_group_name in group_names
+ - ceph_mon_container_stat.get('rc') == 0
+ - inventory_hostname == groups.get(mon_group_name) | last
+ - inventory_hostname in play_hosts
+ - ceph_mon_container_stat.get('stdout_lines', [])|length != 0
+ # Only restart when the pulled image actually changed: compare the
+ # RepoDigests reported by `docker inspect` before and after the pull.
+ - container_inspect_before_pull.rc == 0
+ - (container_inspect_before_pull.stdout | from_json)[0].RepoDigests[0] != (container_inspect_after_pull.stdout | from_json)[0].RepoDigests[0]
+ with_items: "{{ groups[mon_group_name] }}"
+ run_once: true
+ delegate_to: "{{ item }}"
+
+- name: restart ceph container osds daemon(s) because new image
+ command: /usr/bin/env bash /tmp/restart_osd_daemon.sh
+ listen: "restart ceph osds container"
+ when:
+ # We do not want to run these checks on initial deployment
+ # (`ceph_osd_container_stat.rc == 0` with non-empty stdout_lines means an
+ # osd container is already running here), except when a crush location is
+ # specified. ceph-disk will start the osds before the osd crush location is specified
+ - osd_group_name in group_names
+ - ceph_osd_container_stat.get('rc') == 0
+ - inventory_hostname == groups.get(osd_group_name) | last
+ - ((crush_location is defined and crush_location) or ceph_osd_container_stat.get('stdout_lines', [])|length != 0)
+ - handler_health_osd_check
+ # See https://github.com/ceph/ceph-ansible/issues/1457 for the condition below
+ - inventory_hostname in play_hosts
+ # Only restart when the pulled image actually changed: compare the
+ # RepoDigests reported by `docker inspect` before and after the pull.
+ - container_inspect_before_pull.rc == 0
+ - (container_inspect_before_pull.stdout | from_json)[0].RepoDigests[0] != (container_inspect_after_pull.stdout | from_json)[0].RepoDigests[0]
+ with_items: "{{ groups[osd_group_name] }}"
+ run_once: true
+ delegate_to: "{{ item }}"
+
+- name: restart ceph container mds daemon(s) because new image
+ command: /usr/bin/env bash /tmp/restart_mds_daemon.sh
+ listen: "restart ceph mdss container"
+ when:
+ # We do not want to run these checks on initial deployment:
+ # `ceph_mds_container_stat.rc == 0` with non-empty stdout_lines means an
+ # mds container is already running on this host.
+ - mds_group_name in group_names
+ - ceph_mds_container_stat.get('rc') == 0
+ - inventory_hostname == groups.get(mds_group_name) | last
+ - inventory_hostname in play_hosts
+ - ceph_mds_container_stat.get('stdout_lines', [])|length != 0
+ # Only restart when the pulled image actually changed (RepoDigests differ
+ # between the before-pull and after-pull `docker inspect`).
+ - container_inspect_before_pull.rc == 0
+ - (container_inspect_before_pull.stdout | from_json)[0].RepoDigests[0] != (container_inspect_after_pull.stdout | from_json)[0].RepoDigests[0]
+ with_items: "{{ groups[mds_group_name] }}"
+ run_once: true
+ delegate_to: "{{ item }}"
+
+- name: restart ceph container rgw daemon(s) because new image
+ command: /usr/bin/env bash /tmp/restart_rgw_daemon.sh
+ listen: "restart ceph rgws container"
+ when:
+ # We do not want to run these checks on initial deployment:
+ # `ceph_rgw_container_stat.rc == 0` with non-empty stdout_lines means an
+ # rgw container is already running on this host.
+ - rgw_group_name in group_names
+ - ceph_rgw_container_stat.get('rc') == 0
+ - inventory_hostname == groups.get(rgw_group_name) | last
+ - inventory_hostname in play_hosts
+ - ceph_rgw_container_stat.get('stdout_lines', [])|length != 0
+ # Only restart when the pulled image actually changed (RepoDigests differ
+ # between the before-pull and after-pull `docker inspect`).
+ - container_inspect_before_pull.rc == 0
+ - (container_inspect_before_pull.stdout | from_json)[0].RepoDigests[0] != (container_inspect_after_pull.stdout | from_json)[0].RepoDigests[0]
+ with_items: "{{ groups[rgw_group_name] }}"
+ run_once: true
+ delegate_to: "{{ item }}"
+
+# NOTE(review): this handler listens on "restart ceph nfss container" —
+# verify the image-pull task's notify list actually includes that topic,
+# otherwise nfs daemons are never restarted on image change.
+- name: restart ceph container nfs daemon(s) because new image
+ command: /usr/bin/env bash /tmp/restart_nfs_daemon.sh
+ listen: "restart ceph nfss container"
+ when:
+ # We do not want to run these checks on initial deployment:
+ # `ceph_nfs_container_stat.rc == 0` with non-empty stdout_lines means an
+ # nfs container is already running on this host.
+ - nfs_group_name in group_names
+ - ceph_nfs_container_stat.get('rc') == 0
+ - inventory_hostname == groups.get(nfs_group_name) | last
+ - inventory_hostname in play_hosts
+ - ceph_nfs_container_stat.get('stdout_lines', [])|length != 0
+ # Only restart when the pulled image actually changed (RepoDigests differ
+ # between the before-pull and after-pull `docker inspect`).
+ - container_inspect_before_pull.rc == 0
+ - (container_inspect_before_pull.stdout | from_json)[0].RepoDigests[0] != (container_inspect_after_pull.stdout | from_json)[0].RepoDigests[0]
+ with_items: "{{ groups[nfs_group_name] }}"
+ run_once: true
+ delegate_to: "{{ item }}"
+
+- name: restart ceph container rbd mirror daemon(s) because new image
+ command: /usr/bin/env bash /tmp/restart_rbd_mirror_daemon.sh
+ listen: "restart ceph rbdmirrors container"
+ when:
+ # We do not want to run these checks on initial deployment:
+ # `ceph_rbd_mirror_container_stat.rc == 0` with non-empty stdout_lines
+ # means an rbd-mirror container is already running on this host.
+ - rbdmirror_group_name in group_names
+ - ceph_rbd_mirror_container_stat.get('rc') == 0
+ - inventory_hostname == groups.get(rbdmirror_group_name) | last
+ - inventory_hostname in play_hosts
+ - ceph_rbd_mirror_container_stat.get('stdout_lines', [])|length != 0
+ # Only restart when the pulled image actually changed (RepoDigests differ
+ # between the before-pull and after-pull `docker inspect`).
+ - container_inspect_before_pull.rc == 0
+ - (container_inspect_before_pull.stdout | from_json)[0].RepoDigests[0] != (container_inspect_after_pull.stdout | from_json)[0].RepoDigests[0]
+ with_items: "{{ groups[rbdmirror_group_name] }}"
+ run_once: true
+ delegate_to: "{{ item }}"
+
+- name: restart ceph container mgr daemon(s) because new image
+ command: /usr/bin/env bash /tmp/restart_mgr_daemon.sh
+ listen: "restart ceph mgrs container"
+ when:
+ # We do not want to run these checks on initial deployment:
+ # `ceph_mgr_container_stat.rc == 0` with non-empty stdout_lines means a
+ # mgr container is already running on this host.
+ - mgr_group_name in group_names
+ - ceph_mgr_container_stat.get('rc') == 0
+ - inventory_hostname == groups.get(mgr_group_name) | last
+ - inventory_hostname in play_hosts
+ - ceph_mgr_container_stat.get('stdout_lines', [])|length != 0
+ # Only restart when the pulled image actually changed (RepoDigests differ
+ # between the before-pull and after-pull `docker inspect`).
+ - container_inspect_before_pull.rc == 0
+ - (container_inspect_before_pull.stdout | from_json)[0].RepoDigests[0] != (container_inspect_after_pull.stdout | from_json)[0].RepoDigests[0]
+ with_items: "{{ groups[mgr_group_name] }}"
+ run_once: true
+ delegate_to: "{{ item }}"
--- /dev/null
+---
+# Detect a running mon container on this host; rc/stdout feed the
+# restart-handler conditions (never fails, never reports "changed").
+- name: check for a mon container
+ command: "docker ps -q --filter='name=ceph-mon-{{ ansible_hostname }}'"
+ register: ceph_mon_container_stat
+ changed_when: false
+ failed_when: false
+ # canonical boolean (was YAML 1.1 truthy 'no'), consistent with the keys above
+ check_mode: false
+ when:
+ - inventory_hostname in groups.get(mon_group_name, [])
+
+# Detect a running osd container on this host; rc/stdout feed the
+# restart-handler conditions (never fails, never reports "changed").
+- name: check for an osd container
+ command: "docker ps -q --filter='name=ceph-osd-{{ ansible_hostname }}'"
+ register: ceph_osd_container_stat
+ changed_when: false
+ failed_when: false
+ # canonical boolean (was YAML 1.1 truthy 'no'), consistent with the keys above
+ check_mode: false
+ when:
+ - inventory_hostname in groups.get(osd_group_name, [])
+
+# Detect a running mds container on this host; rc/stdout feed the
+# restart-handler conditions (never fails, never reports "changed").
+- name: check for a mds container
+ command: "docker ps -q --filter='name=ceph-mds-{{ ansible_hostname }}'"
+ register: ceph_mds_container_stat
+ changed_when: false
+ failed_when: false
+ # canonical boolean (was YAML 1.1 truthy 'no'), consistent with the keys above
+ check_mode: false
+ when:
+ - inventory_hostname in groups.get(mds_group_name, [])
+
+# Detect a running rgw container on this host; rc/stdout feed the
+# restart-handler conditions (never fails, never reports "changed").
+- name: check for a rgw container
+ command: "docker ps -q --filter='name=ceph-rgw-{{ ansible_hostname }}'"
+ register: ceph_rgw_container_stat
+ changed_when: false
+ failed_when: false
+ # canonical boolean (was YAML 1.1 truthy 'no'), consistent with the keys above
+ check_mode: false
+ when:
+ - inventory_hostname in groups.get(rgw_group_name, [])
+
+# Detect a running mgr container on this host; rc/stdout feed the
+# restart-handler conditions (never fails, never reports "changed").
+- name: check for a mgr container
+ command: "docker ps -q --filter='name=ceph-mgr-{{ ansible_hostname }}'"
+ register: ceph_mgr_container_stat
+ changed_when: false
+ failed_when: false
+ # canonical boolean (was YAML 1.1 truthy 'no'), consistent with the keys above
+ check_mode: false
+ when:
+ - inventory_hostname in groups.get(mgr_group_name, [])
+
+# Detect a running rbd-mirror container on this host; rc/stdout feed the
+# restart-handler conditions (never fails, never reports "changed").
+- name: check for a rbd mirror container
+ command: "docker ps -q --filter='name=ceph-rbd-mirror-{{ ansible_hostname }}'"
+ register: ceph_rbd_mirror_container_stat
+ changed_when: false
+ failed_when: false
+ # canonical boolean (was YAML 1.1 truthy 'no'), consistent with the keys above
+ check_mode: false
+ when:
+ - inventory_hostname in groups.get(rbdmirror_group_name, [])
+
+# Detect a running nfs container on this host; rc/stdout feed the
+# restart-handler conditions (never fails, never reports "changed").
+- name: check for a nfs container
+ command: "docker ps -q --filter='name=ceph-nfs-{{ ansible_hostname }}'"
+ register: ceph_nfs_container_stat
+ changed_when: false
+ failed_when: false
+ # canonical boolean (was YAML 1.1 truthy 'no'), consistent with the keys above
+ check_mode: false
+ when:
+ - inventory_hostname in groups.get(nfs_group_name, [])
---
-# Normal case - pull image from registry
-- name: "pull {{ ceph_docker_image }} image"
+# NOTE(leseb): using failed_when to handle the case when the image is not present yet:
+# on initial deployment `docker inspect` exits non-zero, and the restart
+# handlers check `container_inspect_before_pull.rc == 0` to skip restarting.
+- name: "inspecting {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image before pulling"
+ command: "docker inspect {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+ changed_when: false
+ failed_when: false
+ register: container_inspect_before_pull
+
+# Normal case - pull image from registry (skipped for dev images, which are
+# exported locally and sent across instead)
+- name: "pulling {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image"
 command: "timeout {{ docker_pull_timeout }} docker pull {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
 changed_when: false
 register: docker_image
 when:
 - (ceph_docker_dev_image is undefined or not ceph_docker_dev_image)
-# Dev case - export local dev image and send it across
+# Re-inspect after the pull; the restart handlers compare RepoDigests from the
+# before/after inspections and only restart daemons when the digest changed.
+- name: "inspecting {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image after pulling"
+ command: "docker inspect {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+ # docker inspect is read-only; report 'ok' like the before-pull inspection
+ changed_when: false
+ failed_when: false
+ register: container_inspect_after_pull
+ notify:
+ - restart ceph mons container
+ - restart ceph osds container
+ - restart ceph mdss container
+ - restart ceph rgws container
+ # fix: was missing, so the nfs handler listening on this topic never fired
+ - restart ceph nfss container
+ - restart ceph mgrs container
+ - restart ceph rbdmirrors container
+
- name: export local ceph dev image
local_action: command docker save -o "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" "{{ ceph_docker_username }}/{{ ceph_docker_imagename }}:{{ ceph_docker_image_tag }}"
when: