container: restart container when there is a new image
author     Sébastien Han <seb@redhat.com>
           Fri, 15 Dec 2017 18:43:23 +0000 (19:43 +0100)
committer  Sébastien Han <seb@redhat.com>
           Wed, 10 Jan 2018 15:46:42 +0000 (16:46 +0100)
There was no ideal way to implement this.
We had several options and none of them was great, since handlers
cannot be triggered across roles.
We could have achieved it in one of the following ways:

* option 1 was to add a dependency in the meta of the ceph-docker-common
role. We had that long ago and decided to drop it so that everything is
managed via site.yml

* option 2 was to import files from another role. This is messy and we
don't do that anywhere in the current code base; we will keep it that way.

* option 3 was to pull the image from the ceph-config role. This is not
suitable either, since the docker command won't be available at that
point unless you run an Atomic distro. It would also mean pulling twice:
first in ceph-config, then again in ceph-docker-common.

The only option I came up with was to duplicate a bit of the ceph-config
handlers code.
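
The detection itself boils down to comparing the image's RepoDigests
before and after the pull. A minimal standalone sketch of the idea
(the task and register names here are illustrative, not the exact ones
used in the role):

    - name: inspect the image before pulling
      command: "docker inspect {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
      changed_when: false
      failed_when: false  # the image may not be present locally yet
      register: inspect_before

    - name: pull the image
      command: "docker pull {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"

    - name: inspect the image after pulling
      command: "docker inspect {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
      register: inspect_after

    - name: act only when the pull brought a different image
      debug:
        msg: "new image detected, daemons need a restart"
      when:
        - inspect_before.rc == 0
        - (inspect_before.stdout | from_json)[0].RepoDigests[0] != (inspect_after.stdout | from_json)[0].RepoDigests[0]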

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1526513
Signed-off-by: Sébastien Han <seb@redhat.com>
roles/ceph-docker-common/handlers/main.yml [new file with mode: 0644]
roles/ceph-docker-common/tasks/check_socket_container.yml [new file with mode: 0644]
roles/ceph-docker-common/tasks/fetch_image.yml
roles/ceph-docker-common/tasks/main.yml

diff --git a/roles/ceph-docker-common/handlers/main.yml b/roles/ceph-docker-common/handlers/main.yml
new file mode 100644 (file)
index 0000000..eac40e3
--- /dev/null
@@ -0,0 +1,115 @@
+---
+- name: restart ceph container mon daemon(s) because new image
+  command: /usr/bin/env bash /tmp/restart_mon_daemon.sh
+  listen: "restart ceph mons container"
+  when:
+    # We do not want to run these checks on initial deployment (no mon container running yet)
+    - mon_group_name in group_names
+    - ceph_mon_container_stat.get('rc') == 0
+    - inventory_hostname == groups.get(mon_group_name) | last
+    - inventory_hostname in play_hosts
+    - ceph_mon_container_stat.get('stdout_lines', [])|length != 0
+    - container_inspect_before_pull.rc == 0
+    - (container_inspect_before_pull.stdout | from_json)[0].RepoDigests[0] != (container_inspect_after_pull.stdout | from_json)[0].RepoDigests[0]
+  with_items: "{{ groups[mon_group_name] }}"
+  run_once: true
+  delegate_to: "{{ item }}"
+
+- name: restart ceph container osds daemon(s) because new image
+  command: /usr/bin/env bash /tmp/restart_osd_daemon.sh
+  listen: "restart ceph osds container"
+  when:
+    # We do not want to run these checks on initial deployment (no osd container running yet),
+    # except when a crush location is specified: ceph-disk starts the osds before the crush location is applied
+    - osd_group_name in group_names
+    - ceph_osd_container_stat.get('rc') == 0
+    - inventory_hostname == groups.get(osd_group_name) | last
+    - ((crush_location is defined and crush_location) or ceph_osd_container_stat.get('stdout_lines', [])|length != 0)
+    - handler_health_osd_check
+    # See https://github.com/ceph/ceph-ansible/issues/1457 for the condition below
+    - inventory_hostname in play_hosts
+    - container_inspect_before_pull.rc == 0
+    - (container_inspect_before_pull.stdout | from_json)[0].RepoDigests[0] != (container_inspect_after_pull.stdout | from_json)[0].RepoDigests[0]
+  with_items: "{{ groups[osd_group_name] }}"
+  run_once: true
+  delegate_to: "{{ item }}"
+
+- name: restart ceph container mds daemon(s) because new image
+  command: /usr/bin/env bash /tmp/restart_mds_daemon.sh
+  listen: "restart ceph mdss container"
+  when:
+    # We do not want to run these checks on initial deployment (no mds container running yet)
+    - mds_group_name in group_names
+    - ceph_mds_container_stat.get('rc') == 0
+    - inventory_hostname == groups.get(mds_group_name) | last
+    - inventory_hostname in play_hosts
+    - ceph_mds_container_stat.get('stdout_lines', [])|length != 0
+    - container_inspect_before_pull.rc == 0
+    - (container_inspect_before_pull.stdout | from_json)[0].RepoDigests[0] != (container_inspect_after_pull.stdout | from_json)[0].RepoDigests[0]
+  with_items: "{{ groups[mds_group_name] }}"
+  run_once: true
+  delegate_to: "{{ item }}"
+
+- name: restart ceph container rgw daemon(s) because new image
+  command: /usr/bin/env bash /tmp/restart_rgw_daemon.sh
+  listen: "restart ceph rgws container"
+  when:
+    # We do not want to run these checks on initial deployment (no rgw container running yet)
+    - rgw_group_name in group_names
+    - ceph_rgw_container_stat.get('rc') == 0
+    - inventory_hostname == groups.get(rgw_group_name) | last
+    - inventory_hostname in play_hosts
+    - ceph_rgw_container_stat.get('stdout_lines', [])|length != 0
+    - container_inspect_before_pull.rc == 0
+    - (container_inspect_before_pull.stdout | from_json)[0].RepoDigests[0] != (container_inspect_after_pull.stdout | from_json)[0].RepoDigests[0]
+  with_items: "{{ groups[rgw_group_name] }}"
+  run_once: true
+  delegate_to: "{{ item }}"
+
+- name: restart ceph container nfs daemon(s) because new image
+  command: /usr/bin/env bash /tmp/restart_nfs_daemon.sh
+  listen: "restart ceph nfss container"
+  when:
+    # We do not want to run these checks on initial deployment (no nfs container running yet)
+    - nfs_group_name in group_names
+    - ceph_nfs_container_stat.get('rc') == 0
+    - inventory_hostname == groups.get(nfs_group_name) | last
+    - inventory_hostname in play_hosts
+    - ceph_nfs_container_stat.get('stdout_lines', [])|length != 0
+    - container_inspect_before_pull.rc == 0
+    - (container_inspect_before_pull.stdout | from_json)[0].RepoDigests[0] != (container_inspect_after_pull.stdout | from_json)[0].RepoDigests[0]
+  with_items: "{{ groups[nfs_group_name] }}"
+  run_once: true
+  delegate_to: "{{ item }}"
+
+- name: restart ceph container rbd mirror daemon(s) because new image
+  command: /usr/bin/env bash /tmp/restart_rbd_mirror_daemon.sh
+  listen: "restart ceph rbdmirrors container"
+  when:
+    # We do not want to run these checks on initial deployment (no rbd mirror container running yet)
+    - rbdmirror_group_name in group_names
+    - ceph_rbd_mirror_container_stat.get('rc') == 0
+    - inventory_hostname == groups.get(rbdmirror_group_name) | last
+    - inventory_hostname in play_hosts
+    - ceph_rbd_mirror_container_stat.get('stdout_lines', [])|length != 0
+    - container_inspect_before_pull.rc == 0
+    - (container_inspect_before_pull.stdout | from_json)[0].RepoDigests[0] != (container_inspect_after_pull.stdout | from_json)[0].RepoDigests[0]
+  with_items: "{{ groups[rbdmirror_group_name] }}"
+  run_once: true
+  delegate_to: "{{ item }}"
+
+- name: restart ceph container mgr daemon(s) because new image
+  command: /usr/bin/env bash /tmp/restart_mgr_daemon.sh
+  listen: "restart ceph mgrs container"
+  when:
+    # We do not want to run these checks on initial deployment (no mgr container running yet)
+    - mgr_group_name in group_names
+    - ceph_mgr_container_stat.get('rc') == 0
+    - inventory_hostname == groups.get(mgr_group_name) | last
+    - inventory_hostname in play_hosts
+    - ceph_mgr_container_stat.get('stdout_lines', [])|length != 0
+    - container_inspect_before_pull.rc == 0
+    - (container_inspect_before_pull.stdout | from_json)[0].RepoDigests[0] != (container_inspect_after_pull.stdout | from_json)[0].RepoDigests[0]
+  with_items: "{{ groups[mgr_group_name] }}"
+  run_once: true
+  delegate_to: "{{ item }}"
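
For readers decoding the `from_json` expressions above: `docker
inspect <image>` prints a JSON array with one object per image, and
RepoDigests holds the content digest(s) the image is known by in its
registry. Roughly (digests shortened and made up):

    [
      {
        "Id": "sha256:1a2b3c...",
        "RepoDigests": [
          "docker.io/ceph/daemon@sha256:4d5e6f..."
        ]
      }
    ]

Comparing RepoDigests[0] from before and after the pull is therefore a
cheap way to tell whether `docker pull` actually fetched a new image.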
diff --git a/roles/ceph-docker-common/tasks/check_socket_container.yml b/roles/ceph-docker-common/tasks/check_socket_container.yml
new file mode 100644 (file)
index 0000000..7c945ab
--- /dev/null
@@ -0,0 +1,63 @@
+---
+- name: check for a mon container
+  command: "docker ps -q --filter='name=ceph-mon-{{ ansible_hostname }}'"
+  register: ceph_mon_container_stat
+  changed_when: false
+  failed_when: false
+  check_mode: no
+  when:
+    - inventory_hostname in groups.get(mon_group_name, [])
+
+- name: check for an osd container
+  command: "docker ps -q --filter='name=ceph-osd-{{ ansible_hostname }}'"
+  register: ceph_osd_container_stat
+  changed_when: false
+  failed_when: false
+  check_mode: no
+  when:
+    - inventory_hostname in groups.get(osd_group_name, [])
+
+- name: check for a mds container
+  command: "docker ps -q --filter='name=ceph-mds-{{ ansible_hostname }}'"
+  register: ceph_mds_container_stat
+  changed_when: false
+  failed_when: false
+  check_mode: no
+  when:
+    - inventory_hostname in groups.get(mds_group_name, [])
+
+- name: check for a rgw container
+  command: "docker ps -q --filter='name=ceph-rgw-{{ ansible_hostname }}'"
+  register: ceph_rgw_container_stat
+  changed_when: false
+  failed_when: false
+  check_mode: no
+  when:
+    - inventory_hostname in groups.get(rgw_group_name, [])
+
+- name: check for a mgr container
+  command: "docker ps -q --filter='name=ceph-mgr-{{ ansible_hostname }}'"
+  register: ceph_mgr_container_stat
+  changed_when: false
+  failed_when: false
+  check_mode: no
+  when:
+    - inventory_hostname in groups.get(mgr_group_name, [])
+
+- name: check for a rbd mirror container
+  command: "docker ps -q --filter='name=ceph-rbd-mirror-{{ ansible_hostname }}'"
+  register: ceph_rbd_mirror_container_stat
+  changed_when: false
+  failed_when: false
+  check_mode: no
+  when:
+    - inventory_hostname in groups.get(rbdmirror_group_name, [])
+
+- name: check for a nfs container
+  command: "docker ps -q --filter='name=ceph-nfs-{{ ansible_hostname }}'"
+  register: ceph_nfs_container_stat
+  changed_when: false
+  failed_when: false
+  check_mode: no
+  when:
+    - inventory_hostname in groups.get(nfs_group_name, [])
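
These checks rely on `docker ps -q` printing only the IDs of matching
running containers, one per line, and nothing at all when there is no
match. On an initial deployment `stdout_lines` is therefore empty and
the handlers above skip the restart. A sketch of the guard in
isolation (hostname and container ID made up):

    # $ docker ps -q --filter='name=ceph-mon-mon0'
    # 3f4f6a2c91b2      <- existing cluster: one ID per running container
    #
    # $ docker ps -q --filter='name=ceph-mon-mon0'
    #                   <- fresh deployment: no output at all
    - name: consider a restart only when a container is already running
      debug:
        msg: "mon container found, a restart may be needed"
      when:
        - ceph_mon_container_stat.get('rc') == 0
        - ceph_mon_container_stat.get('stdout_lines', [])|length != 0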
diff --git a/roles/ceph-docker-common/tasks/fetch_image.yml b/roles/ceph-docker-common/tasks/fetch_image.yml
index ad21ff9f56059af9c7713053d74ac1d8e353162e..49a8f0d2716c53f98b18476d22b0daa18990ac55 100644 (file)
@@ -1,6 +1,12 @@
 ---
-# Normal case - pull image from registry
-- name: "pull {{ ceph_docker_image }} image"
+# NOTE(leseb): using failed_when to handle the case when the image is not present yet
+- name: "inspecting {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image before pulling"
+  command: "docker inspect {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+  changed_when: false
+  failed_when: false
+  register: container_inspect_before_pull
+
+- name: "pulling {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image"
   command: "timeout {{ docker_pull_timeout }} docker pull {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
   changed_when: false
   register: docker_image
   when:
     - (ceph_docker_dev_image is undefined or not ceph_docker_dev_image)
 
-# Dev case - export local dev image and send it across
+- name: "inspecting {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image after pulling"
+  command: "docker inspect {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+  failed_when: false
+  register: container_inspect_after_pull
+  notify:
+    - restart ceph mons container
+    - restart ceph osds container
+    - restart ceph mdss container
+    - restart ceph rgws container
+    - restart ceph mgrs container
+    - restart ceph rbdmirrors container
+    - restart ceph nfss container
+
 - name: export local ceph dev image
   local_action: command docker save -o "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" "{{ ceph_docker_username }}/{{ ceph_docker_imagename }}:{{ ceph_docker_image_tag }}"
   when:
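
Note the deliberate asymmetry between the two inspect tasks: the one
before the pull sets changed_when: false, while the one after the pull
does not, so it always reports changed and its notifications always
fire. Whether a restart actually happens is then decided by the digest
comparison in each handler's when clause. The flow, assuming a new
image has been published (digests made up):

    # 1. docker inspect (before) -> RepoDigests[0] == ...@sha256:aaa
    # 2. docker pull             -> fetches the new image layers
    # 3. docker inspect (after)  -> RepoDigests[0] == ...@sha256:bbb
    #    the task reports "changed" and notifies the listen topics
    # 4. at the end of the play the listening handlers see the digest
    #    mismatch and run /tmp/restart_<daemon>_daemon.sh where needed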
diff --git a/roles/ceph-docker-common/tasks/main.yml b/roles/ceph-docker-common/tasks/main.yml
index 434c2d32433cea0a216bacce2df085fb17f437f3..5551a7a7b61dc4578ef6b43e92f54d2b9b524fce 100644 (file)
@@ -88,6 +88,9 @@
     - ansible_os_family == 'Debian'
     - ntp_service_enabled
 
+- name: include check_socket_container.yml
+  include: check_socket_container.yml
+
 - name: include fetch_image.yml
   include: fetch_image.yml
   tags:
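
One more detail: the new include runs right before fetch_image.yml, so
the ceph_*_container_stat results are registered by the time the
handlers fire at the end of the play. The comments below are added
here for illustration only:

    - name: include check_socket_container.yml   # record which containers are running
      include: check_socket_container.yml

    - name: include fetch_image.yml              # inspect, pull, inspect again, notify
      include: fetch_image.yml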