- define `timeout_command` once in `ceph-defaults` instead of setting it
  via a `set_fact` task.
- also add `--foreground`, which seems to fix an issue we are facing when
  using `timeout` with `podman`.
- use this fact in the `is ceph running already?` task.
Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
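
For reference, a minimal sketch of the behaviour being worked around; the
image name and the 300-second value are illustrative, not taken from this
patch. Per timeout(1), `--foreground` allows the command to read from the
TTY and receive TTY signals when timeout is not run directly from a shell
prompt:

    # without --foreground, timeout runs the child in a separate process
    # group; podman pulls invoked this way have been seen to hang
    timeout -s KILL 300 podman pull docker.io/ceph/daemon:latest

    # with --foreground, the child keeps the controlling TTY, which
    # seems to avoid the hang
    timeout --foreground -s KILL 300 podman pull docker.io/ceph/daemon:latest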
     - nfs_group_name in group_names
     - ceph_nfs_container_inspect_before_pull.get('rc') == 0
-- name: set_fact timeout_command
-  set_fact:
-    timeout_command: "{{ 'timeout -s KILL ' ~ docker_pull_timeout if (docker_pull_timeout != '0') else '' }}"
-  when:
-    - (ceph_docker_dev_image is undefined or not ceph_docker_dev_image)
-
- name: "pulling {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image"
command: "{{ timeout_command }} {{ container_binary }} pull {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
changed_when: false
 ceph_docker_on_openstack: false
 containerized_deployment: False
 container_binary:
-
+timeout_command: "{{ 'timeout --foreground -s KILL ' ~ docker_pull_timeout if (docker_pull_timeout != '0') and (ceph_docker_dev_image is undefined or not ceph_docker_dev_image) else '' }}"
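
To illustrate how the consolidated expression evaluates, with assumed
inventory values that are not part of this patch:

    # docker_pull_timeout: '300', ceph_docker_dev_image unset
    #   -> timeout_command: "timeout --foreground -s KILL 300"
    # docker_pull_timeout: '0' (or ceph_docker_dev_image set)
    #   -> timeout_command: ""

Note the dev-image condition moves from the former `when:` clause into the
Jinja expression itself, so consumers of the fact no longer need their own
guard.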
 # this is only here for usage with the rolling_update.yml playbook
 # because it blindly picks a mon, which may be down because
 # of the rolling update
 - name: is ceph running already?
-  command: "timeout 5 {{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s -f json"
+  command: "{{ timeout_command }} {{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s -f json"
   changed_when: false
   failed_when: false
   check_mode: no
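
With the same illustrative values, and assuming `docker_exec_cmd` expands
to something like `podman exec ceph-mon-mon0`, the status check now runs
roughly:

    timeout --foreground -s KILL 300 podman exec ceph-mon-mon0 ceph --cluster ceph -s -f json

One behaviour change worth noting: the hard-coded `timeout 5` is gone, so
when `docker_pull_timeout` is '0' (or a dev image is in use) the status
check runs with no timeout at all.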