Update each role's task to use the respective role's username, image
name, and image tag when checking whether a container is already
running. The hardcoded 'ceph/daemon' pattern never matched any running
containers, which caused false failures: checks.yml was subsequently
run and tripped over cluster files left behind.
Signed-off-by: Ivan Font <ivan.font@redhat.com>
---
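Note: each task assumes the role defines its own image variables. A
minimal sketch of what the defaults might look like for the mds role
(values assumed here, not part of this patch):

# e.g. roles/ceph-mds/defaults/main.yml (assumed values)
ceph_mds_docker_username: ceph
ceph_mds_docker_imagename: daemon
ceph_mds_docker_image_tag: latest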
- name: check if a cluster is already running
- shell: "docker ps | grep -sq 'ceph/daemon'"
+ shell: "docker ps | grep -sq '{{ ceph_mds_docker_username }}/{{ ceph_mds_docker_imagename }}:{{ ceph_mds_docker_image_tag }}'"
register: ceph_health
changed_when: false
failed_when: false
---
- name: check if a cluster is already running
- shell: "docker ps | grep -sq 'ceph/daemon'"
+ shell: "docker ps | grep -sq '{{ ceph_osd_docker_username }}/{{ ceph_osd_docker_imagename }}:{{ ceph_osd_docker_image_tag }}'"
register: ceph_health
changed_when: false
failed_when: false
---
- name: check if a cluster is already running
- shell: "docker ps | grep -sq 'ceph/daemon'"
+ shell: "docker ps | grep -sq '{{ ceph_rbd_mirror_docker_username }}/{{ ceph_rbd_mirror_docker_imagename }}:{{ ceph_rbd_mirror_docker_image_tag }}'"
register: ceph_health
changed_when: false
failed_when: false
---
- name: check if a cluster is already running
- shell: "docker ps | grep -sq 'ceph/daemon'"
+ shell: "docker ps | grep -sq '{{ ceph_rgw_docker_username }}/{{ ceph_rgw_docker_imagename }}:{{ ceph_rgw_docker_image_tag }}'"
register: ceph_health
changed_when: false
failed_when: false
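Because failed_when is false, downstream logic presumably keys on the
registered result's return code. A hedged sketch of how checks.yml
might be gated (the condition is assumed from the commit message; the
actual gating task is not shown in this patch):

- include: checks.yml
  when: ceph_health.rc != 0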