when:
- ansible_os_family == 'Debian'
-- block:
- - name: copy mon restart script
- template:
- src: restart_mon_daemon.sh.j2
- dest: /tmp/restart_mon_daemon.sh
- owner: root
- group: root
- mode: 0750
- listen: "restart ceph mons"
-
- - name: restart ceph mon daemon(s)
- command: /tmp/restart_mon_daemon.sh
- listen: "restart ceph mons"
+- name: copy mon restart script
+ template:
+ src: restart_mon_daemon.sh.j2
+ dest: /tmp/restart_mon_daemon.sh
+ owner: root
+ group: root
+ mode: 0750
+ listen: "restart ceph mons"
+ when:
+ - mon_group_name in group_names
+ - inventory_hostname in play_hosts
+
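+# The copy task above and both restart tasks below listen on the same
+# "restart ceph mons" topic, so a single notify runs the copy task plus
+# whichever restart task matches the deployment type.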
+- name: restart ceph mon daemon(s) - non container
+ command: /tmp/restart_mon_daemon.sh
+ listen: "restart ceph mons"
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- mon_group_name in group_names
+ - not containerized_deployment
- mon_socket_stat.rc == 0
+
+- name: restart ceph mon daemon(s) - container
+ command: /tmp/restart_mon_daemon.sh
+ listen: "restart ceph mons"
+ when:
+ # We do not want to run these checks on initial deployment (`socket.rc == 0`)
+ - mon_group_name in group_names
+ - containerized_deployment
+ - ceph_mon_container_stat.stdout_lines|length != 0
+
# This does not just restart OSDs but everything else too. Unfortunately
# at this time the ansible role does not have an OSD id list to use
# for restarting them specifically.
- osd_group_name in group_names
- inventory_hostname in play_hosts
-- name: restart containerized ceph osds daemon(s)
+- name: restart ceph osds daemon(s) - non container
command: /tmp/restart_osd_daemon.sh
listen: "restart ceph osds"
- with_items: "{{ socket_osd_container_stat.results | default([]) }}"
when:
- # We do not want to run these checks on initial deployment (`socket_osd_container_stat.results[n].rc == 0`)
- # except when a crush location is specified. ceph-disk will start the osds before the osd crush location is specified
- osd_group_name in group_names
- - containerized_deployment
- - ((crush_location is defined and crush_location) or item.get('rc') == 0)
+ - not containerized_deployment
+  # We do not want to run these checks on initial deployment (`osd_socket_stat.rc == 0`)
+ # except when a crush location is specified. ceph-disk will start the osds before the osd crush location is specified
+ - ((crush_location is defined and crush_location) or osd_socket_stat.rc == 0)
+ - ceph_current_fsid.rc == 0
- handler_health_osd_check
# See https://github.com/ceph/ceph-ansible/issues/1457 for the condition below
- inventory_hostname in play_hosts
-- name: restart non-containerized ceph osds daemon(s)
+- name: restart ceph osds daemon(s) - container
command: /tmp/restart_osd_daemon.sh
listen: "restart ceph osds"
when:
- - osd_group_name in group_names
- - not containerized_deployment
- # We do not want to run these checks on initial deployment (`socket_osd_container.results[n].rc == 0`)
+  # We do not want to run these checks on initial deployment (`ceph_osd_container_stat.stdout_lines|length != 0`)
# except when a crush location is specified. ceph-disk will start the osds before the osd crush location is specified
- - ((crush_location is defined and crush_location) or osd_socket_stat.rc == 0)
- - ceph_current_fsid.rc == 0
+ - osd_group_name in group_names
+ - containerized_deployment
+ - ((crush_location is defined and crush_location) or ceph_osd_container_stat.stdout_lines|length != 0)
- handler_health_osd_check
# See https://github.com/ceph/ceph-ansible/issues/1457 for the condition below
- inventory_hostname in play_hosts
-- block:
- - name: copy mds restart script
- template:
- src: restart_mds_daemon.sh.j2
- dest: /tmp/restart_mds_daemon.sh
- owner: root
- group: root
- mode: 0750
- listen: "restart ceph mdss"
- when:
- - mds_group_name in group_names
- - inventory_hostname in play_hosts
-
- - name: restart ceph mds daemon(s)
- command: /tmp/restart_mds_daemon.sh
- listen: "restart ceph mdss"
+- name: copy mds restart script
+ template:
+ src: restart_mds_daemon.sh.j2
+ dest: /tmp/restart_mds_daemon.sh
+ owner: root
+ group: root
+ mode: 0750
+ listen: "restart ceph mdss"
+ when:
+ - mds_group_name in group_names
+ - inventory_hostname in play_hosts
+
+- name: restart ceph mds daemon(s) - non container
+ command: /tmp/restart_mds_daemon.sh
+ listen: "restart ceph mdss"
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- mds_group_name in group_names
+ - not containerized_deployment
- mds_socket_stat.rc == 0
+
+- name: restart ceph mds daemon(s) - container
+ command: /tmp/restart_mds_daemon.sh
+ listen: "restart ceph mdss"
+ when:
+ # We do not want to run these checks on initial deployment (`socket.rc == 0`)
+ - mds_group_name in group_names
+ - containerized_deployment
+ - ceph_mds_container_stat.stdout_lines|length != 0
+
- name: copy rgw restart script
template:
src: restart_rgw_daemon.sh.j2
- rgw_group_name in group_names
- inventory_hostname in play_hosts
-- name: restart ceph rgw daemon(s)
+- name: restart ceph rgw daemon(s) - non container
command: /tmp/restart_rgw_daemon.sh
listen: "restart ceph rgws"
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- rgw_group_name in group_names
+ - not containerized_deployment
- rgw_socket_stat.rc == 0
+
+- name: restart ceph rgw daemon(s) - container
+ command: /tmp/restart_rgw_daemon.sh
+ listen: "restart ceph rgws"
+ when:
+ # We do not want to run these checks on initial deployment (`socket.rc == 0`)
+ - rgw_group_name in group_names
+ - containerized_deployment
+ - ceph_rgw_container_stat.stdout_lines|length != 0
+
- name: copy nfs restart script
template:
src: restart_nfs_daemon.sh.j2
- nfs_group_name in group_names
- inventory_hostname in play_hosts
-- name: restart ceph nfs daemon(s)
+- name: restart ceph nfs daemon(s) - non container
command: /tmp/restart_nfs_daemon.sh
listen: "restart ceph nfss"
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- nfs_group_name in group_names
+ - not containerized_deployment
- nfs_socket_stat.rc == 0
+
+- name: restart ceph nfs daemon(s) - container
+ command: /tmp/restart_nfs_daemon.sh
+ listen: "restart ceph nfss"
+ when:
+ # We do not want to run these checks on initial deployment (`socket.rc == 0`)
+ - nfs_group_name in group_names
+ - containerized_deployment
+ - ceph_nfs_container_stat.stdout_lines|length != 0
+
- name: copy rbd mirror restart script
template:
src: restart_rbd_mirror_daemon.sh.j2
- rbdmirror_group_name in group_names
- inventory_hostname in play_hosts
-- name: restart ceph rbd mirror daemon(s)
+- name: restart ceph rbd mirror daemon(s) - non container
command: /tmp/restart_rbd_mirror_daemon.sh
listen: "restart ceph rbdmirrors"
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- rbdmirror_group_name in group_names
+ - not containerized_deployment
- rbd_mirror_socket_stat.rc == 0
+
+- name: restart ceph rbd mirror daemon(s) - container
+ command: /tmp/restart_rbd_mirror_daemon.sh
+ listen: "restart ceph rbdmirrors"
+ when:
+ # We do not want to run these checks on initial deployment (`socket.rc == 0`)
+ - rbdmirror_group_name in group_names
+ - containerized_deployment
+ - ceph_rbd_mirror_container_stat.stdout_lines|length != 0
+
- name: copy mgr restart script
template:
src: restart_mgr_daemon.sh.j2
- mgr_group_name in group_names
- inventory_hostname in play_hosts
-- name: restart ceph mgr daemon(s)
+- name: restart ceph mgr daemon(s) - non container
command: /tmp/restart_mgr_daemon.sh
listen: "restart ceph mgrs"
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- mgr_group_name in group_names
+ - not containerized_deployment
- mgr_socket_stat.rc == 0
+
+- name: restart ceph mgr daemon(s) - container
+ command: /tmp/restart_mgr_daemon.sh
+ listen: "restart ceph mgrs"
+ when:
+ # We do not want to run these checks on initial deployment (`socket.rc == 0`)
+ - mgr_group_name in group_names
+ - containerized_deployment
+ - ceph_mgr_container_stat.stdout_lines|length != 0
---
-# These checks are used to avoid running handlers at initial deployment.
-- name: set_fact docker_exec_cmd mon
- set_fact:
- docker_exec_cmd: "docker exec ceph-mon-{{ ansible_hostname }}"
+- name: include check_socket_container.yml
+ include: check_socket_container.yml
when:
- - inventory_hostname in groups.get(mon_group_name, [])
- containerized_deployment
-- name: check for a ceph mon socket
- shell: |
- {{ docker_exec_cmd | default('') }} bash -c 'stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-mon*.asok'
- changed_when: false
- failed_when: false
- always_run: true
- register: mon_socket_stat
+
+- name: include check_socket_non_container.yml
+  include: check_socket_non_container.yml
when:
-    - inventory_hostname in groups.get(mon_group_name, [])
+    - not containerized_deployment
-
-- name: check if the ceph mon socket is in-use
- shell: |
- {{ docker_exec_cmd | default('') }} bash -c 'fuser --silent {{ mon_socket_stat.stdout }}'
- changed_when: false
- failed_when: false
- always_run: true
- register: mon_socket
- when:
- - inventory_hostname in groups.get(mon_group_name, [])
- - mon_socket_stat.rc == 0
-
-- name: remove ceph mon socket if exists and not used by a process
- file:
- name: "{{ mon_socket_stat.stdout }}"
- state: absent
- when:
- - inventory_hostname in groups.get(mon_group_name, [])
- - not containerized_deployment
- - mon_socket_stat.rc == 0
- - mon_socket.rc != 0
-
-- name: check for a ceph osd socket
- shell: |
- stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-osd*.asok
- changed_when: false
- failed_when: false
- always_run: true
- register: osd_socket_stat
- when:
- - inventory_hostname in groups.get(osd_group_name, [])
- - not containerized_deployment
-
-- name: check if the ceph osd socket is in-use
- shell: |
- fuser --silent {{ osd_socket_stat.stdout }}
- changed_when: false
- failed_when: false
- always_run: true
- register: osd_socket
- when:
- - inventory_hostname in groups.get(osd_group_name, [])
- - not containerized_deployment
- - osd_socket_stat.rc == 0
-
-- name: remove ceph osd socket if exists and not used by a process
- file:
- name: "{{ osd_socket_stat.stdout }}"
- state: absent
- when:
- - inventory_hostname in groups.get(osd_group_name, [])
- - not containerized_deployment
- - osd_socket_stat.rc == 0
- - osd_socket.rc != 0
-
-- name: set_fact docker_exec_cmd mds
- set_fact:
- docker_exec_cmd: "docker exec ceph-mds-{{ ansible_hostname }}"
- when:
- - inventory_hostname in groups.get(mds_group_name, [])
- - containerized_deployment
-
-- name: check for a ceph mds socket
- shell: |
- {{ docker_exec_cmd | default('') }} bash -c 'stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-mds*.asok'
- changed_when: false
- failed_when: false
- always_run: true
- register: mds_socket_stat
- when:
- - inventory_hostname in groups.get(mds_group_name, [])
-
-- name: check if the ceph mds socket is in-use
- shell: |
- {{ docker_exec_cmd | default('') }} bash -c 'fuser --silent {{ mds_socket_stat.stdout }}'
- changed_when: false
- failed_when: false
- always_run: true
- register: mds_socket
- when:
- - inventory_hostname in groups.get(mds_group_name, [])
- - mds_socket_stat.rc == 0
-
-- name: remove ceph mds socket if exists and not used by a process
- file:
- name: "{{ mds_socket_stat.stdout }}"
- state: absent
- when:
- - inventory_hostname in groups.get(mds_group_name, [])
- - not containerized_deployment
- - mds_socket_stat.rc == 0
- - mds_socket.rc != 0
-
-- name: set_fact docker_exec_cmd rgw
- set_fact:
- docker_exec_cmd: "docker exec ceph-rgw-{{ ansible_hostname }}"
- when:
- - inventory_hostname in groups.get(rgw_group_name, [])
- - containerized_deployment
-
-- name: check for a ceph rgw socket
- shell: |
- {{ docker_exec_cmd | default('') }} bash -c 'stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-client.rgw*.asok'
- changed_when: false
- failed_when: false
- always_run: true
- register: rgw_socket_stat
- when:
- - inventory_hostname in groups.get(rgw_group_name, [])
-
-- name: check if the ceph rgw socket is in-use
- shell: |
- {{ docker_exec_cmd | default('') }} bash -c 'fuser --silent {{ rgw_socket_stat.stdout }}'
- changed_when: false
- failed_when: false
- always_run: true
- register: rgw_socket
- when:
- - inventory_hostname in groups.get(rgw_group_name, [])
- - rgw_socket_stat.rc == 0
-
-- name: remove ceph rgw socket if exists and not used by a process
- file:
- name: "{{ rgw_socket_stat.stdout }}"
- state: absent
- when:
- - inventory_hostname in groups.get(rgw_group_name, [])
- - not containerized_deployment
- - rgw_socket_stat.rc == 0
- - rgw_socket.rc != 0
-
-- name: set_fact docker_exec_cmd mgr
- set_fact:
- docker_exec_cmd: "docker exec ceph-mgr-{{ ansible_hostname }}"
- when:
- - inventory_hostname in groups.get(mgr_group_name, [])
-    - containerized_deployment
-
-- name: check for a ceph mgr socket
- shell: |
- {{ docker_exec_cmd | default('') }} bash -c 'stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-mgr*.asok'
- changed_when: false
- failed_when: false
- always_run: true
- register: mgr_socket_stat
- when:
- - inventory_hostname in groups.get(mgr_group_name, [])
-
-- name: check if the ceph mgr socket is in-use
- shell: |
- {{ docker_exec_cmd | default('') }} bash -c 'fuser --silent {{ mgr_socket_stat.stdout }}'
- changed_when: false
- failed_when: false
- always_run: true
- register: mgr_socket
- when:
- - inventory_hostname in groups.get(mgr_group_name, [])
- - mgr_socket_stat.rc == 0
-
-- name: remove ceph mgr socket if exists and not used by a process
- file:
- name: "{{ mgr_socket_stat.stdout }}"
- state: absent
- when:
- - inventory_hostname in groups.get(mgr_group_name, [])
- - not containerized_deployment
- - mgr_socket_stat.rc == 0
- - mgr_socket.rc != 0
-
-- name: set_fact docker_exec_cmd rbd mirror
- set_fact:
- docker_exec_cmd: "docker exec ceph-rbd-mirror-{{ ansible_hostname }}"
- when:
- - inventory_hostname in groups.get(rbdmirror_group_name, [])
- - containerized_deployment
-
-- name: check for a ceph rbd mirror socket
- shell: |
- {{ docker_exec_cmd | default('') }} bash -c 'stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-client.rbd-mirror*.asok'
- changed_when: false
- failed_when: false
- always_run: true
- register: rbd_mirror_socket_stat
- when:
- - inventory_hostname in groups.get(rbdmirror_group_name, [])
-
-- name: check if the ceph rbd mirror socket is in-use
- shell: |
- {{ docker_exec_cmd | default('') }} bash -c 'fuser --silent {{ rbd_mirror_socket_stat.stdout }}'
- changed_when: false
- failed_when: false
- always_run: true
- register: rbd_mirror_socket
- when:
- - inventory_hostname in groups.get(rbdmirror_group_name, [])
- - rbd_mirror_socket_stat.rc == 0
-
-- name: remove ceph rbd mirror socket if exists and not used by a process
- file:
- name: "{{ rbd_mirror_socket_stat.stdout }}"
- state: absent
- when:
- - inventory_hostname in groups.get(rbdmirror_group_name, [])
- - not containerized_deployment
- - rbd_mirror_socket_stat.rc == 0
- - rbd_mirror_socket.rc != 0
-
-- name: set_fact docker_exec_cmd nfs ganesha
- set_fact:
- docker_exec_cmd: "docker exec ceph-rbd-mirror-{{ ansible_hostname }}"
- when:
- - inventory_hostname in groups.get(nfs_group_name, [])
- - containerized_deployment
-
-- name: check for a ceph nfs ganesha socket
- shell: |
- {{ docker_exec_cmd | default('') }} bash -c 'stat --printf=%n /var/run/ganesha.pid'
- changed_when: false
- failed_when: false
- always_run: true
- register: nfs_socket_stat
- when:
- - inventory_hostname in groups.get(nfs_group_name, [])
-
-- name: check if the ceph nfs ganesha socket is in-use
- shell: |
- {{ docker_exec_cmd | default('') }} bash -c 'fuser --silent {{ nfs_socket_stat.stdout }}'
- changed_when: false
- failed_when: false
- always_run: true
- register: nfs_socket
- when:
- - inventory_hostname in groups.get(nfs_group_name, [])
- - nfs_socket_stat.rc == 0
-
-- name: remove ceph nfs ganesha socket if exists and not used by a process
- file:
- name: "{{ nfs_socket_stat.stdout }}"
- state: absent
- when:
- - inventory_hostname in groups.get(nfs_group_name, [])
- - not containerized_deployment
- - nfs_socket_stat.rc == 0
- - nfs_socket.rc != 0
-
-- name: check for a ceph socket in containerized deployment (osds)
- shell: |
- docker exec ceph-osd-"{{ ansible_hostname }}"-"{{ item | replace('/dev/', '') }}" bash -c 'stat --printf=%n /var/run/ceph/*.asok'
- changed_when: false
- failed_when: false
- always_run: true
- register: socket_osd_container_stat
- with_items: "{{ devices }}"
- when:
- - containerized_deployment
- - inventory_hostname in groups.get(osd_group_name, [])
-
--- /dev/null
+---
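+# `docker ps -q --filter` prints one container id per line, so empty output
+# means the daemon container is not running. The restart handlers use the
+# matching `*_container_stat.stdout_lines|length != 0` condition to skip
+# restarts on initial deployment.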
+- name: check for a mon container
+ command: "docker ps -q --filter='name=ceph-mon-{{ ansible_hostname }}'"
+ register: ceph_mon_container_stat
+ changed_when: false
+ failed_when: false
+ always_run: true
+ when:
+ - inventory_hostname in groups.get(mon_group_name, [])
+
+- name: check for an osd container
+ command: "docker ps -q --filter='name=ceph-osd-{{ ansible_hostname }}'"
+ register: ceph_osd_container_stat
+ changed_when: false
+ failed_when: false
+ always_run: true
+ when:
+ - inventory_hostname in groups.get(osd_group_name, [])
+
+- name: check for an mds container
+ command: "docker ps -q --filter='name=ceph-mds-{{ ansible_hostname }}'"
+ register: ceph_mds_container_stat
+ changed_when: false
+ failed_when: false
+ always_run: true
+ when:
+ - inventory_hostname in groups.get(mds_group_name, [])
+
+- name: check for an rgw container
+ command: "docker ps -q --filter='name=ceph-rgw-{{ ansible_hostname }}'"
+ register: ceph_rgw_container_stat
+ changed_when: false
+ failed_when: false
+ always_run: true
+ when:
+ - inventory_hostname in groups.get(rgw_group_name, [])
+
+- name: check for an mgr container
+ command: "docker ps -q --filter='name=ceph-mgr-{{ ansible_hostname }}'"
+ register: ceph_mgr_container_stat
+ changed_when: false
+ failed_when: false
+ always_run: true
+ when:
+ - inventory_hostname in groups.get(mgr_group_name, [])
+
+- name: check for an rbd mirror container
+ command: "docker ps -q --filter='name=ceph-rbd-mirror-{{ ansible_hostname }}'"
+ register: ceph_rbd_mirror_container_stat
+ changed_when: false
+ failed_when: false
+ always_run: true
+ when:
+ - inventory_hostname in groups.get(rbdmirror_group_name, [])
+
+- name: check for an nfs container
+ command: "docker ps -q --filter='name=ceph-nfs-{{ ansible_hostname }}'"
+ register: ceph_nfs_container_stat
+ changed_when: false
+ failed_when: false
+ always_run: true
+ when:
+ - inventory_hostname in groups.get(nfs_group_name, [])
--- /dev/null
+---
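+# These checks are used to avoid running handlers at initial deployment.
+# For each daemon: stat the admin socket, ask fuser whether a process still
+# holds it, then remove a stale socket left behind by a dead daemon.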
+- name: check for a ceph mon socket
+ shell: stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-mon*.asok
+ changed_when: false
+ failed_when: false
+ always_run: true
+ register: mon_socket_stat
+ when:
+ - inventory_hostname in groups.get(mon_group_name, [])
+
+- name: check if the ceph mon socket is in-use
+ command: fuser --silent {{ mon_socket_stat.stdout }}
+ changed_when: false
+ failed_when: false
+ always_run: true
+ register: mon_socket
+ when:
+ - inventory_hostname in groups.get(mon_group_name, [])
+ - mon_socket_stat.rc == 0
+
+- name: remove ceph mon socket if exists and not used by a process
+ file:
+ name: "{{ mon_socket_stat.stdout }}"
+ state: absent
+ when:
+ - inventory_hostname in groups.get(mon_group_name, [])
+ - mon_socket_stat.rc == 0
+ - mon_socket.rc != 0
+
+- name: check for a ceph osd socket
+ shell: |
+ stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-osd*.asok
+ changed_when: false
+ failed_when: false
+ always_run: true
+ register: osd_socket_stat
+ when:
+ - inventory_hostname in groups.get(osd_group_name, [])
+
+- name: check if the ceph osd socket is in-use
+ command: fuser --silent {{ osd_socket_stat.stdout }}
+ changed_when: false
+ failed_when: false
+ always_run: true
+ register: osd_socket
+ when:
+ - inventory_hostname in groups.get(osd_group_name, [])
+ - osd_socket_stat.rc == 0
+
+- name: remove ceph osd socket if exists and not used by a process
+ file:
+ name: "{{ osd_socket_stat.stdout }}"
+ state: absent
+ when:
+ - inventory_hostname in groups.get(osd_group_name, [])
+ - osd_socket_stat.rc == 0
+ - osd_socket.rc != 0
+
+- name: check for a ceph mds socket
+ shell: |
+ stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-mds*.asok
+ changed_when: false
+ failed_when: false
+ always_run: true
+ register: mds_socket_stat
+ when:
+ - inventory_hostname in groups.get(mds_group_name, [])
+
+- name: check if the ceph mds socket is in-use
+ command: fuser --silent {{ mds_socket_stat.stdout }}
+ changed_when: false
+ failed_when: false
+ always_run: true
+ register: mds_socket
+ when:
+ - inventory_hostname in groups.get(mds_group_name, [])
+ - mds_socket_stat.rc == 0
+
+- name: remove ceph mds socket if exists and not used by a process
+ file:
+ name: "{{ mds_socket_stat.stdout }}"
+ state: absent
+ when:
+ - inventory_hostname in groups.get(mds_group_name, [])
+ - mds_socket_stat.rc == 0
+ - mds_socket.rc != 0
+
+- name: check for a ceph rgw socket
+ shell: |
+ stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-client.rgw*.asok
+ changed_when: false
+ failed_when: false
+ always_run: true
+ register: rgw_socket_stat
+ when:
+ - inventory_hostname in groups.get(rgw_group_name, [])
+
+- name: check if the ceph rgw socket is in-use
+ command: fuser --silent {{ rgw_socket_stat.stdout }}
+ changed_when: false
+ failed_when: false
+ always_run: true
+ register: rgw_socket
+ when:
+ - inventory_hostname in groups.get(rgw_group_name, [])
+ - rgw_socket_stat.rc == 0
+
+- name: remove ceph rgw socket if exists and not used by a process
+ file:
+ name: "{{ rgw_socket_stat.stdout }}"
+ state: absent
+ when:
+ - inventory_hostname in groups.get(rgw_group_name, [])
+ - rgw_socket_stat.rc == 0
+ - rgw_socket.rc != 0
+
+- name: check for a ceph mgr socket
+ shell: |
+ stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-mgr*.asok
+ changed_when: false
+ failed_when: false
+ always_run: true
+ register: mgr_socket_stat
+ when:
+ - inventory_hostname in groups.get(mgr_group_name, [])
+
+- name: check if the ceph mgr socket is in-use
+ command: fuser --silent {{ mgr_socket_stat.stdout }}
+ changed_when: false
+ failed_when: false
+ always_run: true
+ register: mgr_socket
+ when:
+ - inventory_hostname in groups.get(mgr_group_name, [])
+ - mgr_socket_stat.rc == 0
+
+- name: remove ceph mgr socket if exists and not used by a process
+ file:
+ name: "{{ mgr_socket_stat.stdout }}"
+ state: absent
+ when:
+ - inventory_hostname in groups.get(mgr_group_name, [])
+ - mgr_socket_stat.rc == 0
+ - mgr_socket.rc != 0
+
+- name: check for a ceph rbd mirror socket
+ shell: |
+ stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-client.rbd-mirror*.asok
+ changed_when: false
+ failed_when: false
+ always_run: true
+ register: rbd_mirror_socket_stat
+ when:
+ - inventory_hostname in groups.get(rbdmirror_group_name, [])
+
+- name: check if the ceph rbd mirror socket is in-use
+ command: fuser --silent {{ rbd_mirror_socket_stat.stdout }}
+ changed_when: false
+ failed_when: false
+ always_run: true
+ register: rbd_mirror_socket
+ when:
+ - inventory_hostname in groups.get(rbdmirror_group_name, [])
+ - rbd_mirror_socket_stat.rc == 0
+
+- name: remove ceph rbd mirror socket if exists and not used by a process
+ file:
+ name: "{{ rbd_mirror_socket_stat.stdout }}"
+ state: absent
+ when:
+ - inventory_hostname in groups.get(rbdmirror_group_name, [])
+ - rbd_mirror_socket_stat.rc == 0
+ - rbd_mirror_socket.rc != 0
+
+- name: check for a ceph nfs ganesha socket
+ command: stat --printf=%n /var/run/ganesha.pid
+ changed_when: false
+ failed_when: false
+ always_run: true
+ register: nfs_socket_stat
+ when:
+ - inventory_hostname in groups.get(nfs_group_name, [])
+
+- name: check if the ceph nfs ganesha socket is in-use
+ command: fuser --silent {{ nfs_socket_stat.stdout }}
+ changed_when: false
+ failed_when: false
+ always_run: true
+ register: nfs_socket
+ when:
+ - inventory_hostname in groups.get(nfs_group_name, [])
+ - nfs_socket_stat.rc == 0
+
+- name: remove ceph nfs ganesha socket if exists and not used by a process
+ file:
+ name: "{{ nfs_socket_stat.stdout }}"
+ state: absent
+ when:
+ - inventory_hostname in groups.get(nfs_group_name, [])
+ - nfs_socket_stat.rc == 0
+ - nfs_socket.rc != 0
devices: "{{ devices | reject('search','/dev/disk') | list | unique }}"
when:
- inventory_hostname in groups.get(osd_group_name, [])
+
DELAY="{{ handler_health_mds_check_delay }}"
MDS_NAME="{{ ansible_hostname }}"
SOCKET=/var/run/ceph/{{ cluster }}-mds.${MDS_NAME}.asok
+{% if containerized_deployment %}
+DOCKER_EXEC="docker exec ceph-mds-{{ ansible_hostname }}"
+{% endif %}
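+# For non-containerized deployments DOCKER_EXEC stays unset, so the commands
+# below run directly on the host.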
# First, restart the daemon
systemctl restart ceph-mds@${MDS_NAME}
COUNT=10
# Wait and ensure the socket exists after restarting the daemon
while [ $COUNT -ne 0 ]; do
- {{ docker_exec_cmd }} test -S $SOCKET && exit 0
+ $DOCKER_EXEC test -S $SOCKET && exit 0
sleep $DELAY
  let COUNT=COUNT-1
done
DELAY="{{ handler_health_mgr_check_delay }}"
MGR_NAME="{{ ansible_hostname }}"
SOCKET=/var/run/ceph/{{ cluster }}-mgr.${MGR_NAME}.asok
+{% if containerized_deployment %}
+DOCKER_EXEC="docker exec ceph-mgr-{{ ansible_hostname }}"
+{% endif %}
# First, restart the daemon
systemctl restart ceph-mgr@${MGR_NAME}
COUNT=10
# Wait and ensure the socket exists after restarting the daemon
while [ $COUNT -ne 0 ]; do
- {{ docker_exec_cmd }} test -S $SOCKET && exit 0
+ $DOCKER_EXEC test -S $SOCKET && exit 0
sleep $DELAY
  let COUNT=COUNT-1
done
DELAY="{{ handler_health_mon_check_delay }}"
MONITOR_NAME="{{ monitor_name }}"
SOCKET=/var/run/ceph/{{ cluster }}-mon.${MONITOR_NAME}.asok
+{% if containerized_deployment %}
+DOCKER_EXEC="docker exec ceph-mon-{{ ansible_hostname }}"
+{% endif %}
check_quorum() {
while [ $RETRIES -ne 0 ]; do
- MEMBERS=$({{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s --format json | sed -r 's/.*"quorum_names":(\[[^]]+\]).*/\1/')
+ MEMBERS=$($DOCKER_EXEC ceph --cluster {{ cluster }} -s --format json | sed -r 's/.*"quorum_names":(\[[^]]+\]).*/\1/')
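+    # ${MEMBERS/$MONITOR_NAME} strips the name when present, so the test below
+    # succeeds only once this monitor is back in the quorum list.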
test "${MEMBERS/$MONITOR_NAME}" != "$MEMBERS" && exit 0
sleep $DELAY
let RETRIES=RETRIES-1
# If we reach this point, it means there is a problem with the quorum
echo "Error with quorum."
echo "cluster status:"
-{{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s
+$DOCKER_EXEC ceph --cluster {{ cluster }} -s
exit 1
}
COUNT=10
# Wait and ensure the socket exists after restarting the daemon
while [ $COUNT -ne 0 ]; do
- {{ docker_exec_cmd }} test -S $SOCKET && check_quorum
+ $DOCKER_EXEC test -S $SOCKET && check_quorum
sleep $DELAY
let COUNT=COUNT-1
done
DELAY="{{ handler_health_nfs_check_delay }}"
NFS_NAME="{{ ansible_hostname }}"
PID=/var/run/ganesha.pid
+{% if containerized_deployment %}
+DOCKER_EXEC="docker exec ceph-nfs-{{ ansible_hostname }}"
+{% endif %}
# First, restart the daemon
{% if containerized_deployment -%}
COUNT=10
# Wait and ensure the pid exists after restarting the daemon
while [ $COUNT -ne 0 ]; do
- {{ docker_exec_cmd }} test -f $PID && exit 0
+ $DOCKER_EXEC test -f $PID && exit 0
sleep $DELAY
  let COUNT=COUNT-1
done
DELAY="{{ handler_health_rbd_mirror_check_delay }}"
RBD_MIRROR_NAME="{{ ansible_hostname }}"
SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.${RBD_MIRROR_NAME}.asok
+{% if containerized_deployment %}
+DOCKER_EXEC="docker exec ceph-rbd-mirror-{{ ansible_hostname }}"
+{% endif %}
# First, restart the daemon
systemctl restart ceph-rbd-mirror@rbd-mirror.${RBD_MIRROR_NAME}
COUNT=10
# Wait and ensure the socket exists after restarting the daemon
while [ $COUNT -ne 0 ]; do
- {{ docker_exec_cmd }} test -S $SOCKET && exit 0
+ $DOCKER_EXEC test -S $SOCKET && exit 0
sleep $DELAY
  let COUNT=COUNT-1
done
RGW_NAME="{{ ansible_hostname }}"
RGW_PORT="{{ radosgw_civetweb_port }}"
SOCKET=/var/run/ceph/{{ cluster }}-client.rgw.${RGW_NAME}.asok
+{% if containerized_deployment %}
+DOCKER_EXEC="docker exec ceph-rgw-{{ ansible_hostname }}"
+{% endif %}
{% if radosgw_address_block | length > 0 %}
{% if ip_version == 'ipv4' -%}
{%- endif %}
check_for_curl_or_wget() {
- if {{ docker_exec_cmd }} command -v wget &>/dev/null; then
+ if $DOCKER_EXEC command -v wget &>/dev/null; then
rgw_test_command="wget --quiet"
- elif {{ docker_exec_cmd }} command -v curl &>/dev/null; then
+ elif $DOCKER_EXEC command -v curl &>/dev/null; then
rgw_test_command="curl --fail --silent --output /dev/null"
else
echo "It seems that neither curl or wget are available on your system."
COUNT=10
# Wait and ensure the socket exists after restarting the daemon
while [ $COUNT -ne 0 ]; do
- {{ docker_exec_cmd }} test -S $SOCKET && check_rest
+ $DOCKER_EXEC test -S $SOCKET && check_rest
sleep $DELAY
let COUNT=COUNT-1
done
---
+- name: set_fact docker_exec_cmd mds
+ set_fact:
+ docker_exec_cmd: "docker exec ceph-mds-{{ ansible_hostname }}"
+
- name: set_fact ceph_config_keys
set_fact:
ceph_config_keys:
--- /dev/null
+../../../../../Vagrantfile
\ No newline at end of file
--- /dev/null
+---
+# this is only here to let the CI tests know
+# that this scenario is using docker
+docker: True
+
+containerized_deployment: True
+cluster: test
+monitor_interface: eth1
+radosgw_interface: eth1
+ceph_mon_docker_subnet: "{{ public_network }}"
+journal_size: 100
+ceph_docker_on_openstack: False
+public_network: "192.168.15.0/24"
+cluster_network: "192.168.16.0/24"
+osd_scenario: collocated
+ceph_rgw_civetweb_port: 8080
+osd_objectstore: filestore
+ceph_osd_docker_prepare_env: -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
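+# OSD_FORCE_ZAP=1 tells the osd prepare container to zap the device first;
+# acceptable only for the disposable CI VMs this scenario targets.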
+devices:
+ - /dev/sda
+ - /dev/sdb
+ceph_osd_docker_run_script_path: /var/tmp
+rgw_override_bucket_index_max_shards: 16
+rgw_bucket_default_quota_max_objects: 1638400
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+user_config: True
+keys:
+ - { name: client.test, key: "AQAin8tUoMPDGRAACcfAQHbq4eTuUoTCZdW1Uw==", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool=test", mode: "0600", acls: [] }
+ - { name: client.test2, key: "AQAin8tUAJkGGhAA8WZ8Lz5c7IkT8QZ5s7bI1A==", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool=test2", mode: "0600", acls: [] }
--- /dev/null
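+# Collocation scenario: mds0 and rgw0 each carry several daemon types.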
+[mons]
+mon0
+mon1
+mon2
+
+[osds]
+osd0
+
+[mdss]
+mds0
+rgw0
+
+[rgws]
+rgw0
+mds0
+
+[mgrs]
+mon0
+osd0
+
+[rbdmirrors]
+rgw0
+mds0
+
+[nfss]
+rgw0
+mds0
--- /dev/null
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: True
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 3
+osd_vms: 1
+mds_vms: 1
+rgw_vms: 1
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# Deploy RESTAPI on each of the Monitors
+restapi: true
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.15
+cluster_subnet: 192.168.16
+
+# MEMORY
+# set 1024 for CentOS
+memory: 1024
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sda', '/dev/sdb' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/atomic-host
+client_vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for CentOS/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
@pytest.mark.docker
def test_docker_mds_is_up(self, node, host):
hostname = node["vars"]["inventory_hostname"]
+ hostname = node["groups"]["mons"][0]["inventory_hostname"]
cmd = "sudo docker exec ceph-mds-{hostname} ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
hostname=node["vars"]["inventory_hostname"],
cluster=node["cluster_name"]
[tox]
envlist = {dev,jewel,luminous,rhcs}-{ansible2.2,ansible2.3}-{xenial_cluster,journal_collocation,centos7_cluster,dmcrypt_journal,dmcrypt_journal_collocation,docker_cluster,purge_cluster,purge_dmcrypt,docker_dedicated_journal,docker_dmcrypt_journal_collocation,update_dmcrypt,update_cluster,cluster,purge_docker_cluster,update_docker_cluster,switch_to_containers}
- {dev,luminous}-{ansible2.2,ansible2.3}-{bluestore_journal_collocation,bluestore_cluster,bluestore_dmcrypt_journal,bluestore_dmcrypt_journal_collocation,bluestore_docker_dedicated_journal,bluestore_docker_dmcrypt_journal_collocation,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,journal_collocation_auto,journal_collocation_auto_dmcrypt,shrink_mon_container,shrink_osd_container}
+ {dev,luminous}-{ansible2.2,ansible2.3}-{bluestore_journal_collocation,bluestore_cluster,bluestore_dmcrypt_journal,bluestore_dmcrypt_journal_collocation,bluestore_docker_dedicated_journal,bluestore_docker_dmcrypt_journal_collocation,lvm_osds,purge_lvm_osds,shrink_mon,shrink_osd,journal_collocation_auto,journal_collocation_auto_dmcrypt,shrink_mon_container,shrink_osd_container,docker_cluster_collocation}
skipsdist = True
# only available for ansible >= 2.2
ANSIBLE_STDOUT_CALLBACK = debug
docker_cluster: PLAYBOOK = site-docker.yml.sample
+ docker_cluster_collocation: PLAYBOOK = site-docker.yml.sample
update_docker_cluster: PLAYBOOK = site-docker.yml.sample
purge_docker_cluster: PLAYBOOK = site-docker.yml.sample
purge_docker_cluster: PURGE_PLAYBOOK = purge-docker-cluster.yml
cluster: {toxinidir}/tests/functional/centos/7/cluster
# tests a 1 mon, 1 osd, 1 mds and 1 rgw centos7 cluster using docker
docker_cluster: {toxinidir}/tests/functional/centos/7/docker
+ docker_cluster_collocation: {toxinidir}/tests/functional/centos/7/docker-collocation
update_docker_cluster: {toxinidir}/tests/functional/centos/7/docker
purge_docker_cluster: {toxinidir}/tests/functional/centos/7/docker
docker_dedicated_journal: {toxinidir}/tests/functional/centos/7/docker-ded-jrn