- import_role:
name: ceph-facts
+ tasks_from: container_binary
- name: perform checks, remove mds and print cluster health
- hosts: localhost
+ hosts: "{{ groups[mon_group_name][0] }}"
become: true
vars_prompt:
- name: ireallymeanit
- name: set_fact container_exec_cmd for mon0
set_fact:
- container_exec_cmd: "{{ hostvars[groups[mon_group_name][0]]['container_binary'] }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
when: containerized_deployment | bool
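Because the play now runs on the first monitor itself rather than localhost, the plain ansible_hostname fact is enough to name the local mon container, with no hostvars lookup needed. Assuming container_binary resolves to podman and the monitor's hostname is mon0 (both values hypothetical), the prefix renders to `podman exec ceph-mon-mon0`, so the health check below executes, with the default cluster name, something like:

    podman exec ceph-mon-mon0 timeout 5 ceph --cluster ceph health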
- name: exit playbook, if can not connect to the cluster
command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
register: ceph_health
until: ceph_health is succeeded
- delegate_to: "{{ groups[mon_group_name][0] }}"
retries: 5
delay: 2
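The register/until/retries/delay combination re-runs the health command up to five times, two seconds apart, so a monitor that is briefly unresponsive does not abort the shrink; the playbook only exits once every retry has failed.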
- name: exit mds if the deployment is containerized
when: containerized_deployment | bool
command: "{{ container_exec_cmd | default('') }} ceph tell mds.{{ mds_to_kill }} exit"
- delegate_to: "{{ groups[mon_group_name][0] }}"
- name: get ceph status
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json"
register: ceph_status
- delegate_to: "{{ groups[mon_group_name][0] }}"
- name: set_fact current_max_mds
set_fact:
- name: get new ceph status
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json"
register: ceph_status
- delegate_to: "{{ groups[mon_group_name][0] }}"
- name: get active mds nodes list
set_fact:
- name: get ceph fs dump status
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs dump -f json"
register: ceph_fs_status
- delegate_to: "{{ groups[mon_group_name][0] }}"
- name: create a list of standby mdss
set_fact:
- name: delete the filesystem when killing last mds
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs rm --yes-i-really-mean-it {{ cephfs }}"
- delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- (ceph_status.stdout | from_json)['fsmap']['up'] | int == 0
- (ceph_status.stdout | from_json)['fsmap']['up:standby'] | int == 0
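For context, these conditions parse the fsmap section of the `ceph -s -f json` output captured above; abridged, and with hypothetical values, it looks roughly like:

    "fsmap": {"epoch": 12, "up": 0, "up:standby": 0}

Only when both the active ('up') and standby counters reach zero, i.e. the mds being removed was the last one, is the filesystem itself deleted.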
post_tasks:
- name: show ceph health
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
- delegate_to: "{{ groups[mon_group_name][0] }}"
- debug: msg="gather facts on all Ceph hosts for following reference"
- name: confirm whether user really meant to remove manager from the ceph cluster
- hosts: localhost
+ hosts: "{{ groups[mon_group_name][0] }}"
become: true
vars_prompt:
- name: ireallymeanit
- import_role:
name: ceph-facts
+ tasks_from: container_binary
- name: set_fact container_exec_cmd
when: containerized_deployment | bool
set_fact:
- container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
- name: exit playbook, if can not connect to the cluster
command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
register: ceph_health
until: ceph_health is succeeded
- delegate_to: "{{ groups[mon_group_name][0] }}"
retries: 5
delay: 2
- name: save mgr dump output
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{cluster}} mgr dump"
register: mgr_dump
- delegate_to: "{{ groups[mon_group_name][0] }}"
- name: get a list of names of standby mgrs
set_fact:
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json | grep {{ mgr_to_kill }}"
register: mgr_in_ceph_status
failed_when: mgr_in_ceph_status.rc == 0
- delegate_to: "{{ groups[mon_group_name][0] }}"
retries: 3
delay: 5
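A caveat on the verification task above: Ansible's command module does not invoke a shell, so the `| grep {{ mgr_to_kill }}` pipe only has effect under the shell module; with command, the pipe and pattern would be passed to ceph as literal arguments. A minimal sketch of the check using shell (task name mine):

    - name: check that the removed mgr no longer appears in ceph status
      shell: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json | grep {{ mgr_to_kill }}"
      register: mgr_in_ceph_status
      failed_when: mgr_in_ceph_status.rc == 0

Here failed_when inverts grep's exit code: finding the mgr in the status output (rc 0) means the removal has not completed yet, while a miss (rc 1) counts as success.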
post_tasks:
- name: show ceph health
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
- delegate_to: "{{ groups[mon_group_name][0] }}"
- debug: msg="gather facts on all Ceph hosts for following reference"
- name: confirm whether user really meant to remove monitor from the ceph cluster
- hosts: localhost
+ hosts: "{{ groups[mon_group_name][0] }}"
become: true
vars_prompt:
- name: ireallymeanit
- import_role:
name: ceph-facts
+ tasks_from: container_binary
tasks:
- name: pick a monitor different than the one we want to remove
- import_role:
name: ceph-facts
- tasks_from: container_binary.yml
+ tasks_from: container_binary
post_tasks:
- name: set_fact container_exec_cmd build docker exec command (containerized)
set_fact:
- container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
when: containerized_deployment | bool
- name: set_fact container_run_cmd
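Earlier in this play, the body of the "pick a monitor" set_fact is elided; presumably it selects a surviving monitor to address cluster commands to. A minimal sketch of such a selection, assuming a mon_to_kill variable naming the doomed monitor (variable names hypothetical):

    mon_host: "{{ groups[mon_group_name] | difference([mon_to_kill]) | first }}"

that is, the mon group minus the monitor being removed, taking the first remaining member.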
- name: confirm whether user really meant to remove rbd mirror from the ceph cluster
- hosts: localhost
+ hosts: "{{ groups[mon_group_name][0] }}"
become: true
vars_prompt:
- name: ireallymeanit
- import_role:
name: ceph-facts
+ tasks_from: container_binary
- name: exit playbook, if no rbdmirror was given
fail:
- name: set_fact container_exec_cmd for mon0
when: containerized_deployment | bool
set_fact:
- container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
- name: exit playbook, if can not connect to the cluster
command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} -s -f json"
register: ceph_health
until: ceph_health is succeeded
- delegate_to: "{{ groups[mon_group_name][0] }}"
retries: 5
delay: 2
- name: get servicemap details
command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} -s -f json"
register: ceph_health
- delegate_to: "{{ groups[mon_group_name][0] }}"
- name: set_fact rbdmirror_gids
set_fact:
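For reference, the rbd-mirror entries that the servicemap tasks parse sit under services in the status JSON; abridged, and with hypothetical values, roughly:

    "servicemap": {"services": {"rbd-mirror": {"daemons": {"4151": {"gid": 4151, "metadata": {"hostname": "rbdmirror0"}}}}}}

The elided rbdmirror_gids set_fact presumably collects the gid values whose metadata hostname matches rbdmirror_to_kill_hostname, which the presence check at the end then relies on.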
- name: show ceph health
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
- delegate_to: "{{ groups[mon_group_name][0] }}"
- name: check presence of "{{ rbdmirror_to_kill_hostname }}"
fail: