roles:
- ceph-defaults
- ceph-validate
+ - ceph-facts
- hosts: osds
gather_facts: False
- role: ceph-infra
- role: ceph-docker-common
when: containerized_deployment | bool
+ - role: ceph-facts
- role: ceph-common
when: not containerized_deployment | bool
- role: ceph-config
gather_facts: true
become: true
- tasks:
-
- - import_role:
- name: ceph-defaults
- private: false
-
- - name: gather monitors facts
- setup:
- delegate_to: "{{ item }}"
- delegate_facts: True
- with_items: "{{ groups.get(mon_group_name | default('mons')) }}"
-
- - import_role:
- name: ceph-facts
- private: false
-
- - name: get all the running osds
- shell: |
- systemctl list-units | grep 'loaded[[:space:]]\+active' | grep -oE "ceph-osd@([0-9]{1,2}|[a-z]+).service"
- register: osd_units
- ignore_errors: true
-
- - name: disable ceph osd service
- service:
- name: "{{ item }}"
- state: stopped
- enabled: no
- with_items: "{{ osd_units.stdout_lines }}"
-
- - name: remove osd mountpoint tree
- file:
- path: /var/lib/ceph/osd/
- state: absent
- register: remove_osd_mountpoints
- ignore_errors: true
-
- - name: for ceph-disk based deployment
- block:
- - name: get prepare container
- command: "docker ps -a -q --filter='name=ceph-osd-prepare'"
- register: prepare_containers
- ignore_errors: true
-
- - name: remove ceph osd prepare container
- command: "docker rm -f {{ item }}"
- with_items: "{{ prepare_containers.stdout_lines }}"
- ignore_errors: true
-
- # NOTE(leseb): hope someone will find a more elegant way one day...
- - name: see if encrypted partitions are present
- shell: |
- blkid -t TYPE=crypto_LUKS -s PARTLABEL -s PARTUUID | grep "ceph.*." | grep -o PARTUUID.* | cut -d '"' -f 2
- register: encrypted_ceph_partuuid
-
- - name: get ceph data partitions
- command: |
- blkid -o device -t PARTLABEL="ceph data"
- failed_when: false
- register: ceph_data_partition_to_erase_path
-
- - name: get ceph lockbox partitions
- command: |
- blkid -o device -t PARTLABEL="ceph lockbox"
- failed_when: false
- register: ceph_lockbox_partition_to_erase_path
-
- - name: get ceph block partitions
- command: |
- blkid -o device -t PARTLABEL="ceph block"
- failed_when: false
- register: ceph_block_partition_to_erase_path
-
- - name: get ceph journal partitions
- command: |
- blkid -o device -t PARTLABEL="ceph journal"
- failed_when: false
- register: ceph_journal_partition_to_erase_path
-
- - name: get ceph db partitions
- command: |
- blkid -o device -t PARTLABEL="ceph block.db"
- failed_when: false
- register: ceph_db_partition_to_erase_path
-
- - name: get ceph wal partitions
- command: |
- blkid -o device -t PARTLABEL="ceph block.wal"
- failed_when: false
- register: ceph_wal_partition_to_erase_path
-
- - name: set_fact combined_devices_list
- set_fact:
- combined_devices_list: "{{ ceph_data_partition_to_erase_path.get('stdout_lines', []) +
- ceph_lockbox_partition_to_erase_path.get('stdout_lines', []) +
- ceph_block_partition_to_erase_path.get('stdout_lines', []) +
- ceph_journal_partition_to_erase_path.get('stdout_lines', []) +
- ceph_db_partition_to_erase_path.get('stdout_lines', []) +
- ceph_wal_partition_to_erase_path.get('stdout_lines', []) }}"
-
- - name: resolve parent device
- command: lsblk --nodeps -no pkname "{{ item }}"
- register: tmp_resolved_parent_device
- with_items:
- - "{{ combined_devices_list }}"
-
- - name: set_fact resolved_parent_device
- set_fact:
- resolved_parent_device: "{{ tmp_resolved_parent_device.results | map(attribute='stdout') | list | unique }}"
-
- - name: zap ceph osd disks
- shell: |
- docker run --rm \
- --privileged=true \
- --name ceph-osd-zap-{{ ansible_hostname }}-{{ item }} \
- -v /dev/:/dev/ \
- -e OSD_DEVICE=/dev/{{ item }} \
- {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
- zap_device
- with_items:
- - "{{ resolved_parent_device }}"
-
- - name: wait until the zap containers die
- shell: |
- docker ps | grep -sq ceph-osd-zap-{{ ansible_hostname }}
- register: zap_alive
- failed_when: false
- until: zap_alive.rc != 0
- retries: 5
- delay: 10
-
- - name: remove ceph osd zap disk container
- docker_container:
- image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
- name: "ceph-osd-zap-{{ ansible_hostname }}-{{ item }}"
- state: absent
- with_items:
- - "{{ resolved_parent_device }}"
-
- - name: remove ceph osd service
- file:
- path: /etc/systemd/system/ceph-osd@.service
- state: absent
- when:
- - osd_scenario != "lvm"
-
- - name: for ceph-volume based deployments
- block:
- - name: zap and destroy osds created by ceph-volume with lvm_volumes
- ceph_volume:
- data: "{{ item.data }}"
- data_vg: "{{ item.data_vg|default(omit) }}"
- journal: "{{ item.journal|default(omit) }}"
- journal_vg: "{{ item.journal_vg|default(omit) }}"
- db: "{{ item.db|default(omit) }}"
- db_vg: "{{ item.db_vg|default(omit) }}"
- wal: "{{ item.wal|default(omit) }}"
- wal_vg: "{{ item.wal_vg|default(omit) }}"
- action: "zap"
- environment:
- CEPH_VOLUME_DEBUG: 1
- CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
- CEPH_CONTAINER_BINARY: "docker"
- with_items: "{{ lvm_volumes }}"
-
- - name: zap and destroy osds created by ceph-volume with devices
- ceph_volume:
- data: "{{ item }}"
- action: "zap"
- environment:
- CEPH_VOLUME_DEBUG: 1
- CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
- CEPH_CONTAINER_BINARY: "docker"
- with_items: "{{ devices | default([]) }}"
- when:
- - osd_scenario == "lvm"
-
- - name: remove ceph osd image
- docker_image:
- state: absent
- repository: "{{ ceph_docker_registry }}"
- name: "{{ ceph_docker_image }}"
- tag: "{{ ceph_docker_image_tag }}"
- force: yes
- tags:
- remove_img
- ignore_errors: true
-
- - name: include vars from group_vars/osds.yml
- include_vars:
- file: "{{ item }}"
- with_first_found:
- - files:
- - "{{ playbook_dir }}/group_vars/osds"
- - "{{ playbook_dir }}/group_vars/osds.yml"
- skip: true
-
- - name: find all osd_disk_prepare logs
- find:
- paths: "{{ ceph_osd_docker_run_script_path | default('/usr/share') }}"
- pattern: "ceph-osd-prepare-*.log"
- register: osd_disk_prepare_logs
-
- - name: ensure all osd_disk_prepare logs are removed
- file:
- path: "{{ item.path }}"
- state: absent
- with_items:
- - "{{ osd_disk_prepare_logs.files }}"
+ # This is a trick so we can access the 'ceph-defaults' default variables in 'ceph-facts'
+ roles:
+ - ceph-defaults
+
+ post_tasks:
+ - name: gather monitors facts
+ setup:
+ delegate_to: "{{ item }}"
+ delegate_facts: True
+ with_items: "{{ groups.get(mon_group_name | default('mons')) }}"
+
+ - import_role:
+ name: ceph-facts
+ private: false
+
+ - name: get all the running osds
+ shell: |
+ systemctl list-units | grep 'loaded[[:space:]]\+active' | grep -oE "ceph-osd@([0-9]{1,2}|[a-z]+).service"
+ register: osd_units
+ ignore_errors: true
+
+ - name: disable ceph osd service
+ service:
+ name: "{{ item }}"
+ state: stopped
+ enabled: no
+ with_items: "{{ osd_units.stdout_lines }}"
+
+ - name: remove osd mountpoint tree
+ file:
+ path: /var/lib/ceph/osd/
+ state: absent
+ register: remove_osd_mountpoints
+ ignore_errors: true
+
+ - name: for ceph-disk based deployment
+ block:
+ - name: get prepare container
+ command: "docker ps -a -q --filter='name=ceph-osd-prepare'"
+ register: prepare_containers
+ ignore_errors: true
+
+ - name: remove ceph osd prepare container
+ command: "docker rm -f {{ item }}"
+ with_items: "{{ prepare_containers.stdout_lines }}"
+ ignore_errors: true
+
+ # NOTE(leseb): hope someone will find a more elegant way one day...
+ - name: see if encrypted partitions are present
+ shell: |
+ blkid -t TYPE=crypto_LUKS -s PARTLABEL -s PARTUUID | grep "ceph.*." | grep -o PARTUUID.* | cut -d '"' -f 2
+ register: encrypted_ceph_partuuid
+
+ - name: get ceph data partitions
+ command: |
+ blkid -o device -t PARTLABEL="ceph data"
+ failed_when: false
+ register: ceph_data_partition_to_erase_path
+
+ - name: get ceph lockbox partitions
+ command: |
+ blkid -o device -t PARTLABEL="ceph lockbox"
+ failed_when: false
+ register: ceph_lockbox_partition_to_erase_path
+
+ - name: get ceph block partitions
+ command: |
+ blkid -o device -t PARTLABEL="ceph block"
+ failed_when: false
+ register: ceph_block_partition_to_erase_path
+
+ - name: get ceph journal partitions
+ command: |
+ blkid -o device -t PARTLABEL="ceph journal"
+ failed_when: false
+ register: ceph_journal_partition_to_erase_path
+
+ - name: get ceph db partitions
+ command: |
+ blkid -o device -t PARTLABEL="ceph block.db"
+ failed_when: false
+ register: ceph_db_partition_to_erase_path
+
+ - name: get ceph wal partitions
+ command: |
+ blkid -o device -t PARTLABEL="ceph block.wal"
+ failed_when: false
+ register: ceph_wal_partition_to_erase_path
+
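+ # combine every ceph partition discovered above, then resolve each one to its parent disk so the whole device can be zapped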
+ - name: set_fact combined_devices_list
+ set_fact:
+ combined_devices_list: "{{ ceph_data_partition_to_erase_path.get('stdout_lines', []) +
+ ceph_lockbox_partition_to_erase_path.get('stdout_lines', []) +
+ ceph_block_partition_to_erase_path.get('stdout_lines', []) +
+ ceph_journal_partition_to_erase_path.get('stdout_lines', []) +
+ ceph_db_partition_to_erase_path.get('stdout_lines', []) +
+ ceph_wal_partition_to_erase_path.get('stdout_lines', []) }}"
+
+ - name: resolve parent device
+ command: lsblk --nodeps -no pkname "{{ item }}"
+ register: tmp_resolved_parent_device
+ with_items:
+ - "{{ combined_devices_list }}"
+
+ - name: set_fact resolved_parent_device
+ set_fact:
+ resolved_parent_device: "{{ tmp_resolved_parent_device.results | map(attribute='stdout') | list | unique }}"
+
+ - name: zap ceph osd disks
+ shell: |
+ docker run --rm \
+ --privileged=true \
+ --name ceph-osd-zap-{{ ansible_hostname }}-{{ item }} \
+ -v /dev/:/dev/ \
+ -e OSD_DEVICE=/dev/{{ item }} \
+ {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
+ zap_device
+ with_items:
+ - "{{ resolved_parent_device }}"
+
+ - name: wait until the zap containers die
+ shell: |
+ docker ps | grep -sq ceph-osd-zap-{{ ansible_hostname }}
+ register: zap_alive
+ failed_when: false
+ until: zap_alive.rc != 0
+ retries: 5
+ delay: 10
+
+ - name: remove ceph osd zap disk container
+ docker_container:
+ image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+ name: "ceph-osd-zap-{{ ansible_hostname }}-{{ item }}"
+ state: absent
+ with_items:
+ - "{{ resolved_parent_device }}"
+
+ - name: remove ceph osd service
+ file:
+ path: /etc/systemd/system/ceph-osd@.service
+ state: absent
+ when:
+ - osd_scenario != "lvm"
+
+ - name: for ceph-volume based deployments
+ block:
+ - name: zap and destroy osds created by ceph-volume with lvm_volumes
+ ceph_volume:
+ data: "{{ item.data }}"
+ data_vg: "{{ item.data_vg|default(omit) }}"
+ journal: "{{ item.journal|default(omit) }}"
+ journal_vg: "{{ item.journal_vg|default(omit) }}"
+ db: "{{ item.db|default(omit) }}"
+ db_vg: "{{ item.db_vg|default(omit) }}"
+ wal: "{{ item.wal|default(omit) }}"
+ wal_vg: "{{ item.wal_vg|default(omit) }}"
+ action: "zap"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+ CEPH_CONTAINER_BINARY: "docker"
+ with_items: "{{ lvm_volumes }}"
+
+ - name: zap and destroy osds created by ceph-volume with devices
+ ceph_volume:
+ data: "{{ item }}"
+ action: "zap"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+ CEPH_CONTAINER_BINARY: "docker"
+ with_items: "{{ devices | default([]) }}"
+ when:
+ - osd_scenario == "lvm"
+
+ - name: remove ceph osd image
+ docker_image:
+ state: absent
+ repository: "{{ ceph_docker_registry }}"
+ name: "{{ ceph_docker_image }}"
+ tag: "{{ ceph_docker_image_tag }}"
+ force: yes
+ tags:
+ remove_img
+ ignore_errors: true
+
+ - name: include vars from group_vars/osds.yml
+ include_vars:
+ file: "{{ item }}"
+ with_first_found:
+ - files:
+ - "{{ playbook_dir }}/group_vars/osds"
+ - "{{ playbook_dir }}/group_vars/osds.yml"
+ skip: true
+
+ - name: find all osd_disk_prepare logs
+ find:
+ paths: "{{ ceph_osd_docker_run_script_path | default('/usr/share') }}"
+ pattern: "ceph-osd-prepare-*.log"
+ register: osd_disk_prepare_logs
+
+ - name: ensure all osd_disk_prepare logs are removed
+ file:
+ path: "{{ item.path }}"
+ state: absent
+ with_items:
+ - "{{ osd_disk_prepare_logs.files }}"
- name: purge ceph mon cluster
gather_facts: true
become: true
- tasks:
-
- - import_role:
- name: ceph-defaults
- private: false
+ # This is a trick so we can access the 'ceph-defaults' default variables in 'ceph-facts'
+ roles:
+ - ceph-defaults
+ post_tasks:
- import_role:
name: ceph-facts
private: false
roles:
- ceph-defaults
+ - ceph-facts
- ceph-handler
- { role: ceph-common, when: not containerized_deployment }
- { role: ceph-docker-common, when: containerized_deployment }
roles:
- ceph-defaults
+ - ceph-facts
- ceph-handler
- { role: ceph-common, when: not containerized_deployment }
- { role: ceph-docker-common, when: containerized_deployment }
roles:
- ceph-defaults
+ - ceph-facts
- ceph-handler
- { role: ceph-common, when: not containerized_deployment }
- { role: ceph-docker-common, when: containerized_deployment }
roles:
- ceph-defaults
+ - ceph-facts
tasks:
- name: set_fact docker_exec_cmd_osd
roles:
- ceph-defaults
+ - ceph-facts
- ceph-handler
- { role: ceph-common, when: not containerized_deployment }
- { role: ceph-docker-common, when: containerized_deployment }
roles:
- ceph-defaults
+ - ceph-facts
- ceph-handler
- { role: ceph-common, when: not containerized_deployment }
- { role: ceph-docker-common, when: containerized_deployment }
roles:
- ceph-defaults
+ - ceph-facts
- ceph-handler
- { role: ceph-common, when: not containerized_deployment }
- { role: ceph-docker-common, when: containerized_deployment }
roles:
- ceph-defaults
+ - ceph-facts
- ceph-handler
- { role: ceph-common, when: not containerized_deployment }
- { role: ceph-docker-common, when: containerized_deployment }
roles:
- ceph-defaults
+ - ceph-facts
- ceph-handler
- { role: ceph-common, when: not containerized_deployment }
- { role: ceph-docker-common, when: containerized_deployment }
roles:
- ceph-defaults
+ - ceph-facts
- ceph-handler
- { role: ceph-common, when: not containerized_deployment }
- { role: ceph-docker-common, when: containerized_deployment }
roles:
- ceph-defaults
+ - ceph-facts
post_tasks:
- name: pick a monitor different than the one we want to remove
roles:
- ceph-defaults
+ - ceph-facts
post_tasks:
roles:
- ceph-defaults
+ - ceph-facts
- ceph-handler
- ceph-docker-common
- ceph-mon
roles:
- ceph-defaults
+ - ceph-facts
- ceph-handler
- ceph-docker-common
- ceph-mgr
roles:
- ceph-defaults
+ - ceph-facts
- ceph-handler
- ceph-docker-common
- ceph-osd
roles:
- ceph-defaults
+ - ceph-facts
- ceph-handler
- ceph-docker-common
- ceph-mds
roles:
- ceph-defaults
+ - ceph-facts
- ceph-handler
- ceph-docker-common
- ceph-rgw
roles:
- ceph-defaults
+ - ceph-facts
- ceph-handler
- ceph-docker-common
- ceph-rbd-mirror
roles:
- ceph-defaults
+ - ceph-facts
- ceph-handler
- ceph-docker-common
- ceph-nfs
+++ /dev/null
----
-- name: check if it is atomic host
- stat:
- path: /run/ostree-booted
- register: stat_ostree
-
-- name: set_fact is_atomic
- set_fact:
- is_atomic: "{{ stat_ostree.stat.exists }}"
-
-- name: set_fact monitor_name ansible_hostname
- set_fact:
- monitor_name: "{{ ansible_hostname }}"
- when:
- - not mon_use_fqdn
-
-- name: set_fact monitor_name ansible_fqdn
- set_fact:
- monitor_name: "{{ ansible_fqdn }}"
- when:
- - mon_use_fqdn
-
-- name: set_fact docker_exec_cmd
- set_fact:
- docker_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
- delegate_to: "{{ groups[mon_group_name][0] }}"
- when:
- - containerized_deployment
- - groups.get(mon_group_name, []) | length > 0
-
-# this task shouldn't run in a rolling_update situation
-# because it blindly picks a mon, which may be down because
-# of the rolling update
-- name: is ceph running already?
- command: "timeout 5 {{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s -f json"
- changed_when: false
- failed_when: false
- check_mode: no
- register: ceph_current_status
- run_once: true
- delegate_to: "{{ groups[mon_group_name][0] }}"
- when:
- - not rolling_update
- - groups.get(mon_group_name, []) | length > 0
-
-# We want this check to be run only on the first node
-- name: check if {{ fetch_directory }} directory exists
- local_action:
- module: stat
- path: "{{ fetch_directory }}/monitor_keyring.conf"
- become: false
- register: monitor_keyring_conf
- run_once: true
-
-# set this as a default when performing a rolling_update
-# so the rest of the tasks here will succeed
-- name: set_fact ceph_current_status rc 1
- set_fact:
- ceph_current_status:
- rc: 1
- when:
- - rolling_update or groups.get(mon_group_name, []) | length == 0
-
-- name: create a local fetch directory if it does not exist
- local_action:
- module: file
- path: "{{ fetch_directory }}"
- state: directory
- changed_when: false
- become: false
- when:
- - (cephx or generate_fsid)
-
-- name: set_fact ceph_current_status (convert to json)
- set_fact:
- ceph_current_status: "{{ ceph_current_status.stdout | from_json }}"
- when:
- - not rolling_update
- - ceph_current_status.rc == 0
-
-- name: set_fact fsid from ceph_current_status
- set_fact:
- fsid: "{{ ceph_current_status.fsid }}"
- when:
- - ceph_current_status.fsid is defined
-
-# Set ceph_release to ceph_stable by default
-- name: set_fact ceph_release ceph_stable_release
- set_fact:
- ceph_release: "{{ ceph_stable_release }}"
-
-- name: generate cluster fsid
- local_action:
- module: shell
- python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
- creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
- register: cluster_uuid
- become: false
- when:
- - generate_fsid
- - ceph_current_status.fsid is undefined
-
-- name: reuse cluster fsid when cluster is already running
- local_action:
- module: shell
- echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
- creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
- become: false
- when:
- - ceph_current_status.fsid is defined
-
-- name: read cluster fsid if it already exists
- local_action:
- module: command
- cat {{ fetch_directory }}/ceph_cluster_uuid.conf
- removes: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
- changed_when: false
- register: cluster_uuid
- become: false
- check_mode: no
- when:
- - generate_fsid
-
-- name: set_fact fsid
- set_fact:
- fsid: "{{ cluster_uuid.stdout }}"
- when:
- - generate_fsid
-
-- name: set_fact mds_name ansible_hostname
- set_fact:
- mds_name: "{{ ansible_hostname }}"
- when:
- - not mds_use_fqdn
-
-- name: set_fact mds_name ansible_fqdn
- set_fact:
- mds_name: "{{ ansible_fqdn }}"
- when:
- - mds_use_fqdn
-
-- name: set_fact rbd_client_directory_owner ceph
- set_fact:
- rbd_client_directory_owner: ceph
- when:
- - rbd_client_directory_owner is not defined
- or not rbd_client_directory_owner
-
-- name: set_fact rbd_client_directory_group rbd_client_directory_group
- set_fact:
- rbd_client_directory_group: ceph
- when:
- - rbd_client_directory_group is not defined
- or not rbd_client_directory_group
-
-- name: set_fact rbd_client_directory_mode 0770
- set_fact:
- rbd_client_directory_mode: "0770"
- when:
- - rbd_client_directory_mode is not defined
- or not rbd_client_directory_mode
-
-- name: resolve device link(s)
- command: readlink -f {{ item }}
- changed_when: false
- with_items: "{{ devices }}"
- register: devices_prepare_canonicalize
- when:
- - devices is defined
- - inventory_hostname in groups.get(osd_group_name, [])
- - not osd_auto_discovery|default(False)
- - osd_scenario|default('dummy') != 'lvm'
-
-- name: set_fact build devices from resolved symlinks
- set_fact:
- devices: "{{ devices | default([]) + [ item.stdout ] }}"
- with_items: "{{ devices_prepare_canonicalize.results }}"
- when:
- - devices is defined
- - inventory_hostname in groups.get(osd_group_name, [])
- - not osd_auto_discovery|default(False)
- - osd_scenario|default('dummy') != 'lvm'
-
-- name: set_fact build final devices list
- set_fact:
- devices: "{{ devices | reject('search','/dev/disk') | list | unique }}"
- when:
- - devices is defined
- - inventory_hostname in groups.get(osd_group_name, [])
- - not osd_auto_discovery|default(False)
- - osd_scenario|default('dummy') != 'lvm'
-
-- name: set_fact ceph_uid for debian based system - non container
- set_fact:
- ceph_uid: 64045
- when:
- - not containerized_deployment
- - ansible_os_family == 'Debian'
-
-- name: set_fact ceph_uid for red hat or suse based system - non container
- set_fact:
- ceph_uid: 167
- when:
- - not containerized_deployment
- - ansible_os_family in ['RedHat', 'Suse']
-
-- name: set_fact ceph_uid for debian based system - container
- set_fact:
- ceph_uid: 64045
- when:
- - containerized_deployment
- - ceph_docker_image_tag | string is search("ubuntu")
-
-- name: set_fact ceph_uid for red hat based system - container
- set_fact:
- ceph_uid: 167
- when:
- - containerized_deployment
- - ceph_docker_image_tag | string is search("latest") or ceph_docker_image_tag | string is search("centos") or ceph_docker_image_tag | string is search("fedora")
-
-- name: set_fact ceph_uid for red hat
- set_fact:
- ceph_uid: 167
- when:
- - containerized_deployment
- - ceph_docker_image is search("rhceph")
-
-- name: set_fact rgw_hostname
- set_fact:
- rgw_hostname: "{% set _value = ansible_hostname -%}
- {% for key in ceph_current_status['servicemap']['services']['rgw']['daemons'].keys() -%}
- {% if key == ansible_fqdn -%}
- {% set _value = key -%}
- {% endif -%}
- {% endfor -%}
- {{ _value }}"
- when:
- - inventory_hostname in groups.get(rgw_group_name, []) or inventory_hostname in groups.get(nfs_group_name, [])
- - ceph_current_status['servicemap'] is defined
- - ceph_current_status['servicemap']['services'] is defined
- - ceph_current_status['servicemap']['services']['rgw'] is defined
-
-- name: set_fact osd_pool_default_pg_num
- set_fact:
- osd_pool_default_pg_num: "{{ ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', ceph_osd_pool_default_pg_num) }}"
-
-- name: set_fact osd_pool_default_size
- set_fact:
- osd_pool_default_size: "{{ ceph_conf_overrides.get('global', {}).get('osd_pool_default_size', ceph_osd_pool_default_size) }}"
----
-- name: include facts.yml
- include_tasks: facts.yml
+---
\ No newline at end of file
--- /dev/null
+# Ansible role: ceph-facts
+
+Documentation is available at http://docs.ceph.com/ceph-ansible/.
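+
+A minimal usage sketch (the host group and `become` setting here are illustrative): list `ceph-defaults` before `ceph-facts` so the default variables it defines are available when the facts are set.
+
+```yaml
+- hosts: mons
+  become: true
+  roles:
+    - ceph-defaults
+    - ceph-facts
+```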
--- /dev/null
+---
\ No newline at end of file
--- /dev/null
+---
+galaxy_info:
+ company: Red Hat
+ author: Guillaume Abrioux
+ description: Set some facts for ceph to be deployed
+ license: Apache
+ min_ansible_version: 2.7
+ platforms:
+ - name: Ubuntu
+ versions:
+ - xenial
+ - name: EL
+ versions:
+ - 7
+ galaxy_tags:
+ - system
+dependencies: []
--- /dev/null
+---
+- name: check if it is atomic host
+ stat:
+ path: /run/ostree-booted
+ register: stat_ostree
+
+- name: set_fact is_atomic
+ set_fact:
+ is_atomic: "{{ stat_ostree.stat.exists }}"
+
+- name: set_fact monitor_name ansible_hostname
+ set_fact:
+ monitor_name: "{{ ansible_hostname }}"
+ when:
+ - not mon_use_fqdn
+
+- name: set_fact monitor_name ansible_fqdn
+ set_fact:
+ monitor_name: "{{ ansible_fqdn }}"
+ when:
+ - mon_use_fqdn
+
+- name: set_fact docker_exec_cmd
+ set_fact:
+ docker_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ when:
+ - containerized_deployment
+ - groups.get(mon_group_name, []) | length > 0
+
+# this task shouldn't run in a rolling_update situation
+# because it blindly picks a mon, which may be down because
+# of the rolling update
+- name: is ceph running already?
+ command: "timeout 5 {{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s -f json"
+ changed_when: false
+ failed_when: false
+ check_mode: no
+ register: ceph_current_status
+ run_once: true
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ when:
+ - not rolling_update
+ - groups.get(mon_group_name, []) | length > 0
+
+# We want this check to be run only on the first node
+- name: check if {{ fetch_directory }} directory exists
+ local_action:
+ module: stat
+ path: "{{ fetch_directory }}/monitor_keyring.conf"
+ become: false
+ register: monitor_keyring_conf
+ run_once: true
+
+# set this as a default when performing a rolling_update
+# so the rest of the tasks here will succeed
+- name: set_fact ceph_current_status rc 1
+ set_fact:
+ ceph_current_status:
+ rc: 1
+ when:
+ - rolling_update or groups.get(mon_group_name, []) | length == 0
+
+- name: create a local fetch directory if it does not exist
+ local_action:
+ module: file
+ path: "{{ fetch_directory }}"
+ state: directory
+ changed_when: false
+ become: false
+ when:
+ - (cephx or generate_fsid)
+
+- name: set_fact ceph_current_status (convert to json)
+ set_fact:
+ ceph_current_status: "{{ ceph_current_status.stdout | from_json }}"
+ when:
+ - not rolling_update
+ - ceph_current_status.rc == 0
+
+- name: set_fact fsid from ceph_current_status
+ set_fact:
+ fsid: "{{ ceph_current_status.fsid }}"
+ when:
+ - ceph_current_status.fsid is defined
+
+# Set ceph_release to ceph_stable by default
+- name: set_fact ceph_release ceph_stable_release
+ set_fact:
+ ceph_release: "{{ ceph_stable_release }}"
+
+- name: generate cluster fsid
+ local_action:
+ module: shell
+ python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
+ creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
+ register: cluster_uuid
+ become: false
+ when:
+ - generate_fsid
+ - ceph_current_status.fsid is undefined
+
+- name: reuse cluster fsid when cluster is already running
+ local_action:
+ module: shell
+ echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
+ creates: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
+ become: false
+ when:
+ - ceph_current_status.fsid is defined
+
+- name: read cluster fsid if it already exists
+ local_action:
+ module: command
+ cat {{ fetch_directory }}/ceph_cluster_uuid.conf
+ removes: "{{ fetch_directory }}/ceph_cluster_uuid.conf"
+ changed_when: false
+ register: cluster_uuid
+ become: false
+ check_mode: no
+ when:
+ - generate_fsid
+
+- name: set_fact fsid
+ set_fact:
+ fsid: "{{ cluster_uuid.stdout }}"
+ when:
+ - generate_fsid
+
+- name: set_fact mds_name ansible_hostname
+ set_fact:
+ mds_name: "{{ ansible_hostname }}"
+ when:
+ - not mds_use_fqdn
+
+- name: set_fact mds_name ansible_fqdn
+ set_fact:
+ mds_name: "{{ ansible_fqdn }}"
+ when:
+ - mds_use_fqdn
+
+- name: set_fact rbd_client_directory_owner ceph
+ set_fact:
+ rbd_client_directory_owner: ceph
+ when:
+ - rbd_client_directory_owner is not defined
+ or not rbd_client_directory_owner
+
+- name: set_fact rbd_client_directory_group rbd_client_directory_group
+ set_fact:
+ rbd_client_directory_group: ceph
+ when:
+ - rbd_client_directory_group is not defined
+ or not rbd_client_directory_group
+
+- name: set_fact rbd_client_directory_mode 0770
+ set_fact:
+ rbd_client_directory_mode: "0770"
+ when:
+ - rbd_client_directory_mode is not defined
+ or not rbd_client_directory_mode
+
+- name: resolve device link(s)
+ command: readlink -f {{ item }}
+ changed_when: false
+ with_items: "{{ devices }}"
+ register: devices_prepare_canonicalize
+ when:
+ - devices is defined
+ - inventory_hostname in groups.get(osd_group_name, [])
+ - not osd_auto_discovery|default(False)
+ - osd_scenario|default('dummy') != 'lvm'
+
+- name: set_fact build devices from resolved symlinks
+ set_fact:
+ devices: "{{ devices | default([]) + [ item.stdout ] }}"
+ with_items: "{{ devices_prepare_canonicalize.results }}"
+ when:
+ - devices is defined
+ - inventory_hostname in groups.get(osd_group_name, [])
+ - not osd_auto_discovery|default(False)
+ - osd_scenario|default('dummy') != 'lvm'
+
+- name: set_fact build final devices list
+ set_fact:
+ devices: "{{ devices | reject('search','/dev/disk') | list | unique }}"
+ when:
+ - devices is defined
+ - inventory_hostname in groups.get(osd_group_name, [])
+ - not osd_auto_discovery|default(False)
+ - osd_scenario|default('dummy') != 'lvm'
+
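+# the ceph user ships with uid 64045 on Debian-based systems and uid 167 on Red Hat/SUSE-based systems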
+- name: set_fact ceph_uid for debian based system - non container
+ set_fact:
+ ceph_uid: 64045
+ when:
+ - not containerized_deployment
+ - ansible_os_family == 'Debian'
+
+- name: set_fact ceph_uid for red hat or suse based system - non container
+ set_fact:
+ ceph_uid: 167
+ when:
+ - not containerized_deployment
+ - ansible_os_family in ['RedHat', 'Suse']
+
+- name: set_fact ceph_uid for debian based system - container
+ set_fact:
+ ceph_uid: 64045
+ when:
+ - containerized_deployment
+ - ceph_docker_image_tag | string is search("ubuntu")
+
+- name: set_fact ceph_uid for red hat based system - container
+ set_fact:
+ ceph_uid: 167
+ when:
+ - containerized_deployment
+ - ceph_docker_image_tag | string is search("latest") or ceph_docker_image_tag | string is search("centos") or ceph_docker_image_tag | string is search("fedora")
+
+- name: set_fact ceph_uid for red hat
+ set_fact:
+ ceph_uid: 167
+ when:
+ - containerized_deployment
+ - ceph_docker_image is search("rhceph")
+
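+# prefer the FQDN when it matches an rgw daemon name in the servicemap, otherwise fall back to the short hostname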
+- name: set_fact rgw_hostname
+ set_fact:
+ rgw_hostname: "{% set _value = ansible_hostname -%}
+ {% for key in ceph_current_status['servicemap']['services']['rgw']['daemons'].keys() -%}
+ {% if key == ansible_fqdn -%}
+ {% set _value = key -%}
+ {% endif -%}
+ {% endfor -%}
+ {{ _value }}"
+ when:
+ - inventory_hostname in groups.get(rgw_group_name, []) or inventory_hostname in groups.get(nfs_group_name, [])
+ - ceph_current_status['servicemap'] is defined
+ - ceph_current_status['servicemap']['services'] is defined
+ - ceph_current_status['servicemap']['services']['rgw'] is defined
+
+- name: set_fact osd_pool_default_pg_num
+ set_fact:
+ osd_pool_default_pg_num: "{{ ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', ceph_osd_pool_default_pg_num) }}"
+
+- name: set_fact osd_pool_default_size
+ set_fact:
+ osd_pool_default_size: "{{ ceph_conf_overrides.get('global', {}).get('osd_pool_default_size', ceph_osd_pool_default_size) }}"
--- /dev/null
+---
+- name: include facts.yml
+ include_tasks: facts.yml
roles:
- role: ceph-defaults
tags: [with_pkg, fetch_container_image]
+ - role: ceph-facts
+ tags: [with_pkg, fetch_container_image]
- role: ceph-validate
- role: ceph-infra
- role: ceph-handler
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
+ - role: ceph-facts
+ tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-docker-common
- role: ceph-config
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
+ - role: ceph-facts
+ tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-docker-common
- role: ceph-config
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
+ - role: ceph-facts
+ tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-docker-common
- role: ceph-config
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
+ - role: ceph-facts
+ tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-docker-common
- role: ceph-config
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
+ - role: ceph-facts
+ tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-docker-common
- role: ceph-config
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
+ - role: ceph-facts
+ tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-docker-common
- role: ceph-config
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
+ - role: ceph-facts
+ tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-docker-common
- role: ceph-config
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
+ - role: ceph-facts
+ tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-docker-common
- role: ceph-config
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
+ - role: ceph-facts
+ tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-docker-common
when:
start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
roles:
- { role: ceph-defaults, tags: ['ceph_update_config'] }
+ - { role: ceph-facts, tags: ['ceph_update_config'] }
- role: ceph-handler
- ceph-docker-common
- { role: ceph-config, tags: ['ceph_update_config'] }
any_errors_fatal: true
gather_facts: false
become: True
+ roles:
+ - ceph-defaults
tasks:
- name: get ceph status from the first monitor
- command: docker exec ceph-mon-{{ hostvars[groups['mons'][0]]['ansible_hostname'] }} ceph --cluster {{ cluster | default ('ceph') }} -s
+ command: docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} -s
register: ceph_status
changed_when: false
- delegate_to: "{{ groups['mons'][0] }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
ignore_errors: true # we skip the error if mon_group_name is different than 'mons'
- - name: "show ceph status for cluster {{ cluster | default ('ceph') }}"
+ - name: "show ceph status for cluster {{ cluster }}"
debug:
msg: "{{ ceph_status.stdout_lines }}"
- delegate_to: "{{ groups['mons'][0] }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when: not ceph_status.failed
\ No newline at end of file
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
+ - role: ceph-facts
+ tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-common
- role: ceph-config
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
+ - role: ceph-facts
+ tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-common
- role: ceph-config
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
+ - role: ceph-facts
+ tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-common
- role: ceph-config
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
+ - role: ceph-facts
+ tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-common
- role: ceph-config
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
+ - role: ceph-facts
+ tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-common
- role: ceph-config
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
+ - role: ceph-facts
+ tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-common
- role: ceph-config
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
+ - role: ceph-facts
+ tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-common
- role: ceph-config
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
+ - role: ceph-facts
+ tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-common
- role: ceph-config
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
+ - role: ceph-facts
+ tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-common
- role: ceph-config
roles:
- role: ceph-defaults
tags: ['ceph_update_config']
+ - role: ceph-facts
+ tags: ['ceph_update_config']
- role: ceph-handler
- role: ceph-common
- role: ceph-config
gather_facts: false
become: True
any_errors_fatal: true
- tasks:
+ roles:
+ - role: ceph-defaults
+ post_tasks:
- name: get ceph status from the first monitor
- command: ceph --cluster {{ cluster | default ('ceph') }} -s
+ command: ceph --cluster {{ cluster }} -s
register: ceph_status
changed_when: false
- delegate_to: "{{ groups['mons'][0] }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
- ignore_errors: true # we skip the error if mon_group_name is different than 'mons'
- - name: "show ceph status for cluster {{ cluster | default ('ceph') }}"
+ - name: "show ceph status for cluster {{ cluster }}"
debug:
msg: "{{ ceph_status.stdout_lines }}"
- delegate_to: "{{ groups['mons'][0] }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
when: not ceph_status.failed
\ No newline at end of file