This addresses all errors reported by the Ansible linter.
Signed-off-by: Guillaume Abrioux <gabrioux@ibm.com>
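The bulk of the diff is mechanical and applies the same three lint-driven changes in every file: each task gets a capitalized name, modules are called by their fully qualified collection name (FQCN, e.g. ansible.builtin.import_role), and YAML booleans are lowercased. A minimal before/after sketch of that pattern (illustrative only, not taken from any one file; exact rule names vary between ansible-lint releases):

    # before: flagged by ansible-lint (unnamed task, bare module name, truthy value)
    - import_role:
        name: ceph-defaults
      become: True

    # after: named task, FQCN module, lowercase boolean
    - name: Import ceph-defaults role
      ansible.builtin.import_role:
        name: ceph-defaults
      become: true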
with:
python-version: '3.10'
architecture: x64
- - run: pip install -r <(grep ansible tests/requirements.txt) ansible-lint==6.16.0 netaddr
+ - run: pip install -r <(grep ansible tests/requirements.txt) ansible-lint netaddr
- run: ansible-galaxy install -r requirements.yml
- - run: ansible-lint -x 106,204,205,208 -v --force-color ./roles/*/ ./infrastructure-playbooks/*.yml site.yml.sample site-container.yml.sample dashboard.yml
- - run: ansible-playbook -i ./tests/functional/all_daemons/hosts site.yml.sample --syntax-check --list-tasks -vv
- - run: ansible-playbook -i ./tests/functional/all_daemons/hosts site-container.yml.sample --syntax-check --list-tasks -vv
- - run: ansible-playbook -i ./tests/functional/all_daemons/hosts dashboard.yml --syntax-check --list-tasks -vv
- - run: ansible-playbook -i ./tests/functional/all_daemons/hosts infrastructure-playbooks/*.yml --syntax-check --list-tasks -vv
+ - run: ansible-lint -x 'yaml[line-length],role-name,run-once' -v --force-color ./roles/*/ ./infrastructure-playbooks/*.yml site-container.yml.sample site.yml.sample dashboard.yml
+ - run: ansible-playbook -i ./tests/functional/all_daemons/hosts site.yml.sample site-container.yml.sample dashboard.yml infrastructure-playbooks/*.yml --syntax-check --list-tasks -vv
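The lint invocation above switches from the deprecated numeric rule IDs to named rules ('yaml[line-length]', 'role-name', 'run-once') and folds the four separate --syntax-check runs into a single ansible-playbook call. The same skips could alternatively live in a repository-level .ansible-lint config instead of on the command line; a hypothetical sketch of such a file:

    # .ansible-lint -- equivalent to: ansible-lint -x 'yaml[line-length],role-name,run-once'
    skip_list:
      - yaml[line-length]
      - role-name
      - run-once

Individual occurrences can also be waived inline with a trailing "# noqa: <rule>" comment, which the playbooks below use for name[casing] and ignore-errors.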
---
-- hosts:
+- name: Deploy node_exporter
+ hosts:
- "{{ mon_group_name|default('mons') }}"
- "{{ osd_group_name|default('osds') }}"
- "{{ mds_group_name|default('mdss') }}"
gather_facts: false
become: true
pre_tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
tags: ['ceph_update_config']
- - name: set ceph node exporter install 'In Progress'
+ - name: Set ceph node exporter install 'In Progress'
run_once: true
- set_stats:
+ ansible.builtin.set_stats:
data:
installer_phase_ceph_node_exporter:
status: "In Progress"
start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
tasks:
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tags: ['ceph_update_config']
- - import_role:
+
+ - name: Import ceph-container-engine
+ ansible.builtin.import_role:
name: ceph-container-engine
- - import_role:
+
+ - name: Import ceph-container-common role
+ ansible.builtin.import_role:
name: ceph-container-common
tasks_from: registry
when:
- not containerized_deployment | bool
- ceph_docker_registry_auth | bool
- - import_role:
+
+ - name: Import ceph-node-exporter role
+ ansible.builtin.import_role:
name: ceph-node-exporter
post_tasks:
- - name: set ceph node exporter install 'Complete'
+ - name: Set ceph node exporter install 'Complete'
run_once: true
- set_stats:
+ ansible.builtin.set_stats:
data:
installer_phase_ceph_node_exporter:
status: "Complete"
end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
-- hosts: "{{ monitoring_group_name | default('monitoring') }}"
+- name: Deploy grafana and prometheus
+ hosts: "{{ monitoring_group_name | default('monitoring') }}"
gather_facts: false
become: true
pre_tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
tags: ['ceph_update_config']
- - name: set ceph grafana install 'In Progress'
+ - name: Set ceph grafana install 'In Progress'
run_once: true
- set_stats:
+ ansible.builtin.set_stats:
data:
installer_phase_ceph_grafana:
status: "In Progress"
start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
tasks:
- - import_role:
- name: ceph-facts
- tags: ['ceph_update_config']
- - import_role:
+ # - ansible.builtin.import_role:
+ # name: ceph-facts
+ # tags: ['ceph_update_config']
+
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: grafana
tags: ['ceph_update_config']
- - import_role:
+
+ - name: Import ceph-prometheus role
+ ansible.builtin.import_role:
name: ceph-prometheus
- - import_role:
+
+ - name: Import ceph-grafana role
+ ansible.builtin.import_role:
name: ceph-grafana
post_tasks:
- - name: set ceph grafana install 'Complete'
+ - name: Set ceph grafana install 'Complete'
run_once: true
- set_stats:
+ ansible.builtin.set_stats:
data:
installer_phase_ceph_grafana:
status: "Complete"
# using groups[] here otherwise it can't fallback to the mon if there's no mgr group.
# adding an additional | default(omit) in case where no monitors are present (external ceph cluster)
-- hosts: "{{ groups[mgr_group_name|default('mgrs')] | default(groups[mon_group_name|default('mons')]) | default(omit) }}"
+- name: Deploy dashboard
+ hosts: "{{ groups['mgrs'] | default(groups['mons']) | default(omit) }}"
gather_facts: false
become: true
pre_tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
tags: ['ceph_update_config']
- - name: set ceph dashboard install 'In Progress'
+ - name: Set ceph dashboard install 'In Progress'
run_once: true
- set_stats:
+ ansible.builtin.set_stats:
data:
installer_phase_ceph_dashboard:
status: "In Progress"
start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
tasks:
- - import_role:
- name: ceph-facts
- tags: ['ceph_update_config']
- - import_role:
+ # - name: Import ceph-facts role
+ # ansible.builtin.import_role:
+ # name: ceph-facts
+ # tags: ['ceph_update_config']
+
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: grafana
tags: ['ceph_update_config']
- - import_role:
+
+ - name: Import ceph-dashboard role
+ ansible.builtin.import_role:
name: ceph-dashboard
post_tasks:
- - name: set ceph dashboard install 'Complete'
+ - name: Set ceph dashboard install 'Complete'
run_once: true
- set_stats:
+ ansible.builtin.set_stats:
data:
installer_phase_ceph_dashboard:
status: "Complete"
# If configure_firewall is true, then ansible will try to configure the
# appropriate firewalling rules so that Ceph daemons can communicate
# with each others.
-#configure_firewall: True
+#configure_firewall: true
# Open ports on corresponding nodes if firewall is installed on it
#ceph_mon_firewall_zone: public
# This variable determines if ceph packages can be updated. If False, the
# package resources will use "state=present". If True, they will use
# "state=latest".
-#upgrade_ceph_packages: False
+#upgrade_ceph_packages: false
#ceph_use_distro_backports: false # DEBIAN ONLY
#ceph_directories_mode: "0755"
# Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
# # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
# for more info read: https://github.com/ceph/ceph-ansible/issues/305
-#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
+# ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
# REPOSITORY: RHCS VERSION RED HAT STORAGE (from 5.0)
# a URL to the .repo file to be installed on the targets. For deb,
# ceph_custom_repo should be the URL to the repo base.
#
-#ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc
+# ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc
#ceph_custom_repo: https://server.domain.com/ceph-custom-repo
# Enabled when ceph_repository == 'local'
#
# Path to DESTDIR of the ceph install
-#ceph_installation_dir: "/path/to/ceph_installation/"
+# ceph_installation_dir: "/path/to/ceph_installation/"
# Whether or not to use installer script rundep_installer.sh
# This script takes in rundep and installs the packages line by line onto the machine
# If this is set to false then it is assumed that the machine ceph is being copied onto will already have
# all runtime dependencies installed
-#use_installer: false
+# use_installer: false
# Root directory for ceph-ansible
-#ansible_dir: "/path/to/ceph-ansible"
+# ansible_dir: "/path/to/ceph-ansible"
######################
#ip_version: ipv4
#mon_host_v1:
-# enabled: True
+# enabled: true
# suffix: ':6789'
#mon_host_v2:
# suffix: ':3300'
-#enable_ceph_volume_debug: False
+#enable_ceph_volume_debug: false
##########
# CEPHFS #
## Testing mode
# enable this mode _only_ when you have a single node
# if you don't want it keep the option commented
-#common_single_host_mode: true
+# common_single_host_mode: true
## Handlers - restarting daemons after a config change
# if for whatever reasons the content of your ceph configuration changes
#ceph_docker_image_tag: latest-main
#ceph_docker_registry: quay.io
#ceph_docker_registry_auth: false
-#ceph_docker_registry_username:
-#ceph_docker_registry_password:
-#ceph_docker_http_proxy:
-#ceph_docker_https_proxy:
+# ceph_docker_registry_username:
+# ceph_docker_registry_password:
+# ceph_docker_http_proxy:
+# ceph_docker_https_proxy:
#ceph_docker_no_proxy: "localhost,127.0.0.1"
## Client only docker image - defaults to {{ ceph_docker_image }}
#ceph_client_docker_image: "{{ ceph_docker_image }}"
#ceph_client_docker_image_tag: "{{ ceph_docker_image_tag }}"
#ceph_client_docker_registry: "{{ ceph_docker_registry }}"
-#containerized_deployment: False
+#containerized_deployment: false
#container_binary:
#timeout_command: "{{ 'timeout --foreground -s KILL ' ~ docker_pull_timeout if (docker_pull_timeout != '0') and (ceph_docker_dev_image is undefined or not ceph_docker_dev_image) else '' }}"
# name: "images"
# rule_name: "my_replicated_rule"
# application: "rbd"
-# pg_autoscale_mode: False
+# pg_autoscale_mode: false
# pg_num: 16
# pgp_num: 16
# target_size_ratio: 0.2
#############
# DASHBOARD #
#############
-#dashboard_enabled: True
+#dashboard_enabled: true
# Choose http or https
# For https, you should set dashboard.crt/key and grafana.crt/key
# If you define the dashboard_crt and dashboard_key variables, but leave them as '',
#dashboard_admin_user: admin
#dashboard_admin_user_ro: false
# This variable must be set with a strong custom password when dashboard_enabled is True
-#dashboard_admin_password: p@ssw0rd
+# dashboard_admin_password: p@ssw0rd
# We only need this for SSL (https) connections
#dashboard_crt: ''
#dashboard_key: ''
#dashboard_grafana_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not grafana_crt and not grafana_key else false }}"
#dashboard_rgw_api_user_id: ceph-dashboard
#dashboard_rgw_api_admin_resource: ''
-#dashboard_rgw_api_no_ssl_verify: False
+#dashboard_rgw_api_no_ssl_verify: false
#dashboard_frontend_vip: ''
#dashboard_disabled_features: []
#prometheus_frontend_vip: ''
#node_exporter_port: 9100
#grafana_admin_user: admin
# This variable must be set with a strong custom password when dashboard_enabled is True
-#grafana_admin_password: admin
+# grafana_admin_password: admin
# We only need this for SSL (https) connections
#grafana_crt: ''
#grafana_key: ''
#grafana_plugins:
# - vonage-status-panel
# - grafana-piechart-panel
-#grafana_allow_embedding: True
+#grafana_allow_embedding: true
#grafana_port: 3000
#grafana_network: "{{ public_network }}"
#grafana_conf_overrides: {}
#prometheus_conf_overrides: {}
# Uncomment out this variable if you need to customize the retention period for prometheus storage.
# set it to '30d' if you want to retain 30 days of data.
-#prometheus_storage_tsdb_retention_time: 15d
+# prometheus_storage_tsdb_retention_time: 15d
#alertmanager_container_image: "docker.io/prom/alertmanager:v0.16.2"
#alertmanager_container_cpu_period: 100000
#alertmanager_container_cpu_cores: 2
#
# Example:
#
-#rbd_devices:
-# - { pool: 'rbd', image: 'ansible1', size: '30G', host: 'ceph-1', state: 'present' }
-# - { pool: 'rbd', image: 'ansible2', size: '15G', host: 'ceph-1', state: 'present' }
-# - { pool: 'rbd', image: 'ansible3', size: '30G', host: 'ceph-1', state: 'present' }
-# - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' }
+# rbd_devices:
+# - { pool: 'rbd', image: 'ansible1', size: '30G', host: 'ceph-1', state: 'present' }
+# - { pool: 'rbd', image: 'ansible2', size: '15G', host: 'ceph-1', state: 'present' }
+# - { pool: 'rbd', image: 'ansible3', size: '30G', host: 'ceph-1', state: 'present' }
+# - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' }
#rbd_devices: {}
# client_connections defines the client ACL's to restrict client access to specific LUNs
#
# Example:
#
-#client_connections:
-# - { client: 'iqn.1994-05.com.redhat:rh7-iscsi-client', image_list: 'rbd.ansible1,rbd.ansible2', chap: 'rh7-iscsi-client/redhat', status: 'present' }
-# - { client: 'iqn.1991-05.com.microsoft:w2k12r2', image_list: 'rbd.ansible4', chap: 'w2k12r2/microsoft_w2k12', status: 'absent' }
+# client_connections:
+# - { client: 'iqn.1994-05.com.redhat:rh7-iscsi-client', image_list: 'rbd.ansible1,rbd.ansible2', chap: 'rh7-iscsi-client/redhat', status: 'present' }
+# - { client: 'iqn.1991-05.com.microsoft:w2k12r2', image_list: 'rbd.ansible4', chap: 'w2k12r2/microsoft_w2k12', status: 'absent' }
#client_connections: {}
-#no_log_on_ceph_key_tasks: True
+#no_log_on_ceph_key_tasks: true
###############
# DEPRECATION #
###############
-
######################################################
# VARIABLES BELOW SHOULD NOT BE MODIFIED BY THE USER #
# *DO NOT* MODIFY THEM #
#container_exec_cmd:
#docker: false
-#ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}"
+#ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}"
# - { name: client.test, key: "AQAin8tUMICVFBAALRHNrV0Z4MXupRw4v9JQ6Q==" ...
#keys:
-# - { name: client.test, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test" }, mode: "{{ ceph_keyring_permissions }}" }
-# - { name: client.test2, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test2" }, mode: "{{ ceph_keyring_permissions }}" }
+# - { name: client.test, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test" }, mode: "{{ ceph_keyring_permissions }}" }
+# - { name: client.test2, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test2" }, mode: "{{ ceph_keyring_permissions }}" }
# GENERAL #
###########
# Whether or not to generate secure certificate to iSCSI gateway nodes
-#generate_crt: False
+#generate_crt: false
#iscsi_conf_overrides: {}
#iscsi_pool_name: rbd
-#iscsi_pool_size: 3
+# iscsi_pool_size: 3
-#copy_admin_key: True
+#copy_admin_key: true
##################
# RBD-TARGET-API #
# ceph_mds_systemd_overrides will override the systemd settings
# for the ceph-mds services.
# For example,to set "PrivateDevices=false" you can specify:
-#ceph_mds_systemd_overrides:
-# Service:
-# PrivateDevices: False
+# ceph_mds_systemd_overrides:
+# Service:
+# PrivateDevices: false
# ceph_mgr_systemd_overrides will override the systemd settings
# for the ceph-mgr services.
# For example,to set "PrivateDevices=false" you can specify:
-#ceph_mgr_systemd_overrides:
-# Service:
-# PrivateDevices: False
+# ceph_mgr_systemd_overrides:
+# Service:
+# PrivateDevices: false
# ceph_mon_systemd_overrides will override the systemd settings
# for the ceph-mon services.
# For example,to set "PrivateDevices=false" you can specify:
-#ceph_mon_systemd_overrides:
-# Service:
-# PrivateDevices: False
+# ceph_mon_systemd_overrides:
+# Service:
+# PrivateDevices: false
#ceph_nfs_rgw_sectype: "sys,krb5,krb5i,krb5p"
# Note: keys are optional and can be generated, but not on containerized, where
# they must be configured.
-#ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
-#ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
+# ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
+# ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
#rgw_client_name: client.rgw.{{ ansible_facts['hostname'] }}
###################
# https://github.com/nfs-ganesha/nfs-ganesha/blob/next/src/config_samples/ganesha.conf.example
#
# Example:
-#CACHEINODE {
-# #Entries_HWMark = 100000;
-#}
+# CACHEINODE {
+# # Entries_HWMark = 100000;
+# }
#
-#ganesha_core_param_overrides:
-#ganesha_ceph_export_overrides:
-#ganesha_rgw_export_overrides:
-#ganesha_rgw_section_overrides:
-#ganesha_log_overrides:
-#ganesha_conf_overrides: |
-# CACHEINODE {
-# #Entries_HWMark = 100000;
-# }
+# ganesha_core_param_overrides:
+# ganesha_ceph_export_overrides:
+# ganesha_rgw_export_overrides:
+# ganesha_rgw_section_overrides:
+# ganesha_log_overrides:
+# ganesha_conf_overrides: |
+# CACHEINODE {
+# # Entries_HWMark = 100000;
+# }
##########
# DOCKER #
# All scenario(except 3rd) inherit from the following device declaration
# Note: This scenario uses the ceph-volume lvm batch method to provision OSDs
-#devices:
-# - /dev/sdb
-# - /dev/sdc
-# - /dev/sdd
-# - /dev/sde
+# devices:
+# - /dev/sdb
+# - /dev/sdc
+# - /dev/sdd
+# - /dev/sde
#devices: []
# Declare devices to be used as block.db devices
-#dedicated_devices:
-# - /dev/sdx
-# - /dev/sdy
+# dedicated_devices:
+# - /dev/sdx
+# - /dev/sdy
#dedicated_devices: []
# Declare devices to be used as block.wal devices
-#bluestore_wal_devices:
-# - /dev/nvme0n1
-# - /dev/nvme0n2
+# bluestore_wal_devices:
+# - /dev/nvme0n1
+# - /dev/nvme0n2
#bluestore_wal_devices: []
-#'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above.
+# 'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above.
# Device discovery is based on the Ansible fact 'ansible_facts["devices"]'
# which reports all the devices on a system. If chosen, all the disks
# found will be passed to ceph-volume lvm batch. You should not be worried on using
# Encrypt your OSD device using dmcrypt
# If set to True, no matter which osd_objectstore you use the data will be encrypted
-#dmcrypt: False
+#dmcrypt: false
# Use ceph-volume to create OSDs from logical volumes.
# lvm_volumes is a list of dictionaries.
# NUMA node0 CPU(s): 0,2,4,6,8,10,12,14,16
# NUMA node1 CPU(s): 1,3,5,7,9,11,13,15,17
# then, the following would run the OSD on the first NUMA node only.
-#ceph_osd_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16"
-#ceph_osd_docker_cpuset_mems: "0"
+# ceph_osd_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16"
+# ceph_osd_docker_cpuset_mems: "0"
# PREPARE DEVICE
#
# ceph_osd_systemd_overrides will override the systemd settings
# for the ceph-osd services.
# For example,to set "PrivateDevices=false" you can specify:
-#ceph_osd_systemd_overrides:
-# Service:
-# PrivateDevices: False
+# ceph_osd_systemd_overrides:
+# Service:
+# PrivateDevices: false
###########
# ceph_rbd_mirror_systemd_overrides will override the systemd settings
# for the ceph-rbd-mirror services.
# For example,to set "PrivateDevices=false" you can specify:
-#ceph_rbd_mirror_systemd_overrides:
-# Service:
-# PrivateDevices: False
+# ceph_rbd_mirror_systemd_overrides:
+# Service:
+# PrivateDevices: false
# - no-tlsv11
# - no-tls-tickets
#
-#virtual_ips:
-# - 192.168.238.250
-# - 192.168.238.251
+# virtual_ips:
+# - 192.168.238.250
+# - 192.168.238.251
#
-#virtual_ip_netmask: 24
-#virtual_ip_interface: ens33
+# virtual_ip_netmask: 24
+# virtual_ip_interface: ens33
# If the key doesn't exist it falls back to the default replicated_rule.
# This only works for replicated pool type not erasure.
-#rgw_create_pools:
-# "{{ rgw_zone }}.rgw.buckets.data":
-# pg_num: 64
-# type: ec
-# ec_profile: myecprofile
-# ec_k: 5
-# ec_m: 3
-# "{{ rgw_zone }}.rgw.buckets.index":
-# pg_num: 16
-# size: 3
-# type: replicated
-# "{{ rgw_zone }}.rgw.meta":
-# pg_num: 8
-# size: 3
-# type: replicated
-# "{{ rgw_zone }}.rgw.log":
-# pg_num: 8
-# size: 3
-# type: replicated
-# "{{ rgw_zone }}.rgw.control":
-# pg_num: 8
-# size: 3
-# type: replicated
-# rule_name: foo
+# rgw_create_pools:
+# "{{ rgw_zone }}.rgw.buckets.data":
+# pg_num: 64
+# type: ec
+# ec_profile: myecprofile
+# ec_k: 5
+# ec_m: 3
+# "{{ rgw_zone }}.rgw.buckets.index":
+# pg_num: 16
+# size: 3
+# type: replicated
+# "{{ rgw_zone }}.rgw.meta":
+# pg_num: 8
+# size: 3
+# type: replicated
+# "{{ rgw_zone }}.rgw.log":
+# pg_num: 8
+# size: 3
+# type: replicated
+# "{{ rgw_zone }}.rgw.control":
+# pg_num: 8
+# size: 3
+# type: replicated
+# rule_name: foo
##########
# These options can be passed using the 'ceph_rgw_docker_extra_env' variable.
#ceph_rgw_docker_memory_limit: "4096m"
#ceph_rgw_docker_cpu_limit: 8
-#ceph_rgw_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16"
-#ceph_rgw_docker_cpuset_mems: "0"
+# ceph_rgw_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16"
+# ceph_rgw_docker_cpuset_mems: "0"
#ceph_rgw_docker_extra_env:
#ceph_config_keys: [] # DON'T TOUCH ME
# ceph_rgw_systemd_overrides will override the systemd settings
# for the ceph-rgw services.
# For example,to set "PrivateDevices=false" you can specify:
-#ceph_rgw_systemd_overrides:
-# Service:
-# PrivateDevices: False
+# ceph_rgw_systemd_overrides:
+# Service:
+# PrivateDevices: false
# If configure_firewall is true, then ansible will try to configure the
# appropriate firewalling rules so that Ceph daemons can communicate
# with each others.
-#configure_firewall: True
+#configure_firewall: true
# Open ports on corresponding nodes if firewall is installed on it
#ceph_mon_firewall_zone: public
# This variable determines if ceph packages can be updated. If False, the
# package resources will use "state=present". If True, they will use
# "state=latest".
-#upgrade_ceph_packages: False
+#upgrade_ceph_packages: false
#ceph_use_distro_backports: false # DEBIAN ONLY
#ceph_directories_mode: "0755"
# Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
# # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
# for more info read: https://github.com/ceph/ceph-ansible/issues/305
-#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
+# ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
# REPOSITORY: RHCS VERSION RED HAT STORAGE (from 5.0)
# a URL to the .repo file to be installed on the targets. For deb,
# ceph_custom_repo should be the URL to the repo base.
#
-#ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc
+# ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc
#ceph_custom_repo: https://server.domain.com/ceph-custom-repo
# Enabled when ceph_repository == 'local'
#
# Path to DESTDIR of the ceph install
-#ceph_installation_dir: "/path/to/ceph_installation/"
+# ceph_installation_dir: "/path/to/ceph_installation/"
# Whether or not to use installer script rundep_installer.sh
# This script takes in rundep and installs the packages line by line onto the machine
# If this is set to false then it is assumed that the machine ceph is being copied onto will already have
# all runtime dependencies installed
-#use_installer: false
+# use_installer: false
# Root directory for ceph-ansible
-#ansible_dir: "/path/to/ceph-ansible"
+# ansible_dir: "/path/to/ceph-ansible"
######################
#ip_version: ipv4
#mon_host_v1:
-# enabled: True
+# enabled: true
# suffix: ':6789'
#mon_host_v2:
# suffix: ':3300'
-#enable_ceph_volume_debug: False
+#enable_ceph_volume_debug: false
##########
# CEPHFS #
## Testing mode
# enable this mode _only_ when you have a single node
# if you don't want it keep the option commented
-#common_single_host_mode: true
+# common_single_host_mode: true
## Handlers - restarting daemons after a config change
# if for whatever reasons the content of your ceph configuration changes
ceph_docker_image_tag: "latest"
ceph_docker_registry: "registry.redhat.io"
ceph_docker_registry_auth: true
-#ceph_docker_registry_username:
-#ceph_docker_registry_password:
-#ceph_docker_http_proxy:
-#ceph_docker_https_proxy:
+# ceph_docker_registry_username:
+# ceph_docker_registry_password:
+# ceph_docker_http_proxy:
+# ceph_docker_https_proxy:
#ceph_docker_no_proxy: "localhost,127.0.0.1"
## Client only docker image - defaults to {{ ceph_docker_image }}
#ceph_client_docker_image: "{{ ceph_docker_image }}"
# name: "images"
# rule_name: "my_replicated_rule"
# application: "rbd"
-# pg_autoscale_mode: False
+# pg_autoscale_mode: false
# pg_num: 16
# pgp_num: 16
# target_size_ratio: 0.2
#############
# DASHBOARD #
#############
-#dashboard_enabled: True
+#dashboard_enabled: true
# Choose http or https
# For https, you should set dashboard.crt/key and grafana.crt/key
# If you define the dashboard_crt and dashboard_key variables, but leave them as '',
#dashboard_admin_user: admin
#dashboard_admin_user_ro: false
# This variable must be set with a strong custom password when dashboard_enabled is True
-#dashboard_admin_password: p@ssw0rd
+# dashboard_admin_password: p@ssw0rd
# We only need this for SSL (https) connections
#dashboard_crt: ''
#dashboard_key: ''
#dashboard_grafana_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not grafana_crt and not grafana_key else false }}"
#dashboard_rgw_api_user_id: ceph-dashboard
#dashboard_rgw_api_admin_resource: ''
-#dashboard_rgw_api_no_ssl_verify: False
+#dashboard_rgw_api_no_ssl_verify: false
#dashboard_frontend_vip: ''
#dashboard_disabled_features: []
#prometheus_frontend_vip: ''
#node_exporter_port: 9100
#grafana_admin_user: admin
# This variable must be set with a strong custom password when dashboard_enabled is True
-#grafana_admin_password: admin
+# grafana_admin_password: admin
# We only need this for SSL (https) connections
#grafana_crt: ''
#grafana_key: ''
#grafana_plugins:
# - vonage-status-panel
# - grafana-piechart-panel
-#grafana_allow_embedding: True
+#grafana_allow_embedding: true
#grafana_port: 3000
#grafana_network: "{{ public_network }}"
#grafana_conf_overrides: {}
#prometheus_conf_overrides: {}
# Uncomment out this variable if you need to customize the retention period for prometheus storage.
# set it to '30d' if you want to retain 30 days of data.
-#prometheus_storage_tsdb_retention_time: 15d
+# prometheus_storage_tsdb_retention_time: 15d
alertmanager_container_image: registry.redhat.io/openshift4/ose-prometheus-alertmanager:v4.6
#alertmanager_container_cpu_period: 100000
#alertmanager_container_cpu_cores: 2
#
# Example:
#
-#rbd_devices:
-# - { pool: 'rbd', image: 'ansible1', size: '30G', host: 'ceph-1', state: 'present' }
-# - { pool: 'rbd', image: 'ansible2', size: '15G', host: 'ceph-1', state: 'present' }
-# - { pool: 'rbd', image: 'ansible3', size: '30G', host: 'ceph-1', state: 'present' }
-# - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' }
+# rbd_devices:
+# - { pool: 'rbd', image: 'ansible1', size: '30G', host: 'ceph-1', state: 'present' }
+# - { pool: 'rbd', image: 'ansible2', size: '15G', host: 'ceph-1', state: 'present' }
+# - { pool: 'rbd', image: 'ansible3', size: '30G', host: 'ceph-1', state: 'present' }
+# - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' }
#rbd_devices: {}
# client_connections defines the client ACL's to restrict client access to specific LUNs
#
# Example:
#
-#client_connections:
-# - { client: 'iqn.1994-05.com.redhat:rh7-iscsi-client', image_list: 'rbd.ansible1,rbd.ansible2', chap: 'rh7-iscsi-client/redhat', status: 'present' }
-# - { client: 'iqn.1991-05.com.microsoft:w2k12r2', image_list: 'rbd.ansible4', chap: 'w2k12r2/microsoft_w2k12', status: 'absent' }
+# client_connections:
+# - { client: 'iqn.1994-05.com.redhat:rh7-iscsi-client', image_list: 'rbd.ansible1,rbd.ansible2', chap: 'rh7-iscsi-client/redhat', status: 'present' }
+# - { client: 'iqn.1991-05.com.microsoft:w2k12r2', image_list: 'rbd.ansible4', chap: 'w2k12r2/microsoft_w2k12', status: 'absent' }
#client_connections: {}
-#no_log_on_ceph_key_tasks: True
+#no_log_on_ceph_key_tasks: true
###############
# DEPRECATION #
###############
-
######################################################
# VARIABLES BELOW SHOULD NOT BE MODIFIED BY THE USER #
# *DO NOT* MODIFY THEM #
#container_exec_cmd:
#docker: false
-#ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}"
+#ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}"
# Ensure that all monitors are present in the mons
# group in your inventory so that the ceph configuration file
# is created correctly for the new OSD(s).
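# For illustration only (hypothetical hostnames): the inventory passed to this
# playbook should list every existing monitor under the mons group, e.g.
#
#   [mons]
#   ceph-mon01
#   ceph-mon02
#   ceph-mon03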
-- hosts: mons
+- name: Pre-requisites operations for adding new monitor(s)
+ hosts: mons
gather_facts: false
vars:
delegate_facts_host: true
become: true
pre_tasks:
- - import_tasks: "{{ playbook_dir }}/../raw_install_python.yml"
+ - name: Import raw_install_python tasks
+ ansible.builtin.import_tasks: "{{ playbook_dir }}/../raw_install_python.yml"
- - name: gather facts
- setup:
+ - name: Gather facts
+ ansible.builtin.setup:
gather_subset:
- 'all'
- '!facter'
- '!ohai'
when: not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, [])
- - import_role:
+
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: gather and delegate facts
- setup:
+ - name: Gather and delegate facts
+ ansible.builtin.setup:
gather_subset:
- 'all'
- '!facter'
run_once: true
when: delegate_facts_host | bool
tasks:
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
- - import_role:
+
+ - name: Import ceph-validate role
+ ansible.builtin.import_role:
name: ceph-validate
- - import_role:
+
+ - name: Import ceph-infra role
+ ansible.builtin.import_role:
name: ceph-infra
- - import_role:
+
+ - name: Import ceph-handler role
+ ansible.builtin.import_role:
name: ceph-handler
- - import_role:
+
+ - name: Import ceph-common role
+ ansible.builtin.import_role:
name: ceph-common
when: not containerized_deployment | bool
- - import_role:
+
+ - name: Import ceph-container-engine role
+ ansible.builtin.import_role:
name: ceph-container-engine
when: containerized_deployment | bool
- - import_role:
+
+ - name: Import ceph-container-common role
+ ansible.builtin.import_role:
name: ceph-container-common
when: containerized_deployment | bool
-- hosts: mons
+- name: Deploy Ceph monitors
+ hosts: mons
gather_facts: false
become: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
- - import_role:
+
+ - name: Import ceph-handler role
+ ansible.builtin.import_role:
name: ceph-handler
- - import_role:
+
+ - name: Import ceph-config role
+ ansible.builtin.import_role:
name: ceph-config
- - import_role:
+
+ - name: Import ceph-mon role
+ ansible.builtin.import_role:
name: ceph-mon
- - import_role:
+
+ - name: Import ceph-crash role
+ ansible.builtin.import_role:
name: ceph-crash
when: containerized_deployment | bool
-# update config files on OSD nodes
-- hosts: osds
+- name: Update config file on OSD nodes
+ hosts: osds
gather_facts: true
become: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
- - import_role:
+
+ - name: Import ceph-handler role
+ ansible.builtin.import_role:
name: ceph-handler
- - import_role:
+
+ - name: Import ceph-config role
+ ansible.builtin.import_role:
name: ceph-config
# ansible-playbook -i hosts, backup-and-restore-ceph-files.yml -e backup_dir=/usr/share/ceph-ansible/backup-ceph-files -e mode=backup -e target_node=mon01
# ansible-playbook -i hosts, backup-and-restore-ceph-files.yml -e backup_dir=/usr/share/ceph-ansible/backup-ceph-files -e mode=restore -e target_node=mon01
-- hosts: localhost
+- name: Backup and restore Ceph files
+ hosts: localhost
become: true
gather_facts: true
tasks:
- - name: exit playbook, if user did not set the source node
- fail:
+ - name: Exit playbook, if user did not set the source node
+ ansible.builtin.fail:
msg: >
"You must pass the node name: -e target_node=<inventory_name>.
The name must match what is set in your inventory."
- target_node is not defined
or target_node not in groups.get('all', [])
- - name: exit playbook, if user did not set the backup directory
- fail:
+ - name: Exit playbook, if user did not set the backup directory
+ ansible.builtin.fail:
msg: >
"you must pass the backup directory path: -e backup_dir=<backup directory path>"
when: backup_dir is not defined
- - name: exit playbook, if user did not set the playbook mode (backup|restore)
- fail:
+ - name: Exit playbook, if user did not set the playbook mode (backup|restore)
+ ansible.builtin.fail:
msg: >
"you must pass the mode: -e mode=<backup|restore>"
when:
- mode is not defined
or mode not in ['backup', 'restore']
- - name: gather facts on source node
- setup:
+ - name: Gather facts on source node
+ ansible.builtin.setup:
delegate_to: "{{ target_node }}"
delegate_facts: true
- - name: backup mode
+ - name: Backup mode
when: mode == 'backup'
block:
- - name: create a temp directory
+ - name: Create a temp directory
ansible.builtin.tempfile:
state: directory
suffix: ansible-archive-ceph
register: tmp_dir
delegate_to: "{{ target_node }}"
- - name: archive files
- archive:
+ - name: Archive files
+ community.general.archive:
path: "{{ item }}"
dest: "{{ tmp_dir.path }}/backup{{ item | replace('/', '-') }}.tar"
format: tar
+ mode: "0644"
delegate_to: "{{ target_node }}"
loop:
- /etc/ceph
- /var/lib/ceph
- - name: create backup directory
+ - name: Create backup directory
become: false
- file:
+ ansible.builtin.file:
path: "{{ backup_dir }}/{{ hostvars[target_node]['ansible_facts']['hostname'] }}"
state: directory
+ mode: "0755"
- - name: backup files
- fetch:
+ - name: Backup files
+ ansible.builtin.fetch:
src: "{{ tmp_dir.path }}/backup{{ item | replace('/', '-') }}.tar"
dest: "{{ backup_dir }}/{{ hostvars[target_node]['ansible_facts']['hostname'] }}/backup{{ item | replace('/', '-') }}.tar"
- flat: yes
+ flat: true
loop:
- /etc/ceph
- /var/lib/ceph
delegate_to: "{{ target_node }}"
- - name: remove temp directory
- file:
+ - name: Remove temp directory
+ ansible.builtin.file:
path: "{{ tmp_dir.path }}"
state: absent
delegate_to: "{{ target_node }}"
- - name: restore mode
+ - name: Restore mode
when: mode == 'restore'
block:
- - name: unarchive files
+ - name: Unarchive files
ansible.builtin.unarchive:
src: "{{ backup_dir }}/{{ hostvars[target_node]['ansible_facts']['hostname'] }}/backup{{ item | replace('/', '-') }}.tar"
dest: "{{ item | dirname }}"
#
# It currently runs on localhost
-- hosts: localhost
+- name: CephX key management examples
+ hosts: localhost
gather_facts: false
vars:
cluster: ceph
- client.leseb1
- client.pythonnnn
keys_to_create:
- - { name: client.pythonnnn, caps: { mon: "allow rwx", mds: "allow *" } , mode: "0600" }
- - { name: client.existpassss, caps: { mon: "allow r", osd: "allow *" } , mode: "0600" }
- - { name: client.path, caps: { mon: "allow r", osd: "allow *" } , mode: "0600" }
+ - { name: client.pythonnnn, caps: { mon: "allow rwx", mds: "allow *" }, mode: "0600" }
+ - { name: client.existpassss, caps: { mon: "allow r", osd: "allow *" }, mode: "0600" }
+ - { name: client.path, caps: { mon: "allow r", osd: "allow *" }, mode: "0600" }
tasks:
- - name: create ceph key(s) module
+ - name: Create ceph key(s) module
ceph_key:
name: "{{ item.name }}"
caps: "{{ item.caps }}"
containerized: "{{ container_exec_cmd | default(False) }}"
with_items: "{{ keys_to_create }}"
- - name: update ceph key(s)
+ - name: Update ceph key(s)
ceph_key:
name: "{{ item.name }}"
state: update
containerized: "{{ container_exec_cmd | default(False) }}"
with_items: "{{ keys_to_create }}"
- - name: delete ceph key(s)
+ - name: Delete ceph key(s)
ceph_key:
name: "{{ item }}"
state: absent
containerized: "{{ container_exec_cmd | default(False) }}"
with_items: "{{ keys_to_delete }}"
- - name: info ceph key(s)
+ - name: Info ceph key(s)
ceph_key:
name: "{{ item }}"
state: info
ignore_errors: true
with_items: "{{ keys_to_info }}"
- - name: list ceph key(s)
+ - name: List ceph key(s)
ceph_key:
state: list
cluster: "{{ cluster }}"
register: list_keys
ignore_errors: true
- - name: fetch_initial_keys
+ - name: Fetch_initial_keys # noqa: ignore-errors
ceph_key:
state: fetch_initial_keys
cluster: "{{ cluster }}"
# This playbook does a cephadm adopt for all the Ceph services
#
-- name: confirm whether user really meant to adopt the cluster by cephadm
+- name: Confirm whether user really meant to adopt the cluster by cephadm
hosts: localhost
connection: local
become: false
gather_facts: false
vars_prompt:
- - name: ireallymeanit
+ - name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to adopt the cluster by cephadm ?
default: 'no'
- private: no
+ private: false
tasks:
- - name: exit playbook, if user did not mean to adopt the cluster by cephadm
- fail:
+ - name: Exit playbook, if user did not mean to adopt the cluster by cephadm
+ ansible.builtin.fail:
msg: >
Exiting cephadm-adopt playbook, cluster was NOT adopted.
To adopt the cluster, either say 'yes' on the prompt or
invoking the playbook
when: ireallymeanit != 'yes'
- - name: import_role ceph-defaults
- import_role:
+ - name: Import_role ceph-defaults
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: check if a legacy grafana-server group exists
- import_role:
+ - name: Check if a legacy grafana-server group exists
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: convert_grafana_server_group_name.yml
when: groups.get((grafana_server_group_name | default('grafana-server')), []) | length > 0
-- name: gather facts and prepare system for cephadm
+- name: Gather facts and prepare system for cephadm
hosts:
- "{{ mon_group_name|default('mons') }}"
- "{{ osd_group_name|default('osds') }}"
- "{{ iscsi_gw_group_name|default('iscsigws') }}"
- "{{ monitoring_group_name|default('monitoring') }}"
become: true
- any_errors_fatal: True
+ any_errors_fatal: true
gather_facts: false
vars:
delegate_facts_host: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: gather facts
- setup:
+ - name: Gather facts
+ ansible.builtin.setup:
gather_subset:
- 'all'
- '!facter'
- '!ohai'
when: not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, [])
- - name: gather and delegate facts
- setup:
+ - name: Gather and delegate facts
+ ansible.builtin.setup:
gather_subset:
- 'all'
- '!facter'
run_once: true
when: delegate_facts_host | bool
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary.yml
- - name: set_fact ceph_cmd
- set_fact:
+ - name: Set_fact ceph_cmd
+ ansible.builtin.set_fact:
ceph_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:ro -v /var/run/ceph:/var/run/ceph:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }} --cluster {{ cluster }}"
- - name: check pools have an application enabled
- command: "{{ ceph_cmd }} health detail --format json"
+ - name: Check pools have an application enabled
+ ansible.builtin.command: "{{ ceph_cmd }} health detail --format json"
register: health_detail
run_once: true
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
- - name: check for POOL_APP_NOT_ENABLED warning
- fail:
+ - name: Check for POOL_APP_NOT_ENABLED warning
+ ansible.builtin.fail:
msg: "Make sure all your pool have an application enabled."
run_once: true
delegate_to: localhost
- (health_detail.stdout | default('{}', True) | from_json)['status'] == "HEALTH_WARN"
- "'POOL_APP_NOT_ENABLED' in (health_detail.stdout | default('{}', True) | from_json)['checks']"
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: convert_grafana_server_group_name.yml
- when: groups.get((grafana_server_group_name|default('grafana-server')), []) | length > 0
+ when: groups.get((grafana_server_group_name | default('grafana-server')), []) | length > 0
- - name: get the ceph version
- command: "{{ container_binary + ' run --rm --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }} --version"
+ - name: Get the ceph version
+ ansible.builtin.command: "{{ container_binary + ' run --rm --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }} --version"
changed_when: false
register: ceph_version_out
- - name: set_fact ceph_version
- set_fact:
+ - name: Set_fact ceph_version
+ ansible.builtin.set_fact:
ceph_version: "{{ ceph_version_out.stdout.split(' ')[2] }}"
- - name: fail on pre octopus ceph releases
- fail:
+ - name: Fail on pre octopus ceph releases
+ ansible.builtin.fail:
msg: >
Your Ceph version {{ ceph_version }} is not supported for this operation.
Please upgrade your cluster with the rolling_update.yml playbook first.
when: ceph_version is version('15.2', '<')
- - name: check if it is atomic host
- stat:
+ - name: Check if it is atomic host
+ ansible.builtin.stat:
path: /run/ostree-booted
register: stat_ostree
- - name: set_fact is_atomic
- set_fact:
+ - name: Set_fact is_atomic
+ ansible.builtin.set_fact:
is_atomic: "{{ stat_ostree.stat.exists }}"
- - import_role:
+ - name: Import ceph-container-engine role
+ ansible.builtin.import_role:
name: ceph-container-engine
when: not containerized_deployment | bool
- - import_role:
+ - name: Import ceph-container-common role
+ ansible.builtin.import_role:
name: ceph-container-common
tasks_from: registry.yml
when:
- not containerized_deployment | bool
- ceph_docker_registry_auth | bool
- - name: "pulling {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image"
- command: "{{ timeout_command }} {{ container_binary }} pull {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+ - name: Pulling Ceph container image
+ ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
changed_when: false
register: docker_image
until: docker_image.rc == 0
inventory_hostname in groups.get(iscsi_gw_group_name, []) or
inventory_hostname in groups.get(nfs_group_name, [])
- - name: configure repository for installing cephadm
+ - name: Configure repository for installing cephadm
when: containerized_deployment | bool
tags: with_pkg
block:
- - name: set_fact ceph_origin
- set_fact:
+ - name: Set_fact ceph_origin
+ ansible.builtin.set_fact:
ceph_origin: repository
when: ceph_origin == 'dummy'
- - name: set_fact ceph_repository
- set_fact:
+ - name: Set_fact ceph_repository
+ ansible.builtin.set_fact:
ceph_repository: community
when: ceph_repository == 'dummy'
- - name: validate repository variables
- import_role:
+ - name: Validate repository variables
+ ansible.builtin.import_role:
name: ceph-validate
tasks_from: check_repository.yml
- - name: configure repository
- import_role:
+ - name: Configure repository
+ ansible.builtin.import_role:
name: ceph-common
tasks_from: "configure_repository.yml"
- - name: install cephadm requirements
+ - name: Install cephadm requirements
tags: with_pkg
- package:
+ ansible.builtin.package:
name: ['python3', 'lvm2']
register: result
until: result is succeeded
- - name: install cephadm
+ - name: Install cephadm
tags: with_pkg
- package:
+ ansible.builtin.package:
name: cephadm
register: result
until: result is succeeded
- - name: install cephadm mgr module
+ - name: Install cephadm mgr module
tags: with_pkg
- package:
+ ansible.builtin.package:
name: ceph-mgr-cephadm
register: result
until: result is succeeded
- not containerized_deployment | bool
- mgr_group_name in group_names
- - name: get current fsid
- command: "{{ ceph_cmd }} fsid"
+ - name: Get current fsid
+ ansible.builtin.command: "{{ ceph_cmd }} fsid"
register: current_fsid
run_once: true
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
- - name: get a minimal ceph configuration
- command: "{{ ceph_cmd }} config generate-minimal-conf"
+ - name: Get a minimal ceph configuration
+ ansible.builtin.command: "{{ ceph_cmd }} config generate-minimal-conf"
register: minimal_config
run_once: true
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
- - name: set_fact fsid
- set_fact:
+ - name: Set_fact fsid
+ ansible.builtin.set_fact:
fsid: "{{ current_fsid.stdout }}"
run_once: true
- - name: enable cephadm mgr module
+ - name: Enable cephadm mgr module
ceph_mgr_module:
name: cephadm
cluster: "{{ cluster }}"
run_once: true
delegate_to: '{{ groups[mon_group_name][0] }}'
- - name: set cephadm as orchestrator backend
- command: "{{ ceph_cmd }} orch set backend cephadm"
+ - name: Set cephadm as orchestrator backend
+ ansible.builtin.command: "{{ ceph_cmd }} orch set backend cephadm"
changed_when: false
run_once: true
delegate_to: '{{ groups[mon_group_name][0] }}'
- - name: check if there is an existing ssh keypair
- stat:
+ - name: Check if there is an existing ssh keypair
+ ansible.builtin.stat:
path: "{{ item }}"
loop:
- "{{ cephadm_ssh_priv_key_path }}"
run_once: true
delegate_to: '{{ groups[mon_group_name][0] }}'
- - name: set fact
- set_fact:
+ - name: Set fact
+ ansible.builtin.set_fact:
stat_ssh_key_pair: "{{ ssh_keys.results | map(attribute='stat.exists') | list }}"
- - name: fail if either ssh public or private key is missing
- fail:
+ - name: Fail if either ssh public or private key is missing
+ ansible.builtin.fail:
msg: "One part of the ssh keypair of user {{ cephadm_ssh_user }} is missing"
when:
- false in stat_ssh_key_pair
- true in stat_ssh_key_pair
- - name: generate cephadm ssh key if there is none
- command: "{{ ceph_cmd }} cephadm generate-key"
+ - name: Generate cephadm ssh key if there is none
+ ansible.builtin.command: "{{ ceph_cmd }} cephadm generate-key"
when: not true in stat_ssh_key_pair
changed_when: false
run_once: true
delegate_to: '{{ groups[mon_group_name][0] }}'
- - name: use existing user keypair for remote connections
+ - name: Use existing user keypair for remote connections
when: not false in stat_ssh_key_pair
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
- command: >
+ ansible.builtin.command: >
{{ container_binary + ' run --rm --net=host --security-opt label=disable
-v /etc/ceph:/etc/ceph:z
-v /var/lib/ceph:/var/lib/ceph:ro
-v /var/run/ceph:/var/run/ceph:z
-v ' + item.1 + ':/etc/ceph/cephadm.' + item.0 + ':ro --entrypoint=ceph '+ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }}
--cluster {{ cluster }} cephadm set-{{ item.0 }}-key -i /etc/ceph/cephadm.{{ item.0 }}
+ changed_when: false
with_together:
- - [ 'pub', 'priv' ]
- - [ '{{ cephadm_ssh_pub_key_path }}', '{{ cephadm_ssh_priv_key_path }}' ]
+ - ['pub', 'priv']
+ - ['{{ cephadm_ssh_pub_key_path }}', '{{ cephadm_ssh_priv_key_path }}']
- - name: get the cephadm ssh pub key
- command: "{{ ceph_cmd }} cephadm get-pub-key"
+ - name: Get the cephadm ssh pub key
+ ansible.builtin.command: "{{ ceph_cmd }} cephadm get-pub-key"
changed_when: false
run_once: true
register: cephadm_pubpkey
delegate_to: '{{ groups[mon_group_name][0] }}'
- - name: allow cephadm key for {{ cephadm_ssh_user }} account
- authorized_key:
+ - name: Allow cephadm key
+ ansible.posix.authorized_key:
user: "{{ cephadm_ssh_user }}"
key: '{{ cephadm_pubpkey.stdout }}'
- - name: set cephadm ssh user to {{ cephadm_ssh_user }}
- command: "{{ ceph_cmd }} cephadm set-user {{ cephadm_ssh_user }}"
+ - name: Set cephadm ssh user to {{ cephadm_ssh_user }}
+ ansible.builtin.command: "{{ ceph_cmd }} cephadm set-user {{ cephadm_ssh_user }}"
changed_when: false
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
- - name: run cephadm prepare-host
- command: cephadm prepare-host
+ - name: Run cephadm prepare-host
+ ansible.builtin.command: cephadm prepare-host
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- - name: set default container image in ceph configuration
- command: "{{ ceph_cmd }} config set global container_image {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+ - name: Set default container image in ceph configuration
+ ansible.builtin.command: "{{ ceph_cmd }} config set global container_image {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
changed_when: false
run_once: true
delegate_to: '{{ groups[mon_group_name][0] }}'
- - name: set container image base in ceph configuration
- command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_base {{ ceph_docker_registry }}/{{ ceph_docker_image }}"
+ - name: Set container image base in ceph configuration
+ ansible.builtin.command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_base {{ ceph_docker_registry }}/{{ ceph_docker_image }}"
changed_when: false
run_once: true
delegate_to: '{{ groups[mon_group_name][0] }}'
- - name: set dashboard container image in ceph mgr configuration
+ - name: Set dashboard container image in ceph mgr configuration
when: dashboard_enabled | bool
run_once: true
block:
- - name: set alertmanager container image in ceph configuration
- command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_alertmanager {{ alertmanager_container_image }}"
+ - name: Set alertmanager container image in ceph configuration
+ ansible.builtin.command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_alertmanager {{ alertmanager_container_image }}"
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
- - name: set grafana container image in ceph configuration
- command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_grafana {{ grafana_container_image }}"
+ - name: Set grafana container image in ceph configuration
+ ansible.builtin.command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_grafana {{ grafana_container_image }}"
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
- - name: set node-exporter container image in ceph configuration
- command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_node_exporter {{ node_exporter_container_image }}"
+ - name: Set node-exporter container image in ceph configuration
+ ansible.builtin.command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_node_exporter {{ node_exporter_container_image }}"
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
- - name: set prometheus container image in ceph configuration
- command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_prometheus {{ prometheus_container_image }}"
+ - name: Set prometheus container image in ceph configuration
+ ansible.builtin.command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_prometheus {{ prometheus_container_image }}"
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
- - name: enable the osd memory autotune for hci environment
- command: "{{ ceph_cmd }} config set osd osd_memory_target_autotune true"
+ - name: Enable the osd memory autotune for hci environment
+ ansible.builtin.command: "{{ ceph_cmd }} config set osd osd_memory_target_autotune true"
changed_when: false
run_once: true
delegate_to: '{{ groups[mon_group_name][0] }}'
when: is_hci | bool
- - name: set autotune_memory_target_ratio
- command: "{{ ceph_cmd }} config set mgr mgr/cephadm/autotune_memory_target_ratio {{ '0.2' if is_hci | bool else '0.7' }}"
+ - name: Set autotune_memory_target_ratio
+ ansible.builtin.command: "{{ ceph_cmd }} config set mgr mgr/cephadm/autotune_memory_target_ratio {{ '0.2' if is_hci | bool else '0.7' }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- - name: manage nodes with cephadm - ipv4
- command: "{{ ceph_cmd }} orch host add {{ ansible_facts['nodename'] }} {{ ansible_facts['all_ipv4_addresses'] | ips_in_ranges(cephadm_mgmt_network.split(',')) | first }} {{ group_names | intersect(adopt_label_group_names) | join(' ') }}"
+ - name: Manage nodes with cephadm - ipv4
+ ansible.builtin.command: "{{ ceph_cmd }} orch host add {{ ansible_facts['nodename'] }} {{ ansible_facts['all_ipv4_addresses'] | ips_in_ranges(cephadm_mgmt_network.split(',')) | first }} {{ group_names | intersect(adopt_label_group_names) | join(' ') }}"
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
when: cephadm_mgmt_network.split(',')[0] is ansible.utils.ipv4
- - name: manage nodes with cephadm - ipv6
- command: "{{ ceph_cmd }} orch host add {{ ansible_facts['nodename'] }} {{ ansible_facts['all_ipv6_addresses'] | ips_in_ranges(cephadm_mgmt_network.split(',')) | last | ansible.utils.ipwrap }} {{ group_names | intersect(adopt_label_group_names) | join(' ') }}"
+ - name: Manage nodes with cephadm - ipv6
+ ansible.builtin.command: "{{ ceph_cmd }} orch host add {{ ansible_facts['nodename'] }} {{ ansible_facts['all_ipv6_addresses'] | ips_in_ranges(cephadm_mgmt_network.split(',')) | last | ansible.utils.ipwrap }} {{ group_names | intersect(adopt_label_group_names) | join(' ') }}"
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
when: cephadm_mgmt_network.split(',')[0] is ansible.utils.ipv6
- - name: add ceph label for core component
- command: "{{ ceph_cmd }} orch host label add {{ ansible_facts['nodename'] }} ceph"
+ - name: Add ceph label for core component
+ ansible.builtin.command: "{{ ceph_cmd }} orch host label add {{ ansible_facts['nodename'] }} ceph"
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
when: inventory_hostname in groups.get(mon_group_name, []) or
inventory_hostname in groups.get(mgr_group_name, []) or
inventory_hostname in groups.get(rbdmirror_group_name, [])
- - name: get the client.admin keyring
+ - name: Get the client.admin keyring
ceph_key:
name: client.admin
cluster: "{{ cluster }}"
delegate_to: '{{ groups[mon_group_name][0] }}'
register: client_admin_keyring
- - name: copy the client.admin keyring
- copy:
+ - name: Copy the client.admin keyring
+ ansible.builtin.copy:
dest: "/etc/ceph/{{ cluster }}.client.admin.keyring"
content: "{{ client_admin_keyring.stdout + '\n' }}"
owner: "{{ ceph_uid | int if containerized_deployment | bool else 'ceph' }}"
- "{{ groups.get(mgr_group_name, []) }}"
- "{{ groups.get(rbdmirror_group_name, []) }}"
- - name: assimilate ceph configuration
- command: "{{ ceph_cmd }} config assimilate-conf -i /etc/ceph/{{ cluster }}.conf"
+ - name: Assimilate ceph configuration
+ ansible.builtin.command: "{{ ceph_cmd }} config assimilate-conf -i /etc/ceph/{{ cluster }}.conf"
changed_when: false
when: inventory_hostname in groups.get(mon_group_name, []) or
inventory_hostname in groups.get(osd_group_name, []) or
inventory_hostname in groups.get(mgr_group_name, []) or
inventory_hostname in groups.get(rbdmirror_group_name, [])
- - name: set_fact cephadm_cmd
- set_fact:
+ - name: Set_fact cephadm_cmd
+ ansible.builtin.set_fact:
cephadm_cmd: "cephadm {{ '--docker' if container_binary == 'docker' else '' }}"
- - name: set container registry info
- command: "{{ ceph_cmd }} cephadm registry-login {{ ceph_docker_registry }} {{ ceph_docker_registry_username }} {{ ceph_docker_registry_password }}"
+ - name: Set container registry info
+ ansible.builtin.command: "{{ ceph_cmd }} cephadm registry-login {{ ceph_docker_registry }} {{ ceph_docker_registry_username }} {{ ceph_docker_registry_password }}"
changed_when: false
no_log: true
run_once: true
delegate_to: '{{ groups[mon_group_name][0] }}'
when: ceph_docker_registry_auth | bool
- - name: remove logrotate configuration
- file:
+ - name: Remove logrotate configuration
+ ansible.builtin.file:
path: /etc/logrotate.d/ceph
state: absent
when: inventory_hostname in groups.get(mon_group_name, []) or
inventory_hostname in groups.get(iscsi_gw_group_name, [])
-- name: store existing rbd mirror peers in monitor config store
+- name: Store existing rbd mirror peers in monitor config store
hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"
become: true
any_errors_fatal: true
gather_facts: true
tasks:
- - name: store existing rbd mirror peers in monitor config store
+ - name: Store existing rbd mirror peers in monitor config store
when:
- ceph_rbd_mirror_configure | default(True) | bool
- ceph_rbd_mirror_remote_user is defined
- ceph_rbd_mirror_remote_cluster is defined
block:
- - name: import ceph-defaults
- import_role:
+ - name: Import ceph-defaults
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: import ceph-validate
- import_role:
+ - name: Import ceph-validate
+ ansible.builtin.import_role:
name: ceph-validate
tasks_from: check_rbdmirror.yml
- - name: import container_binary
- import_role:
+ - name: Import container_binary
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary.yml
- - name: set_fact rbd_cmd
- set_fact:
+ - name: Set_fact rbd_cmd
+ ansible.builtin.set_fact:
rbd_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:z -v /var/run/ceph:/var/run/ceph:z --entrypoint=rbd ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }} --cluster {{ cluster }} -n client.rbd-mirror.{{ ansible_facts['hostname'] }} -k /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_facts['hostname'] }}.keyring"
- - name: set_fact admin_rbd_cmd
- set_fact:
+ - name: Set_fact admin_rbd_cmd
+ ansible.builtin.set_fact:
admin_rbd_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:z -v /var/run/ceph:/var/run/ceph:z --entrypoint=rbd ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }} --cluster {{ cluster }}"
- - name: get mirror pool info
- command: "{{ rbd_cmd }} mirror pool info {{ ceph_rbd_mirror_pool }} --format json"
+ - name: Get mirror pool info
+ ansible.builtin.command: "{{ rbd_cmd }} mirror pool info {{ ceph_rbd_mirror_pool }} --format json"
register: mirror_pool_info
changed_when: false
- - name: set_fact mirror_peer_found
- set_fact:
- mirror_peer_uuid: "{{ ((mirror_pool_info.stdout | default('{}') | from_json)['peers'] | selectattr('site_name', 'match', '^'+ceph_rbd_mirror_remote_cluster+'$') | map(attribute='uuid') | list) }}"
+ - name: Set_fact mirror_peer_found
+ ansible.builtin.set_fact:
+ mirror_peer_uuid: "{{ ((mirror_pool_info.stdout | default('{}') | from_json)['peers'] | selectattr('site_name', 'match', '^' + ceph_rbd_mirror_remote_cluster + '$') | map(attribute='uuid') | list) }}"
- - name: remove current rbd mirror peer, add new peer into mon config store
+ - name: Remove current rbd mirror peer, add new peer into mon config store
when: mirror_peer_uuid | length > 0
block:
- - name: get remote user keyring
- slurp:
+ - name: Get remote user keyring
+ ansible.builtin.slurp:
src: "/etc/ceph/{{ ceph_rbd_mirror_remote_cluster }}.{{ ceph_rbd_mirror_remote_user }}.keyring"
register: remote_user_keyring
- - name: get quorum_status
- command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph quorum_status --format json"
+ - name: Get quorum_status
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph quorum_status --format json"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
register: quorum_status
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- - name: set_fact mon_ip_list
- set_fact:
+ - name: Set_fact mon_ip_list
+ ansible.builtin.set_fact:
mon_ip_list: "{{ mon_ip_list | default([]) | union([item['addr'].split(':')[0]]) }}"
loop: "{{ (quorum_status.stdout | default('{}') | from_json)['monmap']['mons'] }}"
run_once: true
- - name: remove current mirror peer
- command: "{{ admin_rbd_cmd }} mirror pool peer remove {{ ceph_rbd_mirror_pool }} {{ ((mirror_pool_info.stdout | default('{}') | from_json)['peers'] | selectattr('site_name', 'match', '^'+ceph_rbd_mirror_remote_cluster+'$') | map(attribute='uuid') | list)[0] }}"
+ - name: Remove current mirror peer
+ ansible.builtin.command: "{{ admin_rbd_cmd }} mirror pool peer remove {{ ceph_rbd_mirror_pool }} {{ ((mirror_pool_info.stdout | default('{}') | from_json)['peers'] | selectattr('site_name', 'match', '^' + ceph_rbd_mirror_remote_cluster + '$') | map(attribute='uuid') | list)[0] }}"
delegate_to: "{{ groups.get(mon_group_name | default('mons'))[0] }}"
changed_when: false
- - name: get remote user keyring secret
- set_fact:
+ - name: Get remote user keyring secret
+ ansible.builtin.set_fact:
remote_user_keyring_secret: "{{ item.split('=', 1)[1] | trim }}"
with_items: "{{ (remote_user_keyring.content | b64decode).split('\n') }}"
when: "'key = ' in item"
- - name: create a temporary file
- tempfile:
+ - name: Create a temporary file
+ ansible.builtin.tempfile:
path: /etc/ceph
state: file
suffix: _ceph-ansible
register: tmp_file
delegate_to: "{{ groups.get(mon_group_name | default('mons'))[0] }}"
- - name: write secret to temporary file
- copy:
+ - name: Write secret to temporary file
+ ansible.builtin.copy:
dest: "{{ tmp_file.path }}"
content: "{{ remote_user_keyring_secret }}"
+ mode: preserve
delegate_to: "{{ groups.get(mon_group_name | default('mons'))[0] }}"
- - name: re-add mirror peer
- command: "{{ admin_rbd_cmd }} mirror pool peer add {{ ceph_rbd_mirror_pool }} {{ ceph_rbd_mirror_remote_user }}@{{ ceph_rbd_mirror_remote_cluster }} --remote-mon-host {{ ','.join(mon_ip_list) }} --remote-key-file {{ tmp_file.path }}"
+ - name: Re-add mirror peer
+ ansible.builtin.command: "{{ admin_rbd_cmd }} mirror pool peer add {{ ceph_rbd_mirror_pool }} {{ ceph_rbd_mirror_remote_user }}@{{ ceph_rbd_mirror_remote_cluster }} --remote-mon-host {{ ','.join(mon_ip_list) }} --remote-key-file {{ tmp_file.path }}"
delegate_to: "{{ groups.get(mon_group_name | default('mons'))[0] }}"
changed_when: false
- - name: rm temporary file
- file:
+ - name: Remove temporary file
+ ansible.builtin.file:
path: "{{ tmp_file.path }}"
state: absent
delegate_to: "{{ groups.get(mon_group_name | default('mons'))[0] }}"
-- name: adopt ceph mon daemons
+- name: Adopt ceph mon daemons
hosts: "{{ mon_group_name|default('mons') }}"
serial: 1
become: true
gather_facts: false
- any_errors_fatal: True
+ any_errors_fatal: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: adopt mon daemon
+ - name: Adopt mon daemon
cephadm_adopt:
name: "mon.{{ ansible_facts['hostname'] }}"
cluster: "{{ cluster }}"
pull: false
firewalld: "{{ true if configure_firewall | bool else false }}"
- - name: reset failed ceph-mon systemd unit
- command: "systemctl reset-failed ceph-mon@{{ ansible_facts['hostname'] }}" # noqa 303
+ - name: Reset failed ceph-mon systemd unit
+ ansible.builtin.command: "systemctl reset-failed ceph-mon@{{ ansible_facts['hostname'] }}" # noqa command-instead-of-module
changed_when: false
failed_when: false
when: containerized_deployment | bool
- - name: remove ceph-mon systemd files
- file:
+ - name: Remove ceph-mon systemd files
+ ansible.builtin.file:
path: "{{ item }}"
state: absent
loop:
- /etc/systemd/system/ceph-mon@.service.d
- /etc/systemd/system/ceph-mon.target
- - name: waiting for the monitor to join the quorum...
- command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph quorum_status --format json"
+ - name: Waiting for the monitor to join the quorum...
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph quorum_status --format json"
changed_when: false
register: ceph_health_raw
until: >
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
-- name: adopt ceph mgr daemons
+- name: Adopt ceph mgr daemons
hosts: "{{ groups['mgrs'] | default(groups['mons']) | default(omit) }}"
serial: 1
become: true
gather_facts: false
- any_errors_fatal: True
+ any_errors_fatal: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: adopt mgr daemon
+ - name: Adopt mgr daemon
cephadm_adopt:
name: "mgr.{{ ansible_facts['hostname'] }}"
cluster: "{{ cluster }}"
pull: false
firewalld: "{{ true if configure_firewall | bool else false }}"
- - name: reset failed ceph-mgr systemd unit
- command: "systemctl reset-failed ceph-mgr@{{ ansible_facts['hostname'] }}" # noqa 303
+ - name: Reset failed ceph-mgr systemd unit
+ ansible.builtin.command: "systemctl reset-failed ceph-mgr@{{ ansible_facts['hostname'] }}" # noqa command-instead-of-module
changed_when: false
failed_when: false
when: containerized_deployment | bool
- - name: remove ceph-mgr systemd files
- file:
+ - name: Remove ceph-mgr systemd files
+ ansible.builtin.file:
path: "{{ item }}"
state: absent
loop:
- /etc/systemd/system/ceph-mgr.target
-- name: stop and remove legacy iscsigw daemons
+- name: Stop and remove legacy iscsigw daemons
hosts: "{{ iscsi_gw_group_name|default('iscsigws') }}"
serial: 1
become: true
gather_facts: false
- any_errors_fatal: True
+ any_errors_fatal: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: stop and disable iscsigw systemd services
- service:
+ - name: Stop and disable iscsigw systemd services
+ ansible.builtin.service:
name: '{{ item }}'
state: stopped
enabled: false
- rbd-target-gw
- tcmu-runner
- - name: reset failed iscsigw systemd units
- command: 'systemctl reset-failed {{ item }}' # noqa 303
+ - name: Reset failed iscsigw systemd units
+ ansible.builtin.command: 'systemctl reset-failed {{ item }}' # noqa command-instead-of-module
changed_when: false
failed_when: false
with_items:
- tcmu-runner
when: containerized_deployment | bool
- - name: remove iscsigw systemd unit files
- file:
+ - name: Remove iscsigw systemd unit files
+ ansible.builtin.file:
path: '/etc/systemd/system/{{ item }}.service'
state: absent
with_items:
when: containerized_deployment | bool
-- name: redeploy iscsigw daemons
+- name: Redeploy iscsigw daemons
hosts: "{{ iscsi_gw_group_name|default('iscsigws') }}"
become: true
gather_facts: false
- any_errors_fatal: True
+ any_errors_fatal: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: update the placement of iscsigw hosts
- command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply iscsi {{ iscsi_pool_name | default('rbd') }} {{ api_user | default('admin') }} {{ api_password | default('admin') }} {{ trusted_ip_list | default('192.168.122.1') }} --placement='{{ groups.get(iscsi_gw_group_name, []) | length }} label:{{ iscsi_gw_group_name }}'"
+ - name: Update the placement of iscsigw hosts
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply iscsi {{ iscsi_pool_name | default('rbd') }} {{ api_user | default('admin') }} {{ api_password | default('admin') }} {{ trusted_ip_list | default('192.168.122.1') }} --placement='{{ groups.get(iscsi_gw_group_name, []) | length }} label:{{ iscsi_gw_group_name }}'"
run_once: true
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
-- name: set osd flags
+- name: Set osd flags
hosts: "{{ osd_group_name|default('osds') }}"
become: true
gather_facts: false
- any_errors_fatal: True
+ any_errors_fatal: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: get pool list
- command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json"
+ - name: Get pool list
+ ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json"
register: pool_list
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
check_mode: false
- - name: get balancer module status
- command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json"
+ - name: Get balancer module status
+ ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json"
register: balancer_status_adopt
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
check_mode: false
- - name: set_fact pools_pgautoscaler_mode
- set_fact:
+ - name: Set_fact pools_pgautoscaler_mode
+ ansible.builtin.set_fact:
pools_pgautoscaler_mode: "{{ pools_pgautoscaler_mode | default([]) | union([{'name': item.pool_name, 'mode': item.pg_autoscale_mode}]) }}"
run_once: true
with_items: "{{ pool_list.stdout | default('{}') | from_json }}"
- - name: disable balancer
- command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off"
+ - name: Disable balancer
+ ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off"
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
when: (balancer_status_adopt.stdout | from_json)['active'] | bool
- - name: disable pg autoscale on pools
+ - name: Disable pg autoscale on pools
ceph_pool:
name: "{{ item.name }}"
cluster: "{{ cluster }}"
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
- - name: set osd flags
+ - name: Set osd flags
ceph_osd_flag:
cluster: "{{ cluster }}"
name: "{{ item }}"
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-- name: adopt ceph osd daemons
+- name: Adopt ceph osd daemons
hosts: "{{ osd_group_name|default('osd') }}"
serial: 1
become: true
gather_facts: false
- any_errors_fatal: True
+ any_errors_fatal: true
tasks:
- - import_role:
+ - name: Import ceph-defaults
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary.yml
when: containerized_deployment | bool
- - name: get osd list
+ - name: Get osd list
ceph_volume:
cluster: "{{ cluster }}"
action: list
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
register: osd_list
- - name: set osd fsid for containerized deployment
- lineinfile:
+ - name: Set osd fsid for containerized deployment
+ ansible.builtin.lineinfile:
path: '/var/lib/ceph/osd/{{ cluster }}-{{ item.key }}/fsid'
line: "{{ (item.value | selectattr('type', 'equalto', 'block') | map(attribute='tags') | first)['ceph.osd_fsid'] }}"
owner: '{{ ceph_uid }}'
group: '{{ ceph_uid }}'
create: true
+ mode: "0644"
with_dict: '{{ osd_list.stdout | from_json }}'
when: containerized_deployment | bool
- - name: set osd type for containerized deployment
- lineinfile:
+ - name: Set osd type for containerized deployment
+ ansible.builtin.lineinfile:
path: '/var/lib/ceph/osd/{{ cluster }}-{{ item }}/type'
line: 'bluestore'
owner: '{{ ceph_uid }}'
group: '{{ ceph_uid }}'
create: true
+ mode: "0644"
loop: '{{ (osd_list.stdout | from_json).keys() | list }}'
when: containerized_deployment | bool
- - name: adopt osd daemon
+ - name: Adopt osd daemon
cephadm_adopt:
name: "osd.{{ item }}"
cluster: "{{ cluster }}"
firewalld: "{{ true if configure_firewall | bool else false }}"
loop: '{{ (osd_list.stdout | from_json).keys() | list }}'
- - name: remove ceph-osd systemd and ceph-osd-run.sh files
- file:
+ - name: Remove ceph-osd systemd and ceph-osd-run.sh files
+ ansible.builtin.file:
path: "{{ item }}"
state: absent
loop:
- /etc/systemd/system/ceph-osd.target
- "{{ ceph_osd_docker_run_script_path | default('/usr/share') }}/ceph-osd-run.sh"
- - name: remove osd directory
- file:
+ - name: Remove osd directory
+ ansible.builtin.file:
path: "/var/lib/ceph/osd/{{ cluster }}-{{ item }}"
state: absent
loop: '{{ (osd_list.stdout | from_json).keys() | list }}'
- - name: remove any legacy directories in /var/lib/ceph/mon (workaround)
- file:
+ - name: Remove any legacy directories in /var/lib/ceph/mon (workaround)
+ ansible.builtin.file:
path: "/var/lib/ceph/mon/{{ cluster }}-{{ ansible_facts['hostname'] }}"
state: absent
- - name: waiting for clean pgs...
- command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph pg stat --format json"
+ - name: Waiting for clean pgs...
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph pg stat --format json"
changed_when: false
register: ceph_health_post
until: >
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
-- name: unset osd flags
+- name: Unset osd flags
hosts: "{{ osd_group_name|default('osds') }}"
become: true
gather_facts: false
- any_errors_fatal: True
+ any_errors_fatal: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: re-enable pg autoscale on pools
+ - name: Re-enable pg autoscale on pools
ceph_pool:
name: "{{ item.name }}"
cluster: "{{ cluster }}"
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
- - name: unset osd flags
+ - name: Unset osd flags
ceph_osd_flag:
cluster: "{{ cluster }}"
name: "{{ item }}"
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
- - name: re-enable balancer
- command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on"
+ - name: Re-enable balancer
+ ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on"
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
when: (balancer_status_adopt.stdout | from_json)['active'] | bool
-- name: redeploy mds daemons
+- name: Redeploy mds daemons
hosts: "{{ mds_group_name|default('mdss') }}"
become: true
gather_facts: false
- any_errors_fatal: True
+ any_errors_fatal: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: update the placement of metadata hosts
- command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply mds {{ cephfs }} --placement='{{ groups.get(mds_group_name, []) | length }} label:{{ mds_group_name }}'"
+ - name: Update the placement of metadata hosts
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply mds {{ cephfs }} --placement='{{ groups.get(mds_group_name, []) | length }} label:{{ mds_group_name }}'"
run_once: true
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
-- name: stop and remove legacy ceph mds daemons
+- name: Stop and remove legacy ceph mds daemons
hosts: "{{ mds_group_name|default('mdss') }}"
serial: 1
become: true
gather_facts: false
- any_errors_fatal: True
+ any_errors_fatal: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: stop and disable ceph-mds systemd service
- service:
+ - name: Stop and disable ceph-mds systemd service
+ ansible.builtin.service:
name: "ceph-mds@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: false
failed_when: false
- - name: stop and disable ceph-mds systemd target
- service:
+ - name: Stop and disable ceph-mds systemd target
+ ansible.builtin.service:
name: ceph-mds.target
state: stopped
enabled: false
failed_when: false
- - name: reset failed ceph-mds systemd unit
- command: "systemctl reset-failed ceph-mds@{{ ansible_facts['hostname'] }}" # noqa 303
+ - name: Reset failed ceph-mds systemd unit
+ ansible.builtin.command: "systemctl reset-failed ceph-mds@{{ ansible_facts['hostname'] }}" # noqa command-instead-of-module
changed_when: false
failed_when: false
when: containerized_deployment | bool
- - name: remove ceph-mds systemd files
- file:
+ - name: Remove ceph-mds systemd files
+ ansible.builtin.file:
path: "{{ item }}"
state: absent
loop:
- /etc/systemd/system/ceph-mds@.service.d
- /etc/systemd/system/ceph-mds.target
- - name: remove legacy ceph mds data
- file:
+ - name: Remove legacy ceph mds data
+ ansible.builtin.file:
path: "/var/lib/ceph/mds/{{ cluster }}-{{ ansible_facts['hostname'] }}"
state: absent
-- name: redeploy rgw daemons
+- name: Redeploy rgw daemons
hosts: "{{ rgw_group_name | default('rgws') }}"
become: true
gather_facts: false
- any_errors_fatal: True
+ any_errors_fatal: true
tasks:
- - import_role:
+ - name: Import ceph-defaults
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: set_radosgw_address.yml
- - name: import rgw ssl certificate into kv store
+ - name: Import rgw ssl certificate into kv store
when: radosgw_frontend_ssl_certificate | length > 0
block:
- - name: slurp rgw ssl certificate
- slurp:
+ - name: Slurp rgw ssl certificate
+ ansible.builtin.slurp:
src: "{{ radosgw_frontend_ssl_certificate }}"
register: rgw_ssl_cert
- - name: store ssl certificate in kv store
- command: >
+ - name: Store ssl certificate in kv store
+ ansible.builtin.command: >
{{ container_binary }} run --rm -i -v /etc/ceph:/etc/ceph:z --entrypoint=ceph {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} --cluster {{ cluster }}
config-key set rgw/cert/rgw.{{ ansible_facts['hostname'] }} -i -
args:
stdin: "{{ rgw_ssl_cert.content | b64decode }}"
- stdin_add_newline: no
+ stdin_add_newline: false
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- - name: set_fact rgw_subnet
- set_fact:
+ - name: Set_fact rgw_subnet
+ ansible.builtin.set_fact:
rgw_subnet: "--networks {{ radosgw_address_block }}"
when:
- radosgw_address_block is defined
- radosgw_address_block != 'subnet'
- - name: update the placement of radosgw hosts
- command: >
+ - name: Update the placement of radosgw hosts
+ ansible.builtin.command: >
{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} --
ceph orch apply rgw {{ ansible_facts['hostname'] }}
--placement='count-per-host:{{ radosgw_num_instances }} {{ ansible_facts['nodename'] }}'
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
-- name: stop and remove legacy ceph rgw daemons
+- name: Stop and remove legacy ceph rgw daemons
hosts: "{{ rgw_group_name|default('rgws') }}"
serial: 1
become: true
gather_facts: false
- any_errors_fatal: True
+ any_errors_fatal: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: set_radosgw_address.yml
- - name: stop and disable ceph-radosgw systemd service
- service:
+ - name: Stop and disable ceph-radosgw systemd service
+ ansible.builtin.service:
name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: stopped
enabled: false
failed_when: false
loop: '{{ rgw_instances }}'
- - name: stop and disable ceph-radosgw systemd target
- service:
+ - name: Stop and disable ceph-radosgw systemd target
+ ansible.builtin.service:
name: ceph-radosgw.target
state: stopped
enabled: false
failed_when: false
- - name: reset failed ceph-radosgw systemd unit
- command: "systemctl reset-failed ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" # noqa 303
+ - name: Reset failed ceph-radosgw systemd unit
+ ansible.builtin.command: "systemctl reset-failed ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" # noqa command-instead-of-module
changed_when: false
failed_when: false
loop: '{{ rgw_instances }}'
when: containerized_deployment | bool
- - name: remove ceph-radosgw systemd files
- file:
+ - name: Remove ceph-radosgw systemd files
+ ansible.builtin.file:
path: "{{ item }}"
state: absent
loop:
- /etc/systemd/system/ceph-radosgw@.service.d
- /etc/systemd/system/ceph-radosgw.target
- - name: remove legacy ceph radosgw data
- file:
+ - name: Remove legacy ceph radosgw data
+ ansible.builtin.file:
path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: absent
loop: '{{ rgw_instances }}'
- - name: remove legacy ceph radosgw directory
- file:
+ - name: Remove legacy ceph radosgw directory
+ ansible.builtin.file:
path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}"
state: absent
-- name: stop and remove legacy ceph nfs daemons
+- name: Stop and remove legacy ceph nfs daemons
hosts: "{{ nfs_group_name|default('nfss') }}"
tags: 'ceph_nfs_adopt'
serial: 1
become: true
gather_facts: false
- any_errors_fatal: True
+ any_errors_fatal: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-nfs role
+ ansible.builtin.import_role:
name: ceph-nfs
tasks_from: create_rgw_nfs_user.yml
- - name: enable ceph mgr nfs module
+ - name: Enable ceph mgr nfs module
ceph_mgr_module:
name: "nfs"
cluster: "{{ cluster }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
- - name: stop and disable ceph-nfs systemd service
- service:
+ - name: Stop and disable ceph-nfs systemd service
+ ansible.builtin.service:
name: "ceph-nfs@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: false
failed_when: false
- - name: reset failed ceph-nfs systemd unit
- command: "systemctl reset-failed ceph-nfs@{{ ansible_facts['hostname'] }}" # noqa 303
+ - name: Reset failed ceph-nfs systemd unit
+ ansible.builtin.command: "systemctl reset-failed ceph-nfs@{{ ansible_facts['hostname'] }}" # noqa command-instead-of-module
changed_when: false
failed_when: false
when: containerized_deployment | bool
- - name: remove ceph-nfs systemd unit files
- file:
+ - name: Remove ceph-nfs systemd unit files
+ ansible.builtin.file:
path: "{{ item }}"
state: absent
loop:
- /etc/systemd/system/ceph-nfs@.service
- /etc/systemd/system/ceph-nfs@.service.d
- - name: remove legacy ceph radosgw directory
- file:
+ - name: Remove legacy ceph radosgw directory
+ ansible.builtin.file:
path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}"
state: absent
- - name: create nfs ganesha cluster
- command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph nfs cluster create {{ ansible_facts['hostname'] }} {{ ansible_facts['hostname'] }}"
+ - name: Create nfs ganesha cluster
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph nfs cluster create {{ ansible_facts['hostname'] }} {{ ansible_facts['hostname'] }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- - name: create cephfs export
- command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph nfs export create cephfs {{ cephfs }} {{ ansible_facts['hostname'] }} {{ ceph_nfs_ceph_pseudo_path }} --squash {{ ceph_nfs_ceph_squash }}"
+ - name: Create cephfs export
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph nfs export create cephfs {{ cephfs }} {{ ansible_facts['hostname'] }} {{ ceph_nfs_ceph_pseudo_path }} --squash {{ ceph_nfs_ceph_squash }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
when: nfs_file_gw | bool
- - name: create rgw export
- command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph nfs export create rgw --cluster-id {{ ansible_facts['hostname'] }} --pseudo-path {{ ceph_nfs_rgw_pseudo_path }} --user-id {{ ceph_nfs_rgw_user }} --squash {{ ceph_nfs_rgw_squash }}"
+ - name: Create rgw export
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph nfs export create rgw --cluster-id {{ ansible_facts['hostname'] }} --pseudo-path {{ ceph_nfs_rgw_pseudo_path }} --user-id {{ ceph_nfs_rgw_user }} --squash {{ ceph_nfs_rgw_squash }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
when: nfs_obj_gw | bool
-- name: redeploy rbd-mirror daemons
+- name: Redeploy rbd-mirror daemons
hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"
become: true
gather_facts: false
- any_errors_fatal: True
+ any_errors_fatal: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: update the placement of rbd-mirror hosts
- command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply rbd-mirror --placement='{{ groups.get(rbdmirror_group_name, []) | length }} label:{{ rbdmirror_group_name }}'"
+ - name: Update the placement of rbd-mirror hosts
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply rbd-mirror --placement='{{ groups.get(rbdmirror_group_name, []) | length }} label:{{ rbdmirror_group_name }}'"
run_once: true
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
-- name: stop and remove legacy rbd-mirror daemons
+- name: Stop and remove legacy rbd-mirror daemons
hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"
serial: 1
become: true
gather_facts: false
- any_errors_fatal: True
+ any_errors_fatal: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: stop and disable rbd-mirror systemd service
- service:
+ - name: Stop and disable rbd-mirror systemd service
+ ansible.builtin.service:
name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
state: stopped
enabled: false
failed_when: false
- - name: stop and disable rbd-mirror systemd target
- service:
+ - name: Stop and disable rbd-mirror systemd target
+ ansible.builtin.service:
name: ceph-rbd-mirror.target
state: stopped
enabled: false
failed_when: false
- - name: reset failed rbd-mirror systemd unit
- command: "systemctl reset-failed ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}" # noqa 303
+ - name: Reset failed rbd-mirror systemd unit
+ ansible.builtin.command: "systemctl reset-failed ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}" # noqa command-instead-of-module
changed_when: false
failed_when: false
when: containerized_deployment | bool
- - name: remove rbd-mirror systemd files
- file:
+ - name: Remove rbd-mirror systemd files
+ ansible.builtin.file:
path: "{{ item }}"
state: absent
loop:
- /etc/systemd/system/ceph-rbd-mirror.target
-- name: redeploy ceph-crash daemons
+- name: Redeploy ceph-crash daemons
hosts:
- "{{ mon_group_name|default('mons') }}"
- "{{ osd_group_name|default('osds') }}"
- "{{ rbdmirror_group_name|default('rbdmirrors') }}"
become: true
gather_facts: false
- any_errors_fatal: True
+ any_errors_fatal: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: stop and disable ceph-crash systemd service
- service:
+ - name: Stop and disable ceph-crash systemd service
+ ansible.builtin.service:
name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}"
state: stopped
enabled: false
failed_when: false
- - name: remove ceph-crash systemd unit file
- file:
+ - name: Remove ceph-crash systemd unit file
+ ansible.builtin.file:
path: /etc/systemd/system/ceph-crash@.service
state: absent
- - name: update the placement of ceph-crash hosts
- command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply crash --placement='label:ceph'"
+ - name: Update the placement of ceph-crash hosts
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply crash --placement='label:ceph'"
run_once: true
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
-- name: redeploy alertmanager/grafana/prometheus daemons
+- name: Redeploy alertmanager/grafana/prometheus daemons
hosts: "{{ monitoring_group_name|default('monitoring') }}"
serial: 1
become: true
gather_facts: false
- any_errors_fatal: True
+ any_errors_fatal: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: check whether a ceph config file is present
- stat:
+ - name: Check whether a ceph config file is present
+ ansible.builtin.stat:
path: "/etc/ceph/{{ cluster }}.conf"
register: ceph_config
- - name: ensure /etc/ceph is present
- file:
+ - name: Ensure /etc/ceph is present
+ ansible.builtin.file:
path: /etc/ceph
state: directory
owner: "{{ ceph_uid | int if containerized_deployment | bool else 'ceph' }}"
group: "{{ ceph_uid | int if containerized_deployment | bool else 'ceph' }}"
mode: "{{ ceph_directories_mode }}"
- - name: write a ceph.conf with minimal config
- copy:
+ - name: Write a ceph.conf with minimal config
+ ansible.builtin.copy:
dest: "/etc/ceph/{{ cluster }}.conf"
content: "{{ minimal_config.stdout }}"
owner: "{{ ceph_uid | int if containerized_deployment | bool else 'ceph' }}"
mode: "{{ ceph_keyring_permissions }}"
when: not ceph_config.stat.exists | bool
- - name: with dashboard enabled
+ - name: With dashboard enabled
when: dashboard_enabled | bool
block:
- - name: ensure alertmanager/prometheus data directories are present
- file:
+ - name: Ensure alertmanager/prometheus data directories are present
+ ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: "{{ prometheus_user_id }}"
group: "{{ prometheus_user_id }}"
+ mode: "0755"
with_items:
- - "{{ alertmanager_data_dir }}"
- - "{{ prometheus_data_dir }}"
+ - "{{ alertmanager_data_dir }}"
+ - "{{ prometheus_data_dir }}"
# (workaround) cephadm adopt alertmanager only stops prometheus-alertmanager systemd service
- - name: stop and disable alertmanager systemd unit
- service:
+ - name: Stop and disable alertmanager systemd unit
+ ansible.builtin.service:
name: alertmanager
state: stopped
enabled: false
failed_when: false
# (workaround) cephadm adopt alertmanager only uses /etc/prometheus/alertmanager.yml
- - name: create alertmanager config symlink
- file:
+ - name: Create alertmanager config symlink
+ ansible.builtin.file:
path: /etc/prometheus/alertmanager.yml
src: '{{ alertmanager_conf_dir }}/alertmanager.yml'
state: link
# (workaround) cephadm adopt alertmanager only uses /var/lib/prometheus/alertmanager/
- - name: create alertmanager data symlink
- file:
+ - name: Create alertmanager data symlink
+ ansible.builtin.file:
path: '{{ prometheus_data_dir }}/alertmanager'
src: '{{ alertmanager_data_dir }}'
state: link
- - name: adopt alertmanager daemon
+ - name: Adopt alertmanager daemon
cephadm_adopt:
name: "alertmanager.{{ ansible_facts['hostname'] }}"
cluster: "{{ cluster }}"
pull: false
firewalld: "{{ true if configure_firewall | bool else false }}"
- - name: remove alertmanager systemd unit file
- file:
+ - name: Remove alertmanager systemd unit file
+ ansible.builtin.file:
path: /etc/systemd/system/alertmanager.service
state: absent
- - name: remove the legacy alertmanager data
- file:
+ - name: Remove the legacy alertmanager data
+ ansible.builtin.file:
path: '{{ alertmanager_data_dir }}'
state: absent
- - name: stop and disable prometheus systemd unit
- service:
+ - name: Stop and disable prometheus systemd unit
+ ansible.builtin.service:
name: prometheus
state: stopped
enabled: false
failed_when: false
- - name: remove alertmanager data symlink
- file:
+ - name: Remove alertmanager data symlink
+ ansible.builtin.file:
path: '{{ prometheus_data_dir }}/alertmanager'
state: absent
# (workaround) cephadm adopt prometheus only uses /var/lib/prometheus/metrics/
- - name: tmp copy the prometheus data
- copy:
+ - name: Make a temporary copy of the prometheus data
+ ansible.builtin.copy:
src: '{{ prometheus_data_dir }}/'
dest: /var/lib/prom_metrics
owner: 65534
group: 65534
+ mode: preserve
remote_src: true
# (workaround) cephadm adopt prometheus only uses /var/lib/prometheus/metrics/
- - name: restore the prometheus data
- copy:
+ - name: Restore the prometheus data
+ ansible.builtin.copy:
src: /var/lib/prom_metrics/
dest: /var/lib/prometheus/metrics
owner: 65534
group: 65534
+ mode: preserve
remote_src: true
- - name: remove the tmp prometheus data copy
- file:
+ - name: Remove the temporary prometheus data copy
+ ansible.builtin.file:
path: /var/lib/prom_metrics
state: absent
- - name: adopt prometheus daemon
+ - name: Adopt prometheus daemon
cephadm_adopt:
name: "prometheus.{{ ansible_facts['hostname'] }}"
cluster: "{{ cluster }}"
pull: false
firewalld: "{{ true if configure_firewall | bool else false }}"
- - name: remove prometheus systemd unit file
- file:
+ - name: Remove prometheus systemd unit file
+ ansible.builtin.file:
path: /etc/systemd/system/prometheus.service
state: absent
- - name: remove the legacy prometheus data
- file:
+ - name: Remove the legacy prometheus data
+ ansible.builtin.file:
path: '{{ prometheus_data_dir }}'
state: absent
# (workaround) cephadm adopt grafana only stops grafana systemd service
- - name: stop and disable grafana systemd unit
- service:
+ - name: Stop and disable grafana systemd unit
+ ansible.builtin.service:
name: grafana-server
state: stopped
enabled: false
failed_when: false
- - name: adopt grafana daemon
+ - name: Adopt grafana daemon
cephadm_adopt:
name: "grafana.{{ ansible_facts['hostname'] }}"
cluster: "{{ cluster }}"
pull: false
firewalld: "{{ true if configure_firewall | bool else false }}"
- - name: remove grafana systemd unit file
- file:
+ - name: Remove grafana systemd unit file
+ ansible.builtin.file:
path: /etc/systemd/system/grafana-server.service
state: absent
- - name: remove the legacy grafana data
- file:
+ - name: Remove the legacy grafana data
+ ansible.builtin.file:
path: /var/lib/grafana
state: absent
-- name: redeploy node-exporter daemons
+- name: Redeploy node-exporter daemons
hosts:
- "{{ mon_group_name|default('mons') }}"
- "{{ osd_group_name|default('osds') }}"
- "{{ monitoring_group_name|default('monitoring') }}"
become: true
gather_facts: false
- any_errors_fatal: True
+ any_errors_fatal: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: with dashboard enabled
+ - name: With dashboard enabled
when: dashboard_enabled | bool
block:
- - name: stop and disable node-exporter systemd service
- service:
+ - name: Stop and disable node-exporter systemd service
+ ansible.builtin.service:
name: node_exporter
state: stopped
enabled: false
failed_when: false
- - name: remove node_exporter systemd unit file
- file:
+ - name: Remove node_exporter systemd unit file
+ ansible.builtin.file:
path: /etc/systemd/system/node_exporter.service
state: absent
- - name: update the placement of node-exporter hosts
- command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply node-exporter --placement='*'"
+ - name: Update the placement of node-exporter hosts
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply node-exporter --placement='*'"
run_once: true
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
-- name: adjust placement daemons
+- name: Adjust placement daemons
hosts: "{{ mon_group_name|default('mons') }}[0]"
become: true
gather_facts: false
- any_errors_fatal: True
+ any_errors_fatal: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: update the placement of monitor hosts
- command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply mon --placement='{{ groups.get(mon_group_name, []) | length }} label:{{ mon_group_name }}'"
+ - name: Update the placement of monitor hosts
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply mon --placement='{{ groups.get(mon_group_name, []) | length }} label:{{ mon_group_name }}'"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- - name: set_fact mgr_placement
- set_fact:
+ - name: Set_fact mgr_placement
+ ansible.builtin.set_fact:
mgr_placement_count: "{{ groups.get(mgr_group_name, []) | length if groups.get(mgr_group_name, []) | length > 0 else groups.get(mon_group_name, []) | length }}"
- - name: set_fact mgr_placement_label
- set_fact:
+ - name: Set_fact mgr_placement_label
+ ansible.builtin.set_fact:
mgr_placement_label: "{{ mgr_group_name if groups.get(mgr_group_name, []) | length > 0 else mon_group_name }}"
- - name: update the placement of manager hosts
- command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply mgr --placement='{{ mgr_placement_count }} label:{{ mgr_placement_label }}'"
+ - name: Update the placement of manager hosts
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply mgr --placement='{{ mgr_placement_count }} label:{{ mgr_placement_label }}'"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- - name: with dashboard enabled
+ - name: With dashboard enabled
when: dashboard_enabled | bool
block:
- - name: update the placement of alertmanager hosts
- command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply alertmanager --placement='{{ groups.get(monitoring_group_name, []) | length }} label:{{ monitoring_group_name }}'"
+ - name: Update the placement of alertmanager hosts
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply alertmanager --placement='{{ groups.get(monitoring_group_name, []) | length }} label:{{ monitoring_group_name }}'"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- - name: update the placement of grafana hosts
- command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply grafana --placement='{{ groups.get(monitoring_group_name, []) | length }} label:{{ monitoring_group_name }}'"
+ - name: Update the placement of grafana hosts
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply grafana --placement='{{ groups.get(monitoring_group_name, []) | length }} label:{{ monitoring_group_name }}'"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- - name: update the placement of prometheus hosts
- command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply prometheus --placement='{{ groups.get(monitoring_group_name, []) | length }} label:{{ monitoring_group_name }}'"
+ - name: Update the placement of prometheus hosts
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch apply prometheus --placement='{{ groups.get(monitoring_group_name, []) | length }} label:{{ monitoring_group_name }}'"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
-- name: show ceph orchestrator status
+- name: Show ceph orchestrator status
hosts: "{{ mon_group_name|default('mons') }}[0]"
become: true
gather_facts: false
- any_errors_fatal: True
+ any_errors_fatal: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: show ceph orchestrator services
- command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch ls --refresh"
+ - name: Show ceph orchestrator services
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch ls --refresh"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- - name: show ceph orchestrator daemons
- command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch ps --refresh"
+ - name: Show ceph orchestrator daemons
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -k /etc/ceph/{{ cluster }}.client.admin.keyring --fsid {{ fsid }} -- ceph orch ps --refresh"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- - name: inform users about cephadm
- debug:
+ - name: Inform users about cephadm
+ ansible.builtin.debug:
msg: |
This Ceph cluster is now managed by cephadm. Any new changes to the
cluster need to be achieved by using the cephadm CLI and you don't
---
-- name: gather facts and prepare system for cephadm
+- name: Gather facts and prepare system for cephadm
hosts:
- "{{ mon_group_name|default('mons') }}"
- "{{ osd_group_name|default('osds') }}"
vars:
delegate_facts_host: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: validate if monitor group doesn't exist or empty
- fail:
+ - name: Validate if monitor group doesn't exist or is empty
+ ansible.builtin.fail:
msg: "you must add a [mons] group and add at least one node."
run_once: true
when: groups[mon_group_name] is undefined or groups[mon_group_name] | length == 0
- - name: validate if manager group doesn't exist or empty
- fail:
+ - name: Validate if manager group doesn't exist or is empty
+ ansible.builtin.fail:
msg: "you must add a [mgrs] group and add at least one node."
run_once: true
when: groups[mgr_group_name] is undefined or groups[mgr_group_name] | length == 0
- - name: validate monitor network configuration
- fail:
+ - name: Validate monitor network configuration
+ ansible.builtin.fail:
msg: "Either monitor_address, monitor_address_block or monitor_interface must be provided"
when:
- mon_group_name in group_names
- monitor_address_block == 'subnet'
- monitor_interface == 'interface'
- - name: validate dashboard configuration
+ - name: Validate dashboard configuration
when: dashboard_enabled | bool
run_once: true
block:
- - name: fail if [monitoring] group doesn't exist or empty
- fail:
+ - name: Fail if [monitoring] group doesn't exist or is empty
+ ansible.builtin.fail:
msg: "you must add a [monitoring] group and add at least one node."
when: groups[monitoring_group_name] is undefined or groups[monitoring_group_name] | length == 0
- - name: fail when dashboard_admin_password is not set
- fail:
+ - name: Fail when dashboard_admin_password is not set
+ ansible.builtin.fail:
msg: "you must set dashboard_admin_password."
when: dashboard_admin_password is undefined
- - name: validate container registry credentials
- fail:
+ - name: Validate container registry credentials
+ ansible.builtin.fail:
msg: 'ceph_docker_registry_username and/or ceph_docker_registry_password variables need to be set'
when:
- ceph_docker_registry_auth | bool
- (ceph_docker_registry_username is not defined or ceph_docker_registry_password is not defined) or
(ceph_docker_registry_username | length == 0 or ceph_docker_registry_password | length == 0)
- - name: gather facts
- setup:
+ - name: Gather facts
+ ansible.builtin.setup:
gather_subset:
- 'all'
- '!facter'
- '!ohai'
when: not delegate_facts_host | bool
- - name: gather and delegate facts
- setup:
+ - name: Gather and delegate facts
+ ansible.builtin.setup:
gather_subset:
- 'all'
- '!facter'
run_once: true
when: delegate_facts_host | bool
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary.yml
- - name: check if it is atomic host
- stat:
+ - name: Check if it is atomic host
+ ansible.builtin.stat:
path: /run/ostree-booted
register: stat_ostree
- - name: set_fact is_atomic
- set_fact:
+ - name: Set_fact is_atomic
+ ansible.builtin.set_fact:
is_atomic: "{{ stat_ostree.stat.exists }}"
- - import_role:
+ - name: Import ceph-container-engine role
+ ansible.builtin.import_role:
name: ceph-container-engine
- - import_role:
+ - name: Import ceph-container-common role
+ ansible.builtin.import_role:
name: ceph-container-common
tasks_from: registry.yml
when: ceph_docker_registry_auth | bool
- - name: configure repository for installing cephadm
+ - name: Configure repository for installing cephadm
vars:
ceph_origin: repository
ceph_repository: community
block:
- - name: validate repository variables
- import_role:
+ - name: Validate repository variables
+ ansible.builtin.import_role:
name: ceph-validate
tasks_from: check_repository.yml
- - name: configure repository
- import_role:
+ - name: Configure repository
+ ansible.builtin.import_role:
name: ceph-common
tasks_from: "configure_repository.yml"
- - name: install cephadm requirements
- package:
+ - name: Install cephadm requirements
+ ansible.builtin.package:
name: ['python3', 'lvm2']
register: result
until: result is succeeded
- - name: install cephadm
- package:
+ - name: Install cephadm
+ ansible.builtin.package:
name: cephadm
register: result
until: result is succeeded
- - name: set_fact cephadm_cmd
- set_fact:
+ - name: Set_fact cephadm_cmd
+ ansible.builtin.set_fact:
cephadm_cmd: "cephadm {{ '--docker' if container_binary == 'docker' else '' }}"
-- name: bootstrap the cluster
+- name: Bootstrap the cluster
hosts: "{{ mon_group_name|default('mons') }}[0]"
become: true
gather_facts: false
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: set_monitor_address.yml
- - name: create /etc/ceph directory
- file:
+ - name: Create /etc/ceph directory
+ ansible.builtin.file:
path: /etc/ceph
state: directory
+ mode: "0755"
- - name: bootstrap the new cluster
+ - name: Bootstrap the new cluster
cephadm_bootstrap:
mon_ip: "{{ _current_monitor_address }}"
image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
ssh_user: "{{ cephadm_ssh_user | default('root') }}"
ssh_config: "{{ cephadm_ssh_config | default(omit) }}"
- - name: set default container image in ceph configuration
- command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set global container_image {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+ - name: Set default container image in ceph configuration
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set global container_image {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- - name: set container image base in ceph configuration
- command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_base {{ ceph_docker_registry }}/{{ ceph_docker_image }}"
+ - name: Set container image base in ceph configuration
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_base {{ ceph_docker_registry }}/{{ ceph_docker_image }}"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- - name: set dashboard container image in ceph mgr configuration
+ - name: Set dashboard container image in ceph mgr configuration
when: dashboard_enabled | bool
block:
- - name: set alertmanager container image in ceph configuration
- command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_alertmanager {{ alertmanager_container_image }}"
+ - name: Set alertmanager container image in ceph configuration
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_alertmanager {{ alertmanager_container_image }}"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- - name: set grafana container image in ceph configuration
- command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_grafana {{ grafana_container_image }}"
+ - name: Set grafana container image in ceph configuration
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_grafana {{ grafana_container_image }}"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- - name: set node-exporter container image in ceph configuration
- command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_node_exporter {{ node_exporter_container_image }}"
+ - name: Set node-exporter container image in ceph configuration
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_node_exporter {{ node_exporter_container_image }}"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- - name: set prometheus container image in ceph configuration
- command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_prometheus {{ prometheus_container_image }}"
+ - name: Set prometheus container image in ceph configuration
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} config set mgr mgr/cephadm/container_image_prometheus {{ prometheus_container_image }}"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
-- name: add the other nodes
+- name: Add the other nodes
hosts:
- "{{ mon_group_name|default('mons') }}"
- "{{ osd_group_name|default('osds') }}"
become: true
gather_facts: false
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: get the cephadm ssh pub key
- command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} cephadm get-pub-key"
+ - name: Get the cephadm ssh pub key
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} cephadm get-pub-key"
changed_when: false
run_once: true
register: cephadm_pubpkey
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- - name: allow cephadm key for {{ cephadm_ssh_user | default('root') }} account
- authorized_key:
+ - name: Allow cephadm key
+ ansible.posix.authorized_key:
user: "{{ cephadm_ssh_user | default('root') }}"
key: '{{ cephadm_pubpkey.stdout }}'
- - name: run cephadm prepare-host
- command: cephadm prepare-host
+ - name: Run cephadm prepare-host
+ ansible.builtin.command: cephadm prepare-host
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- - name: manage nodes with cephadm - ipv4
- command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }} {{ group_names | join(' ') }} {{ '_admin' if mon_group_name | default('mons') in group_names else '' }}"
+ - name: Manage nodes with cephadm - ipv4
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }} {{ group_names | join(' ') }} {{ '_admin' if mon_group_name | default('mons') in group_names else '' }}"
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
when: ip_version == 'ipv4'
- - name: manage nodes with cephadm - ipv6
- command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ansible.utils.ipwrap }} {{ group_names | join(' ') }} {{ '_admin' if mon_group_name | default('mons') in group_names else '' }}"
+ - name: Manage nodes with cephadm - ipv6
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ansible.utils.ipwrap }} {{ group_names | join(' ') }} {{ '_admin' if mon_group_name | default('mons') in group_names else '' }}"
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
when: ip_version == 'ipv6'
- - name: add ceph label for core component
- command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host label add {{ ansible_facts['hostname'] }} ceph"
+ - name: Add ceph label for core component
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host label add {{ ansible_facts['hostname'] }} ceph"
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
when: inventory_hostname in groups.get(mon_group_name, []) or
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
-- name: adjust service placement
+- name: Adjust service placement
hosts: "{{ mon_group_name|default('mons') }}[0]"
become: true
gather_facts: false
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: update the placement of monitor hosts
- command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply mon --placement='label:{{ mon_group_name }}'"
+ - name: Update the placement of monitor hosts
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply mon --placement='label:{{ mon_group_name }}'"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- - name: waiting for the monitor to join the quorum...
- command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} quorum_status --format json"
+ - name: Waiting for the monitor to join the quorum...
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} quorum_status --format json"
changed_when: false
register: ceph_health_raw
until: (ceph_health_raw.stdout | from_json)["quorum_names"] | length == groups.get(mon_group_name, []) | length
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- - name: update the placement of manager hosts
- command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply mgr --placement='label:{{ mgr_group_name }}'"
+ - name: Update the placement of manager hosts
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply mgr --placement='label:{{ mgr_group_name }}'"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- - name: update the placement of crash hosts
- command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply crash --placement='label:ceph'"
+ - name: Update the placement of crash hosts
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply crash --placement='label:ceph'"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
-- name: adjust monitoring service placement
+- name: Adjust monitoring service placement
hosts: "{{ monitoring_group_name|default('monitoring') }}"
become: true
gather_facts: false
tasks:
- - import_role:
+ - name: Import ceph-defaults
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: with dashboard enabled
+ - name: With dashboard enabled
when: dashboard_enabled | bool
delegate_to: '{{ groups[mon_group_name][0] }}'
run_once: true
block:
- - name: enable the prometheus module
- command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} mgr module enable prometheus"
+ - name: Enable the prometheus module
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} mgr module enable prometheus"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- - name: update the placement of alertmanager hosts
- command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply alertmanager --placement='label:{{ monitoring_group_name }}'"
+ - name: Update the placement of alertmanager hosts
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply alertmanager --placement='label:{{ monitoring_group_name }}'"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- - name: update the placement of grafana hosts
- command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply grafana --placement='label:{{ monitoring_group_name }}'"
+ - name: Update the placement of grafana hosts
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply grafana --placement='label:{{ monitoring_group_name }}'"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- - name: update the placement of prometheus hosts
- command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply prometheus --placement='label:{{ monitoring_group_name }}'"
+ - name: Update the placement of prometheus hosts
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply prometheus --placement='label:{{ monitoring_group_name }}'"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- - name: update the placement of node-exporter hosts
- command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply node-exporter --placement='*'"
+ - name: Update the placement of node-exporter hosts
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch apply node-exporter --placement='*'"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
-- name: print information
+- name: Print information
hosts: "{{ mon_group_name|default('mons') }}[0]"
become: true
gather_facts: false
tasks:
- - import_role:
+ - name: Import ceph-defaults
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: show ceph orchestrator services
- command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch ls --refresh"
+ - name: Show ceph orchestrator services
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch ls --refresh"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- - name: show ceph orchestrator daemons
- command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch ps --refresh"
+ - name: Show ceph orchestrator daemons
+ ansible.builtin.command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch ps --refresh"
changed_when: false
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- - name: inform users about cephadm
- debug:
+ - name: Inform users about cephadm
+ ansible.builtin.debug:
msg: |
This Ceph cluster is now ready to receive more configuration like
 adding OSD or MDS daemons, creating pools or keyrings.
 # It is *not* intended to restart services since we don't want multiple
 # service restarts.
-- hosts:
- - mons
- - osds
- - mdss
- - rgws
- - nfss
- - rbdmirrors
- - clients
- - iscsigws
- - mgrs
- - monitoring
+- name: Pre-requisite and facts gathering
+ hosts:
+ - mons
+ - osds
+ - mdss
+ - rgws
+ - nfss
+ - rbdmirrors
+ - clients
+ - iscsigws
+ - mgrs
+ - monitoring
gather_facts: false
- become: True
+ become: true
any_errors_fatal: true
vars:
- delegate_facts_host: True
+ delegate_facts_host: true
pre_tasks:
- - import_tasks: "{{ playbook_dir }}/../raw_install_python.yml"
+ - name: Import raw_install_python tasks
+ ansible.builtin.import_tasks: "{{ playbook_dir }}/../raw_install_python.yml"
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
# pre-tasks for following import -
- - name: gather facts
- setup:
+ - name: Gather facts
+ ansible.builtin.setup:
gather_subset:
- 'all'
- '!facter'
- '!ohai'
when: not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, [])
- - name: gather and delegate facts
- setup:
+ - name: Gather and delegate facts
+ ansible.builtin.setup:
gather_subset:
- 'all'
- '!facter'
- '!ohai'
delegate_to: "{{ item }}"
- delegate_facts: True
+ delegate_facts: true
with_items: "{{ groups['all'] | difference(groups.get(client_group_name | default('clients'), [])) }}"
run_once: true
when: delegate_facts_host | bool
-- hosts:
+- name: Migrate to podman
+ hosts:
- "{{ mon_group_name | default('mons') }}"
- "{{ osd_group_name | default('osds') }}"
- "{{ mds_group_name | default('mdss') }}"
gather_facts: false
become: true
tasks:
- - name: set_fact docker2podman and container_binary
- set_fact:
- docker2podman: True
+ - name: Set_fact docker2podman and container_binary
+ ansible.builtin.set_fact:
+ docker2podman: true
container_binary: podman
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
- - import_role:
+
+ - name: Import ceph-handler role
+ ansible.builtin.import_role:
name: ceph-handler
- - name: install podman
- package:
+ - name: Install podman
+ ansible.builtin.package:
name: podman
state: present
register: result
tags: with_pkg
when: not is_atomic | bool
- - name: check podman presence # noqa : 305
- shell: command -v podman
+ - name: Check podman presence # noqa command-instead-of-shell
+ ansible.builtin.shell: command -v podman
register: podman_presence
changed_when: false
failed_when: false
- - name: pulling images from docker daemon
+ - name: Pulling images from docker daemon
when: podman_presence.rc == 0
block:
- - name: "pulling {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image from docker daemon"
- command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+ - name: Pulling Ceph container image from docker daemon
+ ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
changed_when: false
register: pull_image
until: pull_image.rc == 0
inventory_hostname in groups.get(iscsi_gw_group_name, []) or
inventory_hostname in groups.get(nfs_group_name, [])
- - name: "pulling alertmanager/grafana/prometheus images from docker daemon"
- command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ item }}"
+ - name: Pulling alertmanager/grafana/prometheus images from docker daemon
+ ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ item }}"
changed_when: false
register: pull_image
until: pull_image.rc == 0
- dashboard_enabled | bool
- inventory_hostname in groups.get(monitoring_group_name, [])
- - name: "pulling {{ node_exporter_container_image }} image from docker daemon"
- command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ node_exporter_container_image }}"
+ - name: Pulling node_exporter image from docker daemon
+ ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull docker-daemon:{{ node_exporter_container_image }}"
changed_when: false
register: pull_image
until: pull_image.rc == 0
delay: 10
when: dashboard_enabled | bool
- - import_role:
+ - name: Import ceph-mon role
+ ansible.builtin.import_role:
name: ceph-mon
tasks_from: systemd.yml
when: inventory_hostname in groups.get(mon_group_name, [])
- - import_role:
+ - name: Import ceph-iscsi-gw role
+ ansible.builtin.import_role:
name: ceph-iscsi-gw
tasks_from: systemd.yml
when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
- - import_role:
+ - name: Import ceph-mds role
+ ansible.builtin.import_role:
name: ceph-mds
tasks_from: systemd.yml
when: inventory_hostname in groups.get(mds_group_name, [])
- - import_role:
+ - name: Import ceph-mgr role
+ ansible.builtin.import_role:
name: ceph-mgr
tasks_from: systemd.yml
when: inventory_hostname in groups.get(mgr_group_name, [])
- - import_role:
+ - name: Import ceph-nfs role
+ ansible.builtin.import_role:
name: ceph-nfs
tasks_from: systemd.yml
when: inventory_hostname in groups.get(nfs_group_name, [])
- - import_role:
+ - name: Import ceph-osd role
+ ansible.builtin.import_role:
name: ceph-osd
tasks_from: systemd.yml
when: inventory_hostname in groups.get(osd_group_name, [])
- - import_role:
+ - name: Import ceph-rbd-mirror role
+ ansible.builtin.import_role:
name: ceph-rbd-mirror
tasks_from: systemd.yml
when: inventory_hostname in groups.get(rbdmirror_group_name, [])
- - import_role:
+ - name: Import ceph-rgw role
+ ansible.builtin.import_role:
name: ceph-rgw
tasks_from: systemd.yml
when: inventory_hostname in groups.get(rgw_group_name, [])
- - import_role:
+ - name: Import ceph-crash role
+ ansible.builtin.import_role:
name: ceph-crash
tasks_from: systemd.yml
when: inventory_hostname in groups.get(mon_group_name, []) or
inventory_hostname in groups.get(mgr_group_name, []) or
inventory_hostname in groups.get(rbdmirror_group_name, [])
- - name: dashboard configuration
+ - name: Dashboard configuration
when: dashboard_enabled | bool
block:
- - import_role:
+ - name: Import ceph-node-exporter role
+ ansible.builtin.import_role:
name: ceph-node-exporter
tasks_from: systemd.yml
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: grafana.yml
when: inventory_hostname in groups.get(monitoring_group_name, [])
- - import_role:
+ - name: Import ceph-grafana role
+ ansible.builtin.import_role:
name: ceph-grafana
tasks_from: systemd.yml
when: inventory_hostname in groups.get(monitoring_group_name, [])
- - import_role:
+ - name: Import ceph-prometheus role
+ ansible.builtin.import_role:
name: ceph-prometheus
tasks_from: systemd.yml
when: inventory_hostname in groups.get(monitoring_group_name, [])
- - name: reload systemd daemon
- systemd:
- daemon_reload: yes
\ No newline at end of file
+ - name: Reload systemd daemon
+ ansible.builtin.systemd:
+ daemon_reload: true
-- hosts:
- - mons
- - osds
- - mdss
- - rgws
- - nfss
- - rbdmirrors
- - clients
- - mgrs
- - iscsigws
+---
+- name: Gather ceph logs
+ hosts:
+ - mons
+ - osds
+ - mdss
+ - rgws
+ - nfss
+ - rbdmirrors
+ - clients
+ - mgrs
+ - iscsigws
gather_facts: false
- become: yes
+ become: true
tasks:
- - name: create a temp directory
- tempfile:
+ - name: Create a temp directory
+ ansible.builtin.tempfile:
state: directory
prefix: ceph_ansible
run_once: true
become: false
delegate_to: localhost
- - name: set_fact lookup_ceph_config - lookup keys, conf and logs
- find:
+ - name: Set_fact lookup_ceph_config - lookup keys, conf and logs
+ ansible.builtin.find:
paths:
- /etc/ceph
- /var/log/ceph
register: ceph_collect
- - name: collect ceph logs, config and keys in "{{ localtempfile.path }}" on the machine running ansible
- fetch:
+ - name: Collect ceph logs, config and keys on the machine running ansible
+ ansible.builtin.fetch:
src: "{{ item.path }}"
dest: "{{ localtempfile.path }}"
- fail_on_missing: no
- flat: no
+ fail_on_missing: false
+ flat: false
with_items: "{{ ceph_collect.files }}"
-- name: creates logical volumes for the bucket index or fs journals on a single device.
+---
+- name: Creates logical volumes for the bucket index or fs journals on a single device.
become: true
hosts: osds
tasks:
- - name: include vars of lv_vars.yaml
- include_vars:
- file: lv_vars.yaml # noqa 505
- failed_when: false
+ - name: Include vars of lv_vars.yaml
+ ansible.builtin.include_vars:
+ file: lv_vars.yaml # noqa missing-import
+ failed_when: false
- # ensure nvme_device is set
- - name: fail if nvme_device is not defined
- fail:
- msg: "nvme_device has not been set by the user"
- when: nvme_device is undefined or nvme_device == 'dummy'
+ # ensure nvme_device is set
+ - name: Fail if nvme_device is not defined
+ ansible.builtin.fail:
+ msg: "nvme_device has not been set by the user"
+ when: nvme_device is undefined or nvme_device == 'dummy'
- # need to check if lvm2 is installed
- - name: install lvm2
- package:
- name: lvm2
- state: present
- register: result
- until: result is succeeded
+ # need to check if lvm2 is installed
+ - name: Install lvm2
+ ansible.builtin.package:
+ name: lvm2
+ state: present
+ register: result
+ until: result is succeeded
- # Make entire nvme device a VG
- - name: add nvme device as lvm pv
- lvg:
- force: yes
- pvs: "{{ nvme_device }}"
- pesize: 4
- state: present
- vg: "{{ nvme_vg_name }}"
+ # Make entire nvme device a VG
+ - name: Add nvme device as lvm pv
+ community.general.lvg:
+ force: true
+ pvs: "{{ nvme_device }}"
+ pesize: 4
+ state: present
+ vg: "{{ nvme_vg_name }}"
- - name: create lvs for fs journals for the bucket index on the nvme device
- lvol:
- lv: "{{ item.journal_name }}"
- vg: "{{ nvme_vg_name }}"
- size: "{{ journal_size }}"
- pvs: "{{ nvme_device }}"
- with_items: "{{ nvme_device_lvs }}"
+ - name: Create lvs for fs journals for the bucket index on the nvme device
+ community.general.lvol:
+ lv: "{{ item.journal_name }}"
+ vg: "{{ nvme_vg_name }}"
+ size: "{{ journal_size }}"
+ pvs: "{{ nvme_device }}"
+ with_items: "{{ nvme_device_lvs }}"
- - name: create lvs for fs journals for hdd devices
- lvol:
- lv: "{{ hdd_journal_prefix }}-{{ item.split('/')[-1] }}"
- vg: "{{ nvme_vg_name }}"
- size: "{{ journal_size }}"
- with_items: "{{ hdd_devices }}"
+ - name: Create lvs for fs journals for hdd devices
+ community.general.lvol:
+ lv: "{{ hdd_journal_prefix }}-{{ item.split('/')[-1] }}"
+ vg: "{{ nvme_vg_name }}"
+ size: "{{ journal_size }}"
+ with_items: "{{ hdd_devices }}"
- - name: create the lv for data portion of the bucket index on the nvme device
- lvol:
- lv: "{{ item.lv_name }}"
- vg: "{{ nvme_vg_name }}"
- size: "{{ item.size }}"
- pvs: "{{ nvme_device }}"
- with_items: "{{ nvme_device_lvs }}"
+ - name: Create the lv for data portion of the bucket index on the nvme device
+ community.general.lvol:
+ lv: "{{ item.lv_name }}"
+ vg: "{{ nvme_vg_name }}"
+ size: "{{ item.size }}"
+ pvs: "{{ nvme_device }}"
+ with_items: "{{ nvme_device_lvs }}"
- # Make sure all hdd devices have a unique volume group
- - name: create vgs for all hdd devices
- lvg:
- force: yes
- pvs: "{{ item }}"
- pesize: 4
- state: present
- vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
- with_items: "{{ hdd_devices }}"
+ # Make sure all hdd devices have a unique volume group
+ - name: Create vgs for all hdd devices
+ community.general.lvg:
+ force: true
+ pvs: "{{ item }}"
+ pesize: 4
+ state: present
+ vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
+ with_items: "{{ hdd_devices }}"
- - name: create lvs for the data portion on hdd devices
- lvol:
- lv: "{{ hdd_lv_prefix }}-{{ item.split('/')[-1] }}"
- vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
- size: "{{ hdd_lv_size }}"
- pvs: "{{ item }}"
- with_items: "{{ hdd_devices }}"
+ - name: Create lvs for the data portion on hdd devices
+ community.general.lvol:
+ lv: "{{ hdd_lv_prefix }}-{{ item.split('/')[-1] }}"
+ vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
+ size: "{{ hdd_lv_size }}"
+ pvs: "{{ item }}"
+ with_items: "{{ hdd_devices }}"
- - name: "write output for osds.yml to {{ logfile_path }}"
- become: false
- copy:
- content: "{{ logfile }}"
- dest: "{{ logfile_path }}"
- delegate_to: localhost
+ - name: Write output for osds.yml
+ become: false
+ ansible.builtin.copy:
+ content: "{{ logfile }}"
+ dest: "{{ logfile_path }}"
+ mode: preserve
+ delegate_to: localhost
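Both this play and the teardown play below load their settings from lv_vars.yaml. A minimal sketch of the variables they expect; the variable names come from the tasks above, while every value here is an illustrative assumption:

    nvme_device: /dev/nvme0n1            # device holding the journals and the bucket index
    nvme_vg_name: ceph-nvme-vg
    nvme_device_lvs:
      - lv_name: ceph-bucket-index-1
        size: 200G
        journal_name: ceph-journal-bucket-index-1
    journal_size: 5G
    hdd_devices:
      - /dev/sda
      - /dev/sdb
    hdd_vg_prefix: ceph-hdd-vg
    hdd_lv_prefix: ceph-hdd-lv
    hdd_journal_prefix: ceph-journal
    hdd_lv_size: 100%FREE
    logfile_path: ./lv-create.log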
-- name: tear down existing osd filesystems then logical volumes, volume groups, and physical volumes
+---
+- name: Tear down existing osd filesystems then logical volumes, volume groups, and physical volumes
become: true
hosts: osds
vars_prompt:
- - name: ireallymeanit
+ - name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to tear down the logical volumes?
default: 'no'
- private: no
+ private: false
tasks:
- - name: exit playbook, if user did not mean to tear down logical volumes
- fail:
- msg: >
- "Exiting lv-teardown playbook, logical volumes were NOT torn down.
- To tear down the logical volumes, either say 'yes' on the prompt or
- or use `-e ireallymeanit=yes` on the command line when
- invoking the playbook"
- when: ireallymeanit != 'yes'
+ - name: Exit playbook, if user did not mean to tear down logical volumes
+ ansible.builtin.fail:
+ msg: >
+ "Exiting lv-teardown playbook, logical volumes were NOT torn down.
+ To tear down the logical volumes, either say 'yes' on the prompt
+ or use `-e ireallymeanit=yes` on the command line when
+ invoking the playbook"
+ when: ireallymeanit != 'yes'
- - name: include vars of lv_vars.yaml
- include_vars:
- file: lv_vars.yaml # noqa 505
- failed_when: false
+ - name: Include vars of lv_vars.yaml
+ ansible.builtin.include_vars:
+ file: lv_vars.yaml # noqa missing-import
+ failed_when: false
- # need to check if lvm2 is installed
- - name: install lvm2
- package:
- name: lvm2
- state: present
- register: result
- until: result is succeeded
+ # need to check if lvm2 is installed
+ - name: Install lvm2
+ ansible.builtin.package:
+ name: lvm2
+ state: present
+ register: result
+ until: result is succeeded
# BEGIN TEARDOWN
- - name: find any existing osd filesystems
- shell: |
- set -o pipefail;
- grep /var/lib/ceph/osd /proc/mounts | awk '{print $2}'
- register: old_osd_filesystems
- changed_when: false
+ - name: Find any existing osd filesystems
+ ansible.builtin.shell: |
+ set -o pipefail;
+ grep /var/lib/ceph/osd /proc/mounts | awk '{print $2}'
+ register: old_osd_filesystems
+ changed_when: false
- - name: tear down any existing osd filesystem
- ansible.posix.mount:
- path: "{{ item }}"
- state: unmounted
- with_items: "{{ old_osd_filesystems.stdout_lines }}"
+ - name: Tear down any existing osd filesystem
+ ansible.posix.mount:
+ path: "{{ item }}"
+ state: unmounted
+ with_items: "{{ old_osd_filesystems.stdout_lines }}"
- - name: kill all lvm commands that may have been hung
- command: "killall -q lvcreate pvcreate vgcreate lvconvert || echo -n"
- failed_when: false
- changed_when: false
+ - name: Kill all lvm commands that may have been hung
+ ansible.builtin.command: "killall -q lvcreate pvcreate vgcreate lvconvert || echo -n"
+ failed_when: false
+ changed_when: false
- ## Logcal Vols
- - name: tear down existing lv for bucket index
- lvol:
- lv: "{{ item.lv_name }}"
- vg: "{{ nvme_vg_name }}"
- state: absent
- force: yes
- with_items: "{{ nvme_device_lvs }}"
+ ## Logical Vols
+ - name: Tear down existing lv for bucket index
+ community.general.lvol:
+ lv: "{{ item.lv_name }}"
+ vg: "{{ nvme_vg_name }}"
+ state: absent
+ force: true
+ with_items: "{{ nvme_device_lvs }}"
- - name: tear down any existing hdd data lvs
- lvol:
- lv: "{{ hdd_lv_prefix }}-{{ item.split('/')[-1] }}"
- vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
- state: absent
- force: yes
- with_items: "{{ hdd_devices }}"
+ - name: Tear down any existing hdd data lvs
+ community.general.lvol:
+ lv: "{{ hdd_lv_prefix }}-{{ item.split('/')[-1] }}"
+ vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
+ state: absent
+ force: true
+ with_items: "{{ hdd_devices }}"
- - name: tear down any existing lv of journal for bucket index
- lvol:
- lv: "{{ item.journal_name }}"
- vg: "{{ nvme_vg_name }}"
- state: absent
- force: yes
- with_items: "{{ nvme_device_lvs }}"
+ - name: Tear down any existing lv of journal for bucket index
+ community.general.lvol:
+ lv: "{{ item.journal_name }}"
+ vg: "{{ nvme_vg_name }}"
+ state: absent
+ force: true
+ with_items: "{{ nvme_device_lvs }}"
- - name: tear down any existing lvs of hdd journals
- lvol:
- lv: "{{ hdd_journal_prefix }}-{{ item.split('/')[-1] }}"
- vg: "{{ nvme_vg_name }}"
- state: absent
- force: yes
- with_items: "{{ hdd_devices }}"
+ - name: Tear down any existing lvs of hdd journals
+ community.general.lvol:
+ lv: "{{ hdd_journal_prefix }}-{{ item.split('/')[-1] }}"
+ vg: "{{ nvme_vg_name }}"
+ state: absent
+ force: true
+ with_items: "{{ hdd_devices }}"
- ## Volume Groups
- - name: remove vg on nvme device
- lvg:
- vg: "{{ nvme_vg_name }}"
- state: absent
- force: yes
+ ## Volume Groups
+ - name: Remove vg on nvme device
+ community.general.lvg:
+ vg: "{{ nvme_vg_name }}"
+ state: absent
+ force: true
- - name: remove vg for each hdd device
- lvg:
- vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
- state: absent
- force: yes
- with_items: "{{ hdd_devices }}"
+ - name: Remove vg for each hdd device
+ community.general.lvg:
+ vg: "{{ hdd_vg_prefix }}-{{ item.split('/')[-1] }}"
+ state: absent
+ force: true
+ with_items: "{{ hdd_devices }}"
- ## Physical Vols
- - name: tear down pv for nvme device
- command: "pvremove --force --yes {{ nvme_device }}"
- changed_when: false
+ ## Physical Vols
+ - name: Tear down pv for nvme device
+ ansible.builtin.command: "pvremove --force --yes {{ nvme_device }}"
+ changed_when: false
- - name: tear down pv for each hdd device
- command: "pvremove --force --yes {{ item }}"
- changed_when: false
- with_items: "{{ hdd_devices }}"
+ - name: Tear down pv for each hdd device
+ ansible.builtin.command: "pvremove --force --yes {{ item }}"
+ changed_when: false
+ with_items: "{{ hdd_devices }}"
# Overrides the prompt using -e option. Can be used in
# automation scripts to avoid interactive prompt.
-- name: confirm whether user really meant to purge the cluster
+- name: Confirm whether user really meant to purge the cluster
hosts: localhost
gather_facts: false
vars_prompt:
- - name: ireallymeanit
+ - name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to purge the cluster?
default: 'no'
- private: no
+ private: false
tasks:
- - name: exit playbook, if user did not mean to purge cluster
- fail:
+ - name: Exit playbook, if user did not mean to purge cluster
+ ansible.builtin.fail:
msg: >
"Exiting purge-cluster playbook, cluster was NOT purged.
To purge the cluster, either say 'yes' on the prompt or
when: ireallymeanit != 'yes'
-- name: gather facts on all hosts
+- name: Gather facts on all hosts
hosts:
- mons
- osds
- monitoring
become: true
tasks:
- - debug:
+ - name: Gather facts on all Ceph hosts for following reference
+ ansible.builtin.debug:
msg: "gather facts on all Ceph hosts for following reference"
-- name: check there's no ceph kernel threads present
+- name: Check there's no ceph kernel threads present
hosts: clients
become: true
gather_facts: false
any_errors_fatal: true
tasks:
- - import_role:
+ - name: Import ceph-defaults
+ ansible.builtin.import_role:
name: ceph-defaults
- - block:
- - name: get nfs nodes ansible facts
- setup:
+ - name: Nfs related tasks
+ when: groups[nfs_group_name] | default([]) | length > 0
+ block:
+ - name: Get nfs nodes ansible facts
+ ansible.builtin.setup:
gather_subset:
- 'all'
- '!facter'
- '!ohai'
delegate_to: "{{ item }}"
- delegate_facts: True
+ delegate_facts: true
with_items: "{{ groups[nfs_group_name] }}"
run_once: true
- - name: get all nfs-ganesha mount points
- command: grep "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
+ - name: Get all nfs-ganesha mount points
+ ansible.builtin.command: grep "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
register: nfs_ganesha_mount_points
failed_when: false
+ changed_when: false
with_items: "{{ groups[nfs_group_name] }}"
- - name: ensure nfs-ganesha mountpoint(s) are unmounted
+ - name: Ensure nfs-ganesha mountpoint(s) are unmounted
ansible.posix.mount:
path: "{{ item.split(' ')[1] }}"
state: unmounted
with_items:
- "{{ nfs_ganesha_mount_points.results | map(attribute='stdout_lines') | list }}"
when: item | length > 0
- when: groups[nfs_group_name] | default([]) | length > 0
- - name: ensure cephfs mountpoint(s) are unmounted
- command: umount -a -t ceph
+ - name: Ensure cephfs mountpoint(s) are unmounted
+ ansible.builtin.command: umount -a -t ceph
changed_when: false
- - name: find mapped rbd ids
- find:
+ - name: Find mapped rbd ids
+ ansible.builtin.find:
paths: /sys/bus/rbd/devices
file_type: any
register: rbd_mapped_ids
- - name: use sysfs to unmap rbd devices
- shell: "echo {{ item.path | basename }} > /sys/bus/rbd/remove_single_major"
+ - name: Use sysfs to unmap rbd devices
+ ansible.builtin.shell: "echo {{ item.path | basename }} > /sys/bus/rbd/remove_single_major"
changed_when: false
with_items: "{{ rbd_mapped_ids.files }}"
- - name: unload ceph kernel modules
- modprobe:
+ - name: Unload ceph kernel modules
+ community.general.modprobe:
name: "{{ item }}"
state: absent
with_items:
- libceph
-- name: purge ceph nfs cluster
+- name: Purge ceph nfs cluster
hosts: nfss
gather_facts: false # Already gathered previously
become: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: stop ceph nfss with systemd
- service:
+ - name: Stop ceph nfss with systemd
+ ansible.builtin.service:
name: "{{ 'ceph-nfs@' + ansible_facts['hostname'] if containerized_deployment | bool else 'nfs-ganesha' }}"
state: stopped
failed_when: false
- - name: remove ceph nfs directories for "{{ ansible_facts['hostname'] }}"
- file:
+ - name: Remove ceph nfs directories for "{{ ansible_facts['hostname'] }}"
+ ansible.builtin.file:
path: "{{ item }}"
state: absent
with_items:
- /etc/systemd/system/ceph-nfs@.service
-- name: purge node-exporter
+- name: Purge node-exporter
hosts:
- mons
- osds
- iscsigws
become: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - block:
- - import_role:
+ - name: Dashboard related tasks
+ when: dashboard_enabled | bool
+ block:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary
- - name: disable node_exporter service
- service:
+ - name: Disable node_exporter service
+ ansible.builtin.service:
name: node_exporter
state: stopped
- enabled: no
+ enabled: false
failed_when: false
- - name: remove node_exporter service file
- file:
+ - name: Remove node_exporter service file
+ ansible.builtin.file:
name: /etc/systemd/system/node_exporter.service
state: absent
- - name: remove node-exporter image
- command: "{{ container_binary }} rmi {{ node_exporter_container_image }}"
+ - name: Remove node-exporter image
+ ansible.builtin.command: "{{ container_binary }} rmi {{ node_exporter_container_image }}"
failed_when: false
+ changed_when: false
tags:
- remove_img
- when: dashboard_enabled | bool
-- name: purge ceph monitoring
+- name: Purge ceph monitoring
hosts: monitoring
become: true
vars:
- prometheus
- alertmanager
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - block:
- - import_role:
+ - name: Dashboard related tasks
+ when: dashboard_enabled | bool
+ block:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary
- - name: stop services
- service:
+ - name: Stop services
+ ansible.builtin.service:
name: "{{ item }}"
state: stopped
- enabled: no
+ enabled: false
with_items: "{{ grafana_services }}"
failed_when: false
- - name: remove service files
- file:
+ - name: Remove service files
+ ansible.builtin.file:
name: "/etc/systemd/system/{{ item }}.service"
state: absent
with_items: "{{ grafana_services }}"
failed_when: false
- - name: remove ceph dashboard container images
- command: "{{ container_binary }} rmi {{ item }}"
+ - name: Remove ceph dashboard container images
+ ansible.builtin.command: "{{ container_binary }} rmi {{ item }}"
with_items:
- "{{ prometheus_container_image }}"
- "{{ grafana_container_image }}"
- "{{ alertmanager_container_image }}"
failed_when: false
+ changed_when: false
tags:
- remove_img
- - name: remove data
- file:
+ - name: Remove data
+ ansible.builtin.file:
name: "{{ item }}"
state: absent
with_items:
- /var/lib/prometheus
- /etc/prometheus
failed_when: false
- when: dashboard_enabled | bool
-- name: purge ceph mds cluster
+- name: Purge ceph mds cluster
hosts: mdss
gather_facts: false # Already gathered previously
become: true
tasks:
- - name: stop ceph mdss with systemd
- service:
+ - name: Stop ceph mdss with systemd
+ ansible.builtin.service:
name: ceph-mds@{{ ansible_facts['hostname'] }}
state: stopped
- enabled: no
+ enabled: false
failed_when: false
- - name: remove ceph mds service
- file:
+ - name: Remove ceph mds service
+ ansible.builtin.file:
path: /etc/systemd/system/ceph-mds{{ item }}
state: absent
loop:
- '.target'
-- name: purge ceph mgr cluster
+- name: Purge ceph mgr cluster
hosts: mgrs
gather_facts: false # Already gathered previously
become: true
tasks:
- - name: stop ceph mgrs with systemd
- service:
+ - name: Stop ceph mgrs with systemd
+ ansible.builtin.service:
name: ceph-mgr@{{ ansible_facts['hostname'] }}
state: stopped
- enabled: no
+ enabled: false
failed_when: false
when: ansible_facts['service_mgr'] == 'systemd'
- - name: remove ceph mgr service
- file:
+ - name: Remove ceph mgr service
+ ansible.builtin.file:
path: /etc/systemd/system/ceph-mgr{{ item }}
state: absent
loop:
- '@.service'
- '.target'
-- name: purge rgwloadbalancer cluster
+- name: Purge rgwloadbalancer cluster
hosts: rgwloadbalancers
gather_facts: false # Already gathered previously
become: true
tasks:
- - name: stop rgwloadbalancer services
- service:
+ - name: Stop rgwloadbalancer services
+ ansible.builtin.service:
name: ['keepalived', 'haproxy']
state: stopped
- enabled: no
+ enabled: false
failed_when: false
-- name: purge ceph rgw cluster
+- name: Purge ceph rgw cluster
hosts: rgws
gather_facts: false # Already gathered previously
become: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: set_radosgw_address
- - name: stop ceph rgws with systemd
- service:
+ - name: Stop ceph rgws with systemd
+ ansible.builtin.service:
name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: stopped
- enabled: no
+ enabled: false
failed_when: false
with_items: "{{ rgw_instances }}"
- - name: remove ceph rgw service
- file:
+ - name: Remove ceph rgw service
+ ansible.builtin.file:
path: /etc/systemd/system/ceph-radosgw{{ item }}
state: absent
loop:
- '.target'
-- name: purge ceph rbd-mirror cluster
+- name: Purge ceph rbd-mirror cluster
hosts: rbdmirrors
gather_facts: false # Already gathered previously
become: true
tasks:
- - name: stop ceph rbd mirror with systemd
- service:
+ - name: Stop ceph rbd mirror with systemd
+ ansible.builtin.service:
name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
state: stopped
- enabled: no
+ enabled: false
failed_when: false
- - name: remove ceph rbd-mirror service
- file:
+ - name: Remove ceph rbd-mirror service
+ ansible.builtin.file:
path: /etc/systemd/system/ceph-rbd-mirror{{ item }}
state: absent
loop:
- '.target'
-- name: purge ceph osd cluster
+- name: Purge ceph osd cluster
vars:
- reboot_osd_node: False
+ reboot_osd_node: false
hosts: osds
gather_facts: false # Already gathered previously
become: true
handlers:
- - name: restart machine
- shell: sleep 2 && shutdown -r now "Ansible updates triggered"
+ - name: Restart machine # noqa: ignore-errors
+ ansible.builtin.shell: sleep 2 && shutdown -r now "Ansible updates triggered"
async: 1
poll: 0
ignore_errors: true
+ changed_when: false
- - name: wait for server to boot
+ - name: Wait for server to boot
become: false
- wait_for:
+ ansible.builtin.wait_for:
port: 22
host: "{{ hostvars[inventory_hostname]['ansible_facts']['default_ipv4']['address'] }}"
state: started
timeout: 500
delegate_to: localhost
- - name: remove data
- shell: rm -rf /var/lib/ceph/* # noqa 302
+ - name: Remove data
+ ansible.builtin.shell: rm -rf /var/lib/ceph/* # noqa no-free-form
+ changed_when: false
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary
- - name: default lvm_volumes if not defined
- set_fact:
+ - name: Default lvm_volumes if not defined
+ ansible.builtin.set_fact:
lvm_volumes: []
when: lvm_volumes is not defined
- - name: get osd numbers
- shell: if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | sed 's/.*-//' ; fi # noqa 306
+ - name: Get osd numbers
+ ansible.builtin.shell: if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | sed 's/.*-//' ; fi # noqa risky-shell-pipe
register: osd_ids
changed_when: false
- - name: stop ceph-osd
- service:
+ - name: Stop ceph-osd
+ ansible.builtin.service:
name: ceph-osd@{{ item }}
state: stopped
- enabled: no
+ enabled: false
with_items: "{{ osd_ids.stdout_lines }}"
- - name: remove ceph udev rules
- file:
+ - name: Remove ceph udev rules
+ ansible.builtin.file:
path: "{{ item }}"
state: absent
with_items:
when: not containerized_deployment | bool
# NOTE(leseb): hope someone will find a more elegant way one day...
- - name: see if encrypted partitions are present
- shell: blkid -t TYPE=crypto_LUKS -s PARTLABEL -s PARTUUID | grep "ceph.*." | grep -o PARTUUID.* | cut -d '"' -f 2 # noqa 306
+ - name: See if encrypted partitions are present
+ ansible.builtin.shell: blkid -t TYPE=crypto_LUKS -s PARTLABEL -s PARTUUID | grep "ceph.*." | grep -o PARTUUID.* | cut -d '"' -f 2 # noqa risky-shell-pipe
register: encrypted_ceph_partuuid
changed_when: false
- - name: get osd data and lockbox mount points
- shell: (grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }' # noqa 306
+ - name: Get osd data and lockbox mount points
+ ansible.builtin.shell: (grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }' # noqa risky-shell-pipe
register: mounted_osd
changed_when: false
- - name: drop all cache
- shell: "sync && sleep 1 && echo 3 > /proc/sys/vm/drop_caches"
+ - name: Drop all cache
+ ansible.builtin.shell: "sync && sleep 1 && echo 3 > /proc/sys/vm/drop_caches"
changed_when: false
- - name: see if ceph-volume is installed # noqa : 305
- shell: command -v ceph-volume
+ - name: See if ceph-volume is installed # noqa command-instead-of-shell
+ ansible.builtin.shell: command -v ceph-volume
changed_when: false
failed_when: false
register: ceph_volume_present
when: not containerized_deployment | bool
- - name: zap and destroy osds by osd ids
+ - name: Zap and destroy osds by osd ids
ceph_volume:
osd_id: "{{ item | int }}"
action: "zap"
- osd_auto_discovery | default(False) | bool
- (containerized_deployment | bool or ceph_volume_present.rc == 0)
- - name: umount osd data partition
+ - name: Umount osd data partition
ansible.posix.mount:
path: "{{ item }}"
state: unmounted
with_items: "{{ mounted_osd.stdout_lines }}"
- - name: remove osd mountpoint tree
- file:
+ - name: Remove osd mountpoint tree
+ ansible.builtin.file:
path: /var/lib/ceph/osd/
state: absent
register: remove_osd_mountpoints
ignore_errors: true
- - name: is reboot needed
- command: echo requesting reboot
+ - name: Is reboot needed
+ ansible.builtin.command: echo requesting reboot
delegate_to: localhost
become: false
notify:
- - restart machine
- - wait for server to boot
- - remove data
+ - Restart machine
+ - Wait for server to boot
+ - Remove data
+ changed_when: false
when:
- reboot_osd_node | bool
- remove_osd_mountpoints.failed is defined
- - name: wipe table on dm-crypt devices
- command: dmsetup wipe_table --force "{{ item }}"
+ - name: Wipe table on dm-crypt devices
+ ansible.builtin.command: dmsetup wipe_table --force "{{ item }}"
with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}"
+ changed_when: false
when: encrypted_ceph_partuuid.stdout_lines | length > 0
- - name: delete dm-crypt devices if any
- command: dmsetup remove --retry --force {{ item }}
+ - name: Delete dm-crypt devices if any
+ ansible.builtin.command: dmsetup remove --retry --force {{ item }}
with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}"
+ changed_when: false
when: encrypted_ceph_partuuid.stdout_lines | length > 0
- - name: get payload_offset
- shell: cryptsetup luksDump /dev/disk/by-partuuid/{{ item }} | awk '/Payload offset:/ { print $3 }' # noqa 306
+ - name: Get payload_offset
+ ansible.builtin.shell: cryptsetup luksDump /dev/disk/by-partuuid/{{ item }} | awk '/Payload offset:/ { print $3 }' # noqa risky-shell-pipe
register: payload_offset
with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}"
+ changed_when: false
when: encrypted_ceph_partuuid.stdout_lines | length > 0
- - name: get physical sector size
- command: blockdev --getpbsz /dev/disk/by-partuuid/{{ item }}
+ - name: Get physical sector size
+ ansible.builtin.command: blockdev --getpbsz /dev/disk/by-partuuid/{{ item }}
changed_when: false
with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}"
when: encrypted_ceph_partuuid.stdout_lines | length > 0
register: phys_sector_size
- - name: wipe dmcrypt device
- command: dd if=/dev/zero of=/dev/disk/by-partuuid/{{ item.0 }} bs={{ item.1.stdout }} count={{ item.2.stdout }} oflag=direct
+ - name: Wipe dmcrypt device
+ ansible.builtin.command: dd if=/dev/zero of=/dev/disk/by-partuuid/{{ item.0 }} bs={{ item.1.stdout }} count={{ item.2.stdout }} oflag=direct
changed_when: false
with_together:
- "{{ encrypted_ceph_partuuid.stdout_lines }}"
- "{{ payload_offset.results }}"
- "{{ phys_sector_size.results }}"
- - name: get ceph data partitions
- shell: |
+ - name: Get ceph data partitions
+ ansible.builtin.shell: |
blkid -o device -t PARTLABEL="ceph data"
changed_when: false
failed_when: false
register: ceph_data_partition_to_erase_path
- - name: get ceph lockbox partitions
- shell: |
+ - name: Get ceph lockbox partitions
+ ansible.builtin.shell: |
blkid -o device -t PARTLABEL="ceph lockbox"
changed_when: false
failed_when: false
register: ceph_lockbox_partition_to_erase_path
- - name: see if ceph-volume is installed # noqa : 305
- shell: command -v ceph-volume
+ - name: See if ceph-volume is installed # noqa: command-instead-of-shell
+ ansible.builtin.shell: command -v ceph-volume
changed_when: false
failed_when: false
register: ceph_volume_present
when: not containerized_deployment | bool
- - name: zap and destroy osds created by ceph-volume with lvm_volumes
+ - name: Zap and destroy osds created by ceph-volume with lvm_volumes
ceph_volume:
data: "{{ item.data }}"
- data_vg: "{{ item.data_vg|default(omit) }}"
- journal: "{{ item.journal|default(omit) }}"
- journal_vg: "{{ item.journal_vg|default(omit) }}"
- db: "{{ item.db|default(omit) }}"
- db_vg: "{{ item.db_vg|default(omit) }}"
- wal: "{{ item.wal|default(omit) }}"
- wal_vg: "{{ item.wal_vg|default(omit) }}"
+ data_vg: "{{ item.data_vg | default(omit) }}"
+ journal: "{{ item.journal | default(omit) }}"
+ journal_vg: "{{ item.journal_vg | default(omit) }}"
+ db: "{{ item.db | default(omit) }}"
+ db_vg: "{{ item.db_vg | default(omit) }}"
+ wal: "{{ item.wal | default(omit) }}"
+ wal_vg: "{{ item.wal_vg | default(omit) }}"
action: "zap"
environment:
CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
- containerized_deployment | bool
or ceph_volume_present.rc == 0
- - name: zap and destroy osds created by ceph-volume with devices
+ - name: Zap and destroy osds created by ceph-volume with devices
ceph_volume:
data: "{{ item }}"
action: "zap"
- containerized_deployment | bool
or ceph_volume_present.rc == 0
- - name: get ceph block partitions
- shell: |
+ - name: Get ceph block partitions
+ ansible.builtin.shell: |
blkid -o device -t PARTLABEL="ceph block"
changed_when: false
failed_when: false
register: ceph_block_partition_to_erase_path
- - name: get ceph journal partitions
- shell: |
+ - name: Get ceph journal partitions
+ ansible.builtin.shell: |
blkid -o device -t PARTLABEL="ceph journal"
changed_when: false
failed_when: false
register: ceph_journal_partition_to_erase_path
- - name: get ceph db partitions
- shell: |
+ - name: Get ceph db partitions
+ ansible.builtin.shell: |
blkid -o device -t PARTLABEL="ceph block.db"
changed_when: false
failed_when: false
register: ceph_db_partition_to_erase_path
- - name: get ceph wal partitions
- shell: |
+ - name: Get ceph wal partitions
+ ansible.builtin.shell: |
blkid -o device -t PARTLABEL="ceph block.wal"
changed_when: false
failed_when: false
register: ceph_wal_partition_to_erase_path
- - name: set_fact combined_devices_list
- set_fact:
+ - name: Set_fact combined_devices_list
+ ansible.builtin.set_fact:
combined_devices_list: "{{ ceph_data_partition_to_erase_path.stdout_lines +
ceph_lockbox_partition_to_erase_path.stdout_lines +
ceph_block_partition_to_erase_path.stdout_lines +
ceph_db_partition_to_erase_path.stdout_lines +
ceph_wal_partition_to_erase_path.stdout_lines }}"
- - name: resolve parent device
- command: lsblk --nodeps -no pkname "{{ item }}"
+ - name: Resolve parent device
+ ansible.builtin.command: lsblk --nodeps -no pkname "{{ item }}"
register: tmp_resolved_parent_device
changed_when: false
with_items: "{{ combined_devices_list }}"
- - name: set_fact resolved_parent_device
- set_fact:
+ - name: Set_fact resolved_parent_device
+ ansible.builtin.set_fact:
resolved_parent_device: "{{ tmp_resolved_parent_device.results | map(attribute='stdout') | list | unique }}"
- - name: wipe partitions
- shell: |
+ - name: Wipe partitions
+ ansible.builtin.shell: |
wipefs --all "{{ item }}"
dd if=/dev/zero of="{{ item }}" bs=1 count=4096
changed_when: false
with_items: "{{ combined_devices_list }}"
- - name: check parent device partition
- parted:
+ - name: Check parent device partition
+ community.general.parted:
device: "/dev/{{ item }}"
loop: "{{ resolved_parent_device }}"
register: parted_info
- - name: fail if there is a boot partition on the device
- fail:
+ - name: Fail if there is a boot partition on the device
+ ansible.builtin.fail:
msg: "{{ item.item }} has a boot partition"
loop: "{{ parted_info.results }}"
when: "'boot' in (item.partitions | map(attribute='flags') | list | flatten)"
- - name: zap ceph journal/block db/block wal partitions # noqa 306
- shell: |
+ - name: Zap ceph journal/block db/block wal partitions # noqa risky-shell-pipe
+ ansible.builtin.shell: |
sgdisk -Z --clear --mbrtogpt -g -- /dev/"{{ item }}"
dd if=/dev/zero of=/dev/"{{ item }}" bs=1M count=200
parted -s /dev/"{{ item }}" mklabel gpt
with_items: "{{ resolved_parent_device }}"
changed_when: false
- - name: remove ceph osd service
- file:
+ - name: Remove ceph osd service
+ ansible.builtin.file:
path: /etc/systemd/system/ceph-osd{{ item }}
state: absent
loop:
- '@.service'
- '.target'
-- name: purge ceph mon cluster
+- name: Purge ceph mon cluster
hosts: mons
gather_facts: false # already gathered previously
become: true
tasks:
- - name: stop ceph mons with systemd
- service:
+ - name: Stop ceph mons with systemd
+ ansible.builtin.service:
name: "ceph-{{ item }}@{{ ansible_facts['hostname'] }}"
state: stopped
- enabled: no
+ enabled: false
failed_when: false
with_items:
- mon
- mgr
- - name: remove monitor store and bootstrap keys
- file:
+ - name: Remove monitor store and bootstrap keys
+ ansible.builtin.file:
path: "{{ item }}"
state: absent
with_items:
- /var/lib/ceph/bootstrap-mgr
- /var/lib/ceph/tmp
- - name: remove ceph mon and mgr service
- file:
+ - name: Remove ceph mon and mgr service
+ ansible.builtin.file:
path: "/etc/systemd/system/ceph-{{ item.0 }}{{ item.1 }}"
state: absent
loop: "{{ ['mon', 'mgr'] | product(['@.service', '.target']) | list }}"
-- name: purge ceph-crash daemons
+- name: Purge ceph-crash daemons
hosts:
- mons
- osds
gather_facts: false
become: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: stop ceph-crash service
- service:
+ - name: Stop ceph-crash service
+ ansible.builtin.service:
name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}"
state: stopped
- enabled: no
+ enabled: false
failed_when: false
- - name: systemctl reset-failed ceph-crash # noqa 303
- command: "systemctl reset-failed {{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}"
+ - name: Systemctl reset-failed ceph-crash # noqa command-instead-of-module
+ ansible.builtin.command: "systemctl reset-failed {{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}"
changed_when: false
failed_when: false
- - name: remove service file
- file:
+ - name: Remove service file
+ ansible.builtin.file:
name: "/etc/systemd/system/ceph-crash{{ '@' if containerized_deployment | bool else '' }}.service"
state: absent
failed_when: false
- - name: remove /var/lib/ceph/crash
- file:
+ - name: Remove /var/lib/ceph/crash
+ ansible.builtin.file:
path: /var/lib/ceph/crash
state: absent
-- name: check container hosts
+- name: Check container hosts
hosts:
- mons
- osds
- mgrs
become: true
tasks:
- - name: containerized_deployment only
+ - name: Containerized_deployment only
when: containerized_deployment | bool
block:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary
- - name: remove stopped/exited containers
- command: >
+ - name: Remove stopped/exited containers
+ ansible.builtin.command: >
{{ container_binary }} container prune -f
changed_when: false
- - name: show container list on all the nodes (should be empty)
- command: >
+ - name: Show container list on all the nodes (should be empty)
+ ansible.builtin.command: >
{{ container_binary }} ps --filter='name=ceph' -a -q
register: containers_list
changed_when: false
- - name: show container images on all the nodes (should be empty if tags was passed remove_img)
- command: >
+ - name: Show container images on all the nodes (should be empty if tags was passed remove_img)
+ ansible.builtin.command: >
{{ container_binary }} images
register: images_list
changed_when: false
- - name: fail if container are still present
- fail:
+ - name: Fail if containers are still present
+ ansible.builtin.fail:
 msg: "It looks like containers are still present."
- when: containers_list.stdout_lines|length > 0
+ when: containers_list.stdout_lines | length > 0
-- name: final cleanup - check any running ceph, purge ceph packages, purge config and remove data
+- name: Final cleanup - check any running ceph, purge ceph packages, purge config and remove data
vars:
# When set to true both groups of packages are purged.
# This can cause problem with qemu-kvm
gather_facts: false # Already gathered previously
become: true
handlers:
- - name: get osd data and lockbox mount points
- shell: "(grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }'"
+ - name: Get osd data and lockbox mount points
+ ansible.builtin.shell: "set -o pipefail && (grep /var/lib/ceph/osd /proc/mounts || echo -n) | awk '{ print $2 }'"
register: mounted_osd
changed_when: false
- listen: "remove data"
+ listen: "Remove data"
- - name: umount osd data partition
+ - name: Umount osd data partition
ansible.posix.mount:
path: "{{ item }}"
state: unmounted
with_items: "{{ mounted_osd.stdout_lines }}"
- listen: "remove data"
+ listen: "Remove data"
- - name: remove data
- shell: rm -rf /var/lib/ceph/* # noqa 302
- listen: "remove data"
+ - name: Remove data
+ ansible.builtin.shell: rm -rf /var/lib/ceph/* # noqa no-free-form
+ changed_when: false
+ listen: "Remove data"
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: non containerized related tasks
+ - name: Non containerized related tasks
when: not containerized_deployment | bool
block:
- - name: purge ceph packages with yum
- yum:
+ - name: Purge ceph packages with yum
+ ansible.builtin.yum:
name: "{{ ceph_packages }}"
state: absent
when: ansible_facts['pkg_mgr'] == 'yum'
- - name: purge ceph packages with dnf
- dnf:
+ - name: Purge ceph packages with dnf
+ ansible.builtin.dnf:
name: "{{ ceph_packages }}"
state: absent
when: ansible_facts['pkg_mgr'] == 'dnf'
- - name: purge ceph packages with apt
- apt:
+ - name: Purge ceph packages with apt
+ ansible.builtin.apt:
name: "{{ ceph_packages }}"
state: absent
purge: true
when: ansible_facts['pkg_mgr'] == 'apt'
- - name: purge remaining ceph packages with yum
- yum:
+ - name: Purge remaining ceph packages with yum
+ ansible.builtin.yum:
name: "{{ ceph_remaining_packages }}"
state: absent
when:
- ansible_facts['pkg_mgr'] == 'yum'
- purge_all_packages | bool
- - name: purge remaining ceph packages with dnf
- dnf:
+ - name: Purge remaining ceph packages with dnf
+ ansible.builtin.dnf:
name: "{{ ceph_remaining_packages }}"
state: absent
when:
- ansible_facts['pkg_mgr'] == 'dnf'
- purge_all_packages | bool
- - name: purge remaining ceph packages with apt
- apt:
+ - name: Purge remaining ceph packages with apt
+ ansible.builtin.apt:
name: "{{ ceph_remaining_packages }}"
state: absent
when:
- ansible_facts['pkg_mgr'] == 'apt'
- purge_all_packages | bool
- - name: purge extra packages with yum
- yum:
+ - name: Purge extra packages with yum
+ ansible.builtin.yum:
name: "{{ extra_packages }}"
state: absent
when:
- ansible_facts['pkg_mgr'] == 'yum'
- purge_all_packages | bool
- - name: purge extra packages with dnf
- dnf:
+ - name: Purge extra packages with dnf
+ ansible.builtin.dnf:
name: "{{ extra_packages }}"
state: absent
when:
- ansible_facts['pkg_mgr'] == 'dnf'
- purge_all_packages | bool
- - name: purge extra packages with apt
- apt:
+ - name: Purge extra packages with apt
+ ansible.builtin.apt:
name: "{{ extra_packages }}"
state: absent
when:
- ansible_facts['pkg_mgr'] == 'apt'
- purge_all_packages | bool
- - name: remove config and any ceph socket left
- file:
+ - name: Remove config and any ceph socket left
+ ansible.builtin.file:
path: "{{ item }}"
state: absent
with_items:
- /etc/haproxy
- /run/ceph
- - name: remove logs
- file:
- path: /var/log/ceph
- state: absent
+ - name: Remove logs
+ ansible.builtin.file:
+ path: /var/log/ceph
+ state: absent
- - name: request data removal
- command: echo requesting data removal # noqa 301
+ - name: Request data removal
+ ansible.builtin.command: echo requesting data removal # noqa no-changed-when
become: false
delegate_to: localhost
- notify: remove data
+ notify: Remove data
- - name: purge dnf cache
- command: dnf clean all
+ - name: Purge dnf cache
+ ansible.builtin.command: dnf clean all
+ changed_when: false
when: ansible_facts['pkg_mgr'] == 'dnf'
- - name: clean apt
- command: apt-get clean # noqa 303
+ - name: Clean apt
+ ansible.builtin.command: apt-get clean # noqa command-instead-of-module
+ changed_when: false
when: ansible_facts['pkg_mgr'] == 'apt'
- - name: purge ceph repo file in /etc/yum.repos.d
- file:
+ - name: Purge ceph repo file in /etc/yum.repos.d
+ ansible.builtin.file:
path: '/etc/yum.repos.d/{{ item }}.repo'
state: absent
with_items:
- ceph_stable
when: ansible_facts['os_family'] == 'RedHat'
- - name: check for anything running ceph
- command: "ps -u ceph -U ceph"
+ - name: Check for anything running ceph
+ ansible.builtin.command: "ps -u ceph -U ceph"
register: check_for_running_ceph
changed_when: false
failed_when: check_for_running_ceph.rc == 0
- - name: find ceph systemd unit files to remove
- find:
+ - name: Find ceph systemd unit files to remove
+ ansible.builtin.find:
paths: "/etc/systemd/system"
pattern: "ceph*"
recurse: true
file_type: any
register: systemd_files
- - name: remove ceph systemd unit files
- file:
+ - name: Remove ceph systemd unit files
+ ansible.builtin.file:
path: "{{ item.path }}"
state: absent
with_items: "{{ systemd_files.files }}"
when: ansible_facts['service_mgr'] == 'systemd'
- - name: containerized related tasks
+ - name: Containerized related tasks
when: containerized_deployment | bool
block:
- - name: check if it is Atomic host
- stat: path=/run/ostree-booted
+ - name: Check if it is Atomic host
+ ansible.builtin.stat:
+ path: /run/ostree-booted
register: stat_ostree
- - name: set fact for using Atomic host
- set_fact:
+ - name: Set fact for using Atomic host
+ ansible.builtin.set_fact:
is_atomic: "{{ stat_ostree.stat.exists }}"
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary
- - name: remove ceph container image
- command: "{{ container_binary }} rmi {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+ - name: Remove ceph container image
+ ansible.builtin.command: "{{ container_binary }} rmi {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
changed_when: false
when:
- inventory_hostname not in groups.get(client_group_name, [])
tags:
- remove_img
- - name: stop docker service
- service:
+ - name: Stop docker service # noqa: ignore-errors
+ ansible.builtin.service:
name: docker
state: stopped
- enabled: no
+ enabled: false
when:
- not is_atomic
- container_binary == 'docker'
tags:
- remove_docker
- - name: remove docker on debian/ubuntu
- apt:
+ - name: Remove docker on debian/ubuntu
+ ansible.builtin.apt:
name: ['docker-ce', 'docker-engine', 'docker.io', 'python-docker', 'python3-docker']
state: absent
- update_cache: yes
- autoremove: yes
+ update_cache: true
+ autoremove: true
when: ansible_facts['os_family'] == 'Debian'
tags:
- remove_docker
- - name: red hat based systems tasks
+ - name: Red hat based systems tasks
+ when:
+ ansible_facts['os_family'] == 'RedHat' and
+ not is_atomic
+ tags:
+ - remove_docker
block:
- - name: yum related tasks on red hat
+ - name: Yum related tasks on red hat
+ when: ansible_facts['pkg_mgr'] == "yum"
block:
- - name: remove packages on redhat
- yum:
+ - name: Remove packages on redhat
+ ansible.builtin.yum:
name: ['epel-release', 'docker', 'python-docker-py']
state: absent
- - name: remove package dependencies on redhat
- command: yum -y autoremove
+ - name: Remove package dependencies on redhat
+ ansible.builtin.command: yum -y autoremove # noqa: command-instead-of-module
+ changed_when: false
- - name: remove package dependencies on redhat again
- command: yum -y autoremove
- when:
- ansible_facts['pkg_mgr'] == "yum"
+ - name: Remove package dependencies on redhat again
+ ansible.builtin.command: yum -y autoremove # noqa: command-instead-of-module
+ changed_when: false
- - name: dnf related tasks on red hat
+ - name: Dnf related tasks on red hat
+ when: ansible_facts['pkg_mgr'] == "dnf"
block:
- - name: remove docker on redhat
- dnf:
+ - name: Remove docker on redhat
+ ansible.builtin.dnf:
name: ['docker', 'python3-docker']
state: absent
- - name: remove package dependencies on redhat
- command: dnf -y autoremove
+ - name: Remove package dependencies on redhat
+ ansible.builtin.command: dnf -y autoremove
+ changed_when: false
- - name: remove package dependencies on redhat again
- command: dnf -y autoremove
- when:
- ansible_facts['pkg_mgr'] == "dnf"
- when:
- ansible_facts['os_family'] == 'RedHat' and
- not is_atomic
- tags:
- - remove_docker
+ - name: Remove package dependencies on redhat again
+ ansible.builtin.command: dnf -y autoremove
+ changed_when: false
- - name: find any service-cid file left
- find:
+ - name: Find any service-cid file left
+ ansible.builtin.find:
paths: /run
patterns:
- "ceph-*.service-cid"
- "alertmanager.service-cid"
register: service_cid_files
- - name: rm any service-cid file
- file:
+ - name: Rm any service-cid file
+ ansible.builtin.file:
path: "{{ item.path }}"
state: absent
with_items: "{{ service_cid_files.files }}"
-- name: purge ceph directories
+- name: Purge ceph directories
hosts:
- mons
- osds
gather_facts: false # Already gathered previously
become: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: purge ceph directories - containerized deployments
+ - name: Purge ceph directories - containerized deployments
when: containerized_deployment | bool
block:
- - name: purge ceph directories for "{{ ansible_facts['hostname'] }}" and ceph socket
- file:
+ - name: Purge ceph directories and ceph socket
+ ansible.builtin.file:
path: "{{ item }}"
state: absent
with_items:
- /run/ceph
- "{{ ceph_osd_docker_run_script_path | default('/usr/share') }}/ceph-osd-run.sh"
- - name: remove ceph data
- shell: rm -rf /var/lib/ceph/* # noqa 302
+ - name: Remove ceph data
+ ansible.builtin.shell: rm -rf /var/lib/ceph/* # noqa: no-free-form
changed_when: false
- - name: remove /var/lib/ceph
- file:
+ - name: Remove /var/lib/ceph
+ ansible.builtin.file:
path: /var/lib/ceph
state: absent
# (todo): remove this when we are able to manage docker
# service on atomic host.
- - name: remove docker data
- shell: rm -rf /var/lib/docker/* # noqa 302
+ - name: Remove docker data
+ ansible.builtin.shell: rm -rf /var/lib/docker/* # noqa: no-free-form
+ changed_when: false
when: not is_atomic | bool
tags:
- remove_docker
-- name: purge fetch directory
+- name: Purge fetch directory
hosts: localhost
gather_facts: false
tasks:
- - name: set fetch_directory value if not set
- set_fact:
+ - name: Set fetch_directory value if not set
+ ansible.builtin.set_fact:
fetch_directory: "fetch/"
when: fetch_directory is not defined
- - name: purge fetch directory for localhost
- file:
+ - name: Purge fetch directory for localhost
+ ansible.builtin.file:
path: "{{ fetch_directory | default('fetch/') }}"
state: absent
# Overrides the prompt using -e option. Can be used in
# automation scripts to avoid interactive prompt.
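# A minimal non-interactive run might look like this (the playbook path is an
# assumption; adjust it to where the purge-dashboard playbook lives in your checkout):
#   ansible-playbook infrastructure-playbooks/purge-dashboard.yml -e ireallymeanit=yes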
-- name: confirm whether user really meant to purge the dashboard
+- name: Confirm whether user really meant to purge the dashboard
hosts: localhost
gather_facts: false
vars_prompt:
- - name: ireallymeanit
+ - name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to purge the dashboard?
default: 'no'
- private: no
+ private: false
tasks:
- - name: exit playbook, if user did not mean to purge dashboard
- fail:
+ - name: Exit playbook, if user did not mean to purge dashboard
+ ansible.builtin.fail:
msg: >
"Exiting purge-dashboard playbook, dashboard was NOT purged.
To purge the dashboard, either say 'yes' on the prompt or
use `-e ireallymeanit=yes` on the command line when
invoking the playbook"
when: ireallymeanit != 'yes'
- - name: import_role ceph-defaults
- import_role:
+ - name: Import_role ceph-defaults
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: check if a legacy grafana-server group exists
- import_role:
+ - name: Check if a legacy grafana-server group exists
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: convert_grafana_server_group_name.yml
when: groups.get((grafana_server_group_name | default('grafana-server')), []) | length > 0
-- name: gather facts on all hosts
+- name: Gather facts on all hosts
hosts:
- "{{ mon_group_name|default('mons') }}"
- "{{ osd_group_name|default('osds') }}"
- "{{ monitoring_group_name | default('monitoring') }}"
become: true
tasks:
- - debug: msg="gather facts on all Ceph hosts for following reference"
+ - name: Gather facts on all Ceph hosts for following reference
+ ansible.builtin.debug:
+ msg: "gather facts on all Ceph hosts for following reference"
-- name: purge node exporter
+- name: Purge node exporter
hosts:
- "{{ mon_group_name|default('mons') }}"
- "{{ osd_group_name|default('osds') }}"
gather_facts: false
become: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary
- - name: disable node_exporter service
- service:
+ - name: Disable node_exporter service
+ ansible.builtin.service:
name: node_exporter
state: stopped
- enabled: no
+ enabled: false
failed_when: false
- - name: remove node_exporter service files
- file:
+ - name: Remove node_exporter service files
+ ansible.builtin.file:
name: "{{ item }}"
state: absent
loop:
- /etc/systemd/system/node_exporter.service
- /run/node_exporter.service-cid
- - name: remove node-exporter image
- command: "{{ container_binary }} rmi {{ node_exporter_container_image }}"
+ - name: Remove node-exporter image
+ ansible.builtin.command: "{{ container_binary }} rmi {{ node_exporter_container_image }}"
changed_when: false
failed_when: false
-- name: purge ceph monitoring
+- name: Purge ceph monitoring
hosts: "{{ monitoring_group_name | default('monitoring') }}"
gather_facts: false
become: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary
- - name: stop services
- service:
+ - name: Stop services
+ ansible.builtin.service:
name: "{{ item }}"
state: stopped
- enabled: no
+ enabled: false
failed_when: false
loop:
- alertmanager
- prometheus
- grafana-server
- - name: remove systemd service files
- file:
+ - name: Remove systemd service files
+ ansible.builtin.file:
name: "{{ item }}"
state: absent
loop:
- /run/prometheus.service-cid
- /run/grafana-server.service-cid
- - name: remove ceph dashboard container images
- command: "{{ container_binary }} rmi {{ item }}"
+ - name: Remove ceph dashboard container images
+ ansible.builtin.command: "{{ container_binary }} rmi {{ item }}"
loop:
- "{{ alertmanager_container_image }}"
- "{{ prometheus_container_image }}"
changed_when: false
failed_when: false
- - name: remove ceph-grafana-dashboards package on RedHat or SUSE
- package:
+ - name: Remove ceph-grafana-dashboards package on RedHat or SUSE
+ ansible.builtin.package:
name: ceph-grafana-dashboards
state: absent
when:
- not containerized_deployment | bool
- ansible_facts['os_family'] in ['RedHat', 'Suse']
- - name: remove data
- file:
+ - name: Remove data
+ ansible.builtin.file:
name: "{{ item }}"
state: absent
loop:
- "{{ prometheus_data_dir }}"
- /var/lib/grafana
-- name: purge ceph dashboard
+- name: Purge ceph dashboard
hosts: "{{ groups[mgr_group_name] | default(groups[mon_group_name]) | default(omit) }}"
gather_facts: false
become: true
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary
- - name: remove the dashboard admin user
+ - name: Remove the dashboard admin user
ceph_dashboard_user:
name: "{{ dashboard_admin_user }}"
cluster: "{{ cluster }}"
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
- - name: remove radosgw system user
+ - name: Remove radosgw system user
radosgw_user:
name: "{{ dashboard_rgw_api_user_id }}"
cluster: "{{ cluster }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when: groups.get(rgw_group_name, []) | length > 0
- - name: disable mgr dashboard and prometheus modules
+ - name: Disable mgr dashboard and prometheus modules
ceph_mgr_module:
name: "{{ item }}"
cluster: "{{ cluster }}"
- dashboard
- prometheus
- - name: remove TLS certificate and key files
- file:
+ - name: Remove TLS certificate and key files
+ ansible.builtin.file:
name: "/etc/ceph/ceph-dashboard.{{ item }}"
state: absent
loop:
- key
when: dashboard_protocol == "https"
- - name: remove ceph-mgr-dashboard package
- package:
+ - name: Remove ceph-mgr-dashboard package
+ ansible.builtin.package:
name: ceph-mgr-dashboard
state: absent
when: not containerized_deployment | bool
---
-
- name: Confirm removal of the iSCSI gateway configuration
hosts: localhost
vars_prompt:
- - name: purge_config
+ - name: purge_config # noqa: name[casing]
prompt: Which configuration elements should be purged? (all, lio or abort)
default: 'abort'
- private: no
+ private: false
tasks:
- name: Exit playbook if user aborted the purge
- fail:
+ ansible.builtin.fail:
msg: >
"You have aborted the purge of the iSCSI gateway configuration"
when: purge_config == 'abort'
- - name: set_fact igw_purge_type
- set_fact:
+ - name: Set_fact igw_purge_type
+ ansible.builtin.set_fact:
igw_purge_type: "{{ purge_config }}"
-- name: stopping the gateways
+- name: Stopping the gateways
hosts:
- iscsigws
- become: yes
- vars:
- - igw_purge_type: "{{ hostvars['localhost']['igw_purge_type'] }}"
-
+ become: true
tasks:
- - name: stopping and disabling iscsi daemons
- service:
+ - name: Stopping and disabling iscsi daemons
+ ansible.builtin.service:
name: "{{ item }}"
state: stopped
- enabled: no
+ enabled: false
with_items:
- rbd-target-gw
- rbd-target-api
- tcmu-runner
-- name: removing the gateway configuration
+- name: Removing the gateway configuration
hosts:
- iscsigws
- become: yes
+ become: true
vars:
- - igw_purge_type: "{{ hostvars['localhost']['igw_purge_type'] }}"
+ igw_purge_type: "{{ hostvars['localhost']['igw_purge_type'] }}"
tasks:
- - name: igw_purge | deleting configured rbd devices
- igw_purge: mode="disks"
+ - name: Igw_purge | deleting configured rbd devices
+ igw_purge:
+ mode: "disks"
when: igw_purge_type == 'all'
run_once: true
- - name: igw_purge | purging the gateway configuration
- igw_purge: mode="gateway"
+ - name: Igw_purge | purging the gateway configuration
+ igw_purge:
+ mode: "gateway"
run_once: true
- - name: restart and enable iscsi daemons
+ - name: Restart and enable iscsi daemons
when: igw_purge_type == 'lio'
- service:
+ ansible.builtin.service:
name: "{{ item }}"
state: started
- enabled: yes
+ enabled: true
with_items:
- tcmu-runner
- rbd-target-api
- rbd-target-gw
-- name: remove the gateways from the ceph dashboard
+- name: Remove the gateways from the ceph dashboard
hosts: mons
become: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: iscsi gateways with ceph dashboard
+ - name: Iscsi gateways with ceph dashboard
when: dashboard_enabled | bool
run_once: true
block:
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary
- - name: set_fact container_exec_cmd
- set_fact:
+ - name: Set_fact container_exec_cmd
+ ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
when: containerized_deployment | bool
- - name: get iscsi gateway list
- command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} dashboard iscsi-gateway-list -f json"
+ - name: Get iscsi gateway list
+ ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} dashboard iscsi-gateway-list -f json"
changed_when: false
register: gateways
- - name: remove iscsi gateways
- command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} dashboard iscsi-gateway-rm {{ item }}"
+ - name: Remove iscsi gateways
+ ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} dashboard iscsi-gateway-rm {{ item }}"
with_items: '{{ (gateways.stdout | from_json)["gateways"] }}'
+ changed_when: false
# admin_secret_key
#
# Additionally modify the users list and buckets list to create the
-# users and buckets you want
+# users and buckets you want
#
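# As a sketch, assuming the playbook below is saved under infrastructure-playbooks/
# (the path is an assumption, and the host, port and keys in the sample are placeholders):
#   ansible-playbook infrastructure-playbooks/rgw-add-users-buckets.yml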
-- name: add rgw users and buckets
+- name: Add rgw users and buckets
connection: local
hosts: localhost
- gather_facts: no
+ gather_facts: false
tasks:
- - name: add rgw users and buckets
- ceph_add_users_buckets:
- rgw_host: '172.20.0.2'
- port: 8000
- admin_access_key: '8W56BITCSX27CD555Z5B'
- admin_secret_key: 'JcrsUNDNPAvnAWHiBmwKOzMNreOIw2kJWAclQQ20'
- users:
- - username: 'test1'
- fullname: 'tester'
- email: 'dan1@email.com'
- maxbucket: 666
- suspend: false
- autogenkey: false
- accesskey: 'B3AR4Q33L59YV56A9A2F'
- secretkey: 'd84BRnMysnVGSyZiRlYUMduVgIarQWiNMdKzrF76'
- userquota: true
- usermaxsize: '1000'
- usermaxobjects: 3
- bucketquota: true
- bucketmaxsize: '1000'
- bucketmaxobjects: 3
- - username: 'test2'
- fullname: 'tester'
- buckets:
- - bucket: 'bucket1'
- user: 'test2'
- - bucket: 'bucket2'
- user: 'test1'
- - bucket: 'bucket3'
- user: 'test1'
- - bucket: 'bucket4'
- user: 'test1'
- - bucket: 'bucket5'
- user: 'test1'
- - bucket: 'bucket6'
- user: 'test2'
- - bucket: 'bucket7'
- user: 'test2'
- - bucket: 'bucket8'
- user: 'test2'
- - bucket: 'bucket9'
- user: 'test2'
- - bucket: 'bucket10'
- user: 'test2'
+ - name: Add rgw users and buckets
+ ceph_add_users_buckets:
+ rgw_host: '172.20.0.2'
+ port: 8000
+ admin_access_key: '8W56BITCSX27CD555Z5B'
+ admin_secret_key: 'JcrsUNDNPAvnAWHiBmwKOzMNreOIw2kJWAclQQ20'
+ users:
+ - username: 'test1'
+ fullname: 'tester'
+ email: 'dan1@email.com'
+ maxbucket: 666
+ suspend: false
+ autogenkey: false
+ accesskey: 'B3AR4Q33L59YV56A9A2F'
+ secretkey: 'd84BRnMysnVGSyZiRlYUMduVgIarQWiNMdKzrF76'
+ userquota: true
+ usermaxsize: '1000'
+ usermaxobjects: 3
+ bucketquota: true
+ bucketmaxsize: '1000'
+ bucketmaxobjects: 3
+ - username: 'test2'
+ fullname: 'tester'
+ buckets:
+ - bucket: 'bucket1'
+ user: 'test2'
+ - bucket: 'bucket2'
+ user: 'test1'
+ - bucket: 'bucket3'
+ user: 'test1'
+ - bucket: 'bucket4'
+ user: 'test1'
+ - bucket: 'bucket5'
+ user: 'test1'
+ - bucket: 'bucket6'
+ user: 'test2'
+ - bucket: 'bucket7'
+ user: 'test2'
+ - bucket: 'bucket8'
+ user: 'test2'
+ - bucket: 'bucket9'
+ user: 'test2'
+ - bucket: 'bucket10'
+ user: 'test2'
# If you run Red Hat Ceph Storage and are doing a **major** update (e.g. from 2 to 3), you have to change ceph_rhcs_version to a newer one
#
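# A sketch of such an override on the command line (assumed invocation; replace the
# version placeholder with the release you are actually targeting):
#   ansible-playbook infrastructure-playbooks/rolling_update.yml \
#     -e ceph_rhcs_version=<target version> -e ireallymeanit=yes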
-- name: confirm whether user really meant to upgrade the cluster
+- name: Confirm whether user really meant to upgrade the cluster
hosts: localhost
tags: always
become: false
gather_facts: false
vars:
- - mgr_group_name: mgrs
+ mgr_group_name: mgrs
vars_prompt:
- - name: ireallymeanit
+ - name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to upgrade the cluster?
default: 'no'
- private: no
+ private: false
tasks:
- - name: exit playbook, if user did not mean to upgrade cluster
- fail:
+ - name: Exit playbook, if user did not mean to upgrade cluster
+ ansible.builtin.fail:
msg: >
"Exiting rolling_update.yml playbook, cluster was NOT upgraded.
To upgrade the cluster, either say 'yes' on the prompt or
use `-e ireallymeanit=yes` on the command line when
invoking the playbook"
when: ireallymeanit != 'yes'
- - name: import_role ceph-defaults
- import_role:
+ - name: Import_role ceph-defaults
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: check if a legacy grafana-server group exists
- import_role:
+ - name: Check if a legacy grafana-server group exists
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: convert_grafana_server_group_name.yml
when: groups.get((grafana_server_group_name | default('grafana-server')), []) | length > 0
-- name: gather facts and check the init system
+- name: Gather facts and check the init system
hosts:
- "{{ mon_group_name|default('mons') }}"
- "{{ osd_group_name|default('osds') }}"
- "{{ iscsi_gw_group_name|default('iscsigws') }}"
- "{{ monitoring_group_name|default('monitoring') }}"
tags: always
- any_errors_fatal: True
- become: True
- gather_facts: False
+ any_errors_fatal: true
+ become: true
+ gather_facts: false
vars:
- delegate_facts_host: True
+ delegate_facts_host: true
tasks:
- - debug: msg="gather facts on all Ceph hosts for following reference"
+ - name: Gather facts on all Ceph hosts for following reference
+ ansible.builtin.debug:
+ msg: "gather facts on all Ceph hosts for following reference"
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: gather facts
- setup:
+ - name: Gather facts
+ ansible.builtin.setup:
gather_subset:
- 'all'
- '!facter'
- '!ohai'
when: not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, [])
- - name: gather and delegate facts
- setup:
+ - name: Gather and delegate facts
+ ansible.builtin.setup:
gather_subset:
- 'all'
- '!facter'
- '!ohai'
delegate_to: "{{ item }}"
- delegate_facts: True
+ delegate_facts: true
with_items: "{{ groups['all'] | difference(groups.get('clients', [])) }}"
run_once: true
when: delegate_facts_host | bool
- - name: set_fact rolling_update
- set_fact:
+ - name: Set_fact rolling_update
+ ansible.builtin.set_fact:
rolling_update: true
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
- - import_role:
+ - name: Import ceph-infra role
+ ansible.builtin.import_role:
name: ceph-infra
tags: ceph_infra
- - import_role:
+ - name: Import ceph-validate role
+ ansible.builtin.import_role:
name: ceph-validate
- - import_role:
+ - name: Import ceph-container-engine role
+ ansible.builtin.import_role:
name: ceph-container-engine
when:
- (group_names != ['clients']) or (inventory_hostname == groups.get('clients', [''])|first)
- (containerized_deployment | bool) or (dashboard_enabled | bool)
- - import_role:
+ - name: Import ceph-container-common role
+ ansible.builtin.import_role:
name: ceph-container-common
tasks_from: registry
when:
- (containerized_deployment | bool) or (dashboard_enabled | bool)
- ceph_docker_registry_auth | bool
- - name: check ceph release in container image
+ - name: Check ceph release in container image
when:
- groups.get(mon_group_name, []) | length > 0
- containerized_deployment | bool
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
block:
- - name: get the ceph release being deployed
- command: "{{ ceph_cmd }} --cluster {{ cluster }} --version"
+ - name: Get the ceph release being deployed
+ ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} --version"
register: ceph_version
changed_when: false
- name: check ceph release being deployed
- fail:
+ ansible.builtin.fail:
msg: "This version of ceph-ansible is intended for upgrading to Ceph Reef only."
when: "'reef' not in ceph_version.stdout.split()"
-- name: upgrade ceph mon cluster
+- name: Upgrade ceph mon cluster
tags: mons
vars:
health_mon_check_retries: 5
health_mon_check_delay: 15
- upgrade_ceph_packages: True
+ upgrade_ceph_packages: true
hosts: "{{ mon_group_name|default('mons') }}"
serial: 1
- become: True
+ become: true
gather_facts: false
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: upgrade ceph mon cluster
+
+ - name: Upgrade ceph mon cluster
block:
- - name: remove ceph aliases
- file:
+ - name: Remove ceph aliases
+ ansible.builtin.file:
path: /etc/profile.d/ceph-aliases.sh
state: absent
when: containerized_deployment | bool
- - name: set mon_host_count
- set_fact:
+ - name: Set mon_host_count
+ ansible.builtin.set_fact:
mon_host_count: "{{ groups[mon_group_name] | length }}"
- - name: fail when less than three monitors
- fail:
+ - name: Fail when less than three monitors
+ ansible.builtin.fail:
msg: "Upgrade of cluster with less than three monitors is not supported."
when: mon_host_count | int < 3
- - name: select a running monitor
- set_fact:
+ - name: Select a running monitor
+ ansible.builtin.set_fact:
mon_host: "{{ groups[mon_group_name] | difference([inventory_hostname]) | last }}"
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
- - block:
- - name: get ceph cluster status
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} health -f json"
+ - name: Check Ceph monitors quorum status
+ when: inventory_hostname == groups[mon_group_name] | first
+ block:
+ - name: Get ceph cluster status
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} health -f json"
register: check_cluster_health
delegate_to: "{{ mon_host }}"
+ changed_when: false
- - block:
- - name: display ceph health detail
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} health detail"
+ - name: Display health status before failing
+ when: (check_cluster_health.stdout | from_json).status == 'HEALTH_ERR'
+ block:
+ - name: Display ceph health detail
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} health detail"
delegate_to: "{{ mon_host }}"
+ changed_when: false
- - name: fail if cluster isn't in an acceptable state
- fail:
+ - name: Fail if cluster isn't in an acceptable state
+ ansible.builtin.fail:
msg: "cluster is not in an acceptable state!"
- when: (check_cluster_health.stdout | from_json).status == 'HEALTH_ERR'
- - name: get the ceph quorum status
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} quorum_status -f json"
+ - name: Get the ceph quorum status
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} quorum_status -f json"
register: check_quorum_status
delegate_to: "{{ mon_host }}"
+ changed_when: false
- - name: fail if the cluster quorum isn't in an acceptable state
- fail:
+ - name: Fail if the cluster quorum isn't in an acceptable state
+ ansible.builtin.fail:
msg: "cluster quorum is not in an acceptable state!"
when: (check_quorum_status.stdout | from_json).quorum | length != groups[mon_group_name] | length
- when: inventory_hostname == groups[mon_group_name] | first
- - name: ensure /var/lib/ceph/bootstrap-rbd-mirror is present
- file:
+ - name: Ensure /var/lib/ceph/bootstrap-rbd-mirror is present
+ ansible.builtin.file:
path: /var/lib/ceph/bootstrap-rbd-mirror
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
- cephx | bool
- inventory_hostname == groups[mon_group_name][0]
- - name: create potentially missing keys (rbd and rbd-mirror)
+ - name: Create potentially missing keys (rbd and rbd-mirror)
ceph_key:
name: "client.{{ item.0 }}"
dest: "/var/lib/ceph/{{ item.0 }}/"
# NOTE: we mask the service so the RPM can't restart it
# after the package gets upgraded
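# (Illustration only: the task below is roughly equivalent to running
#  `systemctl stop ceph-mon@<hostname>` followed by `systemctl mask ceph-mon@<hostname>`
#  on the monitor, so the package's post-install scripts cannot start it again.)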
- - name: stop ceph mon
- systemd:
+ - name: Stop ceph mon
+ ansible.builtin.systemd:
name: ceph-mon@{{ item }}
state: stopped
- enabled: no
- masked: yes
+ enabled: false
+ masked: true
with_items:
- "{{ ansible_facts['hostname'] }}"
- "{{ ansible_facts['fqdn'] }}"
# only mask the service for mgr because it must be upgraded
# after ALL monitors, even when collocated
- - name: mask the mgr service
- systemd:
+ - name: Mask the mgr service
+ ansible.builtin.systemd:
name: ceph-mgr@{{ ansible_facts['hostname'] }}
- masked: yes
+ masked: true
when: inventory_hostname in groups[mgr_group_name] | default([])
or groups[mgr_group_name] | default([]) | length == 0
- - import_role:
+ - name: Import ceph-handler role
+ ansible.builtin.import_role:
name: ceph-handler
- - import_role:
+
+ - name: Import ceph-common role
+ ansible.builtin.import_role:
name: ceph-common
when: not containerized_deployment | bool
- - import_role:
+
+ - name: Import ceph-container-common role
+ ansible.builtin.import_role:
name: ceph-container-common
when: containerized_deployment | bool
- - import_role:
+
+ - name: Import ceph-config role
+ ansible.builtin.import_role:
name: ceph-config
- - import_role:
+
+ - name: Import ceph-mon role
+ ansible.builtin.import_role:
name: ceph-mon
- - name: start ceph mgr
- systemd:
+ - name: Start ceph mgr
+ ansible.builtin.systemd:
name: ceph-mgr@{{ ansible_facts['hostname'] }}
state: started
- enabled: yes
- masked: no
+ enabled: true
+ masked: false
when: inventory_hostname in groups[mgr_group_name] | default([])
or groups[mgr_group_name] | default([]) | length == 0
- - name: import_role ceph-facts
- import_role:
+ - name: Import_role ceph-facts
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: set_monitor_address.yml
delegate_to: "{{ groups[mon_group_name][0] }}"
delegate_facts: true
- - name: non container | waiting for the monitor to join the quorum...
- command: ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" quorum_status --format json
+ - name: Non container | waiting for the monitor to join the quorum...
+ ansible.builtin.command: ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" quorum_status --format json
register: ceph_health_raw
until:
- ceph_health_raw.rc == 0
- (hostvars[inventory_hostname]['ansible_facts']['hostname'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"] or
  hostvars[inventory_hostname]['ansible_facts']['fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
retries: "{{ health_mon_check_retries }}"
delay: "{{ health_mon_check_delay }}"
+ changed_when: false
when: not containerized_deployment | bool
- - name: container | waiting for the containerized monitor to join the quorum...
- command: >
+ - name: Container | waiting for the containerized monitor to join the quorum...
+ ansible.builtin.command: >
{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }} ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" quorum_status --format json
register: ceph_health_raw
until:
- (hostvars[inventory_hostname]['ansible_facts']['hostname'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"] or
  hostvars[inventory_hostname]['ansible_facts']['fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
retries: "{{ health_mon_check_retries }}"
delay: "{{ health_mon_check_delay }}"
+ changed_when: false
when: containerized_deployment | bool
rescue:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: unmask the mon service
- systemd:
+ - name: Unmask the mon service
+ ansible.builtin.systemd:
name: ceph-mon@{{ ansible_facts['hostname'] }}
- enabled: yes
- masked: no
+ enabled: true
+ masked: false
- - name: unmask the mgr service
- systemd:
+ - name: Unmask the mgr service
+ ansible.builtin.systemd:
name: ceph-mgr@{{ ansible_facts['hostname'] }}
- masked: no
+ masked: false
when: inventory_hostname in groups[mgr_group_name] | default([])
or groups[mgr_group_name] | default([]) | length == 0
- - name: stop the playbook execution
- fail:
+ - name: Stop the playbook execution
+ ansible.builtin.fail:
msg: "There was an error during monitor upgrade. Please, check the previous task results."
-- name: reset mon_host
+- name: Reset mon_host
hosts: "{{ mon_group_name|default('mons') }}"
tags: always
- become: True
+ become: true
gather_facts: false
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: reset mon_host fact
- set_fact:
+ - name: Reset mon_host fact
+ ansible.builtin.set_fact:
mon_host: "{{ groups[mon_group_name][0] }}"
-- name: upgrade ceph mgr nodes when implicitly collocated on monitors
+- name: Upgrade ceph mgr nodes when implicitly collocated on monitors
vars:
health_mon_check_retries: 5
health_mon_check_delay: 15
- upgrade_ceph_packages: True
+ upgrade_ceph_packages: true
hosts: "{{ mon_group_name|default('mons') }}"
tags: mgrs
serial: 1
- become: True
+ become: true
gather_facts: false
tasks:
- - name: upgrade mgrs when no mgr group explicitly defined in inventory
+ - name: Upgrade mgrs when no mgr group explicitly defined in inventory
when: groups.get(mgr_group_name, []) | length == 0
block:
- - name: stop ceph mgr
- systemd:
+ - name: Stop ceph mgr
+ ansible.builtin.systemd:
name: ceph-mgr@{{ ansible_facts['hostname'] }}
state: stopped
- masked: yes
+ masked: true
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
- - import_role:
+
+ - name: Import ceph-handler role
+ ansible.builtin.import_role:
name: ceph-handler
- - import_role:
+
+ - name: Import ceph-common role
+ ansible.builtin.import_role:
name: ceph-common
when: not containerized_deployment | bool
- - import_role:
+
+ - name: Import ceph-container-common role
+ ansible.builtin.import_role:
name: ceph-container-common
when: containerized_deployment | bool
- - import_role:
+
+ - name: Import ceph-config role
+ ansible.builtin.import_role:
name: ceph-config
- - import_role:
+
+ - name: Import ceph-mgr role
+ ansible.builtin.import_role:
name: ceph-mgr
-- name: upgrade ceph mgr nodes
+- name: Upgrade ceph mgr nodes
vars:
- upgrade_ceph_packages: True
+ upgrade_ceph_packages: true
ceph_release: "{{ ceph_stable_release }}"
hosts: "{{ mgr_group_name|default('mgrs') }}"
tags: mgrs
serial: 1
- become: True
+ become: true
gather_facts: false
tasks:
# The following task has a failed_when: false
# to handle the scenario where no mgr existed before the upgrade
# or if we run a Ceph cluster before Luminous
- - name: stop ceph mgr
- systemd:
+ - name: Stop ceph mgr
+ ansible.builtin.systemd:
name: ceph-mgr@{{ ansible_facts['hostname'] }}
state: stopped
- enabled: no
- masked: no
+ enabled: false
+ masked: false
failed_when: false
- - name: mask ceph mgr systemd unit
- systemd:
+ - name: Mask ceph mgr systemd unit
+ ansible.builtin.systemd:
name: ceph-mgr@{{ ansible_facts['hostname'] }}
- masked: yes
+ masked: true
failed_when: false
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
- - import_role:
+
+ - name: Import ceph-handler role
+ ansible.builtin.import_role:
name: ceph-handler
- - import_role:
+
+ - name: Import ceph-common role
+ ansible.builtin.import_role:
name: ceph-common
when: not containerized_deployment | bool
- - import_role:
+
+ - name: Import ceph-container-common role
+ ansible.builtin.import_role:
name: ceph-container-common
when: containerized_deployment | bool
- - import_role:
+
+ - name: Import ceph-config role
+ ansible.builtin.import_role:
name: ceph-config
- - import_role:
+
+ - name: Import ceph-mgr role
+ ansible.builtin.import_role:
name: ceph-mgr
-- name: set osd flags
+- name: Set osd flags
hosts: "{{ osd_group_name | default('osds') }}"
tags: osds
- become: True
+ become: true
gather_facts: false
tasks:
- - import_role:
+ - name: Import ceph-defaults
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary.yml
- - name: set osd flags, disable autoscaler and balancer
+ - name: Set osd flags, disable autoscaler and balancer
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
block:
- - name: get pool list
- command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json"
+ - name: Get pool list
+ ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json"
register: pool_list
changed_when: false
check_mode: false
- - name: get balancer module status
- command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json"
+ - name: Get balancer module status
+ ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json"
register: balancer_status_update
run_once: true
changed_when: false
check_mode: false
- - name: set_fact pools_pgautoscaler_mode
- set_fact:
+ - name: Set_fact pools_pgautoscaler_mode
+ ansible.builtin.set_fact:
pools_pgautoscaler_mode: "{{ pools_pgautoscaler_mode | default([]) | union([{'name': item.pool_name, 'mode': item.pg_autoscale_mode}]) }}"
with_items: "{{ pool_list.stdout | default('{}') | from_json }}"
- - name: disable balancer
- command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off"
+ - name: Disable balancer
+ ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off"
changed_when: false
when: (balancer_status_update.stdout | from_json)['active'] | bool
- - name: disable pg autoscale on pools
+ - name: Disable pg autoscale on pools
ceph_pool:
name: "{{ item.name }}"
cluster: "{{ cluster }}"
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
- - name: set osd flags
+ - name: Set osd flags
ceph_osd_flag:
name: "{{ item }}"
cluster: "{{ cluster }}"
- noout
- nodeep-scrub
-- name: upgrade ceph osds cluster
+- name: Upgrade ceph osds cluster
vars:
health_osd_check_retries: 600
health_osd_check_delay: 2
- upgrade_ceph_packages: True
- hosts: "{{ osd_group_name|default('osds') }}"
+ upgrade_ceph_packages: true
+ hosts: osds
tags: osds
serial: 1
- become: True
+ become: true
gather_facts: false
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
- - name: get osd numbers - non container
- shell: if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | sed 's/.*-//' ; fi # noqa 306
+ - name: Get osd numbers - non container
+ ansible.builtin.shell: if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | sed 's/.*-//' ; fi # noqa: risky-shell-pipe
register: osd_ids
changed_when: false
- - name: set num_osds
- set_fact:
- num_osds: "{{ osd_ids.stdout_lines|default([])|length }}"
+ - name: Set num_osds
+ ansible.builtin.set_fact:
+ num_osds: "{{ osd_ids.stdout_lines | default([]) | length }}"
- - name: set_fact container_exec_cmd_osd
- set_fact:
+ - name: Set_fact container_exec_cmd_osd
+ ansible.builtin.set_fact:
container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
when: containerized_deployment | bool
- - name: stop ceph osd
- systemd:
+ - name: Stop ceph osd
+ ansible.builtin.systemd:
name: ceph-osd@{{ item }}
state: stopped
- enabled: no
- masked: yes
+ enabled: false
+ masked: true
with_items: "{{ osd_ids.stdout_lines }}"
- - import_role:
+ - name: Import ceph-handler role
+ ansible.builtin.import_role:
name: ceph-handler
- - import_role:
+
+ - name: Import ceph-common role
+ ansible.builtin.import_role:
name: ceph-common
when: not containerized_deployment | bool
- - import_role:
+
+ - name: Import ceph-container-common role
+ ansible.builtin.import_role:
name: ceph-container-common
when: containerized_deployment | bool
- - import_role:
+
+ - name: Import ceph-config role
+ ansible.builtin.import_role:
name: ceph-config
- - import_role:
+
+ - name: Import ceph-osd role
+ ansible.builtin.import_role:
name: ceph-osd
- - name: scan ceph-disk osds with ceph-volume if deploying nautilus
+ - name: Scan ceph-disk osds with ceph-volume if deploying nautilus
ceph_volume_simple_scan:
cluster: "{{ cluster }}"
force: true
CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
when: not containerized_deployment | bool
- - name: activate scanned ceph-disk osds and migrate to ceph-volume if deploying nautilus
+ - name: Activate scanned ceph-disk osds and migrate to ceph-volume if deploying nautilus
ceph_volume_simple_activate:
cluster: "{{ cluster }}"
osd_all: true
CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
when: not containerized_deployment | bool
- - name: waiting for clean pgs...
- command: "{{ container_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} pg stat --format json"
+ - name: Waiting for clean pgs...
+ ansible.builtin.command: "{{ container_exec_cmd_update_osd | default('') }} ceph --cluster {{ cluster }} pg stat --format json"
register: ceph_health_post
until: >
(((ceph_health_post.stdout | from_json).pg_summary.num_pg_by_state | length) > 0)
delay: "{{ health_osd_check_delay }}"
-- name: complete osd upgrade
+- name: Complete osd upgrade
hosts: "{{ osd_group_name | default('osds') }}"
tags: osds
- become: True
+ become: true
gather_facts: false
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary.yml
- - name: unset osd flags, re-enable pg autoscaler and balancer
+ - name: Unset osd flags, re-enable pg autoscaler and balancer
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
block:
- - name: re-enable pg autoscale on pools
+ - name: Re-enable pg autoscale on pools
ceph_pool:
name: "{{ item.name }}"
cluster: "{{ cluster }}"
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
- - name: unset osd flags
+ - name: Unset osd flags
ceph_osd_flag:
name: "{{ item }}"
cluster: "{{ cluster }}"
- noout
- nodeep-scrub
- - name: re-enable balancer
- command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on"
+ - name: Re-enable balancer
+ ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on"
changed_when: false
when: (balancer_status_update.stdout | from_json)['active'] | bool
-- name: upgrade ceph mdss cluster, deactivate all rank > 0
+- name: Upgrade ceph mdss cluster, deactivate all rank > 0
hosts: "{{ mon_group_name | default('mons') }}[0]"
tags: mdss
become: true
gather_facts: false
tasks:
- - name: deactivate all mds rank > 0
+ - name: Deactivate all mds rank > 0
when: groups.get(mds_group_name, []) | length > 0
block:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
- - name: deactivate all mds rank > 0 if any
+ - name: Deactivate all mds rank > 0 if any
when: groups.get(mds_group_name, []) | length > 1
block:
- - name: set max_mds 1 on ceph fs
+ - name: Set max_mds 1 on ceph fs
ceph_fs:
name: "{{ cephfs }}"
cluster: "{{ cluster }}"
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
- - name: wait until only rank 0 is up
+ - name: Wait until only rank 0 is up
ceph_fs:
name: "{{ cephfs }}"
cluster: "{{ cluster }}"
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
- - name: get name of remaining active mds
- command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs dump -f json"
+ - name: Get name of remaining active mds
+ ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs dump -f json"
changed_when: false
register: _mds_active_name
- - name: set_fact mds_active_name
- set_fact:
+ - name: Set_fact mds_active_name
+ ansible.builtin.set_fact:
mds_active_name: "{{ (_mds_active_name.stdout | from_json)['filesystems'][0]['mdsmap']['info'][item.key]['name'] }}"
with_dict: "{{ (_mds_active_name.stdout | default('{}') | from_json).filesystems[0]['mdsmap']['info'] | default({}) }}"
- - name: set_fact mds_active_host
- set_fact:
+ - name: Set_fact mds_active_host
+ ansible.builtin.set_fact:
mds_active_host: "{{ [hostvars[item]['inventory_hostname']] }}"
with_items: "{{ groups[mds_group_name] }}"
when: hostvars[item]['ansible_facts']['hostname'] == mds_active_name
- - name: create standby_mdss group
- add_host:
+ - name: Create standby_mdss group
+ ansible.builtin.add_host:
name: "{{ item }}"
groups: standby_mdss
ansible_host: "{{ hostvars[item]['ansible_host'] | default(omit) }}"
ansible_port: "{{ hostvars[item]['ansible_port'] | default(omit) }}"
with_items: "{{ groups[mds_group_name] | difference(mds_active_host) }}"
- - name: stop standby ceph mds
- systemd:
+ - name: Stop standby ceph mds
+ ansible.builtin.systemd:
name: "ceph-mds@{{ hostvars[item]['ansible_facts']['hostname'] }}"
state: stopped
- enabled: no
+ enabled: false
delegate_to: "{{ item }}"
with_items: "{{ groups['standby_mdss'] }}"
when: groups['standby_mdss'] | default([]) | length > 0
# dedicated task for masking systemd unit
# somehow, having a single task doesn't work in containerized context
- - name: mask systemd units for standby ceph mds
- systemd:
+ - name: Mask systemd units for standby ceph mds
+ ansible.builtin.systemd:
name: "ceph-mds@{{ hostvars[item]['ansible_facts']['hostname'] }}"
- masked: yes
+ masked: true
delegate_to: "{{ item }}"
with_items: "{{ groups['standby_mdss'] }}"
when: groups['standby_mdss'] | default([]) | length > 0
- - name: wait until all standbys mds are stopped
- command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs dump -f json"
+ - name: Wait until all standbys mds are stopped
+ ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs dump -f json"
changed_when: false
register: wait_standbys_down
retries: 300
delay: 5
until: (wait_standbys_down.stdout | from_json).standbys | length == 0
- - name: create active_mdss group
- add_host:
+ - name: Create active_mdss group
+ ansible.builtin.add_host:
name: "{{ mds_active_host[0] if mds_active_host is defined else groups.get(mds_group_name)[0] }}"
groups: active_mdss
ansible_host: "{{ hostvars[mds_active_host[0] if mds_active_host is defined else groups.get(mds_group_name)[0]]['ansible_host'] | default(omit) }}"
ansible_port: "{{ hostvars[mds_active_host[0] if mds_active_host is defined else groups.get(mds_group_name)[0]]['ansible_port'] | default(omit) }}"
-- name: upgrade active mds
+- name: Upgrade active mds
vars:
- upgrade_ceph_packages: True
+ upgrade_ceph_packages: true
hosts: active_mdss
tags: mdss
become: true
gather_facts: false
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
- - name: prevent restart from the packaging
- systemd:
+ - name: Prevent restart from the packaging
+ ansible.builtin.systemd:
name: ceph-mds@{{ ansible_facts['hostname'] }}
- enabled: no
- masked: yes
+ enabled: false
+ masked: true
when: not containerized_deployment | bool
- - import_role:
+ - name: Import ceph-handler role
+ ansible.builtin.import_role:
name: ceph-handler
- - import_role:
+
+ - name: Import ceph-common role
+ ansible.builtin.import_role:
name: ceph-common
when: not containerized_deployment | bool
- - import_role:
+
+ - name: Import ceph-container-common role
+ ansible.builtin.import_role:
name: ceph-container-common
when: containerized_deployment | bool
- - import_role:
+
+ - name: Import ceph-config role
+ ansible.builtin.import_role:
name: ceph-config
- - import_role:
+
+ - name: Import ceph-mds role
+ ansible.builtin.import_role:
name: ceph-mds
- - name: restart ceph mds
- systemd:
+ - name: Restart ceph mds
+ ansible.builtin.systemd:
name: ceph-mds@{{ ansible_facts['hostname'] }}
state: restarted
- enabled: yes
- masked: no
+ enabled: true
+ masked: false
when: not containerized_deployment | bool
- - name: restart active mds
- command: "{{ container_binary }} stop ceph-mds-{{ ansible_facts['hostname'] }}"
+ - name: Restart active mds
+ ansible.builtin.command: "{{ container_binary }} stop ceph-mds-{{ ansible_facts['hostname'] }}"
changed_when: false
when: containerized_deployment | bool
-- name: upgrade standbys ceph mdss cluster
+- name: Upgrade standbys ceph mdss cluster
vars:
- upgrade_ceph_packages: True
+ upgrade_ceph_packages: true
hosts: standby_mdss
tags: mdss
- become: True
+ become: true
gather_facts: false
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
- - name: prevent restarts from the packaging
- systemd:
+ - name: Prevent restarts from the packaging
+ ansible.builtin.systemd:
name: ceph-mds@{{ ansible_facts['hostname'] }}
- enabled: no
- masked: yes
+ enabled: false
+ masked: true
when: not containerized_deployment | bool
- - import_role:
+ - name: Import ceph-handler role
+ ansible.builtin.import_role:
name: ceph-handler
- - import_role:
+
+ - name: Import ceph-common role
+ ansible.builtin.import_role:
name: ceph-common
when: not containerized_deployment | bool
- - import_role:
+
+ - name: Import ceph-container-common role
+ ansible.builtin.import_role:
name: ceph-container-common
when: containerized_deployment | bool
- - import_role:
+
+ - name: Import ceph-config role
+ ansible.builtin.import_role:
name: ceph-config
- - import_role:
+
+ - name: Import ceph-mds role
+ ansible.builtin.import_role:
name: ceph-mds
- - name: set max_mds
+ - name: Set max_mds
ceph_fs:
name: "{{ cephfs }}"
cluster: "{{ cluster }}"
when: inventory_hostname == groups['standby_mdss'] | last
-- name: upgrade ceph rgws cluster
+- name: Upgrade ceph rgws cluster
vars:
- upgrade_ceph_packages: True
+ upgrade_ceph_packages: true
hosts: "{{ rgw_group_name|default('rgws') }}"
tags: rgws
serial: 1
- become: True
+ become: true
gather_facts: false
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
- - name: stop ceph rgw when upgrading from stable-3.2
- systemd:
+ - name: Stop ceph rgw when upgrading from stable-3.2 # noqa: ignore-errors
+ ansible.builtin.systemd:
name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}
state: stopped
- enabled: no
- masked: yes
- ignore_errors: True
+ enabled: false
+ masked: true
+ ignore_errors: true
- - name: stop ceph rgw
- systemd:
+ - name: Stop ceph rgw
+ ansible.builtin.systemd:
name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
state: stopped
- enabled: no
- masked: yes
+ enabled: false
+ masked: true
with_items: "{{ rgw_instances }}"
- - import_role:
+ - name: Import ceph-handler role
+ ansible.builtin.import_role:
name: ceph-handler
- - import_role:
+
+ - name: Import ceph-common role
+ ansible.builtin.import_role:
name: ceph-common
when: not containerized_deployment | bool
- - import_role:
+
+ - name: Import ceph-container-common role
+ ansible.builtin.import_role:
name: ceph-container-common
when: containerized_deployment | bool
- - import_role:
+
+ - name: Import ceph-config role
+ ansible.builtin.import_role:
name: ceph-config
- - import_role:
+
+ - name: Import ceph-rgw role
+ ansible.builtin.import_role:
name: ceph-rgw
-- name: upgrade ceph rbd mirror node
+- name: Upgrade ceph rbd mirror node
vars:
- upgrade_ceph_packages: True
+ upgrade_ceph_packages: true
hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"
tags: rbdmirrors
serial: 1
- become: True
+ become: true
gather_facts: false
tasks:
- - name: check for ceph rbd mirror services
- command: systemctl show --no-pager --property=Id --state=enabled ceph-rbd-mirror@* # noqa 303
+ - name: Check for ceph rbd mirror services
+ ansible.builtin.command: systemctl show --no-pager --property=Id --state=enabled ceph-rbd-mirror@* # noqa command-instead-of-module
changed_when: false
register: rbdmirror_services
- - name: stop ceph rbd mirror
- service:
+ - name: Stop ceph rbd mirror
+ ansible.builtin.service:
name: "{{ item.split('=')[1] }}"
state: stopped
- enabled: no
- masked: yes
+ enabled: false
+ masked: true
loop: "{{ rbdmirror_services.stdout_lines }}"
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
- - import_role:
+
+ - name: Import ceph-handler role
+ ansible.builtin.import_role:
name: ceph-handler
- - import_role:
+
+ - name: Import ceph-common role
+ ansible.builtin.import_role:
name: ceph-common
when: not containerized_deployment | bool
- - import_role:
+
+ - name: Import ceph-container-common role
+ ansible.builtin.import_role:
name: ceph-container-common
when: containerized_deployment | bool
- - import_role:
+
+ - name: Import ceph-config role
+ ansible.builtin.import_role:
name: ceph-config
- - import_role:
+
+ - name: Import ceph-rbd-mirror role
+ ansible.builtin.import_role:
name: ceph-rbd-mirror
-- name: upgrade ceph nfs node
+- name: Upgrade ceph nfs node
vars:
- upgrade_ceph_packages: True
+ upgrade_ceph_packages: true
hosts: "{{ nfs_group_name|default('nfss') }}"
tags: nfss
serial: 1
- become: True
+ become: true
gather_facts: false
tasks:
# failed_when: false is here so that if we upgrade
# from a version of ceph that does not have nfs-ganesha
# then this task will not fail
- - name: stop ceph nfs
- systemd:
+ - name: Stop ceph nfs
+ ansible.builtin.systemd:
name: nfs-ganesha
state: stopped
- enabled: no
- masked: yes
+ enabled: false
+ masked: true
failed_when: false
when: not containerized_deployment | bool
- - name: systemd stop nfs container
- systemd:
+ - name: Systemd stop nfs container
+ ansible.builtin.systemd:
name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}
state: stopped
- enabled: no
- masked: yes
+ enabled: false
+ masked: true
failed_when: false
when:
- ceph_nfs_enable_service | bool
- containerized_deployment | bool
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
- - import_role:
+
+ - name: Import ceph-handler role
+ ansible.builtin.import_role:
name: ceph-handler
- - import_role:
+
+ - name: Import ceph-common role
+ ansible.builtin.import_role:
name: ceph-common
when: not containerized_deployment | bool
- - import_role:
+
+ - name: Import ceph-container-common role
+ ansible.builtin.import_role:
name: ceph-container-common
when: containerized_deployment | bool
- - import_role:
+
+ - name: Import ceph-config role
+ ansible.builtin.import_role:
name: ceph-config
- - import_role:
+
+ - name: Import ceph-nfs role
+ ansible.builtin.import_role:
name: ceph-nfs
-- name: upgrade ceph iscsi gateway node
+- name: Upgrade ceph iscsi gateway node
vars:
- upgrade_ceph_packages: True
+ upgrade_ceph_packages: true
hosts: "{{ iscsi_gw_group_name|default('iscsigws') }}"
tags: iscsigws
serial: 1
- become: True
+ become: true
gather_facts: false
tasks:
# failed_when: false is here so that if we upgrade
# from a version of ceph that does not have iscsi gws
# then this task will not fail
- - name: stop ceph iscsi services
- systemd:
+ - name: Stop ceph iscsi services
+ ansible.builtin.systemd:
name: '{{ item }}'
state: stopped
- enabled: no
- masked: yes
+ enabled: false
+ masked: true
failed_when: false
with_items:
- rbd-target-api
- rbd-target-gw
- tcmu-runner
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
- - import_role:
+
+ - name: Import ceph-handler role
+ ansible.builtin.import_role:
name: ceph-handler
- - import_role:
+
+ - name: Import ceph-common role
+ ansible.builtin.import_role:
name: ceph-common
when: not containerized_deployment | bool
- - import_role:
+
+ - name: Import ceph-container-common role
+ ansible.builtin.import_role:
name: ceph-container-common
when: containerized_deployment | bool
- - import_role:
+
+ - name: Import ceph-config role
+ ansible.builtin.import_role:
name: ceph-config
- - import_role:
+
+ - name: Import ceph-iscsi-gw role
+ ansible.builtin.import_role:
name: ceph-iscsi-gw
-- name: upgrade ceph client node
+- name: Upgrade ceph client node
vars:
- upgrade_ceph_packages: True
+ upgrade_ceph_packages: true
hosts: "{{ client_group_name|default('clients') }}"
tags: clients
serial: "{{ client_update_batch | default(20) }}"
- become: True
+ become: true
gather_facts: false
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary.yml
when: containerized_deployment | bool
- - import_role:
+ - name: Import ceph-handler role
+ ansible.builtin.import_role:
name: ceph-handler
- - import_role:
+ - name: Import ceph-common role
+ ansible.builtin.import_role:
name: ceph-common
when: not containerized_deployment | bool
- - import_role:
+ - name: Import ceph-container-common role
+ ansible.builtin.import_role:
name: ceph-container-common
when:
- (group_names != ['clients']) or (inventory_hostname == groups.get('clients', [''])|first)
- containerized_deployment | bool
-- name: upgrade ceph-crash daemons
+- name: Upgrade ceph-crash daemons
hosts:
- "{{ mon_group_name | default('mons') }}"
- "{{ osd_group_name | default('osds') }}"
gather_facts: false
become: true
tasks:
- - name: stop the ceph-crash service
- systemd:
+ - name: Stop the ceph-crash service
+ ansible.builtin.systemd:
name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}"
state: stopped
# it needs to be done in a separate task otherwise the stop just before doesn't work.
- - name: mask and disable the ceph-crash service
- systemd:
+ - name: Mask and disable the ceph-crash service
+ ansible.builtin.systemd:
name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}"
- enabled: no
- masked: yes
+ enabled: false
+ masked: true
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary.yml
- - import_role:
+ - name: Import ceph-handler role
+ ansible.builtin.import_role:
name: ceph-handler
- - import_role:
+ - name: Import ceph-crash role
+ ansible.builtin.import_role:
name: ceph-crash
-- name: complete upgrade
+- name: Complete upgrade
hosts: "{{ mon_group_name | default('mons') }}"
tags: post_upgrade
- become: True
+ become: true
gather_facts: false
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary.yml
-    - name: container | disallow pre-reef OSDs and enable all new reef-only functionality
-      command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} osd require-osd-release reef"
+    - name: Container | disallow pre-reef OSDs and enable all new reef-only functionality
+      ansible.builtin.command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} osd require-osd-release reef"
delegate_to: "{{ groups[mon_group_name][0] }}"
- run_once: True
+ run_once: true
+ changed_when: false
when:
- containerized_deployment | bool
- groups.get(mon_group_name, []) | length > 0
-    - name: non container | disallow pre-reef OSDs and enable all new reef-only functionality
-      command: "ceph --cluster {{ cluster }} osd require-osd-release reef"
+    - name: Non container | disallow pre-reef OSDs and enable all new reef-only functionality
+      ansible.builtin.command: "ceph --cluster {{ cluster }} osd require-osd-release reef"
delegate_to: "{{ groups[mon_group_name][0] }}"
- run_once: True
+ run_once: true
+ changed_when: false
when:
- not containerized_deployment | bool
- groups.get(mon_group_name, []) | length > 0
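    # Hedged verification hint (not part of the play): on a monitor node,
    # `ceph --cluster <cluster> osd dump | grep require_osd_release` should now report "reef".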
-- name: upgrade node-exporter
+- name: Upgrade node-exporter
hosts:
- "{{ mon_group_name|default('mons') }}"
- "{{ osd_group_name|default('osds') }}"
gather_facts: false
become: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: with dashboard configuration
+ - name: With dashboard configuration
when: dashboard_enabled | bool
block:
- - name: stop node-exporter
- service:
+ - name: Stop node-exporter
+ ansible.builtin.service:
name: node_exporter
state: stopped
failed_when: false
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
- - import_role:
+ - name: Import ceph-container-engine role
+ ansible.builtin.import_role:
name: ceph-container-engine
- - import_role:
+ - name: Import ceph-container-common role
+ ansible.builtin.import_role:
name: ceph-container-common
tasks_from: registry
when:
- not containerized_deployment | bool
- ceph_docker_registry_auth | bool
- - import_role:
+ - name: Import ceph-node-exporter role
+ ansible.builtin.import_role:
name: ceph-node-exporter
-- name: upgrade monitoring node
+- name: Upgrade monitoring node
hosts: "{{ monitoring_group_name|default('monitoring') }}"
tags: monitoring
gather_facts: false
become: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: with dashboard configuration
+ - name: With dashboard configuration
when: dashboard_enabled | bool
block:
- - name: stop monitoring services
- service:
+ - name: Stop monitoring services
+ ansible.builtin.service:
name: '{{ item }}'
state: stopped
failed_when: false
- prometheus
- grafana-server
- - import_role:
- name: ceph-facts
- - import_role:
+ # - name: Import ceph-facts role
+ # ansible.builtin.import_role:
+ # name: ceph-facts
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: grafana
- - import_role:
+ - name: Import ceph-prometheus role
+ ansible.builtin.import_role:
name: ceph-prometheus
- - import_role:
+ - name: Import ceph-grafana role
+ ansible.builtin.import_role:
name: ceph-grafana
-- name: upgrade ceph dashboard
+- name: Upgrade ceph dashboard
hosts: "{{ groups[mgr_group_name|default('mgrs')] | default(groups[mon_group_name|default('mons')]) | default(omit) }}"
tags: monitoring
gather_facts: false
become: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: with dashboard configuration
+ - name: With dashboard configuration
when: dashboard_enabled | bool
block:
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
- - import_role:
+
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: grafana
- - import_role:
+
+ - name: Import ceph-dashboard role
+ ansible.builtin.import_role:
name: ceph-dashboard
-- name: switch any existing crush buckets to straw2
+- name: Switch any existing crush buckets to straw2
hosts: "{{ mon_group_name | default('mons') }}[0]"
tags: post_upgrade
become: true
any_errors_fatal: true
gather_facts: false
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary.yml
- - name: set_fact ceph_cmd
- set_fact:
+ - name: Set_fact ceph_cmd
+ ansible.builtin.set_fact:
ceph_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }}"
- - name: backup the crushmap
- command: "{{ ceph_cmd }} --cluster {{ cluster }} osd getcrushmap -o /etc/ceph/{{ cluster }}-crushmap"
+ - name: Backup the crushmap
+ ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} osd getcrushmap -o /etc/ceph/{{ cluster }}-crushmap"
changed_when: false
- - block:
- - name: switch crush buckets to straw2
- command: "{{ ceph_cmd }} --cluster {{ cluster }} osd crush set-all-straw-buckets-to-straw2"
+ - name: Migrate crush buckets to straw2
+ block:
+ - name: Switch crush buckets to straw2
+ ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} osd crush set-all-straw-buckets-to-straw2"
changed_when: false
rescue:
- - name: restore the crushmap
- command: "{{ ceph_cmd }} --cluster {{ cluster }} osd setcrushmap -i /etc/ceph/{{ cluster }}-crushmap"
+ - name: Restore the crushmap
+ ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} osd setcrushmap -i /etc/ceph/{{ cluster }}-crushmap"
changed_when: false
- - name: inform that the switch to straw2 buckets failed
- fail:
+ - name: Inform that the switch to straw2 buckets failed
+ ansible.builtin.fail:
msg: >
"An attempt to switch to straw2 bucket was made but failed.
Check the cluster status."
- - name: remove crushmap backup
- file:
+ - name: Remove crushmap backup
+ ansible.builtin.file:
path: /etc/ceph/{{ cluster }}-crushmap
state: absent
-- name: show ceph status
+- name: Show ceph status
hosts: "{{ mon_group_name|default('mons') }}"
tags: always
- become: True
+ become: true
gather_facts: false
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: set_fact container_exec_cmd_status
- set_fact:
+ - name: Set_fact container_exec_cmd_status
+ ansible.builtin.set_fact:
container_exec_cmd_status: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
when: containerized_deployment | bool
- - name: show ceph status
- command: "{{ container_exec_cmd_status|default('') }} ceph --cluster {{ cluster }} -s"
+ - name: Show ceph status
+ ansible.builtin.command: "{{ container_exec_cmd_status | default('') }} ceph --cluster {{ cluster }} -s"
changed_when: false
- run_once: True
+ run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
- - name: show all daemons version
- command: "{{ container_exec_cmd_status|default('') }} ceph --cluster {{ cluster }} versions"
- run_once: True
+ - name: Show all daemons version
+ ansible.builtin.command: "{{ container_exec_cmd_status | default('') }} ceph --cluster {{ cluster }} versions"
+ run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
# ansible-playbook -e ireallymeanit=yes|no shrink-mds.yml
# Overrides the prompt using -e option. Can be used in
# automation scripts to avoid interactive prompt.
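# A fuller hedged example (hypothetical inventory file; the MDS name matches the fail message below):
#   ansible-playbook -i hosts shrink-mds.yml -e ireallymeanit=yes -e mds_to_kill=ceph-mds1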
-- name: gather facts and check the init system
+- name: Gather facts and check the init system
hosts:
- "{{ mon_group_name | default('mons') }}"
- "{{ mds_group_name | default('mdss') }}"
become: true
tasks:
- - debug:
+ - name: Gather facts on all Ceph hosts for following reference
+ ansible.builtin.debug:
msg: gather facts on all Ceph hosts for following reference
- - import_role:
+
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary
-- name: perform checks, remove mds and print cluster health
+- name: Perform checks, remove mds and print cluster health
hosts: mons[0]
become: true
vars_prompt:
- - name: ireallymeanit
+ - name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to shrink the cluster?
default: 'no'
- private: no
+ private: false
pre_tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: exit playbook, if no mds was given
+ - name: Exit playbook, if no mds was given
when: mds_to_kill is not defined
- fail:
+ ansible.builtin.fail:
msg: >
mds_to_kill must be declared.
Exiting shrink-cluster playbook, no MDS was removed. On the command
"-e mds_to_kill=ceph-mds1" argument. You can only remove a single
MDS each time the playbook runs."
- - name: exit playbook, if the mds is not part of the inventory
+ - name: Exit playbook, if the mds is not part of the inventory
when: mds_to_kill not in groups[mds_group_name]
- fail:
+ ansible.builtin.fail:
msg: "It seems that the host given is not part of your inventory,
please make sure it is."
- - name: exit playbook, if user did not mean to shrink cluster
+ - name: Exit playbook, if user did not mean to shrink cluster
when: ireallymeanit != 'yes'
- fail:
+ ansible.builtin.fail:
msg: "Exiting shrink-mds playbook, no mds was removed.
To shrink the cluster, either say 'yes' on the prompt or
or use `-e ireallymeanit=yes` on the command line when
invoking the playbook"
- - name: set_fact container_exec_cmd for mon0
- set_fact:
+ - name: Set_fact container_exec_cmd for mon0
+ ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
when: containerized_deployment | bool
- - name: exit playbook, if can not connect to the cluster
- command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
+ - name: Exit playbook, if can not connect to the cluster
+ ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
changed_when: false
register: ceph_health
until: ceph_health is succeeded
retries: 5
delay: 2
- - name: set_fact mds_to_kill_hostname
- set_fact:
+ - name: Set_fact mds_to_kill_hostname
+ ansible.builtin.set_fact:
mds_to_kill_hostname: "{{ hostvars[mds_to_kill]['ansible_facts']['hostname'] }}"
tasks:
    # get rid of this as soon as "systemctl stop ceph-mds@$HOSTNAME" also
# removes the MDS from the FS map.
- - name: exit mds when containerized deployment
- command: "{{ container_exec_cmd | default('') }} ceph tell mds.{{ mds_to_kill_hostname }} exit"
+ - name: Exit mds when containerized deployment
+ ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph tell mds.{{ mds_to_kill_hostname }} exit"
changed_when: false
when: containerized_deployment | bool
- - name: get ceph status
- command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json"
+ - name: Get ceph status
+ ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json"
register: ceph_status
changed_when: false
- - name: set_fact current_max_mds
- set_fact:
+ - name: Set_fact current_max_mds
+ ansible.builtin.set_fact:
current_max_mds: "{{ (ceph_status.stdout | from_json)['fsmap']['max'] }}"
- - name: fail if removing that mds node wouldn't satisfy max_mds anymore
- fail:
+ - name: Fail if removing that mds node wouldn't satisfy max_mds anymore
+ ansible.builtin.fail:
msg: "Can't remove more mds as it won't satisfy current max_mds setting"
when:
- ((((ceph_status.stdout | from_json)['fsmap']['up'] | int) + ((ceph_status.stdout | from_json)['fsmap']['up:standby'] | int)) - 1) < current_max_mds | int
- (ceph_status.stdout | from_json)['fsmap']['up'] | int > 1
- - name: stop mds service and verify it
+ - name: Stop mds service and verify it
block:
- - name: stop mds service
- service:
+ - name: Stop mds service
+ ansible.builtin.service:
name: ceph-mds@{{ mds_to_kill_hostname }}
state: stopped
- enabled: no
+ enabled: false
delegate_to: "{{ mds_to_kill }}"
failed_when: false
- - name: ensure that the mds is stopped
- command: "systemctl is-active ceph-mds@{{ mds_to_kill_hostname }}" # noqa 303
+ - name: Ensure that the mds is stopped
+ ansible.builtin.command: "systemctl is-active ceph-mds@{{ mds_to_kill_hostname }}" # noqa command-instead-of-module
register: mds_to_kill_status
failed_when: mds_to_kill_status.rc == 0
delegate_to: "{{ mds_to_kill }}"
retries: 5
delay: 2
+ changed_when: false
- - name: fail if the mds is reported as active or standby
+ - name: Fail if the mds is reported as active or standby
block:
- - name: get new ceph status
- command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json"
+ - name: Get new ceph status
+ ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json"
register: ceph_status
+ changed_when: false
- - name: get active mds nodes list
- set_fact:
+ - name: Get active mds nodes list
+ ansible.builtin.set_fact:
active_mdss: "{{ active_mdss | default([]) + [item.name] }}"
with_items: "{{ (ceph_status.stdout | from_json)['fsmap']['by_rank'] }}"
- - name: get ceph fs dump status
- command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs dump -f json"
+ - name: Get ceph fs dump status
+ ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs dump -f json"
register: ceph_fs_status
+ changed_when: false
- - name: create a list of standby mdss
- set_fact:
+ - name: Create a list of standby mdss
+ ansible.builtin.set_fact:
-          standby_mdss: (ceph_fs_status.stdout | from_json)['standbys'] | map(attribute='name') | list
+          standby_mdss: "{{ (ceph_fs_status.stdout | from_json)['standbys'] | map(attribute='name') | list }}"
- - name: fail if mds just killed is being reported as active or standby
- fail:
+ - name: Fail if mds just killed is being reported as active or standby
+ ansible.builtin.fail:
msg: "mds node {{ mds_to_kill }} still up and running."
when:
- (mds_to_kill in active_mdss | default([])) or
(mds_to_kill in standby_mdss | default([]))
- - name: delete the filesystem when killing last mds
+ - name: Delete the filesystem when killing last mds
ceph_fs:
name: "{{ cephfs }}"
cluster: "{{ cluster }}"
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
- - name: purge mds store
- file:
+ - name: Purge mds store
+ ansible.builtin.file:
path: /var/lib/ceph/mds/{{ cluster }}-{{ mds_to_kill_hostname }}
state: absent
delegate_to: "{{ mds_to_kill }}"
post_tasks:
- - name: show ceph health
- command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
+ - name: Show ceph health
+ ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
changed_when: false
# automation scripts to avoid interactive prompt.
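# A hedged example (hypothetical inventory file and manager host name):
#   ansible-playbook -i hosts shrink-mgr.yml -e ireallymeanit=yes -e mgr_to_kill=ceph-mgr1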
-- name: gather facts and check the init system
+- name: Gather facts and check the init system
hosts:
- "{{ mon_group_name | default('mons') }}"
- "{{ mgr_group_name | default('mgrs') }}"
become: true
tasks:
- - debug:
+ - name: Gather facts on all Ceph hosts for following reference
+ ansible.builtin.debug:
msg: gather facts on all Ceph hosts for following reference
-- name: confirm if user really meant to remove manager from the ceph cluster
+- name: Confirm if user really meant to remove manager from the ceph cluster
hosts: mons[0]
become: true
vars_prompt:
- - name: ireallymeanit
+ - name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to shrink the cluster?
default: 'no'
- private: no
+ private: false
pre_tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary
- - name: set_fact container_exec_cmd
+ - name: Set_fact container_exec_cmd
when: containerized_deployment | bool
- set_fact:
+ ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
- - name: exit playbook, if can not connect to the cluster
- command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
+ - name: Exit playbook, if can not connect to the cluster
+ ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
register: ceph_health
changed_when: false
until: ceph_health is succeeded
retries: 5
delay: 2
- - name: get total number of mgrs in cluster
+ - name: Get total number of mgrs in cluster
block:
- - name: save mgr dump output
- command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} mgr dump -f json"
+ - name: Save mgr dump output
+ ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} mgr dump -f json"
register: mgr_dump
+ changed_when: false
- - name: get active and standbys mgr list
- set_fact:
+ - name: Get active and standbys mgr list
+ ansible.builtin.set_fact:
active_mgr: "{{ [mgr_dump.stdout | from_json] | map(attribute='active_name') | list }}"
standbys_mgr: "{{ (mgr_dump.stdout | from_json)['standbys'] | map(attribute='name') | list }}"
- - name: exit playbook, if there's no standby manager
- fail:
+ - name: Exit playbook, if there's no standby manager
+ ansible.builtin.fail:
msg: "You are about to shrink the only manager present in the cluster."
when: standbys_mgr | length | int < 1
- - name: exit playbook, if no manager was given
- fail:
+ - name: Exit playbook, if no manager was given
+ ansible.builtin.fail:
msg: "mgr_to_kill must be declared
Exiting shrink-cluster playbook, no manager was removed.
On the command line when invoking the playbook, you can use
manager each time the playbook runs."
when: mgr_to_kill is not defined
- - name: exit playbook, if user did not mean to shrink cluster
- fail:
+ - name: Exit playbook, if user did not mean to shrink cluster
+ ansible.builtin.fail:
msg: "Exiting shrink-mgr playbook, no manager was removed.
To shrink the cluster, either say 'yes' on the prompt or
or use `-e ireallymeanit=yes` on the command line when
invoking the playbook"
when: ireallymeanit != 'yes'
- - name: set_fact mgr_to_kill_hostname
- set_fact:
+ - name: Set_fact mgr_to_kill_hostname
+ ansible.builtin.set_fact:
mgr_to_kill_hostname: "{{ hostvars[mgr_to_kill]['ansible_facts']['hostname'] }}"
- - name: exit playbook, if the selected manager is not present in the cluster
- fail:
+ - name: Exit playbook, if the selected manager is not present in the cluster
+ ansible.builtin.fail:
msg: "It seems that the host given is not present in the cluster."
when:
- mgr_to_kill_hostname not in active_mgr
- mgr_to_kill_hostname not in standbys_mgr
tasks:
- - name: stop manager services and verify it
+ - name: Stop manager services and verify it
block:
- - name: stop manager service
- service:
+ - name: Stop manager service
+ ansible.builtin.service:
name: ceph-mgr@{{ mgr_to_kill_hostname }}
state: stopped
- enabled: no
+ enabled: false
delegate_to: "{{ mgr_to_kill }}"
failed_when: false
- - name: ensure that the mgr is stopped
- command: "systemctl is-active ceph-mgr@{{ mgr_to_kill_hostname }}" # noqa 303
+ - name: Ensure that the mgr is stopped
+ ansible.builtin.command: "systemctl is-active ceph-mgr@{{ mgr_to_kill_hostname }}" # noqa command-instead-of-module
register: mgr_to_kill_status
failed_when: mgr_to_kill_status.rc == 0
delegate_to: "{{ mgr_to_kill }}"
+ changed_when: false
retries: 5
delay: 2
- - name: fail if the mgr is reported in ceph mgr dump
- command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} mgr dump -f json"
+ - name: Fail if the mgr is reported in ceph mgr dump
+ ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} mgr dump -f json"
register: mgr_dump
changed_when: false
failed_when: mgr_to_kill_hostname in (([mgr_dump.stdout | from_json] | map(attribute='active_name') | list) + (mgr_dump.stdout | from_json)['standbys'] | map(attribute='name') | list)
retries: 12
delay: 10
- - name: purge manager store
- file:
+ - name: Purge manager store
+ ansible.builtin.file:
path: /var/lib/ceph/mgr/{{ cluster }}-{{ mgr_to_kill_hostname }}
state: absent
delegate_to: "{{ mgr_to_kill }}"
post_tasks:
- - name: show ceph health
- command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
+ - name: Show ceph health
+ ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
changed_when: false
# automation scripts to avoid interactive prompt.
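# A hedged example (hypothetical inventory file; the monitor name matches the fail message below):
#   ansible-playbook -i hosts shrink-mon.yml -e ireallymeanit=yes -e mon_to_kill=ceph-mon01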
-- name: gather facts and check the init system
+- name: Gather facts and check the init system
hosts: "{{ mon_group_name|default('mons') }}"
become: true
tasks:
- - debug: msg="gather facts on all Ceph hosts for following reference"
+ - name: Gather facts on all Ceph hosts for following reference
+ ansible.builtin.debug:
+ msg: "gather facts on all Ceph hosts for following reference"
-- name: confirm whether user really meant to remove monitor from the ceph cluster
+- name: Confirm whether user really meant to remove monitor from the ceph cluster
hosts: mons[0]
become: true
vars_prompt:
- - name: ireallymeanit
+ - name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to shrink the cluster?
default: 'no'
- private: no
+ private: false
vars:
mon_group_name: mons
pre_tasks:
- - name: exit playbook, if only one monitor is present in cluster
- fail:
+ - name: Exit playbook, if only one monitor is present in cluster
+ ansible.builtin.fail:
msg: "You are about to shrink the only monitor present in the cluster.
If you really want to do that, please use the purge-cluster playbook."
when: groups[mon_group_name] | length | int == 1
- - name: exit playbook, if no monitor was given
- fail:
+ - name: Exit playbook, if no monitor was given
+ ansible.builtin.fail:
msg: "mon_to_kill must be declared
Exiting shrink-cluster playbook, no monitor was removed.
On the command line when invoking the playbook, you can use
-e mon_to_kill=ceph-mon01 argument. You can only remove a single monitor each time the playbook runs."
when: mon_to_kill is not defined
- - name: exit playbook, if the monitor is not part of the inventory
- fail:
+ - name: Exit playbook, if the monitor is not part of the inventory
+ ansible.builtin.fail:
msg: "It seems that the host given is not part of your inventory, please make sure it is."
when: mon_to_kill not in groups[mon_group_name]
- - name: exit playbook, if user did not mean to shrink cluster
- fail:
+ - name: Exit playbook, if user did not mean to shrink cluster
+ ansible.builtin.fail:
msg: "Exiting shrink-mon playbook, no monitor was removed.
To shrink the cluster, either say 'yes' on the prompt or
or use `-e ireallymeanit=yes` on the command line when
invoking the playbook"
when: ireallymeanit != 'yes'
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary
tasks:
- - name: pick a monitor different than the one we want to remove
- set_fact:
+ - name: Pick a monitor different than the one we want to remove
+ ansible.builtin.set_fact:
mon_host: "{{ item }}"
with_items: "{{ groups[mon_group_name] }}"
when: item != mon_to_kill
- - name: "set_fact container_exec_cmd build {{ container_binary }} exec command (containerized)"
- set_fact:
+ - name: Set container_exec_cmd fact
+ ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[mon_host]['ansible_facts']['hostname'] }}"
when: containerized_deployment | bool
- - name: exit playbook, if can not connect to the cluster
- command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
+ - name: Exit playbook, if can not connect to the cluster
+ ansible.builtin.command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
register: ceph_health
changed_when: false
until: ceph_health.stdout.find("HEALTH") > -1
retries: 5
delay: 2
- - name: set_fact mon_to_kill_hostname
- set_fact:
+ - name: Set_fact mon_to_kill_hostname
+ ansible.builtin.set_fact:
mon_to_kill_hostname: "{{ hostvars[mon_to_kill]['ansible_facts']['hostname'] }}"
- - name: stop monitor service(s)
- service:
+ - name: Stop monitor service(s)
+ ansible.builtin.service:
name: ceph-mon@{{ mon_to_kill_hostname }}
state: stopped
- enabled: no
+ enabled: false
delegate_to: "{{ mon_to_kill }}"
failed_when: false
- - name: purge monitor store
- file:
+ - name: Purge monitor store
+ ansible.builtin.file:
path: /var/lib/ceph/mon/{{ cluster }}-{{ mon_to_kill_hostname }}
state: absent
delegate_to: "{{ mon_to_kill }}"
- - name: remove monitor from the quorum
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} mon remove {{ mon_to_kill_hostname }}"
+ - name: Remove monitor from the quorum
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} mon remove {{ mon_to_kill_hostname }}"
changed_when: false
failed_when: false
delegate_to: "{{ mon_host }}"
post_tasks:
- - name: verify the monitor is out of the cluster
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} quorum_status -f json"
+ - name: Verify the monitor is out of the cluster
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} quorum_status -f json"
delegate_to: "{{ mon_host }}"
changed_when: false
failed_when: false
retries: 2
delay: 10
- - name: please remove the monitor from your ceph configuration file
- debug:
- msg: "The monitor has been successfully removed from the cluster.
- Please remove the monitor entry from the rest of your ceph configuration files, cluster wide."
+ - name: Please remove the monitor from your ceph configuration file
+ ansible.builtin.debug:
+ msg: "The monitor has been successfully removed from the cluster.
+ Please remove the monitor entry from the rest of your ceph configuration files, cluster wide."
run_once: true
when: mon_to_kill_hostname not in (result.stdout | from_json)['quorum_names']
- - name: fail if monitor is still part of the cluster
- fail:
- msg: "Monitor appears to still be part of the cluster, please check what happened."
+ - name: Fail if monitor is still part of the cluster
+ ansible.builtin.fail:
+ msg: "Monitor appears to still be part of the cluster, please check what happened."
run_once: true
when: mon_to_kill_hostname in (result.stdout | from_json)['quorum_names']
- - name: show ceph health
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s"
+ - name: Show ceph health
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s"
delegate_to: "{{ mon_host }}"
changed_when: false
- - name: show ceph mon status
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} mon stat"
+ - name: Show ceph mon status
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} mon stat"
delegate_to: "{{ mon_host }}"
changed_when: false
# Overrides the prompt using -e option. Can be used in
# automation scripts to avoid interactive prompt.
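# A hedged example (hypothetical inventory file; OSD ids as in the fail message below):
#   ansible-playbook -i hosts shrink-osd.yml -e ireallymeanit=yes -e osd_to_kill=0,1,2,3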
-- name: gather facts and check the init system
-
+- name: Gather facts and check the init system
hosts:
- mons
- osds
- become: True
+ become: true
tasks:
- - debug: msg="gather facts on all Ceph hosts for following reference"
-
-- name: confirm whether user really meant to remove osd(s) from the cluster
+ - name: Gather facts on all Ceph hosts for following reference
+ ansible.builtin.debug:
+ msg: "gather facts on all Ceph hosts for following reference"
+- name: Confirm whether user really meant to remove osd(s) from the cluster
hosts: mons[0]
-
become: true
-
vars_prompt:
- - name: ireallymeanit
+ - name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to shrink the cluster?
default: 'no'
- private: no
-
+ private: false
vars:
mon_group_name: mons
osd_group_name: osds
pre_tasks:
- - name: exit playbook, if user did not mean to shrink cluster
- fail:
+ - name: Exit playbook, if user did not mean to shrink cluster
+ ansible.builtin.fail:
msg: "Exiting shrink-osd playbook, no osd(s) was/were removed..
To shrink the cluster, either say 'yes' on the prompt or
or use `-e ireallymeanit=yes` on the command line when
invoking the playbook"
when: ireallymeanit != 'yes'
- - name: exit playbook, if no osd(s) was/were given
- fail:
+ - name: Exit playbook, if no osd(s) was/were given
+ ansible.builtin.fail:
msg: "osd_to_kill must be declared
Exiting shrink-osd playbook, no OSD(s) was/were removed.
On the command line when invoking the playbook, you can use
-e osd_to_kill=0,1,2,3 argument."
when: osd_to_kill is not defined
- - name: check the osd ids passed have the correct format
- fail:
+ - name: Check the osd ids passed have the correct format
+ ansible.builtin.fail:
msg: "The id {{ item }} has wrong format, please pass the number only"
with_items: "{{ osd_to_kill.split(',') }}"
when: not item is regex("^\d+$")
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary
post_tasks:
- - name: set_fact container_exec_cmd build docker exec command (containerized)
- set_fact:
+ - name: Set_fact container_exec_cmd build docker exec command (containerized)
+ ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
when: containerized_deployment | bool
- - name: exit playbook, if can not connect to the cluster
- command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
+ - name: Exit playbook, if can not connect to the cluster
+ ansible.builtin.command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
register: ceph_health
changed_when: false
until: ceph_health.stdout.find("HEALTH") > -1
retries: 5
delay: 2
- - name: find the host(s) where the osd(s) is/are running on
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd find {{ item }}"
+ - name: Find the host(s) where the osd(s) is/are running on
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd find {{ item }}"
changed_when: false
with_items: "{{ osd_to_kill.split(',') }}"
register: find_osd_hosts
- - name: set_fact osd_hosts
- set_fact:
- osd_hosts: "{{ osd_hosts | default([]) + [ [ (item.stdout | from_json).crush_location.host, (item.stdout | from_json).osd_fsid, item.item ] ] }}"
+ - name: Set_fact osd_hosts
+ ansible.builtin.set_fact:
+ osd_hosts: "{{ osd_hosts | default([]) + [[(item.stdout | from_json).crush_location.host, (item.stdout | from_json).osd_fsid, item.item]] }}"
with_items: "{{ find_osd_hosts.results }}"
- - name: set_fact _osd_hosts
- set_fact:
+ - name: Set_fact _osd_hosts
+ ansible.builtin.set_fact:
_osd_hosts: "{{ _osd_hosts | default([]) + [ [ item.0, item.2, item.3 ] ] }}"
with_nested:
- "{{ groups.get(osd_group_name) }}"
- "{{ osd_hosts }}"
when: hostvars[item.0]['ansible_facts']['hostname'] == item.1
- - name: set_fact host_list
- set_fact:
+ - name: Set_fact host_list
+ ansible.builtin.set_fact:
host_list: "{{ host_list | default([]) | union([item.0]) }}"
loop: "{{ _osd_hosts }}"
- - name: get ceph-volume lvm list data
+ - name: Get ceph-volume lvm list data
ceph_volume:
cluster: "{{ cluster }}"
action: list
delegate_to: "{{ item }}"
loop: "{{ host_list }}"
- - name: set_fact _lvm_list
- set_fact:
+ - name: Set_fact _lvm_list
+ ansible.builtin.set_fact:
_lvm_list: "{{ _lvm_list | default({}) | combine(item.stdout | from_json) }}"
with_items: "{{ _lvm_list_data.results }}"
- - name: refresh /etc/ceph/osd files non containerized_deployment
+ - name: Refresh /etc/ceph/osd files non containerized_deployment
ceph_volume_simple_scan:
cluster: "{{ cluster }}"
force: true
loop: "{{ host_list }}"
when: not containerized_deployment | bool
- - name: get osd unit status
- systemd:
+ - name: Get osd unit status
+ ansible.builtin.systemd:
name: ceph-osd@{{ item.2 }}
register: osd_status
delegate_to: "{{ item.0 }}"
when:
- containerized_deployment | bool
- - name: refresh /etc/ceph/osd files containerized_deployment
- command: "{{ container_binary }} exec ceph-osd-{{ item.2 }} ceph-volume simple scan --force /var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}"
+ - name: Refresh /etc/ceph/osd files containerized_deployment
+ ansible.builtin.command: "{{ container_binary }} exec ceph-osd-{{ item.2 }} ceph-volume simple scan --force /var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}"
changed_when: false
delegate_to: "{{ item.0 }}"
loop: "{{ _osd_hosts }}"
- item.2 not in _lvm_list.keys()
- osd_status.results[0].status.ActiveState == 'active'
- - name: refresh /etc/ceph/osd files containerized_deployment when OSD container is down
+ - name: Refresh /etc/ceph/osd files containerized_deployment when OSD container is down
+ when:
+ - containerized_deployment | bool
+ - osd_status.results[0].status.ActiveState != 'active'
block:
- - name: create tmp osd folder
- file:
+ - name: Create tmp osd folder
+ ansible.builtin.file:
path: /var/lib/ceph/tmp/{{ cluster }}-{{ item.2 }}
state: directory
mode: '0755'
when: item.2 not in _lvm_list.keys()
loop: "{{ _osd_hosts }}"
- - name: activate OSD
- command: |
+ - name: Activate OSD
+ ansible.builtin.command: |
{{ container_binary }} run -ti --pids-limit=-1 --rm --net=host --privileged=true --pid=host --ipc=host --cpus=1
-v /dev:/dev -v /etc/localtime:/etc/localtime:ro
-v /var/lib/ceph/tmp/:/var/lib/ceph/osd:z,rshared
when: item.2 not in _lvm_list.keys()
loop: "{{ _osd_hosts }}"
- - name: simple scan
- command: |
+ - name: Simple scan
+ ansible.builtin.command: |
{{ container_binary }} run -ti --pids-limit=-1 --rm --net=host --privileged=true --pid=host --ipc=host --cpus=1
-v /dev:/dev -v /etc/localtime:/etc/localtime:ro
-v /var/lib/ceph/tmp/:/var/lib/ceph/osd:z,rshared
when: item.2 not in _lvm_list.keys()
loop: "{{ _osd_hosts }}"
- - name: umount OSD temp folder
- mount:
+ - name: Umount OSD temp folder
+ ansible.posix.mount:
path: /var/lib/ceph/tmp/{{ cluster }}-{{ item.2 }}
state: unmounted
delegate_to: "{{ item.0 }}"
when: item.2 not in _lvm_list.keys()
loop: "{{ _osd_hosts }}"
- - name: remove OSD temp folder
- file:
+ - name: Remove OSD temp folder
+ ansible.builtin.file:
path: /var/lib/ceph/tmp/{{ cluster }}-{{ item.2 }}
state: absent
delegate_to: "{{ item.0 }}"
when: item.2 not in _lvm_list.keys()
loop: "{{ _osd_hosts }}"
- when:
- - containerized_deployment | bool
- - osd_status.results[0].status.ActiveState != 'active'
-
- - name: find /etc/ceph/osd files
- find:
+ - name: Find /etc/ceph/osd files
+ ansible.builtin.find:
paths: /etc/ceph/osd
pattern: "{{ item.2 }}-*"
register: ceph_osd_data
loop: "{{ _osd_hosts }}"
when: item.2 not in _lvm_list.keys()
- - name: slurp ceph osd files content
- slurp:
+ - name: Slurp ceph osd files content
+ ansible.builtin.slurp:
src: "{{ item['files'][0]['path'] }}"
delegate_to: "{{ item.item.0 }}"
register: ceph_osd_files_content
- item.skipped is undefined
- item.matched > 0
- - name: set_fact ceph_osd_files_json
- set_fact:
+ - name: Set_fact ceph_osd_files_json
+ ansible.builtin.set_fact:
ceph_osd_data_json: "{{ ceph_osd_data_json | default({}) | combine({ item.item.item.2: item.content | b64decode | from_json}) }}"
with_items: "{{ ceph_osd_files_content.results }}"
when: item.skipped is undefined
- - name: mark osd(s) out of the cluster
+ - name: Mark osd(s) out of the cluster
ceph_osd:
ids: "{{ osd_to_kill.split(',') }}"
cluster: "{{ cluster }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
run_once: true
- - name: stop osd(s) service
- service:
+ - name: Stop osd(s) service
+ ansible.builtin.service:
name: ceph-osd@{{ item.2 }}
state: stopped
- enabled: no
+ enabled: false
loop: "{{ _osd_hosts }}"
delegate_to: "{{ item.0 }}"
- - name: umount osd lockbox
+ - name: Umount osd lockbox
ansible.posix.mount:
path: "/var/lib/ceph/osd-lockbox/{{ ceph_osd_data_json[item.2]['data']['uuid'] }}"
state: absent
- ceph_osd_data_json[item.2]['encrypted'] | default(False) | bool
- ceph_osd_data_json[item.2]['data']['uuid'] is defined
- - name: umount osd data
+ - name: Umount osd data
ansible.posix.mount:
path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}"
state: absent
delegate_to: "{{ item.0 }}"
when: not containerized_deployment | bool
- - name: get parent device for data partition
- command: lsblk --noheadings --output PKNAME --nodeps "{{ ceph_osd_data_json[item.2]['data']['path'] }}"
+ - name: Get parent device for data partition
+ ansible.builtin.command: lsblk --noheadings --output PKNAME --nodeps "{{ ceph_osd_data_json[item.2]['data']['path'] }}"
register: parent_device_data_part
loop: "{{ _osd_hosts }}"
delegate_to: "{{ item.0 }}"
+ changed_when: false
when:
- item.2 not in _lvm_list.keys()
- ceph_osd_data_json[item.2]['data']['path'] is defined
- - name: add pkname information in ceph_osd_data_json
- set_fact:
- ceph_osd_data_json: "{{ ceph_osd_data_json | default({}) | combine({item.item[2]: {'pkname_data': '/dev/' + item.stdout }}, recursive=True) }}"
+ - name: Add pkname information in ceph_osd_data_json
+ ansible.builtin.set_fact:
+ ceph_osd_data_json: "{{ ceph_osd_data_json | default({}) | combine({item.item[2]: {'pkname_data': '/dev/' + item.stdout}}, recursive=True) }}"
loop: "{{ parent_device_data_part.results }}"
when: item.skipped is undefined
- - name: close dmcrypt close on devices if needed
- command: "cryptsetup close {{ ceph_osd_data_json[item.2][item.3]['uuid'] }}"
+ - name: Close dmcrypt close on devices if needed
+ ansible.builtin.command: "cryptsetup close {{ ceph_osd_data_json[item.2][item.3]['uuid'] }}"
with_nested:
- "{{ _osd_hosts }}"
- - [ 'block_dmcrypt', 'block.db_dmcrypt', 'block.wal_dmcrypt', 'data', 'journal_dmcrypt' ]
+ - ['block_dmcrypt', 'block.db_dmcrypt', 'block.wal_dmcrypt', 'data', 'journal_dmcrypt']
delegate_to: "{{ item.0 }}"
failed_when: false
register: result
until: result is succeeded
+ changed_when: false
when:
- item.2 not in _lvm_list.keys()
- ceph_osd_data_json[item.2]['encrypted'] | default(False) | bool
- ceph_osd_data_json[item.2][item.3] is defined
- - name: use ceph-volume lvm zap to destroy all partitions
+ - name: Use ceph-volume lvm zap to destroy all partitions
ceph_volume:
cluster: "{{ cluster }}"
action: zap
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
with_nested:
- "{{ _osd_hosts }}"
- - [ 'block', 'block.db', 'block.wal', 'journal', 'data' ]
+ - ['block', 'block.db', 'block.wal', 'journal', 'data']
delegate_to: "{{ item.0 }}"
failed_when: false
register: result
- item.2 not in _lvm_list.keys()
- ceph_osd_data_json[item.2][item.3] is defined
- - name: zap osd devices
+ - name: Zap osd devices
ceph_volume:
action: "zap"
osd_fsid: "{{ item.1 }}"
loop: "{{ _osd_hosts }}"
when: item.2 in _lvm_list.keys()
- - name: ensure osds are marked down
+ - name: Ensure osds are marked down
ceph_osd:
ids: "{{ osd_to_kill.split(',') }}"
cluster: "{{ cluster }}"
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
- - name: purge osd(s) from the cluster
+ - name: Purge osd(s) from the cluster
ceph_osd:
ids: "{{ item }}"
cluster: "{{ cluster }}"
run_once: true
with_items: "{{ osd_to_kill.split(',') }}"
- - name: remove osd data dir
- file:
+ - name: Remove osd data dir
+ ansible.builtin.file:
path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.2 }}"
state: absent
loop: "{{ _osd_hosts }}"
delegate_to: "{{ item.0 }}"
- - name: show ceph health
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s"
+ - name: Show ceph health
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s"
changed_when: false
- - name: show ceph osd tree
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd tree"
+ - name: Show ceph osd tree
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd tree"
changed_when: false
# Overrides the prompt using -e option. Can be used in
# automation scripts to avoid interactive prompt.
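# A hedged example (hypothetical inventory file and rbd-mirror host name):
#   ansible-playbook -i hosts shrink-rbdmirror.yml -e ireallymeanit=yes -e rbdmirror_to_kill=rbd-mirror0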
-- name: gather facts and check the init system
+- name: Gather facts and check the init system
hosts:
- mons
- rbdmirrors
become: true
tasks:
- - debug:
+ - name: Gather facts on MONs and RBD mirrors
+ ansible.builtin.debug:
msg: gather facts on MONs and RBD mirrors
-- name: confirm whether user really meant to remove rbd mirror from the ceph
+- name: Confirm whether user really meant to remove rbd mirror from the ceph
cluster
hosts: mons[0]
become: true
vars_prompt:
- - name: ireallymeanit
+ - name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to shrink the cluster?
default: 'no'
- private: no
+ private: false
pre_tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary
- - name: exit playbook, if no rbdmirror was given
- fail:
+ - name: Exit playbook, if no rbdmirror was given
+ ansible.builtin.fail:
msg: "rbdmirror_to_kill must be declared
Exiting shrink-cluster playbook, no RBD mirror was removed.
On the command line when invoking the playbook, you can use
single rbd mirror each time the playbook runs."
when: rbdmirror_to_kill is not defined
- - name: exit playbook, if the rbdmirror is not part of the inventory
- fail:
+ - name: Exit playbook, if the rbdmirror is not part of the inventory
+ ansible.builtin.fail:
msg: >
It seems that the host given is not part of your inventory,
please make sure it is.
when: rbdmirror_to_kill not in groups[rbdmirror_group_name]
- - name: exit playbook, if user did not mean to shrink cluster
- fail:
+ - name: Exit playbook, if user did not mean to shrink cluster
+ ansible.builtin.fail:
msg: "Exiting shrink-rbdmirror playbook, no rbd-mirror was removed.
To shrink the cluster, either say 'yes' on the prompt or
or use `-e ireallymeanit=yes` on the command line when
invoking the playbook"
when: ireallymeanit != 'yes'
- - name: set_fact container_exec_cmd for mon0
+ - name: Set_fact container_exec_cmd for mon0
when: containerized_deployment | bool
- set_fact:
+ ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
- - name: exit playbook, if can not connect to the cluster
- command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} service dump -f json"
+ - name: Exit playbook, if can not connect to the cluster
+ ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} service dump -f json"
register: ceph_health
changed_when: false
until: ceph_health is succeeded
retries: 5
delay: 2
- - name: set_fact rbdmirror_to_kill_hostname
- set_fact:
+ - name: Set_fact rbdmirror_to_kill_hostname
+ ansible.builtin.set_fact:
rbdmirror_to_kill_hostname: "{{ hostvars[rbdmirror_to_kill]['ansible_facts']['hostname'] }}"
- - name: set_fact rbdmirror_gids
- set_fact:
- rbdmirror_gids: "{{ rbdmirror_gids | default([]) + [ item ] }}"
+ - name: Set_fact rbdmirror_gids
+ ansible.builtin.set_fact:
+ rbdmirror_gids: "{{ rbdmirror_gids | default([]) + [item] }}"
with_items: "{{ (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'].keys() | list }}"
when: item != 'summary'
- - name: set_fact rbdmirror_to_kill_gid
- set_fact:
+ - name: Set_fact rbdmirror_to_kill_gid
+ ansible.builtin.set_fact:
rbdmirror_to_kill_gid: "{{ (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'][item]['gid'] }}"
with_items: "{{ rbdmirror_gids }}"
when: (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'][item]['metadata']['id'] == rbdmirror_to_kill_hostname
tasks:
- - name: stop rbdmirror service
- service:
+ - name: Stop rbdmirror service
+ ansible.builtin.service:
name: ceph-rbd-mirror@rbd-mirror.{{ rbdmirror_to_kill_hostname }}
state: stopped
- enabled: no
+ enabled: false
delegate_to: "{{ rbdmirror_to_kill }}"
failed_when: false
- - name: purge related directories
- file:
+ - name: Purge related directories
+ ansible.builtin.file:
path: /var/lib/ceph/bootstrap-rbd-mirror/{{ cluster }}-{{ rbdmirror_to_kill_hostname }}
state: absent
delegate_to: "{{ rbdmirror_to_kill }}"
post_tasks:
- - name: get servicemap details
- command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} service dump -f json"
+ - name: Get servicemap details
+ ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} service dump -f json"
register: ceph_health
failed_when:
- "'rbd-mirror' in (ceph_health.stdout | from_json)['services'].keys() | list"
until:
- "'rbd-mirror' in (ceph_health.stdout | from_json)['services'].keys() | list"
- rbdmirror_to_kill_gid not in (ceph_health.stdout | from_json)['services']['rbd-mirror']['daemons'].keys() | list
+ changed_when: false
when: rbdmirror_to_kill_gid is defined
retries: 12
delay: 10
- - name: show ceph health
- command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
+ - name: Show ceph health
+ ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
changed_when: false
# automation scripts to avoid interactive prompt.
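# A hedged example (hypothetical inventory file; the RGW instance name format is shown in the checks below):
#   ansible-playbook -i hosts shrink-rgw.yml -e ireallymeanit=yes -e rgw_to_kill=ceph.rgw0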
-- name: confirm whether user really meant to remove rgw from the ceph cluster
+- name: Confirm whether user really meant to remove rgw from the ceph cluster
hosts: localhost
become: false
gather_facts: false
vars_prompt:
- - name: ireallymeanit
+ - name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to shrink the cluster?
default: 'no'
- private: no
+ private: false
tasks:
- - name: exit playbook, if no rgw was given
+ - name: Exit playbook, if no rgw was given
when: rgw_to_kill is not defined or rgw_to_kill | length == 0
- fail:
+ ansible.builtin.fail:
msg: >
rgw_to_kill must be declared.
Exiting shrink-cluster playbook, no RGW was removed. On the command
"-e rgw_to_kill=ceph.rgw0 argument". You can only remove a single
RGW each time the playbook runs.
- - name: exit playbook, if user did not mean to shrink cluster
+ - name: Exit playbook, if user did not mean to shrink cluster
when: ireallymeanit != 'yes'
- fail:
+ ansible.builtin.fail:
msg: >
          Exiting shrink-rgw playbook, no RGW was removed. To shrink the
cluster, either say 'yes' on the prompt or use
'-e ireallymeanit=yes' on the command line when invoking the playbook
-- name: gather facts and mons and rgws
+- name: Gather facts and mons and rgws
hosts:
- "{{ mon_group_name | default('mons') }}[0]"
- "{{ rgw_group_name | default('rgws') }}"
become: true
gather_facts: false
tasks:
- - name: gather facts
- setup:
+ - name: Gather facts
+ ansible.builtin.setup:
gather_subset:
- 'all'
- '!facter'
- '!ohai'
-- hosts: mons[0]
+- name: Shrink rgw service
+ hosts: mons[0]
become: true
gather_facts: false
pre_tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary
- - name: set_fact container_exec_cmd for mon0
- set_fact:
+ - name: Set_fact container_exec_cmd for mon0
+ ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
when: containerized_deployment | bool
- - name: exit playbook, if can not connect to the cluster
- command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
+ - name: Exit playbook, if can not connect to the cluster
+ ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
register: ceph_health
changed_when: false
until: ceph_health is succeeded
retries: 5
delay: 2
- - name: get rgw instances
- command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json"
+ - name: Get rgw instances
+ ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json"
register: rgw_instances
changed_when: false
- - name: exit playbook, if the rgw_to_kill doesn't exist
+ - name: Exit playbook, if the rgw_to_kill doesn't exist
when: rgw_to_kill not in (rgw_instances.stdout | from_json).services.rgw.daemons.keys() | list
- fail:
+ ansible.builtin.fail:
msg: >
It seems that the rgw instance given is not part of the ceph cluster. Please
make sure it is.
        The rgw instance format is $(hostname).rgw$(instance number).
tasks:
- - name: get rgw host running the rgw instance to kill
- set_fact:
+ - name: Get rgw host running the rgw instance to kill
+ ansible.builtin.set_fact:
rgw_host: '{{ item }}'
with_items: '{{ groups[rgw_group_name] }}'
when: hostvars[item]['ansible_facts']['hostname'] == rgw_to_kill.split('.')[0]
- - name: stop rgw service
- service:
+ - name: Stop rgw service
+ ansible.builtin.service:
name: ceph-radosgw@rgw.{{ rgw_to_kill }}
state: stopped
- enabled: no
+ enabled: false
delegate_to: "{{ rgw_host }}"
failed_when: false
- - name: ensure that the rgw is stopped
- command: "systemctl is-active ceph-radosgw@rgw.{{ rgw_to_kill }}" # noqa 303
+ - name: Ensure that the rgw is stopped
+ ansible.builtin.command: "systemctl is-active ceph-radosgw@rgw.{{ rgw_to_kill }}" # noqa command-instead-of-module
register: rgw_to_kill_status
failed_when: rgw_to_kill_status.rc == 0
changed_when: false
retries: 5
delay: 2
- - name: exit if rgw_to_kill is reported in ceph status
- command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json"
+ - name: Exit if rgw_to_kill is reported in ceph status
+ ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} service dump -f json"
register: ceph_status
changed_when: false
failed_when:
retries: 3
delay: 3
- - name: purge directories related to rgw
- file:
+ - name: Purge directories related to rgw
+ ansible.builtin.file:
path: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ rgw_to_kill }}
state: absent
delegate_to: "{{ rgw_host }}"
post_tasks:
- - name: show ceph health
- command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
+ - name: Show ceph health
+ ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
changed_when: false
# Usage:
# ansible-playbook storage-inventory.yml
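# A hedged variant restricting the inventory query to one OSD host (hypothetical inventory file and host name):
#   ansible-playbook -i hosts storage-inventory.yml --limit osd0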
-- name: gather facts and check the init system
-
- hosts: "{{ osd_group_name|default('osds') }}"
-
+- name: Gather facts and check the init system
+ hosts: osds
become: true
-
tasks:
- - debug: msg="gather facts on all Ceph hosts for following reference"
-
-- name: query each host for storage device inventory
-
- hosts: "{{ osd_group_name|default('osds') }}"
+ - name: Gather facts on all Ceph hosts
+ ansible.builtin.debug:
+ msg: "gather facts on all Ceph hosts for following reference"
+- name: Query each host for storage device inventory
+ hosts: osds
become: true
-
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: list storage inventory
+ - name: List storage inventory
ceph_volume:
action: "inventory"
environment:
---
# This playbook switches from non-containerized to containerized Ceph daemons
-- name: confirm whether user really meant to switch from non-containerized to containerized ceph daemons
+- name: Confirm whether user really meant to switch from non-containerized to containerized ceph daemons
hosts: localhost
gather_facts: false
any_errors_fatal: true
vars_prompt:
- - name: ireallymeanit
+ - name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to switch from non-containerized to containerized ceph daemons?
default: 'no'
- private: no
+ private: false
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: fail when less than three monitors
- fail:
+ - name: Fail when less than three monitors
+ ansible.builtin.fail:
msg: "This playbook requires at least three monitors."
when: groups[mon_group_name] | length | int < 3
- - name: exit playbook, if user did not mean to switch from non-containerized to containerized daemons?
- fail:
+ - name: Exit playbook, if user did not mean to switch from non-containerized to containerized daemons?
+ ansible.builtin.fail:
msg: >
"Exiting switch-from-non-containerized-to-containerized-ceph-daemons.yml playbook,
cluster did not switch from non-containerized to containerized ceph daemons.
when: ireallymeanit != 'yes'
-- name: gather facts
+- name: Gather facts
hosts:
- "{{ mon_group_name|default('mons') }}"
become: true
vars:
- delegate_facts_host: True
+ delegate_facts_host: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: gather and delegate facts
- setup:
+ - name: Gather and delegate facts
+ ansible.builtin.setup:
gather_subset:
- 'all'
- '!facter'
- '!ohai'
delegate_to: "{{ item }}"
- delegate_facts: True
+ delegate_facts: true
with_items: "{{ groups['all'] | difference(groups.get(client_group_name, [])) }}"
run_once: true
when: delegate_facts_host | bool
tags: always
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
- - import_role:
+
+ - name: Import ceph-validate role
+ ansible.builtin.import_role:
name: ceph-validate
-- name: switching from non-containerized to containerized ceph mon
+- name: Switching from non-containerized to containerized ceph mon
vars:
containerized_deployment: true
- switch_to_containers: True
- mon_group_name: mons
+ switch_to_containers: true
+ mon_group_name: mons
hosts: "{{ mon_group_name|default('mons') }}"
serial: 1
become: true
pre_tasks:
- - name: select a running monitor
- set_fact: mon_host={{ item }}
+ - name: Select a running monitor
+ ansible.builtin.set_fact:
+ mon_host: "{{ item }}"
with_items: "{{ groups[mon_group_name] }}"
when: item != inventory_hostname
- - name: stop non-containerized ceph mon
- service:
+ - name: Stop non-containerized ceph mon
+ ansible.builtin.service:
name: "ceph-mon@{{ ansible_facts['hostname'] }}"
state: stopped
- enabled: no
+ enabled: false
- - name: remove old systemd unit files
- file:
+ - name: Remove old systemd unit files
+ ansible.builtin.file:
path: "{{ item }}"
state: absent
with_items:
- /lib/systemd/system/ceph-mon@.service
- /lib/systemd/system/ceph-mon.target
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
# NOTE: changed from file module to raw find command for performance reasons
# The file module has to run checks on current ownership of all directories and files. This is unnecessary
# as in this case we know we want all owned by ceph user
- - name: set proper ownership on ceph directories
- command: "find /var/lib/ceph/mon /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +"
+ - name: Set proper ownership on ceph directories
+ ansible.builtin.command: "find /var/lib/ceph/mon /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +"
changed_when: false
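# For reference, a rough sketch of the file-module equivalent that the find/chown
# command above replaces (illustrative only, not part of the playbook; recursing with
# the file module re-checks ownership of every path, which is why the raw find is faster):
#   - name: Set proper ownership on ceph directories
#     ansible.builtin.file:
#       path: "{{ item }}"
#       owner: "{{ ceph_uid }}"
#       group: "{{ ceph_uid }}"
#       recurse: true
#     loop:
#       - /var/lib/ceph/mon
#       - /etc/ceph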
- - name: check for existing old leveldb file extension (ldb)
- shell: stat /var/lib/ceph/mon/*/store.db/*.ldb
+ - name: Check for existing old leveldb file extension (ldb)
+ ansible.builtin.shell: stat /var/lib/ceph/mon/*/store.db/*.ldb
changed_when: false
failed_when: false
register: ldb_files
- - name: rename leveldb extension from ldb to sst
- shell: rename -v .ldb .sst /var/lib/ceph/mon/*/store.db/*.ldb
+ - name: Rename leveldb extension from ldb to sst
+ ansible.builtin.shell: rename -v .ldb .sst /var/lib/ceph/mon/*/store.db/*.ldb
changed_when: false
failed_when: false
when: ldb_files.rc == 0
- - name: copy mon initial keyring in /etc/ceph to satisfy fetch config task in ceph-container-common
- command: cp /var/lib/ceph/mon/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring /etc/ceph/{{ cluster }}.mon.keyring
+ - name: Copy mon initial keyring in /etc/ceph to satisfy fetch config task in ceph-container-common
+ ansible.builtin.command: cp /var/lib/ceph/mon/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring /etc/ceph/{{ cluster }}.mon.keyring
args:
creates: /etc/ceph/{{ cluster }}.mon.keyring
changed_when: false
failed_when: false
tasks:
- - import_role:
+ - name: Import ceph-handler role
+ ansible.builtin.import_role:
name: ceph-handler
- - import_role:
+ - name: Import ceph-container-engine role
+ ansible.builtin.import_role:
name: ceph-container-engine
- - import_role:
+ - name: Import ceph-container-common role
+ ansible.builtin.import_role:
name: ceph-container-common
- - import_role:
+ - name: Import ceph-mon role
+ ansible.builtin.import_role:
name: ceph-mon
post_tasks:
- - name: waiting for the monitor to join the quorum...
- command: "{{ container_binary }} run --rm -v /etc/ceph:/etc/ceph:z --entrypoint=ceph {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} --cluster {{ cluster }} quorum_status --format json"
+ - name: Waiting for the monitor to join the quorum...
+ ansible.builtin.command: "{{ container_binary }} run --rm -v /etc/ceph:/etc/ceph:z --entrypoint=ceph {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} --cluster {{ cluster }} quorum_status --format json"
register: ceph_health_raw
until: ansible_facts['hostname'] in (ceph_health_raw.stdout | trim | from_json)["quorum_names"]
changed_when: false
retries: "{{ health_mon_check_retries }}"
delay: "{{ health_mon_check_delay }}"
-- name: switching from non-containerized to containerized ceph mgr
+- name: Switching from non-containerized to containerized ceph mgr
hosts: "{{ mgr_group_name|default('mgrs') }}"
# failed_when: false is here because if we're
# working with a jewel cluster then ceph mgr
# will not exist
- - name: stop non-containerized ceph mgr(s)
- service:
+ - name: Stop non-containerized ceph mgr(s)
+ ansible.builtin.service:
name: "ceph-mgr@{{ ansible_facts['hostname'] }}"
state: stopped
- enabled: no
+ enabled: false
failed_when: false
- - name: remove old systemd unit files
- file:
+ - name: Remove old systemd unit files
+ ansible.builtin.file:
path: "{{ item }}"
state: absent
with_items:
- /lib/systemd/system/ceph-mgr@.service
- /lib/systemd/system/ceph-mgr.target
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
# NOTE: changed from file module to raw find command for performance reasons
# The file module has to run checks on current ownership of all directories and files. This is unnecessary
# as in this case we know we want all owned by ceph user
- - name: set proper ownership on ceph directories
- command: "find /var/lib/ceph/mgr /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +"
+ - name: Set proper ownership on ceph directories
+ ansible.builtin.command: "find /var/lib/ceph/mgr /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +"
changed_when: false
tasks:
- - import_role:
+ - name: Import ceph-handler role
+ ansible.builtin.import_role:
name: ceph-handler
- - import_role:
+ - name: Import ceph-container-engine role
+ ansible.builtin.import_role:
name: ceph-container-engine
- - import_role:
+ - name: Import ceph-container-common role
+ ansible.builtin.import_role:
name: ceph-container-common
- - import_role:
+ - name: Import ceph-mgr role
+ ansible.builtin.import_role:
name: ceph-mgr
-- name: set osd flags
+- name: Set osd flags
hosts: "{{ mon_group_name | default('mons') }}[0]"
- become: True
+ become: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary.yml
- - name: get pool list
- command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json"
+ - name: Get pool list
+ ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json"
register: pool_list
changed_when: false
check_mode: false
- - name: get balancer module status
- command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json"
+ - name: Get balancer module status
+ ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json"
register: balancer_status_switch
changed_when: false
check_mode: false
- - name: set_fact pools_pgautoscaler_mode
- set_fact:
+ - name: Set_fact pools_pgautoscaler_mode
+ ansible.builtin.set_fact:
pools_pgautoscaler_mode: "{{ pools_pgautoscaler_mode | default([]) | union([{'name': item.pool_name, 'mode': item.pg_autoscale_mode}]) }}"
with_items: "{{ pool_list.stdout | default('{}') | from_json }}"
- - name: disable balancer
- command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off"
+ - name: Disable balancer
+ ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off"
changed_when: false
when: (balancer_status_switch.stdout | from_json)['active'] | bool
- - name: disable pg autoscale on pools
+ - name: Disable pg autoscale on pools
ceph_pool:
name: "{{ item.name }}"
cluster: "{{ cluster }}"
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
- - name: set osd flags
+ - name: Set osd flags
ceph_osd_flag:
name: "{{ item }}"
cluster: "{{ cluster }}"
- nodeep-scrub
-- name: switching from non-containerized to containerized ceph osd
+- name: Switching from non-containerized to containerized ceph osd
vars:
containerized_deployment: true
osd_group_name: osds
- switch_to_containers: True
+ switch_to_containers: true
hosts: "{{ osd_group_name|default('osds') }}"
become: true
pre_tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: collect running osds
- shell: |
+ - name: Collect running osds
+ ansible.builtin.shell: |
set -o pipefail;
systemctl list-units | grep -E "loaded * active" | grep -Eo 'ceph-osd@[0-9]+.service|ceph-volume'
register: running_osds
failed_when: false
# systemd module does not support --runtime option
- - name: disable ceph-osd@.service runtime-enabled
- command: "systemctl disable --runtime {{ item }}" # noqa 303
+ - name: Disable ceph-osd@.service runtime-enabled
+ ansible.builtin.command: "systemctl disable --runtime {{ item }}" # noqa command-instead-of-module
changed_when: false
failed_when: false
with_items: "{{ running_osds.stdout_lines | default([]) }}"
when: item.startswith('ceph-osd@')
- - name: stop/disable/mask non-containerized ceph osd(s) (if any)
- systemd:
+ - name: Stop/disable/mask non-containerized ceph osd(s) (if any)
+ ansible.builtin.systemd:
name: "{{ item }}"
state: stopped
- enabled: no
+ enabled: false
with_items: "{{ running_osds.stdout_lines | default([]) }}"
when: running_osds != []
- - name: disable ceph.target
- systemd:
+ - name: Disable ceph.target
+ ansible.builtin.systemd:
name: ceph.target
- enabled: no
+ enabled: false
- - name: remove old ceph-osd systemd units
- file:
+ - name: Remove old ceph-osd systemd units
+ ansible.builtin.file:
path: "{{ item }}"
state: absent
with_items:
- /lib/systemd/system/ceph-osd@.service
- /lib/systemd/system/ceph-volume@.service
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
# NOTE: changed from file module to raw find command for performance reasons
# The file module has to run checks on current ownership of all directories and files. This is unnecessary
# as in this case we know we want all owned by ceph user
- - name: set proper ownership on ceph directories
- command: "find /var/lib/ceph/osd /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +"
+ - name: Set proper ownership on ceph directories
+ ansible.builtin.command: "find /var/lib/ceph/osd /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +"
changed_when: false
- - name: check for existing old leveldb file extension (ldb)
- shell: stat /var/lib/ceph/osd/*/current/omap/*.ldb
+ - name: Check for existing old leveldb file extension (ldb)
+ ansible.builtin.shell: stat /var/lib/ceph/osd/*/current/omap/*.ldb
changed_when: false
failed_when: false
register: ldb_files
- - name: rename leveldb extension from ldb to sst
- shell: rename -v .ldb .sst /var/lib/ceph/osd/*/current/omap/*.ldb
+ - name: Rename leveldb extension from ldb to sst
+ ansible.builtin.shell: rename -v .ldb .sst /var/lib/ceph/osd/*/current/omap/*.ldb
changed_when: false
failed_when: false
when: ldb_files.rc == 0
- - name: check if containerized osds are already running
- command: >
+ - name: Check if containerized osds are already running
+ ansible.builtin.command: >
{{ container_binary }} ps -q --filter='name=ceph-osd'
changed_when: false
failed_when: false
register: osd_running
- - name: get osd directories
- command: >
+ - name: Get osd directories
+ ansible.builtin.command: >
find /var/lib/ceph/osd {% if dmcrypt | bool %}/var/lib/ceph/osd-lockbox{% endif %} -maxdepth 1 -mindepth 1 -type d
register: osd_dirs
changed_when: false
failed_when: false
- - name: unmount all the osd directories
- command: >
+ - name: Unmount all the osd directories
+ ansible.builtin.command: >
umount {{ item }}
changed_when: false
failed_when: false
when: osd_running.rc != 0 or osd_running.stdout_lines | length == 0
tasks:
- - import_role:
+ - name: Import ceph-handler role
+ ansible.builtin.import_role:
name: ceph-handler
- - import_role:
+ - name: Import ceph-container-engine role
+ ansible.builtin.import_role:
name: ceph-container-engine
- - import_role:
+ - name: Import ceph-container-common role
+ ansible.builtin.import_role:
name: ceph-container-common
- - import_role:
+ - name: Import ceph-osd role
+ ansible.builtin.import_role:
name: ceph-osd
post_tasks:
- - name: container - waiting for clean pgs...
- command: >
+ - name: Container - waiting for clean pgs...
+ ansible.builtin.command: >
{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} pg stat --format json
register: ceph_health_post
until: >
changed_when: false
-- name: unset osd flags
+- name: Unset osd flags
hosts: "{{ mon_group_name | default('mons') }}[0]"
- become: True
+ become: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary.yml
- - name: re-enable pg autoscale on pools
+ - name: Re-enable pg autoscale on pools
ceph_pool:
name: "{{ item.name }}"
cluster: "{{ cluster }}"
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
- - name: unset osd flags
+ - name: Unset osd flags
ceph_osd_flag:
name: "{{ item }}"
cluster: "{{ cluster }}"
- noout
- nodeep-scrub
- - name: re-enable balancer
- command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on"
+ - name: Re-enable balancer
+ ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on"
changed_when: false
when: (balancer_status_switch.stdout | from_json)['active'] | bool
-- name: switching from non-containerized to containerized ceph mds
+- name: Switching from non-containerized to containerized ceph mds
hosts: "{{ mds_group_name|default('mdss') }}"
become: true
pre_tasks:
- - name: stop non-containerized ceph mds(s)
- service:
+ - name: Stop non-containerized ceph mds(s)
+ ansible.builtin.service:
name: "ceph-mds@{{ ansible_facts['hostname'] }}"
state: stopped
- enabled: no
+ enabled: false
- - name: remove old systemd unit files
- file:
+ - name: Remove old systemd unit files
+ ansible.builtin.file:
path: "{{ item }}"
state: absent
with_items:
- /lib/systemd/system/ceph-mds@.service
- /lib/systemd/system/ceph-mds.target
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
# NOTE: changed from file module to raw find command for performance reasons
# The file module has to run checks on current ownership of all directories and files. This is unnecessary
# as in this case we know we want all owned by ceph user
- - name: set proper ownership on ceph directories
- command: "find /var/lib/ceph/mds /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
+ - name: Set proper ownership on ceph directories
+ ansible.builtin.command: "find /var/lib/ceph/mds /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
changed_when: false
tasks:
- - import_role:
+ - name: Import ceph-handler role
+ ansible.builtin.import_role:
name: ceph-handler
- - import_role:
+ - name: Import ceph-container-engine role
+ ansible.builtin.import_role:
name: ceph-container-engine
- - import_role:
+ - name: Import ceph-container-common role
+ ansible.builtin.import_role:
name: ceph-container-common
- - import_role:
- name: ceph-mds
+ - name: Import ceph-mds role
+ ansible.builtin.import_role:
+ name: ceph-mds
-- name: switching from non-containerized to containerized ceph rgw
+- name: Switching from non-containerized to containerized ceph rgw
hosts: "{{ rgw_group_name|default('rgws') }}"
serial: 1
become: true
pre_tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
- - import_role:
+ - name: Import ceph-config role
+ ansible.builtin.import_role:
name: ceph-config
tasks_from: rgw_systemd_environment_file.yml
# NOTE: changed from file module to raw find command for performance reasons
# The file module has to run checks on current ownership of all directories and files. This is unnecessary
# as in this case we know we want all owned by ceph user
- - name: set proper ownership on ceph directories
- command: "find /var/lib/ceph/radosgw /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
+ - name: Set proper ownership on ceph directories
+ ansible.builtin.command: "find /var/lib/ceph/radosgw /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
changed_when: false
tasks:
- - name: stop non-containerized ceph rgw(s)
- service:
+ - name: Stop non-containerized ceph rgw(s)
+ ansible.builtin.service:
name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: stopped
- enabled: no
+ enabled: false
with_items: "{{ rgw_instances }}"
- - name: remove old systemd unit files
- file:
+ - name: Remove old systemd unit files
+ ansible.builtin.file:
path: "{{ item }}"
state: absent
with_items:
- /lib/systemd/system/ceph-radosgw@.service
- /lib/systemd/system/ceph-radosgw.target
- - import_role:
+ - name: Import ceph-handler role
+ ansible.builtin.import_role:
name: ceph-handler
- - import_role:
+ - name: Import ceph-container-engine role
+ ansible.builtin.import_role:
name: ceph-container-engine
- - import_role:
+ - name: Import ceph-container-common role
+ ansible.builtin.import_role:
name: ceph-container-common
- - import_role:
+ - name: Import ceph-rgw role
+ ansible.builtin.import_role:
name: ceph-rgw
-- name: switching from non-containerized to containerized ceph rbd-mirror
+- name: Switching from non-containerized to containerized ceph rbd-mirror
hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"
serial: 1
become: true
pre_tasks:
- - name: check for ceph rbd mirror services
- command: systemctl show --no-pager --property=Id ceph-rbd-mirror@* # noqa 303
+ - name: Check for ceph rbd mirror services
+ ansible.builtin.command: systemctl show --no-pager --property=Id ceph-rbd-mirror@* # noqa: command-instead-of-module
changed_when: false
register: rbdmirror_services
- - name: stop non-containerized ceph rbd mirror(s)
- service:
+ - name: Stop non-containerized ceph rbd mirror(s) # noqa: ignore-errors
+ ansible.builtin.service:
name: "{{ item.split('=')[1] }}"
state: stopped
- enabled: no
+ enabled: false
ignore_errors: true
loop: "{{ rbdmirror_services.stdout_lines }}"
- - name: remove old systemd unit files
- file:
+ - name: Remove old systemd unit files
+ ansible.builtin.file:
path: "{{ item }}"
state: absent
with_items:
- /lib/systemd/system/ceph-rbd-mirror@.service
- /lib/systemd/system/ceph-rbd-mirror.target
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
# NOTE: changed from file module to raw find command for performance reasons
# The file module has to run checks on current ownership of all directories and files. This is unnecessary
# as in this case we know we want all owned by ceph user
- - name: set proper ownership on ceph directories
- command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
+ - name: Set proper ownership on ceph directories
+ ansible.builtin.command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
changed_when: false
tasks:
- - import_role:
+ - name: Import ceph-handler role
+ ansible.builtin.import_role:
name: ceph-handler
- - import_role:
+ - name: Import ceph-container-engine role
+ ansible.builtin.import_role:
name: ceph-container-engine
- - import_role:
+ - name: Import ceph-container-common role
+ ansible.builtin.import_role:
name: ceph-container-common
- - import_role:
+ - name: Import ceph-rbd-mirror role
+ ansible.builtin.import_role:
name: ceph-rbd-mirror
-- name: switching from non-containerized to containerized ceph nfs
+- name: Switching from non-containerized to containerized ceph nfs
hosts: "{{ nfs_group_name|default('nfss') }}"
# failed_when: false is here because if we're
# working with a jewel cluster then ceph nfs
# will not exist
- - name: stop non-containerized ceph nfs(s)
- service:
+ - name: Stop non-containerized ceph nfs(s)
+ ansible.builtin.service:
name: nfs-ganesha
state: stopped
- enabled: no
+ enabled: false
failed_when: false
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
# NOTE: changed from file module to raw find command for performance reasons
# The file module has to run checks on current ownership of all directories and files. This is unnecessary
# as in this case we know we want all owned by ceph user
- - name: set proper ownership on ceph directories
- command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
+ - name: Set proper ownership on ceph directories
+ ansible.builtin.command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
changed_when: false
tasks:
- - import_role:
+ - name: Import ceph-handler role
+ ansible.builtin.import_role:
name: ceph-handler
- - import_role:
+ - name: Import ceph-container-engine role
+ ansible.builtin.import_role:
name: ceph-container-engine
- - import_role:
+ - name: Import ceph-container-common role
+ ansible.builtin.import_role:
name: ceph-container-common
- - import_role:
+ - name: Import ceph-nfs role
+ ansible.builtin.import_role:
name: ceph-nfs
-- name: switching from non-containerized to containerized iscsigws
+- name: Switching from non-containerized to containerized iscsigws
hosts: "{{ iscsi_gw_group_name|default('iscsigws') }}"
vars:
containerized_deployment: true
become: true
serial: 1
pre_tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - name: stop iscsigw services
- service:
+ - name: Stop iscsigw services
+ ansible.builtin.service:
name: "{{ item }}"
state: stopped
- enabled: no
+ enabled: false
with_items:
- tcmu-runner
- rbd-target-gw
- rbd-target-api
- - name: remove old systemd unit files
- file:
+ - name: Remove old systemd unit files
+ ansible.builtin.file:
path: "/usr/lib/systemd/system/{{ item }}.service"
state: absent
with_items:
- rbd-target-gw
- rbd-target-api
tasks:
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
- - import_role:
+ - name: Import ceph-handler role
+ ansible.builtin.import_role:
name: ceph-handler
# NOTE: changed from file module to raw find command for performance reasons
# The file module has to run checks on current ownership of all directories and files. This is unnecessary
# as in this case we know we want all owned by ceph user
- - name: set proper ownership on ceph directories
- command: "find /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +"
+ - name: Set proper ownership on ceph directories
+ ansible.builtin.command: "find /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown -h {{ ceph_uid }}:{{ ceph_uid }} {} +"
changed_when: false
- - import_role:
+ - name: Import ceph-container-engine role
+ ansible.builtin.import_role:
name: ceph-container-engine
- - import_role:
+ - name: Import ceph-container-common role
+ ansible.builtin.import_role:
name: ceph-container-common
- - import_role:
+ - name: Import ceph-iscsi-gw role
+ ansible.builtin.import_role:
name: ceph-iscsi-gw
-- name: switching from non-containerized to containerized ceph-crash
+- name: Switching from non-containerized to containerized ceph-crash
hosts:
- "{{ mon_group_name | default('mons') }}"
containerized_deployment: true
become: true
tasks:
- - name: stop non-containerized ceph-crash
- service:
+ - name: Stop non-containerized ceph-crash
+ ansible.builtin.service:
name: ceph-crash
state: stopped
- enabled: no
+ enabled: false
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary.yml
- - import_role:
+ - name: Import ceph-handler role
+ ansible.builtin.import_role:
name: ceph-handler
- - import_role:
+ - name: Import ceph-crash role
+ ansible.builtin.import_role:
name: ceph-crash
-- name: final task
+- name: Final task
hosts:
- "{{ mon_group_name|default('mons') }}"
- "{{ mgr_group_name|default('mgrs') }}"
containerized_deployment: true
become: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
# NOTE: changed from file module to raw find command for performance reasons
# The file module has to run checks on current ownership of all directories and files. This is unnecessary
# as in this case we know we want all owned by ceph user
- - name: set proper ownership on ceph directories
- command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
+ - name: Set proper ownership on ceph directories
+ ansible.builtin.command: "find /var/lib/ceph /etc/ceph -not -( -user {{ ceph_uid }} -or -group {{ ceph_uid }} -) -execdir chown {{ ceph_uid }}:{{ ceph_uid }} {} +"
changed_when: false
# 4. Run the playbook called: `take-over-existing-cluster.yml` like this `ansible-playbook take-over-existing-cluster.yml`.
# 5. Eventually run Ceph Ansible to validate everything by doing: `ansible-playbook site.yml`.
-- hosts: mons
- become: True
+- name: Fetch keys
+ hosts: mons
+ become: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
- - import_role:
+
+ - name: Import ceph-fetch-keys role
+ ansible.builtin.import_role:
name: ceph-fetch-keys
-- hosts:
- - mons
- - osds
- - mdss
- - rgws
- - nfss
- - rbdmirrors
- - clients
- - mgrs
- - iscsi-gw
+- name: Take over existing cluster
+ hosts:
+ - mons
+ - osds
+ - mdss
+ - rgws
+ - nfss
+ - rbdmirrors
+ - clients
+ - mgrs
+ - iscsi-gw
become: true
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
post_tasks:
- - name: get the name of the existing ceph cluster
- shell: |
+ - name: Get the name of the existing ceph cluster
+ ansible.builtin.shell: |
set -o pipefail;
basename $(grep --exclude '*.bak' -R fsid /etc/ceph/ | egrep -o '^[^.]*' | head -n 1)
changed_when: false
register: cluster_name
- - name: "stat {{ cluster_name.stdout }}.conf"
- stat:
+ - name: Run stat module on Ceph configuration file
+ ansible.builtin.stat:
path: "/etc/ceph/{{ cluster_name.stdout }}.conf"
register: ceph_conf_stat
# Creates a backup of original ceph conf file in 'cluster_name-YYYYMMDDTHHMMSS.conf.bak' format
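# e.g. for a cluster named 'ceph' this would produce something like
# /etc/ceph/ceph-20240101T120000.conf.bak (the timestamp shown is illustrative)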
- - name: "make a backup of original {{ cluster_name.stdout }}.conf"
- copy:
+ - name: Make a backup of original Ceph configuration file
+ ansible.builtin.copy:
src: "/etc/ceph/{{ cluster_name.stdout }}.conf"
dest: "/etc/ceph/{{ cluster_name.stdout }}-{{ ansible_date_time.iso8601_basic_short }}.conf.bak"
remote_src: true
group: "{{ ceph_conf_stat.stat.gr_name }}"
mode: "{{ ceph_conf_stat.stat.mode }}"
- - name: generate ceph configuration file
+ - name: Generate ceph configuration file
openstack.config_template.config_template:
src: "roles/ceph-config/templates/ceph.conf.j2"
dest: "/etc/ceph/{{ cluster_name.stdout }}.conf"
# the operation won't last for too long.
- hosts: <your_host>
- gather_facts: False
+ gather_facts: false
tasks:
- name: Set the noout flag
- command: ceph osd set noout
+ ansible.builtin.command: ceph osd set noout
delegate_to: <your_monitor>
- name: Turn off the server
- command: poweroff
+ ansible.builtin.command: poweroff
- name: Wait for the server to go down
local_action:
timeout: 3600
- name: Unset the noout flag
- command: ceph osd unset noout
+ ansible.builtin.command: ceph osd unset noout
delegate_to: <your_monitor>
- hosts: mons
serial: 1
- sudo: True
+ sudo: true
vars:
backup_dir: /tmp/
tasks:
- name: Check if the node has been migrated already
- stat: >
+ ansible.builtin.stat: >
path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/migration_completed
register: migration_completed
failed_when: false
- name: Check for failed run
- stat: >
+ ansible.builtin.stat: >
path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
register: mon_archive_leftover
when: migration_completed.stat.exists == False and mon_archive_leftover.stat.exists == True
- name: Compress the store as much as possible
- command: ceph tell mon.{{ ansible_facts['hostname'] }} compact
+ ansible.builtin.command: ceph tell mon.{{ ansible_facts['hostname'] }} compact
when: migration_completed.stat.exists == False
- name: Check if sysvinit
- stat: >
+ ansible.builtin.stat: >
path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/sysvinit
register: monsysvinit
- changed_when: False
+ changed_when: false
- name: Check if upstart
- stat: >
+ ansible.builtin.stat: >
path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/upstart
register: monupstart
- changed_when: False
+ changed_when: false
- name: Check if init does what it is supposed to do (Sysvinit)
- shell: >
+ ansible.builtin.shell: >
ps faux|grep -sq [c]eph-mon && service ceph status mon >> /dev/null
register: ceph_status_sysvinit
- changed_when: False
+ changed_when: false
# can't complete the condition since the previous tasks never ran...
- fail: msg="Something is terribly wrong here, sysvinit is configured, the service is started BUT the init script does not return 0, GO FIX YOUR SETUP!"
when: ceph_status_sysvinit.rc != 0 and migration_completed.stat.exists == False and monsysvinit.stat.exists == True
- name: Check if init does what it is supposed to do (upstart)
- shell: >
+ ansible.builtin.shell: >
ps faux|grep -sq [c]eph-mon && status ceph-mon-all >> /dev/null
register: ceph_status_upstart
- changed_when: False
+ changed_when: false
- fail: msg="Something is terribly wrong here, upstart is configured, the service is started BUT the init script does not return 0, GO FIX YOUR SETUP!"
when: ceph_status_upstart.rc != 0 and migration_completed.stat.exists == False and monupstart.stat.exists == True
# NOTE (leseb): should we convert upstart to sysvinit here already?
- name: Archive monitor stores
- shell: >
+ ansible.builtin.shell: >
tar -cpvzf - --one-file-system . /etc/ceph/* | cat > {{ ansible_facts['hostname'] }}.tar
chdir=/var/lib/ceph/
creates={{ ansible_facts['hostname'] }}.tar
when: migration_completed.stat.exists == False
- name: Reboot the server
- command: reboot
+ ansible.builtin.command: reboot
when: migration_completed.stat.exists == False
- name: Wait for the server to come up
when: migration_completed.stat.exists == False
- name: Check if sysvinit
- stat: >
+ ansible.builtin.stat: >
path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/sysvinit
register: monsysvinit
- changed_when: False
+ changed_when: false
- name: Check if upstart
- stat: >
+ ansible.builtin.stat: >
path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/upstart
register: monupstart
- changed_when: False
+ changed_when: false
- name: Make sure the monitor is stopped (Upstart)
service: >
when: migration_completed.stat.exists == False
- name: Copy keys and configs
- shell: >
+ ansible.builtin.shell: >
cp etc/ceph/* /etc/ceph/
chdir=/var/lib/ceph/
when: migration_completed.stat.exists == False
- name: Configure RHEL7 for sysvinit
- shell: find -L /var/lib/ceph/mon/ -mindepth 1 -maxdepth 1 -regextype posix-egrep -regex '.*/[A-Za-z0-9]+-[A-Za-z0-9._-]+' -exec touch {}/sysvinit \; -exec rm {}/upstart \;
+ ansible.builtin.shell: find -L /var/lib/ceph/mon/ -mindepth 1 -maxdepth 1 -regextype posix-egrep -regex '.*/[A-Za-z0-9]+-[A-Za-z0-9._-]+' -exec touch {}/sysvinit \; -exec rm {}/upstart \;
when: migration_completed.stat.exists == False
# NOTE (leseb): at this point the upstart and sysvinit checks are not necessary
when: migration_completed.stat.exists == False
- name: Waiting for the monitor to join the quorum...
- shell: >
+ ansible.builtin.shell: >
ceph -s | grep monmap | sed 's/.*quorum//' | egrep -q {{ ansible_facts['hostname'] }}
register: result
until: result.rc == 0
- hosts: osds
serial: 1
- sudo: True
+ sudo: true
vars:
backup_dir: /tmp/
tasks:
- name: Check if the node has been migrated already
- stat: >
+ ansible.builtin.stat: >
path=/var/lib/ceph/migration_completed
register: migration_completed
failed_when: false
- name: Check for failed run
- stat: >
+ ansible.builtin.stat: >
path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
register: osd_archive_leftover
when: migration_completed.stat.exists == False and osd_archive_leftover.stat.exists == True
- name: Check if init does what it is supposed to do (Sysvinit)
- shell: >
+ ansible.builtin.shell: >
ps faux|grep -sq [c]eph-osd && service ceph status osd >> /dev/null
register: ceph_status_sysvinit
- changed_when: False
+ changed_when: false
# can't complete the condition since the previous tasks never ran...
- fail: msg="Something is terribly wrong here, sysvinit is configured, the services are started BUT the init script does not return 0, GO FIX YOUR SETUP!"
when: ceph_status_sysvinit.rc != 0 and migration_completed.stat.exists == False and monsysvinit.stat.exists == True
- name: Check if init does what it is supposed to do (upstart)
- shell: >
+ ansible.builtin.shell: >
ps faux|grep -sq [c]eph-osd && initctl list|egrep -sq "ceph-osd \(ceph/.\) start/running, process [0-9][0-9][0-9][0-9]"
register: ceph_status_upstart
- changed_when: False
+ changed_when: false
- fail: msg="Something is terribly wrong here, upstart is configured, the services are started BUT the init script does not return 0, GO FIX YOUR SETUP!"
when: ceph_status_upstart.rc != 0 and migration_completed.stat.exists == False and monupstart.stat.exists == True
- name: Set the noout flag
- command: ceph osd set noout
+ ansible.builtin.command: ceph osd set noout
delegate_to: "{{ item }}"
with_items: "{{ groups[mon_group_name][0] }}"
when: migration_completed.stat.exists == False
- name: Check if sysvinit
- shell: stat /var/lib/ceph/osd/ceph-*/sysvinit
+ ansible.builtin.shell: stat /var/lib/ceph/osd/ceph-*/sysvinit
register: osdsysvinit
failed_when: false
- changed_when: False
+ changed_when: false
- name: Check if upstart
- shell: stat /var/lib/ceph/osd/ceph-*/upstart
+ ansible.builtin.shell: stat /var/lib/ceph/osd/ceph-*/upstart
register: osdupstart
failed_when: false
- changed_when: False
+ changed_when: false
- name: Archive ceph configs
- shell: >
+ ansible.builtin.shell: >
tar -cpvzf - --one-file-system . /etc/ceph/ceph.conf | cat > {{ ansible_facts['hostname'] }}.tar
chdir=/var/lib/ceph/
creates={{ ansible_facts['hostname'] }}.tar
when: migration_completed.stat.exists == False
- name: Collect OSD ports
- shell: netstat -tlpn | awk -F ":" '/ceph-osd/ { sub (" .*", "", $2); print $2 }' | uniq
+ ansible.builtin.shell: netstat -tlpn | awk -F ":" '/ceph-osd/ { sub (" .*", "", $2); print $2 }' | uniq
register: osd_ports
when: migration_completed.stat.exists == False
when: migration_completed.stat.exists == False
- name: Configure RHEL with sysvinit
- shell: find -L /var/lib/ceph/osd/ -mindepth 1 -maxdepth 1 -regextype posix-egrep -regex '.*/[A-Za-z0-9]+-[A-Za-z0-9._-]+' -exec touch {}/sysvinit \; -exec rm {}/upstart \;
+ ansible.builtin.shell: find -L /var/lib/ceph/osd/ -mindepth 1 -maxdepth 1 -regextype posix-egrep -regex '.*/[A-Za-z0-9]+-[A-Za-z0-9._-]+' -exec touch {}/sysvinit \; -exec rm {}/upstart \;
when: migration_completed.stat.exists == False
- name: Reboot the server
- command: reboot
+ ansible.builtin.command: reboot
when: migration_completed.stat.exists == False
- name: Wait for the server to come up
when: migration_completed.stat.exists == False
- name: Copy keys and configs
- shell: >
+ ansible.builtin.shell: >
cp etc/ceph/* /etc/ceph/
chdir=/var/lib/ceph/
when: migration_completed.stat.exists == False
# - "{{ osd_ports.stdout_lines }}"
- name: Waiting for clean PGs...
- shell: >
+ ansible.builtin.shell: >
test "[""$(ceph -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')""]" = "$(ceph -s -f json | python -c 'import sys, json; print([ i["count"] for i in json.load(sys.stdin)["pgmap"]["pgs_by_state"] if i["state_name"] == "active+clean"])')"
register: result
until: result.rc == 0
when: migration_completed.stat.exists == False
- name: Unset the noout flag
- command: ceph osd unset noout
+ ansible.builtin.command: ceph osd unset noout
delegate_to: "{{ item }}"
with_items: "{{ groups[mon_group_name][0] }}"
when: migration_completed.stat.exists == False
- hosts: rgws
serial: 1
- sudo: True
+ sudo: true
vars:
backup_dir: /tmp/
tasks:
- name: Check if the node has been migrated already
- stat: >
+ ansible.builtin.stat: >
path=/var/lib/ceph/radosgw/migration_completed
register: migration_completed
failed_when: false
- name: Check for failed run
- stat: >
+ ansible.builtin.stat: >
path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
register: rgw_archive_leftover
when: migration_completed.stat.exists == False and rgw_archive_leftover.stat.exists == True
- name: Archive rados gateway configs
- shell: >
+ ansible.builtin.shell: >
tar -cpvzf - --one-file-system . /etc/ceph/* | cat > {{ ansible_facts['hostname'] }}.tar
chdir=/var/lib/ceph/
creates={{ ansible_facts['hostname'] }}.tar
when: migration_completed.stat.exists == False
- name: Reboot the server
- command: reboot
+ ansible.builtin.command: reboot
when: migration_completed.stat.exists == False
- name: Wait for the server to come up
when: migration_completed.stat.exists == False
- name: Copy keys and configs
- shell: >
+ ansible.builtin.shell: >
{{ item }}
chdir=/var/lib/ceph/
with_items: cp etc/ceph/* /etc/ceph/
tasks:
- - name: load a variable file for devices partition
+ - name: Load a variable file for devices partition
include_vars: "{{ item }}"
with_first_found:
- files:
- "host_vars/default.yml"
skip: true
- - name: exit playbook, if devices not defined
- fail:
+ - name: Exit playbook, if devices not defined
+ ansible.builtin.fail:
msg: "devices must be define in host_vars/default.yml or host_vars/{{ ansible_facts['hostname'] }}.yml"
when: devices is not defined
- - name: install sgdisk(gdisk)
- package:
+ - name: Install sgdisk(gdisk)
+ ansible.builtin.package:
name: gdisk
state: present
register: result
until: result is succeeded
- - name: erase all previous partitions(dangerous!!!)
- shell: sgdisk --zap-all -- /dev/{{item.device_name}}
+ - name: Erase all previous partitions(dangerous!!!)
+ ansible.builtin.shell: sgdisk --zap-all -- /dev/{{item.device_name}}
with_items: "{{ devices }}"
- - name: make osd partitions
- shell: >
+ - name: Make osd partitions
+ ansible.builtin.shell: >
sgdisk --new={{item.1.index}}:0:+{{item.1.size}} "--change-name={{item.1.index}}:ceph {{item.1.type}}"
"--typecode={{item.1.index}}:{% if item.1.type=='data' %}{{data_typecode}}{% else %}{{journal_typecode}}{% endif %}"
--mbrtogpt -- /dev/{{item.0.device_name}}
group: 64045
when: ansible_facts['os_family'] == "Debian"
- - name: change partitions ownership
- file:
+ - name: Change partitions ownership
+ ansible.builtin.file:
path: "/dev/{{item.0.device_name}}{{item.1.index}}"
owner: "{{ owner | default('root')}}"
group: "{{ group | default('disk')}}"
when:
item.0.device_name | match('/dev/([hsv]d[a-z]{1,2}){1,2}$')
- - name: change partitions ownership
- file:
+ - name: Change partitions ownership
+ ansible.builtin.file:
path: "/dev/{{item.0.device_name}}p{{item.1.index}}"
owner: "{{ owner | default('root')}}"
group: "{{ group | default('disk')}}"
serial: 1
tasks:
- - name: get osd(s) if directory stat
- stat:
+ - name: Get osd(s) if directory stat
+ ansible.builtin.stat:
path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.1.osd_id }}/journal_uuid"
register: osds_dir_stat
with_subelements:
- "{{ osds_journal_devices }}"
- partitions
- - name: exit playbook osd(s) is not on this host
- fail:
+ - name: Exit playbook osd(s) is not on this host
+ ansible.builtin.fail:
msg: exit playbook osd(s) is not on this host
with_items:
osds_dir_stat.results
when: osds_dir_stat is defined and item.stat.exists == false
- - name: install sgdisk(gdisk)
- package:
+ - name: Install sgdisk(gdisk)
+ ansible.builtin.package:
name: gdisk
state: present
register: result
until: result is succeeded
when: osds_journal_devices is defined
- - name: generate uuid for osds journal
- command: uuidgen
+ - name: Generate uuid for osds journal
+ ansible.builtin.command: uuidgen
register: osds
with_subelements:
- "{{ osds_journal_devices }}"
- partitions
- - name: make osd partitions on ssd
- shell: >
+ - name: Make osd partitions on ssd
+ ansible.builtin.shell: >
sgdisk --new={{item.item[1].index}}:0:+{{item.item[1].size}} "--change-name={{ item.item[1].index }}:ceph journal"
--typecode={{ item.item[1].index }}:{{ journal_typecode }}
--partition-guid={{ item.item[1].index }}:{{ item.stdout }}
--mbrtogpt -- {{ item.item[0].device_name }}
with_items: "{{ osds.results }}"
- - name: stop osd(s) service
- service:
+ - name: Stop osd(s) service
+ ansible.builtin.service:
name: "ceph-osd@{{ item.item[1].osd_id }}"
state: stopped
with_items: "{{ osds.results }}"
- - name: flush osd(s) journal
- command: ceph-osd -i {{ item.item[1].osd_id }} --flush-journal --cluster {{ cluster }}
+ - name: Flush osd(s) journal
+ ansible.builtin.command: ceph-osd -i {{ item.item[1].osd_id }} --flush-journal --cluster {{ cluster }}
with_items: "{{ osds.results }}"
when: osds_journal_devices is defined
- - name: update osd(s) journal soft link
- command: ln -sf /dev/disk/by-partuuid/{{ item.stdout }} /var/lib/ceph/osd/{{ cluster }}-{{ item.item[1].osd_id }}/journal
+ - name: Update osd(s) journal soft link
+ ansible.builtin.command: ln -sf /dev/disk/by-partuuid/{{ item.stdout }} /var/lib/ceph/osd/{{ cluster }}-{{ item.item[1].osd_id }}/journal
with_items: "{{ osds.results }}"
- - name: update osd(s) journal uuid
- command: echo {{ item.stdout }} > /var/lib/ceph/osd/{{ cluster }}-{{ item.item[1].osd_id }}/journal_uuid
+ - name: Update osd(s) journal uuid
+ ansible.builtin.command: echo {{ item.stdout }} > /var/lib/ceph/osd/{{ cluster }}-{{ item.item[1].osd_id }}/journal_uuid
with_items: "{{ osds.results }}"
- - name: initialize osd(s) new journal
- command: ceph-osd -i {{ item.item[1].osd_id }} --mkjournal --cluster {{ cluster }}
+ - name: Initialize osd(s) new journal
+ ansible.builtin.command: ceph-osd -i {{ item.item[1].osd_id }} --mkjournal --cluster {{ cluster }}
with_items: "{{ osds.results }}"
- - name: start osd(s) service
- service:
+ - name: Start osd(s) service
+ ansible.builtin.service:
name: "ceph-osd@{{ item.item[1].osd_id }}"
state: started
with_items: "{{ osds.results }}"
---
# Nukes a multisite config
- hosts: rgws
- become: True
+ become: true
tasks:
- include_tasks: roles/ceph-rgw/tasks/multisite/destroy.yml
handlers:
# Ansible 2.1.0 bug will ignore included handlers without this
- - name: import_tasks roles/ceph-rgw/handlers/main.yml
+ - name: Import_tasks roles/ceph-rgw/handlers/main.yml
import_tasks: roles/ceph-rgw/handlers/main.yml
# @param osd_id: Which osd's journal this partition is for.
#
# ansible-playbook recover-osds-after-ssd-journal-failure.yml
-# Prompts for select which host to recover, defaults to null,
+# Prompts to select which host to recover; the default is null,
# which selects no host. Input the hostname of the host
# on which to recover osds after ssd journal failure
#
# automation scripts to avoid interactive prompt.
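# A hypothetical non-interactive invocation (the host name is only an example):
#   ansible-playbook recover-osds-after-ssd-journal-failure.yml -e target_host=osd-node-1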
- hosts: localhost
- gather_facts: no
+ gather_facts: false
vars_prompt:
- - name: target_host
+ - name: target_host # noqa: name[casing]
prompt: please enter the target hostname which to recover osds after ssd journal failure
- private: no
+ private: false
tasks:
- add_host:
name: "{{ target_host }}"
- fail: msg="please define dev_ssds variable"
when: dev_ssds|length <= 0
- - name: get osd(s) if directory stat
- stat:
+ - name: Get osd(s) if directory stat
+ ansible.builtin.stat:
path: "/var/lib/ceph/osd/{{ cluster }}-{{ item.1.osd_id }}/journal_uuid"
register: osds_dir_stat
with_subelements:
- "{{ dev_ssds }}"
- partitions
- - name: exit playbook osd(s) is not on this host
- fail:
+ - name: Exit playbook osd(s) is not on this host
+ ansible.builtin.fail:
msg: exit playbook osd(s) is not on this host
with_items:
osds_dir_stat.results
- osds_dir_stat is defined | bool
- item.stat.exists == false
- - name: install sgdisk(gdisk)
- package:
+ - name: Install sgdisk(gdisk)
+ ansible.builtin.package:
name: gdisk
state: present
register: result
until: result is succeeded
- - name: get osd(s) journal uuid
- command: cat "/var/lib/ceph/osd/{{ cluster }}-{{ item.1.osd_id }}/journal_uuid"
+ - name: Get osd(s) journal uuid
+ ansible.builtin.command: cat "/var/lib/ceph/osd/{{ cluster }}-{{ item.1.osd_id }}/journal_uuid"
register: osds_uuid
with_subelements:
- "{{ dev_ssds }}"
- partitions
- - name: make partitions on new ssd
- shell: >
+ - name: Make partitions on new ssd
+ ansible.builtin.shell: >
sgdisk --new={{item.item[1].index}}:0:+{{item.item[1].size}} "--change-name={{ item.item[1].index }}:ceph journal"
--typecode={{ item.item[1].index }}:{{ journal_typecode }}
--partition-guid={{ item.item[1].index }}:{{ item.stdout }}
--mbrtogpt -- {{ item.item[0].device_name }}
with_items: "{{ osds_uuid.results }}"
- - name: stop osd(s) service
- service:
+ - name: Stop osd(s) service
+ ansible.builtin.service:
name: "ceph-osd@{{ item.item[1].osd_id }}"
state: stopped
with_items: "{{ osds_uuid.results }}"
- - name: reinitialize osd(s) journal in new ssd
- command: ceph-osd -i {{ item.item[1].osd_id }} --mkjournal --cluster {{ cluster }}
+ - name: Reinitialize osd(s) journal in new ssd
+ ansible.builtin.command: ceph-osd -i {{ item.item[1].osd_id }} --mkjournal --cluster {{ cluster }}
with_items: "{{ osds_uuid.results }}"
- - name: start osd(s) service
- service:
+ - name: Start osd(s) service
+ ansible.builtin.service:
name: "ceph-osd@{{ item.item[1].osd_id }}"
state: started
with_items: "{{ osds_uuid.results }}"
# Overrides the prompt using -e option. Can be used in
# automation scripts to avoid interactive prompt.
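# A minimal sketch of a non-interactive run (the playbook file name and OSD ids are
# illustrative; osd_to_replace accepts a comma-separated list of OSD ids):
#   ansible-playbook replace-osd.yml -e ireallymeanit=yes -e osd_to_replace=0,1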
-- name: gather facts and check the init system
+- name: Gather facts and check the init system
hosts:
- "{{ mon_group_name|default('mons') }}"
- "{{ osd_group_name|default('osds') }}"
- become: True
+ become: true
tasks:
- - debug: msg="gather facts on all Ceph hosts for following reference"
+ - name: Gather facts on all Ceph hosts
+ ansible.builtin.debug:
+ msg: "gather facts on all Ceph hosts for following reference"
-- name: confirm whether user really meant to replace osd(s)
+- name: Confirm whether user really meant to replace osd(s)
hosts: localhost
become: true
vars_prompt:
- - name: ireallymeanit
+ - name: ireallymeanit # noqa: name[casing]
prompt: Are you sure you want to replace the osd(s)?
default: 'no'
- private: no
+ private: false
vars:
mon_group_name: mons
osd_group_name: osds
pre_tasks:
- - name: exit playbook, if user did not mean to replace the osd(s)
- fail:
+ - name: Exit playbook, if user did not mean to replace the osd(s)
+ ansible.builtin.fail:
msg: "Exiting replace-osd playbook, no osd(s) was/were replaced..
To replace the osd(s), either say 'yes' on the prompt or
or use `-e ireallymeanit=yes` on the command line when
invoking the playbook"
when: ireallymeanit != 'yes'
- - name: exit playbook, if no osd(s) was/were given
- fail:
+ - name: Exit playbook, if no osd(s) was/were given
+ ansible.builtin.fail:
msg: "osd_to_replace must be declared
Exiting replace-osd playbook, no OSD(s) was/were replaced.
On the command line when invoking the playbook, you can use
when: osd_to_replace is not defined
tasks:
- - import_role:
+ - name: Import ceph-defaults role
+ ansible.builtin.import_role:
name: ceph-defaults
post_tasks:
- - name: set_fact container_exec_cmd build docker exec command (containerized)
- set_fact:
+ - name: Set_fact container_exec_cmd build docker exec command (containerized)
+ ansible.builtin.set_fact:
container_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
when: containerized_deployment | bool
- - name: exit playbook, if can not connect to the cluster
- command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
+ - name: Exit playbook, if can not connect to the cluster
+ ansible.builtin.command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
register: ceph_health
until: ceph_health.stdout.find("HEALTH") > -1
delegate_to: "{{ groups[mon_group_name][0] }}"
retries: 5
delay: 2
- - name: find the host(s) where the osd(s) is/are running on
- command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd find {{ item }}"
+ - name: Find the host(s) where the osd(s) is/are running on
+ ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd find {{ item }}"
with_items: "{{ osd_to_replace.split(',') }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
register: find_osd_hosts
- - name: set_fact osd_hosts
- set_fact:
+ - name: Set_fact osd_hosts
+ ansible.builtin.set_fact:
osd_hosts: "{{ osd_hosts | default([]) + [ (item.stdout | from_json).crush_location.host ] }}"
with_items: "{{ find_osd_hosts.results }}"
- - name: check if ceph admin key exists on the osd nodes
- stat:
+ - name: Check if ceph admin key exists on the osd nodes
+ ansible.builtin.stat:
path: "/etc/ceph/{{ cluster }}.client.admin.keyring"
register: ceph_admin_key
with_items: "{{ osd_hosts }}"
failed_when: false
when: not containerized_deployment | bool
- - name: fail when admin key is not present
- fail:
+ - name: Fail when admin key is not present
+ ansible.builtin.fail:
msg: "The Ceph admin key is not present on the OSD node, please add it and remove it after the playbook is done."
with_items: "{{ ceph_admin_key.results }}"
when:
- item.stat.exists == false
# NOTE(leseb): using '>' is the only way I could have the command working
- - name: find osd device based on the id
- shell: >
+ - name: Find osd device based on the id
+ ansible.builtin.shell: >
docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk
{{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
list | awk -v pattern=osd.{{ item.1 }} '$0 ~ pattern {print $1}'
delegate_to: "{{ item.0 }}"
when: containerized_deployment | bool
- - name: zapping osd(s) - container
- shell: >
+ - name: Zapping osd(s) - container
+ ansible.builtin.shell: >
docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk
{{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
zap {{ item.1 }}
delegate_to: "{{ item.0 }}"
when: containerized_deployment | bool
- - name: zapping osd(s) - non container
- command: ceph-disk zap --cluster {{ cluster }} {{ item.1 }}
+ - name: Zapping osd(s) - non container
+ ansible.builtin.command: ceph-disk zap --cluster {{ cluster }} {{ item.1 }}
run_once: true
with_together:
- "{{ osd_hosts }}"
delegate_to: "{{ item.0 }}"
when: not containerized_deployment | bool
- - name: destroying osd(s)
- command: ceph-disk destroy --cluster {{ cluster }} --destroy-by-id {{ item.1 }} --zap
+ - name: Destroying osd(s)
+ ansible.builtin.command: ceph-disk destroy --cluster {{ cluster }} --destroy-by-id {{ item.1 }} --zap
run_once: true
with_together:
- "{{ osd_hosts }}"
delegate_to: "{{ item.0 }}"
when: not containerized_deployment | bool
- - name: replace osd(s) - prepare - non container
- command: ceph-disk prepare {{ item.1 }} --osd-id {{ item.2 }} --osd-uuid $(uuidgen)
+ - name: Replace osd(s) - prepare - non container
+ ansible.builtin.command: ceph-disk prepare {{ item.1 }} --osd-id {{ item.2 }} --osd-uuid $(uuidgen)
run_once: true
delegate_to: "{{ item.0 }}"
with_together:
- "{{ osd_to_replace_disks.results }}"
- "{{ osd_to_replace.split(',') }}"
- - name: replace osd(s) - prepare - container
- shell: >
+ - name: Replace osd(s) - prepare - container
+ ansible.builtin.shell: >
docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk
{{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
prepare {{ item.1 }}
- "{{ osd_hosts }}"
- "{{ osd_to_replace_disks.results }}"
- - name: replace osd(s) - activate - non container
- command: ceph-disk activate {{ item.1 }}1
+ - name: Replace osd(s) - activate - non container
+ ansible.builtin.command: ceph-disk activate {{ item.1 }}1
run_once: true
delegate_to: "{{ item.0 }}"
with_together:
- "{{ osd_hosts }}"
- "{{ osd_to_replace_disks.results }}"
- - name: replace osd(s) - activate - container
- shell: >
+ - name: Replace osd(s) - activate - container
+ ansible.builtin.shell: >
docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk
{{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
activate {{ item.1 }}1
- "{{ osd_hosts }}"
- "{{ osd_to_replace_disks.results }}"
- - name: show ceph health
- command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
+ - name: Show ceph health
+ ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
delegate_to: "{{ groups[mon_group_name][0] }}"
- - name: show ceph osd tree
- command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd tree"
+ - name: Show ceph osd tree
+ ansible.builtin.command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd tree"
delegate_to: "{{ groups[mon_group_name][0] }}"
# - { name: client.test, key: "AQAin8tUMICVFBAALRHNrV0Z4MXupRw4v9JQ6Q==" ...
keys:
- - { name: client.test, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test" }, mode: "{{ ceph_keyring_permissions }}" }
- - { name: client.test2, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test2" }, mode: "{{ ceph_keyring_permissions }}" }
+ - { name: client.test, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test" }, mode: "{{ ceph_keyring_permissions }}" }
+ - { name: client.test2, caps: { mon: "profile rbd", osd: "allow class-read object_prefix rbd_children, profile rbd pool=test2" }, mode: "{{ ceph_keyring_permissions }}" }
author: Sébastien Han
description: Installs A Ceph Client
license: Apache
- min_ansible_version: 2.7
+ min_ansible_version: '2.7'
platforms:
- name: EL
versions:
- - 7
+ - 'all'
galaxy_tags:
- system
dependencies: []
---
-- name: set_fact delegated_node
- set_fact:
+- name: Set_fact delegated_node
+ ansible.builtin.set_fact:
delegated_node: "{{ groups[mon_group_name][0] if groups.get(mon_group_name, []) | length > 0 else inventory_hostname }}"
-- name: set_fact admin_key_presence
- set_fact:
+- name: Set_fact admin_key_presence
+ ansible.builtin.set_fact:
admin_key_presence: "{{ True if groups.get(mon_group_name, []) | length > 0 else copy_admin_key }}"
-- name: create cephx key(s)
+- name: Create cephx key(s)
ceph_key:
name: "{{ item.name }}"
caps: "{{ item.caps }}"
- inventory_hostname == groups.get('_filtered_clients') | first
no_log: "{{ no_log_on_ceph_key_tasks }}"
-- name: slurp client cephx key(s)
- slurp:
+- name: Slurp client cephx key(s)
+ ansible.builtin.slurp:
src: "{{ ceph_conf_key_directory }}/{{ cluster }}.{{ item.name }}.keyring"
with_items: "{{ keys }}"
register: slurp_client_keys
- inventory_hostname == groups.get('_filtered_clients') | first
no_log: "{{ no_log_on_ceph_key_tasks }}"
-- name: pool related tasks
+- name: Pool related tasks
when:
- admin_key_presence | bool
- inventory_hostname == groups.get('_filtered_clients', []) | first
block:
- - import_role:
+ - name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: get_def_crush_rule_name.yml
- - name: create ceph pool(s)
+ - name: Create ceph pool(s)
ceph_pool:
name: "{{ item.name }}"
cluster: "{{ cluster }}"
changed_when: false
delegate_to: "{{ delegated_node }}"
-- name: get client cephx keys
- copy:
+- name: Get client cephx keys
+ ansible.builtin.copy:
dest: "{{ item.source }}"
content: "{{ item.content | b64decode }}"
mode: "{{ item.item.get('mode', '0600') }}"
with_items: "{{ hostvars[groups['_filtered_clients'][0]]['slurp_client_keys']['results'] }}"
when: not item.get('skipped', False)
no_log: "{{ no_log_on_ceph_key_tasks }}"
-
---
-- name: include pre_requisite.yml
- include_tasks: pre_requisite.yml
+- name: Include pre_requisite.yml
+ ansible.builtin.include_tasks: pre_requisite.yml
when: groups.get(mon_group_name, []) | length > 0
-- name: include create_users_keys.yml
- include_tasks: create_users_keys.yml
+- name: Include create_users_keys.yml
+ ansible.builtin.include_tasks: create_users_keys.yml
when:
- user_config | bool
- not rolling_update | default(False) | bool
---
-- name: copy ceph admin keyring
+- name: Copy ceph admin keyring
+ when:
+ - cephx | bool
+ - copy_admin_key | bool
block:
- - name: get keys from monitors
+ - name: Get keys from monitors
ceph_key:
name: client.admin
cluster: "{{ cluster }}"
run_once: true
no_log: "{{ no_log_on_ceph_key_tasks }}"
- - name: copy ceph key(s) if needed
- copy:
+ - name: Copy ceph key(s) if needed
+ ansible.builtin.copy:
dest: "/etc/ceph/{{ cluster }}.client.admin.keyring"
content: "{{ _admin_key.stdout + '\n' }}"
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
mode: "{{ ceph_keyring_permissions }}"
no_log: "{{ no_log_on_ceph_key_tasks }}"
- when:
- - cephx | bool
- - copy_admin_key | bool
author: Sébastien Han
description: Installs Ceph
license: Apache
- min_ansible_version: 2.7
+ min_ansible_version: '2.7'
platforms:
- name: EL
versions:
- - 7
+ - 'all'
galaxy_tags:
- system
dependencies: []
---
-- name: configure cluster name
- lineinfile:
+- name: Configure cluster name
+ ansible.builtin.lineinfile:
dest: /etc/sysconfig/ceph
insertafter: EOF
- create: yes
+ create: true
line: "CLUSTER={{ cluster }}"
regexp: "^CLUSTER="
+ mode: "0644"
when: ansible_facts['os_family'] in ["RedHat", "Suse"]
# NOTE(leseb): we are performing the following check
# - Jewel from latest Canonical 16.04 distro
# - All previous versions from Canonical
# - Infernalis from ceph.com
-- name: debian based systems - configure cluster name
+- name: Debian based systems - configure cluster name
when: ansible_facts['os_family'] == "Debian"
block:
- - name: check /etc/default/ceph exist
- stat:
+ - name: Check /etc/default/ceph exists
+ ansible.builtin.stat:
path: /etc/default/ceph
register: etc_default_ceph
- check_mode: no
+ check_mode: false
- - name: configure cluster name
+ - name: Configure cluster name
when: etc_default_ceph.stat.exists
block:
- - name: when /etc/default/ceph is not dir
- lineinfile:
+ - name: When /etc/default/ceph is not a directory
+ ansible.builtin.lineinfile:
dest: /etc/default/ceph
insertafter: EOF
- create: yes
+ create: true
regexp: "^CLUSTER="
line: "CLUSTER={{ cluster }}"
+ mode: "0644"
when: not etc_default_ceph.stat.isdir
- - name: when /etc/default/ceph is dir
- lineinfile:
+ - name: When /etc/default/ceph is a directory
+ ansible.builtin.lineinfile:
dest: /etc/default/ceph/ceph
insertafter: EOF
- create: yes
+ create: true
regexp: "^CLUSTER="
line: "CLUSTER={{ cluster }}"
+ mode: "0644"
when: etc_default_ceph.stat.isdir
---
-- name: configure TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES for debian
- lineinfile:
+- name: Configure TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES for debian
+ ansible.builtin.lineinfile:
dest: "{{ etc_default_ceph.stat.isdir | ternary('/etc/default/ceph/ceph', '/etc/default/ceph') }}"
insertafter: EOF
- create: yes
+ create: true
regexp: "^TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES="
line: "TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }}"
+ mode: "0644"
when:
- ansible_facts['os_family'] == 'Debian'
- etc_default_ceph.stat.exists
notify:
- - restart ceph mons
- - restart ceph mgrs
- - restart ceph osds
- - restart ceph mdss
- - restart ceph rgws
- - restart ceph rbdmirrors
+ - Restart ceph mons
+ - Restart ceph mgrs
+ - Restart ceph osds
+ - Restart ceph mdss
+ - Restart ceph rgws
+ - Restart ceph rbdmirrors
-- name: configure TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES for redhat
- lineinfile:
+- name: Configure TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES for redhat
+ ansible.builtin.lineinfile:
dest: "/etc/sysconfig/ceph"
insertafter: EOF
- create: yes
+ create: true
regexp: "^TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES="
line: "TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }}"
+ mode: "0644"
when: ansible_facts['os_family'] == 'RedHat'
notify:
- - restart ceph mons
- - restart ceph mgrs
- - restart ceph osds
- - restart ceph mdss
- - restart ceph rgws
- - restart ceph rbdmirrors
+ - Restart ceph mons
+ - Restart ceph mgrs
+ - Restart ceph osds
+ - Restart ceph mdss
+ - Restart ceph rgws
+ - Restart ceph rbdmirrors
---
-- name: config repository for Red Hat based OS
+- name: Config repository for Red Hat based OS
when: ansible_facts['os_family'] == 'RedHat'
block:
- - name: include installs/configure_redhat_repository_installation.yml
- include_tasks: installs/configure_redhat_repository_installation.yml
+ - name: Include installs/configure_redhat_repository_installation.yml
+ ansible.builtin.include_tasks: installs/configure_redhat_repository_installation.yml
when: ceph_origin == 'repository'
- - name: include installs/configure_redhat_local_installation.yml
- include_tasks: installs/configure_redhat_local_installation.yml
+ - name: Include installs/configure_redhat_local_installation.yml
+ ansible.builtin.include_tasks: installs/configure_redhat_local_installation.yml
when: ceph_origin == 'local'
-- name: config repository for Debian based OS
+- name: Config repository for Debian based OS
when: ansible_facts['os_family'] == 'Debian'
+ tags: package-install
block:
- - name: include installs/configure_debian_repository_installation.yml
- include_tasks: installs/configure_debian_repository_installation.yml
+ - name: Include installs/configure_debian_repository_installation.yml
+ ansible.builtin.include_tasks: installs/configure_debian_repository_installation.yml
when: ceph_origin == 'repository'
- - name: update apt cache if cache_valid_time has expired
- apt:
- update_cache: yes
+ - name: Update apt cache if cache_valid_time has expired
+ ansible.builtin.apt:
+ update_cache: true
cache_valid_time: 3600
register: result
until: result is succeeded
- tags: package-install
-- name: include installs/configure_suse_repository_installation.yml
- include_tasks: installs/configure_suse_repository_installation.yml
+- name: Include installs/configure_suse_repository_installation.yml
+ ansible.builtin.include_tasks: installs/configure_suse_repository_installation.yml
when:
- ansible_facts['os_family'] == 'Suse'
- ceph_origin == 'repository'
---
-- name: create rbd client directory
- file:
+- name: Create rbd client directory
+ ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: "{{ rbd_client_directory_owner }}"
---
-- name: include debian_community_repository.yml
- include_tasks: debian_community_repository.yml
+- name: Include debian_community_repository.yml
+ ansible.builtin.include_tasks: debian_community_repository.yml
when: ceph_repository == 'community'
-- name: include debian_dev_repository.yml
- include_tasks: debian_dev_repository.yml
+- name: Include debian_dev_repository.yml
+ ansible.builtin.include_tasks: debian_dev_repository.yml
when: ceph_repository == 'dev'
-- name: include debian_custom_repository.yml
- include_tasks: debian_custom_repository.yml
+- name: Include debian_custom_repository.yml
+ ansible.builtin.include_tasks: debian_custom_repository.yml
when: ceph_repository == 'custom'
-- name: include debian_uca_repository.yml
- include_tasks: debian_uca_repository.yml
+- name: Include debian_uca_repository.yml
+ ansible.builtin.include_tasks: debian_uca_repository.yml
when: ceph_repository == 'uca'
---
-- name: make sure /tmp exists
- file:
+- name: Make sure /tmp exists
+ ansible.builtin.file:
path: /tmp
state: directory
+ mode: "0755"
when: use_installer | bool
-- name: use mktemp to create name for rundep
- tempfile:
+- name: Use mktemp to create name for rundep
+ ansible.builtin.tempfile:
path: /tmp
prefix: rundep.
register: rundep_location
when: use_installer | bool
-- name: copy rundep
- copy:
+- name: Copy rundep
+ ansible.builtin.copy:
src: "{{ ansible_dir }}/rundep"
dest: "{{ rundep_location.path }}"
+ mode: preserve
when: use_installer | bool
-- name: install ceph dependencies
- script: "{{ ansible_dir }}/rundep_installer.sh {{ rundep_location.path }}"
+- name: Install ceph dependencies
+ ansible.builtin.script: "{{ ansible_dir }}/rundep_installer.sh {{ rundep_location.path }}"
when: use_installer | bool
-- name: ensure rsync is installed
- package:
+- name: Ensure rsync is installed
+ ansible.builtin.package:
name: rsync
state: present
register: result
until: result is succeeded
-- name: synchronize ceph install
- synchronize:
+- name: Synchronize ceph install
+ ansible.posix.synchronize:
src: "{{ ceph_installation_dir }}/"
dest: "/"
-- name: create user group ceph
- group:
+- name: Create user group ceph
+ ansible.builtin.group:
name: 'ceph'
-- name: create user ceph
- user:
+- name: Create user ceph
+ ansible.builtin.user:
name: 'ceph'
---
-- name: include redhat_community_repository.yml
- include_tasks: redhat_community_repository.yml
+- name: Include redhat_community_repository.yml
+ ansible.builtin.include_tasks: redhat_community_repository.yml
when: ceph_repository == 'community'
-- name: include redhat_rhcs_repository.yml
- include_tasks: redhat_rhcs_repository.yml
+- name: Include redhat_rhcs_repository.yml
+ ansible.builtin.include_tasks: redhat_rhcs_repository.yml
when: ceph_repository == 'rhcs'
-- name: include redhat_dev_repository.yml
- include_tasks: redhat_dev_repository.yml
+- name: Include redhat_dev_repository.yml
+ ansible.builtin.include_tasks: redhat_dev_repository.yml
when: ceph_repository == 'dev'
-- name: include redhat_custom_repository.yml
- include_tasks: redhat_custom_repository.yml
+- name: Include redhat_custom_repository.yml
+ ansible.builtin.include_tasks: redhat_custom_repository.yml
when: ceph_repository == 'custom'
# Remove yum caches so yum doesn't get confused if we are reinstalling a different ceph version
-- name: purge yum cache
- command: yum clean all #noqa: [303]
+- name: Purge yum cache
+ ansible.builtin.command: yum clean all # noqa: [303]
changed_when: false
when: ansible_facts['pkg_mgr'] == 'yum'
---
-- name: include suse_obs_repository.yml
- include_tasks: suse_obs_repository.yml
+- name: Include suse_obs_repository.yml
+ ansible.builtin.include_tasks: suse_obs_repository.yml
when: ceph_repository == 'obs'
---
-- name: install dependencies for apt modules
- package:
+- name: Install dependencies for apt modules
+ ansible.builtin.package:
name: ['apt-transport-https', 'ca-certificates', 'gnupg', 'software-properties-common']
- update_cache: yes
+ update_cache: true
register: result
until: result is succeeded
-- name: configure debian ceph community repository stable key
- apt_key:
- data: "{{ lookup('file', role_path+'/files/cephstable.asc') }}"
+- name: Configure debian ceph community repository stable key
+ ansible.builtin.apt_key:
+ data: "{{ lookup('file', role_path + '/files/cephstable.asc') }}"
state: present
register: result
until: result is succeeded
-- name: configure debian ceph stable community repository
- apt_repository:
+- name: Configure debian ceph stable community repository
+ ansible.builtin.apt_repository:
repo: "deb {{ ceph_stable_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main"
state: present
- update_cache: yes
+ update_cache: true
---
-- name: configure debian custom apt key
- apt_key:
+- name: Configure debian custom apt key
+ ansible.builtin.apt_key:
url: "{{ ceph_custom_key }}"
state: present
register: result
until: result is succeeded
when: ceph_custom_key is defined
-- name: configure debian custom repository
- apt_repository:
+- name: Configure debian custom repository
+ ansible.builtin.apt_repository:
repo: "deb {{ ceph_custom_repo }} {{ ansible_facts['distribution_release'] }} main"
state: present
- update_cache: yes
+ update_cache: true
---
-- name: fetch ceph debian development repository
- uri:
+- name: Fetch ceph debian development repository
+ ansible.builtin.uri:
url: "https://shaman.ceph.com/api/repos/ceph/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_release'] }}/repo?arch={{ ansible_facts['architecture'] }}"
- return_content: yes
+ return_content: true
register: ceph_dev_deb_repo
-- name: configure ceph debian development repository
- apt_repository:
+- name: Configure ceph debian development repository
+ ansible.builtin.apt_repository:
repo: "{{ ceph_dev_deb_repo.content }}"
state: present
- update_cache: yes
+ update_cache: true
---
-- name: add ubuntu cloud archive key package
- package:
+- name: Add ubuntu cloud archive key package
+ ansible.builtin.package:
name: ubuntu-cloud-keyring
register: result
until: result is succeeded
-- name: add ubuntu cloud archive repository
- apt_repository:
+- name: Add ubuntu cloud archive repository
+ ansible.builtin.apt_repository:
repo: "deb {{ ceph_stable_repo_uca }} {{ ceph_stable_release_uca }} main"
state: present
- update_cache: yes
+ update_cache: true
---
-- name: install ceph for debian
- apt:
+- name: Install ceph for debian
+ ansible.builtin.apt:
name: "{{ debian_ceph_pkgs | unique }}"
- update_cache: no
- state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ update_cache: false
+ state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else '' }}{{ ansible_facts['distribution_release'] ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
register: result
until: result is succeeded
---
-- name: install red hat storage ceph packages for debian
- apt:
+- name: Install red hat storage ceph packages for debian
+ ansible.builtin.apt:
pkg: "{{ debian_ceph_pkgs | unique }}"
- state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
register: result
until: result is succeeded
---
-- name: install ceph bundle
- swupd:
+- name: Install ceph bundle
+ community.general.swupd:
name: storage-cluster
state: present
register: result
-- name: install dependencies
- apt:
+- name: Install dependencies
+ ansible.builtin.apt:
name: "{{ debian_package_dependencies }}"
state: present
- update_cache: yes
+ update_cache: true
cache_valid_time: 3600
register: result
until: result is succeeded
-- name: include install_debian_packages.yml
- include_tasks: install_debian_packages.yml
+- name: Include install_debian_packages.yml
+ ansible.builtin.include_tasks: install_debian_packages.yml
when:
- (ceph_origin == 'repository' or ceph_origin == 'distro')
- ceph_repository != 'rhcs'
-- name: include install_debian_rhcs_packages.yml
- include_tasks: install_debian_rhcs_packages.yml
+- name: Include install_debian_rhcs_packages.yml
+ ansible.builtin.include_tasks: install_debian_rhcs_packages.yml
when:
- (ceph_origin == 'repository' or ceph_origin == 'distro')
- ceph_repository == 'rhcs'
---
-- name: install redhat dependencies
- package:
+- name: Install redhat dependencies
+ ansible.builtin.package:
name: "{{ redhat_package_dependencies }}"
state: present
register: result
until: result is succeeded
when: ansible_facts['distribution'] == 'RedHat'
-- name: install centos dependencies
- yum:
+- name: Install centos dependencies
+ ansible.builtin.yum:
name: "{{ centos_package_dependencies }}"
state: present
register: result
until: result is succeeded
when: ansible_facts['distribution'] == 'CentOS'
-- name: install redhat ceph packages
- package:
+- name: Install redhat ceph packages
+ ansible.builtin.package:
name: "{{ redhat_ceph_pkgs | unique }}"
- state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
register: result
until: result is succeeded
---
-- name: install SUSE/openSUSE dependencies
- package:
+- name: Install SUSE/openSUSE dependencies
+ ansible.builtin.package:
name: "{{ suse_package_dependencies }}"
state: present
register: result
until: result is succeeded
-- name: install SUSE/openSUSE ceph packages
- package:
+- name: Install SUSE/openSUSE ceph packages
+ ansible.builtin.package:
name: "{{ suse_ceph_pkgs | unique }}"
- state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
register: result
until: result is succeeded
---
-- name: enable red hat storage tools repository
- rhsm_repository:
+- name: Enable red hat storage tools repository
+ community.general.rhsm_repository:
name: "rhceph-{{ ceph_rhcs_version }}-tools-for-rhel-8-{{ ansible_facts['architecture'] }}-rpms"
when:
- mon_group_name in group_names
---
-- name: install yum plugin priorities
- package:
+- name: Install yum plugin priorities
+ ansible.builtin.package:
name: yum-plugin-priorities
register: result
until: result is succeeded
tags: with_pkg
when: ansible_facts['distribution_major_version'] | int == 7
-- name: configure red hat ceph community repository stable key
- rpm_key:
+- name: Configure red hat ceph community repository stable key
+ ansible.builtin.rpm_key:
key: "{{ ceph_stable_key }}"
state: present
register: result
until: result is succeeded
-- name: configure red hat ceph stable community repository
- yum_repository:
+- name: Configure red hat ceph stable community repository
+ ansible.builtin.yum_repository:
name: ceph_stable
description: Ceph Stable $basearch repo
- gpgcheck: yes
+ gpgcheck: true
state: present
gpgkey: "{{ ceph_stable_key }}"
baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_facts['distribution_major_version'] }}/$basearch"
register: result
until: result is succeeded
-- name: configure red hat ceph stable noarch community repository
- yum_repository:
+- name: Configure red hat ceph stable noarch community repository
+ ansible.builtin.yum_repository:
name: ceph_stable_noarch
description: Ceph Stable noarch repo
- gpgcheck: yes
+ gpgcheck: true
state: present
gpgkey: "{{ ceph_stable_key }}"
baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_facts['distribution_major_version'] }}/noarch"
---
-- name: configure red hat custom rpm key
- rpm_key:
+- name: Configure red hat custom rpm key
+ ansible.builtin.rpm_key:
key: "{{ ceph_custom_key }}"
state: present
register: result
until: result is succeeded
when: ceph_custom_key is defined
-- name: configure red hat custom repository
- get_url:
+- name: Configure red hat custom repository
+ ansible.builtin.get_url:
url: "{{ ceph_custom_repo }}"
dest: /etc/yum.repos.d
owner: root
group: root
+ mode: "0644"
---
-- name: fetch ceph red hat development repository
- uri:
+- name: Fetch ceph red hat development repository
+ ansible.builtin.uri:
# Use the centos repo since we don't currently have a dedicated red hat repo
url: "https://shaman.ceph.com/api/repos/ceph/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/centos/{{ ansible_facts['distribution_major_version'] }}/repo?arch={{ ansible_facts['architecture'] }}"
- return_content: yes
+ return_content: true
register: ceph_dev_yum_repo
-- name: configure ceph red hat development repository
- copy:
+- name: Configure ceph red hat development repository
+ ansible.builtin.copy:
content: "{{ ceph_dev_yum_repo.content }}"
dest: /etc/yum.repos.d/ceph-dev.repo
owner: root
group: root
- backup: yes
+ mode: "0644"
+ backup: true
-- name: remove ceph_stable repositories
- yum_repository:
+- name: Remove ceph_stable repositories
+ ansible.builtin.yum_repository:
name: '{{ item }}'
file: ceph_stable
state: absent
---
-- name: include prerequisite_rhcs_cdn_install.yml
- include_tasks: prerequisite_rhcs_cdn_install.yml
+- name: Include prerequisite_rhcs_cdn_install.yml
+ ansible.builtin.include_tasks: prerequisite_rhcs_cdn_install.yml
---
-- name: configure openSUSE ceph OBS repository
- zypper_repository:
+- name: Configure openSUSE ceph OBS repository
+ community.general.zypper_repository:
name: "OBS:filesystems:ceph:{{ ceph_release }}"
state: present
repo: "{{ ceph_obs_repo }}"
- auto_import_keys: yes
- autorefresh: yes
+ auto_import_keys: true
+ autorefresh: true
---
-- name: include configure_repository.yml
- include_tasks: configure_repository.yml
+- name: Include configure_repository.yml
+ ansible.builtin.include_tasks: configure_repository.yml
tags: package-configure
-- name: include installs/install_redhat_packages.yml
- include_tasks: installs/install_redhat_packages.yml
+- name: Include installs/install_redhat_packages.yml
+ ansible.builtin.include_tasks: installs/install_redhat_packages.yml
when:
- ansible_facts['os_family'] == 'RedHat'
- (ceph_origin == 'repository' or ceph_origin == 'distro')
tags: package-install
-- name: include installs/install_suse_packages.yml
- include_tasks: installs/install_suse_packages.yml
+- name: Include installs/install_suse_packages.yml
+ ansible.builtin.include_tasks: installs/install_suse_packages.yml
when: ansible_facts['os_family'] == 'Suse'
tags: package-install
-- name: include installs/install_on_debian.yml
- include_tasks: installs/install_on_debian.yml
+- name: Include installs/install_on_debian.yml
+ ansible.builtin.include_tasks: installs/install_on_debian.yml
tags: package-install
when: ansible_facts['os_family'] == 'Debian'
-- name: include_tasks installs/install_on_clear.yml
- include_tasks: installs/install_on_clear.yml
+- name: Include_tasks installs/install_on_clear.yml
+ ansible.builtin.include_tasks: installs/install_on_clear.yml
when: ansible_facts['os_family'] == 'ClearLinux'
tags: package-install
-- name: get ceph version
- command: ceph --version
+- name: Get ceph version
+ ansible.builtin.command: ceph --version
changed_when: false
- check_mode: no
+ check_mode: false
register: ceph_version
-- name: set_fact ceph_version
- set_fact:
+- name: Set_fact ceph_version
+ ansible.builtin.set_fact:
ceph_version: "{{ ceph_version.stdout.split(' ')[2] }}"
# override ceph_stable_release for ceph_dev and rhcs installations since ceph_stable_release is not mandatory
-- name: include release-rhcs.yml
- include_tasks: release-rhcs.yml
+- name: Include release-rhcs.yml
+ ansible.builtin.include_tasks: release-rhcs.yml
when: ceph_repository in ['rhcs', 'dev']
or
ceph_origin == 'distro'
tags: always
-- name: set_fact ceph_release - override ceph_release with ceph_stable_release
- set_fact:
+- name: Set_fact ceph_release - override ceph_release with ceph_stable_release
+ ansible.builtin.set_fact:
ceph_release: "{{ ceph_stable_release }}"
when:
- ceph_origin == 'repository'
- ceph_repository not in ['dev', 'rhcs', 'custom']
tags: always
-- name: include create_rbd_client_dir.yml
- include_tasks: create_rbd_client_dir.yml
+- name: Include create_rbd_client_dir.yml
+ ansible.builtin.include_tasks: create_rbd_client_dir.yml
-- name: include configure_cluster_name.yml
- include_tasks: configure_cluster_name.yml
+- name: Include configure_cluster_name.yml
+ ansible.builtin.include_tasks: configure_cluster_name.yml
-- name: include configure_memory_allocator.yml
- include_tasks: configure_memory_allocator.yml
+- name: Include configure_memory_allocator.yml
+ ansible.builtin.include_tasks: configure_memory_allocator.yml
when:
- (ceph_tcmalloc_max_total_thread_cache | int) > 0
- (ceph_origin == 'repository' or ceph_origin == 'distro')
-- name: include selinux.yml
- include_tasks: selinux.yml
+- name: Include selinux.yml
+ ansible.builtin.include_tasks: selinux.yml
when:
- ansible_facts['os_family'] == 'RedHat'
- inventory_hostname in groups.get(nfs_group_name, [])
- or inventory_hostname in groups.get(rgwloadbalancer_group_name, [])
\ No newline at end of file
+ or inventory_hostname in groups.get(rgwloadbalancer_group_name, [])
---
-- name: set_fact ceph_release jewel
- set_fact:
+- name: Set_fact ceph_release jewel
+ ansible.builtin.set_fact:
ceph_release: jewel
when: ceph_version.split('.')[0] is version('10', '==')
-- name: set_fact ceph_release kraken
- set_fact:
+- name: Set_fact ceph_release kraken
+ ansible.builtin.set_fact:
ceph_release: kraken
when: ceph_version.split('.')[0] is version('11', '==')
-- name: set_fact ceph_release luminous
- set_fact:
+- name: Set_fact ceph_release luminous
+ ansible.builtin.set_fact:
ceph_release: luminous
when: ceph_version.split('.')[0] is version('12', '==')
-- name: set_fact ceph_release mimic
- set_fact:
+- name: Set_fact ceph_release mimic
+ ansible.builtin.set_fact:
ceph_release: mimic
when: ceph_version.split('.')[0] is version('13', '==')
-- name: set_fact ceph_release nautilus
- set_fact:
+- name: Set_fact ceph_release nautilus
+ ansible.builtin.set_fact:
ceph_release: nautilus
when: ceph_version.split('.')[0] is version('14', '==')
-- name: set_fact ceph_release octopus
- set_fact:
+- name: Set_fact ceph_release octopus
+ ansible.builtin.set_fact:
ceph_release: octopus
when: ceph_version.split('.')[0] is version('15', '==')
-- name: set_fact ceph_release pacific
- set_fact:
+- name: Set_fact ceph_release pacific
+ ansible.builtin.set_fact:
ceph_release: pacific
when: ceph_version.split('.')[0] is version('16', '==')
-- name: set_fact ceph_release quincy
- set_fact:
+- name: Set_fact ceph_release quincy
+ ansible.builtin.set_fact:
ceph_release: quincy
when: ceph_version.split('.')[0] is version('17', '==')
-- name: set_fact ceph_release reef
- set_fact:
+- name: Set_fact ceph_release reef
+ ansible.builtin.set_fact:
ceph_release: reef
when: ceph_version.split('.')[0] is version('18', '==')
---
-- name: if selinux is not disabled
+- name: If selinux is not disabled
when: ansible_facts['selinux']['status'] == 'enabled'
block:
- - name: install policycoreutils-python
- package:
+ - name: Install policycoreutils-python
+ ansible.builtin.package:
name: policycoreutils-python
state: present
register: result
until: result is succeeded
when: ansible_facts['distribution_major_version'] == '7'
- - name: install python3-policycoreutils on RHEL 8
- package:
+ - name: Install python3-policycoreutils on RHEL 8
+ ansible.builtin.package:
name: python3-policycoreutils
state: present
register: result
author: Guillaume Abrioux
description: Handles ceph-ansible initial configuration
license: Apache
- min_ansible_version: 2.7
+ min_ansible_version: '2.7'
platforms:
- name: EL
versions:
- - 7
+ - 'all'
galaxy_tags:
- system
dependencies: []
---
-- name: create ceph initial directories
- file:
+- name: Create ceph initial directories
+ ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: "{{ ceph_uid }}"
group: "{{ ceph_uid }}"
- mode: 0755
+ mode: "0755"
loop:
- /etc/ceph
- /var/lib/ceph/
---
-- name: include create_ceph_initial_dirs.yml
- include_tasks: create_ceph_initial_dirs.yml
+- name: Include create_ceph_initial_dirs.yml
+ ansible.builtin.include_tasks: create_ceph_initial_dirs.yml
when: containerized_deployment | bool
-- name: include_tasks rgw_systemd_environment_file.yml
- include_tasks: rgw_systemd_environment_file.yml
+- name: Include_tasks rgw_systemd_environment_file.yml
+ ansible.builtin.include_tasks: rgw_systemd_environment_file.yml
when: inventory_hostname in groups.get(rgw_group_name, [])
-- name: config file operations related to OSDs
+- name: Config file operations related to OSDs
when:
- inventory_hostname in groups.get(osd_group_name, [])
# the rolling_update.yml playbook sets num_osds to the number of currently
# running osds
- not rolling_update | bool
block:
- - name: reset num_osds
- set_fact:
+ - name: Reset num_osds
+ ansible.builtin.set_fact:
num_osds: 0
- - name: count number of osds for lvm scenario
- set_fact:
+ - name: Count number of osds for lvm scenario
+ ansible.builtin.set_fact:
num_osds: "{{ num_osds | int + (lvm_volumes | length | int) }}"
when: lvm_volumes | default([]) | length > 0
- - block:
- - name: look up for ceph-volume rejected devices
+ - name: Ceph-volume pre-requisites tasks
+ when:
+ - devices | default([]) | length > 0
+ block:
+ - name: Look up for ceph-volume rejected devices
ceph_volume:
cluster: "{{ cluster }}"
action: "inventory"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
PYTHONIOENCODING: utf-8
- - name: set_fact rejected_devices
- set_fact:
+ - name: Set_fact rejected_devices
+ ansible.builtin.set_fact:
_rejected_devices: "{{ _rejected_devices | default([]) + [item.path] }}"
with_items: "{{ rejected_devices.stdout | default('{}') | from_json }}"
when: "'Used by ceph-disk' in item.rejected_reasons"
- - name: set_fact _devices
- set_fact:
+ - name: Set_fact _devices
+ ansible.builtin.set_fact:
_devices: "{{ devices | difference(_rejected_devices | default([])) }}"
- - name: run 'ceph-volume lvm batch --report' to see how many osds are to be created
+ - name: Run 'ceph-volume lvm batch --report' to see how many osds are to be created
ceph_volume:
cluster: "{{ cluster }}"
objectstore: "{{ osd_objectstore }}"
PYTHONIOENCODING: utf-8
when: _devices | default([]) | length > 0
- - name: set_fact num_osds from the output of 'ceph-volume lvm batch --report' (legacy report)
- set_fact:
+ - name: Set_fact num_osds from the output of 'ceph-volume lvm batch --report' (legacy report)
+ ansible.builtin.set_fact:
num_osds: "{{ num_osds | int + ((lvm_batch_report.stdout | default('{}') | from_json).osds | default([]) | length | int) + (_rejected_devices | default([]) | length | int) }}"
when:
- (lvm_batch_report.stdout | default('{}') | from_json) is mapping
- (lvm_batch_report.stdout | default('{}') | from_json).changed | default(true) | bool
- - name: set_fact num_osds from the output of 'ceph-volume lvm batch --report' (new report)
- set_fact:
+ - name: Set_fact num_osds from the output of 'ceph-volume lvm batch --report' (new report)
+ ansible.builtin.set_fact:
num_osds: "{{ num_osds | int + ((lvm_batch_report.stdout | default('{}') | from_json) | default([]) | length | int) + (_rejected_devices | default([]) | length | int) }}"
when:
- (lvm_batch_report.stdout | default('{}') | from_json) is not mapping
- (lvm_batch_report.stdout | default('{}') | from_json).changed | default(true) | bool
- when:
- - devices | default([]) | length > 0
- - name: run 'ceph-volume lvm list' to see how many osds have already been created
+ - name: Run 'ceph-volume lvm list' to see how many osds have already been created
ceph_volume:
action: "list"
register: lvm_list
PYTHONIOENCODING: utf-8
changed_when: false
- - name: set_fact num_osds (add existing osds)
- set_fact:
+ - name: Set_fact num_osds (add existing osds)
+ ansible.builtin.set_fact:
num_osds: "{{ num_osds | int + (lvm_list.stdout | default('{}') | from_json | dict2items | map(attribute='value') | flatten | map(attribute='devices') | sum(start=[]) | difference(lvm_volumes | default([]) | map(attribute='data')) | length | int) }}"
-- name: set osd related config facts
+- name: Set osd related config facts
when: inventory_hostname in groups.get(osd_group_name, [])
block:
- - name: set_fact _osd_memory_target, override from ceph_conf_overrides
- set_fact:
+ - name: Set_fact _osd_memory_target, override from ceph_conf_overrides
+ ansible.builtin.set_fact:
_osd_memory_target: "{{ item }}"
loop:
- "{{ ceph_conf_overrides.get('osd', {}).get('osd memory target', '') }}"
- "{{ ceph_conf_overrides.get('osd', {}).get('osd_memory_target', '') }}"
when: item
- - name: set_fact _osd_memory_target
- set_fact:
+ - name: Set_fact _osd_memory_target
+ ansible.builtin.set_fact:
_osd_memory_target: "{{ ((ansible_facts['memtotal_mb'] * 1048576 * safety_factor | float) / num_osds | float) | int }}"
when:
- _osd_memory_target is undefined
- num_osds | default(0) | int > 0
- ((ansible_facts['memtotal_mb'] * 1048576 * safety_factor | float) / num_osds | float) > (osd_memory_target | float)
-- name: create ceph conf directory
- file:
+- name: Create ceph conf directory
+ ansible.builtin.file:
path: "/etc/ceph"
state: directory
owner: "ceph"
mode: "{{ ceph_directories_mode }}"
when: not containerized_deployment | bool
-- name: import_role ceph-facts
- import_role:
+- name: Import_role ceph-facts
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: set_radosgw_address.yml
when: inventory_hostname in groups.get(rgw_group_name, [])
-- name: "generate {{ cluster }}.conf configuration file"
+- name: Generate Ceph configuration file
openstack.config_template.config_template:
src: "ceph.conf.j2"
dest: "{{ ceph_conf_key_directory }}/{{ cluster }}.conf"
mode: "0644"
config_type: ini
notify:
- - restart ceph mons
- - restart ceph osds
- - restart ceph mdss
- - restart ceph rgws
- - restart ceph mgrs
- - restart ceph rbdmirrors
- - restart ceph rbd-target-api-gw
+ - Restart ceph mons
+ - Restart ceph osds
+ - Restart ceph mdss
+ - Restart ceph rgws
+ - Restart ceph mgrs
+ - Restart ceph rbdmirrors
+ - Restart ceph rbd-target-api-gw
---
-- name: create rados gateway instance directories
- file:
+- name: Create rados gateway instance directories
+ ansible.builtin.file:
path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: directory
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
mode: "{{ ceph_directories_mode | default('0755') }}"
with_items: "{{ rgw_instances }}"
-- name: generate environment file
- copy:
+- name: Generate environment file
+ ansible.builtin.copy:
dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}/EnvironmentFile"
owner: "root"
group: "root"
with_items: "{{ rgw_instances }}"
when:
- containerized_deployment | bool
- - rgw_instances is defined
\ No newline at end of file
+ - rgw_instances is defined
author: Sébastien Han
description: Installs Ceph
license: Apache
- min_ansible_version: 2.7
+ min_ansible_version: '2.7'
platforms:
- name: EL
versions:
- - 7
+ - 'all'
galaxy_tags:
- system
dependencies: []
---
-- name: "pulling {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image"
- command: "{{ timeout_command }} {{ container_binary }} pull {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
+- name: Pulling Ceph container image
+ ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
changed_when: false
register: docker_image
until: docker_image.rc == 0
HTTPS_PROXY: "{{ ceph_docker_https_proxy | default('') }}"
NO_PROXY: "{{ ceph_docker_no_proxy }}"
-- name: "pulling alertmanager/prometheus/grafana container images"
- command: "{{ timeout_command }} {{ container_binary }} pull {{ item }}"
+- name: Pulling alertmanager/prometheus/grafana container images
+ ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull {{ item }}"
changed_when: false
register: monitoring_images
until: monitoring_images.rc == 0
HTTPS_PROXY: "{{ ceph_docker_https_proxy | default('') }}"
NO_PROXY: "{{ ceph_docker_no_proxy }}"
-- name: "pulling node-exporter container image"
- command: "{{ timeout_command }} {{ container_binary }} pull {{ node_exporter_container_image }}"
+- name: Pulling node-exporter container image
+ ansible.builtin.command: "{{ timeout_command }} {{ container_binary }} pull {{ node_exporter_container_image }}"
changed_when: false
register: node_exporter_image
until: node_exporter_image.rc == 0
HTTPS_PROXY: "{{ ceph_docker_https_proxy | default('') }}"
NO_PROXY: "{{ ceph_docker_no_proxy }}"
-- name: export local ceph dev image
- command: >
+- name: Export local ceph dev image
+ ansible.builtin.command: >
{{ container_binary }} save -o "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
"{{ ceph_docker_username }}/{{ ceph_docker_imagename }}:{{ ceph_docker_image_tag }}"
delegate_to: localhost
+ changed_when: false
when: (ceph_docker_dev_image is defined and ceph_docker_dev_image)
run_once: true
-- name: copy ceph dev image file
- copy:
+- name: Copy ceph dev image file
+ ansible.builtin.copy:
src: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
dest: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
+ mode: "0644"
when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool)
-- name: load ceph dev image
- command: "{{ container_binary }} load -i /tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
+- name: Load ceph dev image
+ ansible.builtin.command: "{{ container_binary }} load -i /tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
+ changed_when: false
when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool)
-- name: remove tmp ceph dev image file
- file:
+- name: Remove tmp ceph dev image file
+ ansible.builtin.file:
name: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
state: absent
when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool)
-
---
-- name: generate systemd ceph-mon target file
- copy:
+- name: Generate systemd ceph-mon target file
+ ansible.builtin.copy:
src: ceph.target
dest: /etc/systemd/system/ceph.target
+ mode: "0644"
-- name: enable ceph.target
- service:
+- name: Enable ceph.target
+ ansible.builtin.service:
name: ceph.target
- enabled: yes
- daemon_reload: yes
+ enabled: true
+ daemon_reload: true
-- name: include prerequisites.yml
- include_tasks: prerequisites.yml
+- name: Include prerequisites.yml
+ ansible.builtin.include_tasks: prerequisites.yml
-- name: include registry.yml
- include_tasks: registry.yml
+- name: Include registry.yml
+ ansible.builtin.include_tasks: registry.yml
when: ceph_docker_registry_auth | bool
-- name: include fetch_image.yml
- include_tasks: fetch_image.yml
+- name: Include fetch_image.yml
+ ansible.builtin.include_tasks: fetch_image.yml
tags: fetch_container_image
-- name: get ceph version
- command: >
+- name: Get ceph version
+ ansible.builtin.command: >
{{ container_binary }} run --rm --net=host --entrypoint /usr/bin/ceph
{{ ceph_client_docker_registry }}/{{ ceph_client_docker_image }}:{{ ceph_client_docker_image_tag }}
--version
changed_when: false
- check_mode: no
+ check_mode: false
register: ceph_version
-- name: set_fact ceph_version ceph_version.stdout.split
- set_fact:
+- name: Set_fact ceph_version ceph_version.stdout.split
+ ansible.builtin.set_fact:
ceph_version: "{{ ceph_version.stdout.split(' ')[2] }}"
-- name: include release.yml
- include_tasks: release.yml
+- name: Include release.yml
+ ansible.builtin.include_tasks: release.yml
---
-- name: lvmetad tasks related
+- name: Lvmetad related tasks
when:
- inventory_hostname in groups.get(osd_group_name, [])
- lvmetad_disabled | default(False) | bool
- ansible_facts['os_family'] == 'RedHat'
- ansible_facts['distribution_major_version'] | int == 7
block:
- - name: stop lvmetad
- service:
+ - name: Stop lvmetad
+ ansible.builtin.service:
name: lvm2-lvmetad
state: stopped
- - name: disable and mask lvmetad service
- service:
+ - name: Disable and mask lvmetad service
+ ansible.builtin.systemd:
name: lvm2-lvmetad
- enabled: no
- masked: yes
+ enabled: false
+ masked: true
-- name: remove ceph udev rules
- file:
+- name: Remove ceph udev rules
+ ansible.builtin.file:
path: "{{ item }}"
state: absent
with_items:
- /usr/lib/udev/rules.d/95-ceph-osd.rules
- /usr/lib/udev/rules.d/60-ceph-by-parttypeuuid.rules
-- name: ensure tmpfiles.d is present
- lineinfile:
+- name: Ensure tmpfiles.d is present
+ ansible.builtin.lineinfile:
path: /etc/tmpfiles.d/ceph-common.conf
line: "d /run/ceph 0770 root root -"
owner: root
group: root
- mode: 0644
+ mode: "0644"
state: present
- create: yes
+ create: true
-- name: restore certificates selinux context
+- name: Restore certificates selinux context
when:
- ansible_facts['os_family'] == 'RedHat'
- inventory_hostname in groups.get(mon_group_name, [])
or inventory_hostname in groups.get(rgw_group_name, [])
- command: /usr/sbin/restorecon -RF /etc/pki/ca-trust/extracted
+ ansible.builtin.command: /usr/sbin/restorecon -RF /etc/pki/ca-trust/extracted
changed_when: false
-- name: install python3 on osd nodes
- package:
+- name: Install python3 on osd nodes
+ ansible.builtin.package:
name: python3
state: present
when:
- inventory_hostname in groups.get(osd_group_name, [])
- - ansible_facts['os_family'] == 'RedHat'
\ No newline at end of file
+ - ansible_facts['os_family'] == 'RedHat'
---
-- name: container registry authentication
- command: '{{ container_binary }} login -u {{ ceph_docker_registry_username }} --password-stdin {{ ceph_docker_registry }}'
+- name: Container registry authentication
+ ansible.builtin.command: '{{ container_binary }} login -u {{ ceph_docker_registry_username }} --password-stdin {{ ceph_docker_registry }}'
args:
stdin: '{{ ceph_docker_registry_password }}'
- stdin_add_newline: no
+ stdin_add_newline: false
changed_when: false
environment:
HTTP_PROXY: "{{ ceph_docker_http_proxy | default('') }}"
HTTPS_PROXY: "{{ ceph_docker_https_proxy | default('') }}"
- NO_PROXY: "{{ ceph_docker_no_proxy }}"
\ No newline at end of file
+ NO_PROXY: "{{ ceph_docker_no_proxy }}"
---
-- name: set_fact ceph_release jewel
- set_fact:
+- name: Set_fact ceph_release jewel
+ ansible.builtin.set_fact:
ceph_release: jewel
when: ceph_version.split('.')[0] is version('10', '==')
-- name: set_fact ceph_release kraken
- set_fact:
+- name: Set_fact ceph_release kraken
+ ansible.builtin.set_fact:
ceph_release: kraken
when: ceph_version.split('.')[0] is version('11', '==')
-- name: set_fact ceph_release luminous
- set_fact:
+- name: Set_fact ceph_release luminous
+ ansible.builtin.set_fact:
ceph_release: luminous
when: ceph_version.split('.')[0] is version('12', '==')
-- name: set_fact ceph_release mimic
- set_fact:
+- name: Set_fact ceph_release mimic
+ ansible.builtin.set_fact:
ceph_release: mimic
when: ceph_version.split('.')[0] is version('13', '==')
-- name: set_fact ceph_release nautilus
- set_fact:
+- name: Set_fact ceph_release nautilus
+ ansible.builtin.set_fact:
ceph_release: nautilus
when: ceph_version.split('.')[0] is version('14', '==')
-- name: set_fact ceph_release octopus
- set_fact:
+- name: Set_fact ceph_release octopus
+ ansible.builtin.set_fact:
ceph_release: octopus
when: ceph_version.split('.')[0] is version('15', '==')
-- name: set_fact ceph_release pacific
- set_fact:
+- name: Set_fact ceph_release pacific
+ ansible.builtin.set_fact:
ceph_release: pacific
when: ceph_version.split('.')[0] is version('16', '==')
-- name: set_fact ceph_release quincy
- set_fact:
+- name: Set_fact ceph_release quincy
+ ansible.builtin.set_fact:
ceph_release: quincy
when: ceph_version.split('.')[0] is version('17', '==')
-- name: set_fact ceph_release reef
- set_fact:
+- name: Set_fact ceph_release reef
+ ansible.builtin.set_fact:
ceph_release: reef
when: ceph_version.split('.')[0] is version('18', '==')
author: Guillaume Abrioux
description: Handles container installation prerequisites
license: Apache
- min_ansible_version: 2.7
+ min_ansible_version: '2.7'
platforms:
- name: Ubuntu
versions:
- xenial
- name: EL
versions:
- - 7
+ - 'all'
galaxy_tags:
- system
dependencies: []
---
-- name: include pre_requisites/prerequisites.yml
- include_tasks: pre_requisites/prerequisites.yml
+- name: Include pre_requisites/prerequisites.yml
+ ansible.builtin.include_tasks: pre_requisites/prerequisites.yml
when: not is_atomic | bool
---
-- name: uninstall old docker versions
- package:
+- name: Uninstall old docker versions
+ ansible.builtin.package:
name: ['docker', 'docker-engine', 'docker.io', 'containerd', 'runc']
state: absent
when: container_package_name == 'docker-ce'
-- name: allow apt to use a repository over https (debian)
- package:
+- name: Allow apt to use a repository over https (debian)
+ ansible.builtin.package:
name: ['apt-transport-https', 'ca-certificates', 'gnupg', 'software-properties-common']
- update_cache: yes
+ update_cache: true
register: result
until: result is succeeded
-- name: add docker's gpg key
- apt_key:
+- name: Add docker's gpg key
+ ansible.builtin.apt_key:
url: "https://download.docker.com/linux/{{ ansible_facts['distribution'] | lower }}/gpg"
register: result
until: result is succeeded
when: container_package_name == 'docker-ce'
-- name: add docker repository
- apt_repository:
+- name: Add docker repository
+ ansible.builtin.apt_repository:
repo: "deb https://download.docker.com/linux/{{ ansible_facts['distribution'] | lower }} {{ ansible_facts['distribution_release'] }} stable"
when: container_package_name == 'docker-ce'
-- name: add podman ppa repository
- apt_repository:
+- name: Add podman ppa repository
+ ansible.builtin.apt_repository:
repo: "ppa:projectatomic/ppa"
when:
- container_package_name == 'podman'
---
-- name: include specific variables
- include_vars: "{{ item }}"
+- name: Include specific variables
+ ansible.builtin.include_vars: "{{ item }}"
with_first_found:
- "{{ ansible_facts['distribution'] }}-{{ ansible_facts['distribution_major_version'] }}.yml"
- "{{ ansible_facts['os_family'] }}.yml"
when: container_package_name is undefined and container_service_name is undefined
-- name: debian based systems tasks
- include_tasks: debian_prerequisites.yml
+- name: Debian based systems tasks
+ ansible.builtin.include_tasks: debian_prerequisites.yml
when:
- ansible_facts['os_family'] == 'Debian'
tags: with_pkg
-- name: install container packages
- package:
+- name: Install container packages
+ ansible.builtin.package:
name: '{{ container_package_name }}'
update_cache: true
register: result
until: result is succeeded
tags: with_pkg
-- name: install lvm2 package
- package:
+- name: Install lvm2 package
+ ansible.builtin.package:
name: lvm2
register: result
until: result is succeeded
tags: with_pkg
when: inventory_hostname in groups.get(osd_group_name, [])
-- name: extra configuration for docker
+- name: Extra configuration for docker
when: container_service_name == 'docker'
block:
- - name: create the systemd docker override directory
- file:
+ - name: Create the systemd docker override directory
+ ansible.builtin.file:
path: /etc/systemd/system/docker.service.d
state: directory
+ mode: "0755"
when: ceph_docker_http_proxy is defined or ceph_docker_https_proxy is defined
- - name: create the systemd docker override file
- template:
+ - name: Create the systemd docker override file
+ ansible.builtin.template:
src: docker-proxy.conf.j2
dest: /etc/systemd/system/docker.service.d/proxy.conf
- mode: 0600
+ mode: "0600"
owner: root
group: root
register: proxy_created
when: ceph_docker_http_proxy is defined or ceph_docker_https_proxy is defined
- - name: remove docker proxy configuration
- file:
+ - name: Remove docker proxy configuration
+ ansible.builtin.file:
path: /etc/systemd/system/docker.service.d/proxy.conf
state: absent
register: proxy_removed
# have an immediate effect and not wait the end of the play.
# using flush_handlers via the meta action plugin isn't enough too because
# it flushes all handlers and not only the one notified in this role.
- - name: restart docker
- systemd:
+ - name: Restart docker
+ ansible.builtin.systemd:
name: "{{ container_service_name }}"
state: restarted
- daemon_reload: yes
+ daemon_reload: true
when: proxy_created.changed | bool or proxy_removed.changed | bool
- - name: start container service
- service:
+ - name: Start container service
+ ansible.builtin.service:
name: '{{ container_service_name }}'
state: started
- enabled: yes
+ enabled: true
tags:
with_pkg
author: Guillaume Abrioux
description: Deploy ceph-crash
license: Apache
- min_ansible_version: 2.7
+ min_ansible_version: '2.7'
platforms:
- name: EL
versions:
- - 7
- - 8
+ - 'all'
galaxy_tags:
- system
dependencies: []
---
-- name: create and copy client.crash keyring
+- name: Create and copy client.crash keyring
when: cephx | bool
block:
- - name: create client.crash keyring
+ - name: Create client.crash keyring
ceph_key:
name: "client.crash"
caps:
mgr: 'allow profile crash'
cluster: "{{ cluster }}"
dest: "{{ ceph_conf_key_directory }}"
- import_key: True
+ import_key: true
mode: "{{ ceph_keyring_permissions }}"
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
delegate_to: "{{ groups.get(mon_group_name, [])[0] }}"
- run_once: True
+ run_once: true
no_log: "{{ no_log_on_ceph_key_tasks }}"
- - name: get keys from monitors
+ - name: Get keys from monitors
ceph_key:
name: client.crash
cluster: "{{ cluster }}"
run_once: true
no_log: "{{ no_log_on_ceph_key_tasks }}"
- - name: copy ceph key(s) if needed
- copy:
+ - name: Copy ceph key(s) if needed
+ ansible.builtin.copy:
dest: "{{ ceph_conf_key_directory }}/{{ cluster }}.client.crash.keyring"
content: "{{ _crash_keys.stdout + '\n' }}"
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
mode: "{{ ceph_keyring_permissions }}"
no_log: "{{ no_log_on_ceph_key_tasks }}"
-- name: start ceph-crash daemon
+- name: Start ceph-crash daemon
when: containerized_deployment | bool
block:
- - name: create /var/lib/ceph/crash/posted
- file:
+ - name: Create /var/lib/ceph/crash/posted
+ ansible.builtin.file:
path: /var/lib/ceph/crash/posted
state: directory
mode: '0755'
owner: "{{ ceph_uid }}"
group: "{{ ceph_uid }}"
- - name: include_tasks systemd.yml
- include_tasks: systemd.yml
+ - name: Include_tasks systemd.yml
+ ansible.builtin.include_tasks: systemd.yml
-- name: start the ceph-crash service
- systemd:
+- name: Start the ceph-crash service
+ ansible.builtin.systemd:
name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}"
state: started
- enabled: yes
- masked: no
- daemon_reload: yes
+ enabled: true
+ masked: false
+ daemon_reload: true
---
-- name: generate systemd unit file for ceph-crash container
- template:
+- name: Generate systemd unit file for ceph-crash container
+ ansible.builtin.template:
src: "{{ role_path }}/templates/ceph-crash.service.j2"
dest: /etc/systemd/system/ceph-crash@.service
owner: "root"
group: "root"
mode: "0644"
- notify: restart ceph crash
\ No newline at end of file
+ notify: Restart ceph crash
author: Boris Ranto
description: Configures Ceph Dashboard
license: Apache
- min_ansible_version: 2.4
+ min_ansible_version: '2.4'
platforms:
- name: EL
versions:
- - 7
+ - 'all'
galaxy_tags:
- system
dependencies: []
---
-- import_role:
+- name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: container_binary.yml
delegate_to: "{{ groups[mon_group_name][0] }}"
delegate_facts: true
-- name: set_fact container_exec_cmd
- set_fact:
+- name: Set_fact container_exec_cmd
+ ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
when: containerized_deployment | bool
-- name: set_fact container_run_cmd
- set_fact:
+- name: Set_fact container_run_cmd
+ ansible.builtin.set_fact:
ceph_cmd: "{{ hostvars[groups[mon_group_name][0]]['container_binary'] + ' run --interactive --net=host --rm -v /etc/ceph:/etc/ceph:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }}"
-- name: get current mgr backend - ipv4
- set_fact:
+- name: Get current mgr backend - ipv4
+ ansible.builtin.set_fact:
dashboard_server_addr: "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(dashboard_network.split(',')) | first }}"
when: ip_version == 'ipv4'
loop: "{{ groups.get(mgr_group_name) if groups.get(mgr_group_name, []) | length > 0 else groups.get(mon_group_name) }}"
delegate_to: "{{ item }}"
- delegate_facts: True
+ delegate_facts: true
-- name: get current mgr backend - ipv6
- set_fact:
+- name: Get current mgr backend - ipv6
+ ansible.builtin.set_fact:
dashboard_server_addr: "{{ hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(dashboard_network.split(',')) | last }}"
when: ip_version == 'ipv6'
loop: "{{ groups.get(mgr_group_name) if groups.get(mgr_group_name, []) | length > 0 else groups.get(mon_group_name) }}"
delegate_to: "{{ item }}"
- delegate_facts: True
+ delegate_facts: true
-- include_role:
+- name: Include ceph-facts role
+ ansible.builtin.include_role:
name: ceph-facts
tasks_from: set_radosgw_address.yml
loop: "{{ groups.get(rgw_group_name, []) }}"
loop_var: ceph_dashboard_call_item
when: inventory_hostname in groups.get(rgw_group_name, [])
-- name: disable SSL for dashboard
+- name: Disable SSL for dashboard
when: dashboard_protocol == "http"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
block:
- - name: get SSL status for dashboard
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config get mgr mgr/dashboard/ssl"
- changed_when: false
- register: current_ssl_for_dashboard
+ - name: Get SSL status for dashboard
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config get mgr mgr/dashboard/ssl"
+ changed_when: false
+ register: current_ssl_for_dashboard
- - name: disable SSL for dashboard
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl false"
- when: current_ssl_for_dashboard.stdout == "true"
+ - name: Disable SSL for dashboard
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl false"
+ changed_when: false
+ when: current_ssl_for_dashboard.stdout == "true"
-- name: with SSL for dashboard
+- name: With SSL for dashboard
when: dashboard_protocol == "https"
block:
- - name: enable SSL for dashboard
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl true"
+ - name: Enable SSL for dashboard
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl true"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
+ changed_when: false
- - name: copy dashboard SSL certificate file
- copy:
+ - name: Copy dashboard SSL certificate file
+ ansible.builtin.copy:
src: "{{ dashboard_crt }}"
dest: "/etc/ceph/ceph-dashboard.crt"
owner: root
group: root
- mode: 0440
+ mode: "0440"
remote_src: "{{ dashboard_tls_external | bool }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when: dashboard_crt | length > 0
- - name: copy dashboard SSL certificate key
- copy:
+ - name: Copy dashboard SSL certificate key
+ ansible.builtin.copy:
src: "{{ dashboard_key }}"
dest: "/etc/ceph/ceph-dashboard.key"
owner: root
group: root
- mode: 0440
+ mode: "0440"
remote_src: "{{ dashboard_tls_external | bool }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when: dashboard_key | length > 0
- - name: generate and copy self-signed certificate
+ - name: Generate and copy self-signed certificate
when: dashboard_key | length == 0 or dashboard_crt | length == 0
run_once: true
block:
- - name: set_fact subj_alt_names
- set_fact:
+ - name: Set_fact subj_alt_names
+ ansible.builtin.set_fact:
subj_alt_names: >
- {% for host in groups[mgr_group_name] | default(groups[mon_group_name]) -%}
- DNS:{{ hostvars[host]['ansible_facts']['hostname'] }},DNS:{{ hostvars[host]['ansible_facts']['fqdn'] }},IP:{{ hostvars[host]['dashboard_server_addr'] }}{% if not loop.last %},{% endif %}
- {%- endfor -%}
+ {% for host in groups[mgr_group_name] | default(groups[mon_group_name]) -%} DNS:{{ hostvars[host]['ansible_facts']['hostname'] }},DNS:{{ hostvars[host]['ansible_facts']['fqdn'] }},IP:{{ hostvars[host]['dashboard_server_addr'] }}{% if not loop.last %},{% endif %}{%- endfor -%}
- - name: create tempfile for openssl certificate and key generation
- tempfile:
+ - name: Create tempfile for openssl certificate and key generation
+ ansible.builtin.tempfile:
state: file
register: openssl_config_file
- - name: copy the openssl configuration file
- copy:
+ - name: Copy the openssl configuration file
+ ansible.builtin.copy:
src: "{{ '/etc/pki/tls/openssl.cnf' if ansible_facts['os_family'] == 'RedHat' else '/etc/ssl/openssl.cnf' }}"
dest: '{{ openssl_config_file.path }}'
remote_src: true
+ mode: "0644"
- - name: add subjectAltName to the openssl configuration
- ini_file:
+ - name: Add subjectAltName to the openssl configuration
+ community.general.ini_file:
path: '{{ openssl_config_file.path }}'
section: v3_ca
option: subjectAltName
value: '{{ subj_alt_names | trim }}'
+ mode: "0644"
- - name: generate a Self Signed OpenSSL certificate for dashboard
- shell: |
+ - name: Generate a Self Signed OpenSSL certificate for dashboard
+ ansible.builtin.shell: |
test -f /etc/ceph/ceph-dashboard.key -a -f /etc/ceph/ceph-dashboard.crt || \
openssl req -new -nodes -x509 -subj '/O=IT/CN={{ dashboard_certificate_cn }}/' -config {{ openssl_config_file.path }} -days 3650 -keyout /etc/ceph/ceph-dashboard.key -out /etc/ceph/ceph-dashboard.crt -extensions v3_ca
+ changed_when: false
- - name: remove the openssl tempfile
- file:
+ - name: Remove the openssl tempfile
+ ansible.builtin.file:
path: '{{ openssl_config_file.path }}'
state: absent
- - name: slurp self-signed generated certificate for dashboard
- slurp:
+ - name: Slurp self-signed generated certificate for dashboard
+ ansible.builtin.slurp:
src: "/etc/ceph/{{ item }}"
- run_once: True
+ run_once: true
with_items:
- 'ceph-dashboard.key'
- 'ceph-dashboard.crt'
register: slurp_self_signed_crt
- - name: copy self-signed generated certificate on mons
- copy:
+ - name: Copy self-signed generated certificate on mons
+ ansible.builtin.copy:
dest: "{{ item.0.source }}"
content: "{{ item.0.content | b64decode }}"
owner: "{{ ceph_uid }}"
- "{{ slurp_self_signed_crt.results }}"
- "{{ groups[mon_group_name] }}"
- - name: import dashboard certificate file
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config-key set mgr/dashboard/crt -i /etc/ceph/ceph-dashboard.crt"
+ - name: Import dashboard certificate file
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config-key set mgr/dashboard/crt -i /etc/ceph/ceph-dashboard.crt"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
- - name: import dashboard certificate key
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config-key set mgr/dashboard/key -i /etc/ceph/ceph-dashboard.key"
+ - name: Import dashboard certificate key
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config-key set mgr/dashboard/key -i /etc/ceph/ceph-dashboard.key"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
-- name: "set the dashboard port ({{ dashboard_port }})"
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/server_port {{ dashboard_port }}"
+- name: Set the dashboard port
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/server_port {{ dashboard_port }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
-- name: "set the dashboard SSL port ({{ dashboard_port }})"
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl_server_port {{ dashboard_port }}"
+- name: Set the dashboard SSL port
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/ssl_server_port {{ dashboard_port }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
changed_when: false
failed_when: false # Do not fail if the option does not exist, it only exists post-14.2.0
-- name: config the current dashboard backend
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/{{ hostvars[item]['ansible_facts']['hostname'] }}/server_addr {{ hostvars[item]['dashboard_server_addr'] }}"
+- name: Config the current dashboard backend
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/{{ hostvars[item]['ansible_facts']['hostname'] }}/server_addr {{ hostvars[item]['dashboard_server_addr'] }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
run_once: true
with_items: '{{ groups[mgr_group_name] | default(groups[mon_group_name]) }}'
-- name: disable mgr dashboard module (restart)
+- name: Disable mgr dashboard module (restart)
ceph_mgr_module:
name: dashboard
cluster: "{{ cluster }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
-- name: enable mgr dashboard module (restart)
+- name: Enable mgr dashboard module (restart)
ceph_mgr_module:
name: dashboard
cluster: "{{ cluster }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
-- name: create dashboard admin user
+- name: Create dashboard admin user
ceph_dashboard_user:
name: "{{ dashboard_admin_user }}"
cluster: "{{ cluster }}"
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-- name: disable unused dashboard features
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard feature disable {{ item }}"
+- name: Disable unused dashboard features
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard feature disable {{ item }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
changed_when: false
with_items: "{{ dashboard_disabled_features }}"
-- name: set grafana api user
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-username {{ grafana_admin_user }}"
+- name: Set grafana api user
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-username {{ grafana_admin_user }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
changed_when: false
-- name: set grafana api password
- command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard set-grafana-api-password -i -"
+- name: Set grafana api password
+ ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard set-grafana-api-password -i -"
args:
stdin: "{{ grafana_admin_password }}"
- stdin_add_newline: no
+ stdin_add_newline: false
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
changed_when: false
-- name: disable ssl verification for grafana
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-ssl-verify False"
+- name: Disable ssl verification for grafana
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-ssl-verify False"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
changed_when: false
- dashboard_protocol == "https"
- dashboard_grafana_api_no_ssl_verify | bool
-- name: set alertmanager host
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-alertmanager-api-host http://{{ grafana_server_addrs | first }}:{{ alertmanager_port }}"
+- name: Set alertmanager host
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-alertmanager-api-host http://{{ grafana_server_addrs | first }}:{{ alertmanager_port }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
changed_when: false
-- name: set prometheus host
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-prometheus-api-host http://{{ grafana_server_addrs | first }}:{{ prometheus_port }}"
+- name: Set prometheus host
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-prometheus-api-host http://{{ grafana_server_addrs | first }}:{{ prometheus_port }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
changed_when: false
-- include_tasks: configure_grafana_layouts.yml
+- name: Include grafana layout tasks
+ ansible.builtin.include_tasks: configure_grafana_layouts.yml
with_items: '{{ grafana_server_addrs }}'
vars:
grafana_server_addr: '{{ item }}'
-- name: config monitoring api url vip
+- name: Config monitoring api url vip
run_once: true
block:
- - name: config grafana api url vip
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-url {{ dashboard_protocol }}://{{ dashboard_frontend_vip }}:{{ grafana_port }}"
+ - name: Config grafana api url vip
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-url {{ dashboard_protocol }}://{{ dashboard_frontend_vip }}:{{ grafana_port }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
when: dashboard_frontend_vip is defined and dashboard_frontend_vip | length > 0
- - name: config alertmanager api url
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-alertmanager-api-host {{ dashboard_protocol }}://{{ alertmanager_frontend_vip }}:{{ alertmanager_port }}"
+ - name: Config alertmanager api url
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-alertmanager-api-host {{ dashboard_protocol }}://{{ alertmanager_frontend_vip }}:{{ alertmanager_port }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
when: alertmanager_frontend_vip is defined and alertmanager_frontend_vip | length > 0
- - name: config prometheus api url
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-prometheus-api-host {{ dashboard_protocol }}://{{ prometheus_frontend_vip }}:{{ prometheus_port }}"
+ - name: Config prometheus api url
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-prometheus-api-host {{ dashboard_protocol }}://{{ prometheus_frontend_vip }}:{{ prometheus_port }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
when: prometheus_frontend_vip is defined and prometheus_frontend_vip | length > 0
-- name: dashboard object gateway management frontend
+- name: Dashboard object gateway management frontend
when: groups.get(rgw_group_name, []) | length > 0
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
block:
- - name: set the rgw credentials
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-credentials"
+ - name: Set the rgw credentials
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-credentials"
changed_when: false
register: result
until: result is succeeded
retries: 5
- - name: set the rgw admin resource
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-api-admin-resource {{ dashboard_rgw_api_admin_resource }}"
+ - name: Set the rgw admin resource
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-api-admin-resource {{ dashboard_rgw_api_admin_resource }}"
changed_when: false
when: dashboard_rgw_api_admin_resource | length > 0
- - name: disable ssl verification for rgw
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-api-ssl-verify False"
+ - name: Disable ssl verification for rgw
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-rgw-api-ssl-verify False"
changed_when: false
when:
- dashboard_rgw_api_no_ssl_verify | bool
- radosgw_frontend_ssl_certificate | length > 0
-- name: dashboard iscsi management
+- name: Dashboard iscsi management
when: groups.get(iscsi_gw_group_name, []) | length > 0
run_once: true
block:
- - name: disable iscsi api ssl verification
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-iscsi-api-ssl-verification false"
+ - name: Disable iscsi api ssl verification
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-iscsi-api-ssl-verification false"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- api_secure | default(false) | bool
- generate_crt | default(false) | bool
- - name: add iscsi gateways - ipv4
- command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard iscsi-gateway-add -i -"
+ - name: Add iscsi gateways - ipv4
+ ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard iscsi-gateway-add -i -"
args:
stdin: "{{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(igw_network.split(',')) | first }}:{{ hostvars[item]['api_port'] | default(5000) }}"
- stdin_add_newline: no
+ stdin_add_newline: false
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
with_items: "{{ groups[iscsi_gw_group_name] }}"
when: ip_version == 'ipv4'
- - name: add iscsi gateways - ipv6
- command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard iscsi-gateway-add -i -"
+ - name: Add iscsi gateways - ipv6
+ ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard iscsi-gateway-add -i -"
args:
stdin: "{{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(igw_network.split(',')) | last | ansible.utils.ipwrap }}:{{ hostvars[item]['api_port'] | default(5000) }}"
- stdin_add_newline: no
+ stdin_add_newline: false
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
with_items: "{{ groups[iscsi_gw_group_name] }}"
when: ip_version == 'ipv6'
-- name: disable mgr dashboard module (restart)
+- name: Disable mgr dashboard module (restart)
ceph_mgr_module:
name: dashboard
cluster: "{{ cluster }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
-- name: enable mgr dashboard module (restart)
+- name: Enable mgr dashboard module (restart)
ceph_mgr_module:
name: dashboard
cluster: "{{ cluster }}"
---
-- name: set grafana url
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-url {{ dashboard_protocol }}://{{ grafana_server_fqdn | default(grafana_server_addr, true) }}:{{ grafana_port }}"
+- name: Set grafana url
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard set-grafana-api-url {{ dashboard_protocol }}://{{ grafana_server_fqdn | default(grafana_server_addr, true) }}:{{ grafana_port }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
changed_when: false
-- name: inject grafana dashboard layouts
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard grafana dashboards update"
+- name: Inject grafana dashboard layouts
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard grafana dashboards update"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
changed_when: false
---
-- name: include configure_dashboard.yml
- include_tasks: configure_dashboard.yml
+- name: Include configure_dashboard.yml
+ ansible.builtin.include_tasks: configure_dashboard.yml
-- name: print dashboard URL
- debug:
+- name: Print dashboard URL
+ ansible.builtin.debug:
msg: "The dashboard has been deployed! You can access your dashboard web UI at {{ dashboard_protocol }}://{{ ansible_facts['fqdn'] }}:{{ dashboard_port }}/ as an '{{ dashboard_admin_user }}' user with '{{ dashboard_admin_password }}' password."
run_once: true
# If configure_firewall is true, then ansible will try to configure the
# appropriate firewalling rules so that Ceph daemons can communicate
# with each other.
-configure_firewall: True
+configure_firewall: true
# Open ports on corresponding nodes if firewall is installed on it
ceph_mon_firewall_zone: public
# This variable determines if ceph packages can be updated. If False, the
# package resources will use "state=present". If True, they will use
# "state=latest".
-upgrade_ceph_packages: False
+upgrade_ceph_packages: false
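# For reference, a minimal sketch of how this toggle is typically consumed in
# package tasks (the task itself is illustrative, not part of this change):
#   - name: Install ceph packages
#     ansible.builtin.package:
#       name: ceph
#       state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"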
ceph_use_distro_backports: false # DEBIAN ONLY
ceph_directories_mode: "0755"
# Use the option below to specify your applicable package tree, e.g. when using non-LTS Ubuntu versions
# # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
# for more info read: https://github.com/ceph/ceph-ansible/issues/305
-#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
+# ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
# REPOSITORY: RHCS VERSION RED HAT STORAGE (from 5.0)
# a URL to the .repo file to be installed on the targets. For deb,
# ceph_custom_repo should be the URL to the repo base.
#
-#ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc
+# ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc
ceph_custom_repo: https://server.domain.com/ceph-custom-repo
# Enabled when ceph_repository == 'local'
#
# Path to DESTDIR of the ceph install
-#ceph_installation_dir: "/path/to/ceph_installation/"
+# ceph_installation_dir: "/path/to/ceph_installation/"
# Whether or not to use installer script rundep_installer.sh
# This script reads rundep and installs the listed packages line by line on the machine.
# If this is set to false, it is assumed that the machine ceph is being copied onto already has
# all runtime dependencies installed
-#use_installer: false
+# use_installer: false
# Root directory for ceph-ansible
-#ansible_dir: "/path/to/ceph-ansible"
+# ansible_dir: "/path/to/ceph-ansible"
######################
ip_version: ipv4
mon_host_v1:
- enabled: True
+ enabled: true
suffix: ':6789'
mon_host_v2:
suffix: ':3300'
-enable_ceph_volume_debug: False
+enable_ceph_volume_debug: false
##########
# CEPHFS #
## Testing mode
# enable this mode _only_ when you have a single node
# if you don't want it, keep the option commented
-#common_single_host_mode: true
+# common_single_host_mode: true
## Handlers - restarting daemons after a config change
# if for whatever reasons the content of your ceph configuration changes
ceph_docker_image_tag: latest-main
ceph_docker_registry: quay.io
ceph_docker_registry_auth: false
-#ceph_docker_registry_username:
-#ceph_docker_registry_password:
-#ceph_docker_http_proxy:
-#ceph_docker_https_proxy:
+# ceph_docker_registry_username:
+# ceph_docker_registry_password:
+# ceph_docker_http_proxy:
+# ceph_docker_https_proxy:
ceph_docker_no_proxy: "localhost,127.0.0.1"
## Client only docker image - defaults to {{ ceph_docker_image }}
ceph_client_docker_image: "{{ ceph_docker_image }}"
ceph_client_docker_image_tag: "{{ ceph_docker_image_tag }}"
ceph_client_docker_registry: "{{ ceph_docker_registry }}"
-containerized_deployment: False
+containerized_deployment: false
container_binary:
timeout_command: "{{ 'timeout --foreground -s KILL ' ~ docker_pull_timeout if (docker_pull_timeout != '0') and (ceph_docker_dev_image is undefined or not ceph_docker_dev_image) else '' }}"
# name: "images"
# rule_name: "my_replicated_rule"
# application: "rbd"
-# pg_autoscale_mode: False
+# pg_autoscale_mode: false
# pg_num: 16
# pgp_num: 16
# target_size_ratio: 0.2
#############
# DASHBOARD #
#############
-dashboard_enabled: True
+dashboard_enabled: true
# Choose http or https
# For https, you should set dashboard.crt/key and grafana.crt/key
# If you define the dashboard_crt and dashboard_key variables, but leave them as '',
dashboard_admin_user: admin
dashboard_admin_user_ro: false
# This variable must be set with a strong custom password when dashboard_enabled is True
-#dashboard_admin_password: p@ssw0rd
+# dashboard_admin_password: p@ssw0rd
# We only need this for SSL (https) connections
dashboard_crt: ''
dashboard_key: ''
dashboard_grafana_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not grafana_crt and not grafana_key else false }}"
dashboard_rgw_api_user_id: ceph-dashboard
dashboard_rgw_api_admin_resource: ''
-dashboard_rgw_api_no_ssl_verify: False
+dashboard_rgw_api_no_ssl_verify: false
dashboard_frontend_vip: ''
dashboard_disabled_features: []
prometheus_frontend_vip: ''
node_exporter_port: 9100
grafana_admin_user: admin
# This variable must be set with a strong custom password when dashboard_enabled is True
-#grafana_admin_password: admin
+# grafana_admin_password: admin
# We only need this for SSL (https) connections
grafana_crt: ''
grafana_key: ''
grafana_plugins:
- vonage-status-panel
- grafana-piechart-panel
-grafana_allow_embedding: True
+grafana_allow_embedding: true
grafana_port: 3000
grafana_network: "{{ public_network }}"
grafana_conf_overrides: {}
prometheus_conf_overrides: {}
# Uncomment this variable if you need to customize the retention period for prometheus storage.
# set it to '30d' if you want to retain 30 days of data.
-#prometheus_storage_tsdb_retention_time: 15d
+# prometheus_storage_tsdb_retention_time: 15d
alertmanager_container_image: "docker.io/prom/alertmanager:v0.16.2"
alertmanager_container_cpu_period: 100000
alertmanager_container_cpu_cores: 2
#
# Example:
#
-#rbd_devices:
-# - { pool: 'rbd', image: 'ansible1', size: '30G', host: 'ceph-1', state: 'present' }
-# - { pool: 'rbd', image: 'ansible2', size: '15G', host: 'ceph-1', state: 'present' }
-# - { pool: 'rbd', image: 'ansible3', size: '30G', host: 'ceph-1', state: 'present' }
-# - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' }
+# rbd_devices:
+# - { pool: 'rbd', image: 'ansible1', size: '30G', host: 'ceph-1', state: 'present' }
+# - { pool: 'rbd', image: 'ansible2', size: '15G', host: 'ceph-1', state: 'present' }
+# - { pool: 'rbd', image: 'ansible3', size: '30G', host: 'ceph-1', state: 'present' }
+# - { pool: 'rbd', image: 'ansible4', size: '50G', host: 'ceph-1', state: 'present' }
rbd_devices: {}
# client_connections defines the client ACL's to restrict client access to specific LUNs
#
# Example:
#
-#client_connections:
-# - { client: 'iqn.1994-05.com.redhat:rh7-iscsi-client', image_list: 'rbd.ansible1,rbd.ansible2', chap: 'rh7-iscsi-client/redhat', status: 'present' }
-# - { client: 'iqn.1991-05.com.microsoft:w2k12r2', image_list: 'rbd.ansible4', chap: 'w2k12r2/microsoft_w2k12', status: 'absent' }
+# client_connections:
+# - { client: 'iqn.1994-05.com.redhat:rh7-iscsi-client', image_list: 'rbd.ansible1,rbd.ansible2', chap: 'rh7-iscsi-client/redhat', status: 'present' }
+# - { client: 'iqn.1991-05.com.microsoft:w2k12r2', image_list: 'rbd.ansible4', chap: 'w2k12r2/microsoft_w2k12', status: 'absent' }
client_connections: {}
-no_log_on_ceph_key_tasks: True
+no_log_on_ceph_key_tasks: true
###############
# DEPRECATION #
###############
-
######################################################
# VARIABLES BELOW SHOULD NOT BE MODIFIED BY THE USER #
# *DO NOT* MODIFY THEM #
container_exec_cmd:
docker: false
-ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}"
+ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}"
author: Sébastien Han
description: Handles ceph-ansible default vars for all roles
license: Apache
- min_ansible_version: 2.7
+ min_ansible_version: '2.7'
platforms:
- name: Ubuntu
versions:
- xenial
- name: EL
versions:
- - 7
+ - 'all'
galaxy_tags:
- system
dependencies: []
----
\ No newline at end of file
+---
---
ceph_osd_pool_default_crush_rule: -1
-ceph_osd_pool_default_crush_rule_name: "replicated_rule"
\ No newline at end of file
+ceph_osd_pool_default_crush_rule_name: "replicated_rule"
author: Guillaume Abrioux
description: Set some facts for ceph to be deployed
license: Apache
- min_ansible_version: 2.7
+ min_ansible_version: '2.7'
platforms:
- name: Ubuntu
versions:
- xenial
- name: EL
versions:
- - 7
+ - 'all'
galaxy_tags:
- system
dependencies: []
---
-- name: check if podman binary is present
- stat:
+- name: Check if podman binary is present
+ ansible.builtin.stat:
path: /usr/bin/podman
register: podman_binary
-- name: set_fact container_binary
- set_fact:
+- name: Set_fact container_binary
+ ansible.builtin.set_fact:
container_binary: "{{ 'podman' if (podman_binary.stat.exists and ansible_facts['distribution'] == 'Fedora') or (ansible_facts['os_family'] == 'RedHat' and ansible_facts['distribution_major_version'] in ['8', '9']) else 'docker' }}"
when: not docker2podman | default(false) | bool
---
-- name: convert grafana-server group name if exist
- add_host:
+- name: Convert grafana-server group name if exist
+ ansible.builtin.add_host:
name: "{{ item }}"
groups: "{{ monitoring_group_name }}"
ansible_host: "{{ hostvars[item]['ansible_host'] | default(omit) }}"
ansible_port: "{{ hostvars[item]['ansible_port'] | default(omit) }}"
- with_items: "{{ groups.get((grafana_server_group_name|default('grafana-server')), []) }}"
- run_once: True
+ with_items: "{{ groups.get((grafana_server_group_name | default('grafana-server')), []) }}"
+ run_once: true
---
-- name: resolve devices
+- name: Resolve devices
when:
- devices is defined
- not osd_auto_discovery | default(False) | bool
block:
- - name: resolve device link(s)
- command: readlink -f {{ item }}
+ - name: Resolve device link(s)
+ ansible.builtin.command: readlink -f {{ item }}
changed_when: false
- check_mode: no
+ check_mode: false
with_items: "{{ devices }}"
register: devices_prepare_canonicalize
- - name: set_fact build devices from resolved symlinks
- set_fact:
- devices: "{{ devices_prepare_canonicalize.results | map(attribute='stdout') | reject('search','/dev/disk') | list | unique }}"
+ - name: Set_fact build devices from resolved symlinks
+ ansible.builtin.set_fact:
+ devices: "{{ devices_prepare_canonicalize.results | map(attribute='stdout') | reject('search', '/dev/disk') | list | unique }}"
-- name: resolve dedicated_device
+- name: Resolve dedicated_device
when:
- dedicated_devices is defined
- not osd_auto_discovery | default(False) | bool
block:
- - name: resolve dedicated_device link(s)
- command: readlink -f {{ item }}
+ - name: Resolve dedicated_device link(s)
+ ansible.builtin.command: readlink -f {{ item }}
changed_when: false
- check_mode: no
+ check_mode: false
with_items: "{{ dedicated_devices }}"
register: dedicated_devices_prepare_canonicalize
- - name: set_fact build dedicated_devices from resolved symlinks
- set_fact:
- dedicated_devices: "{{ dedicated_devices_prepare_canonicalize.results | map(attribute='stdout') | reject('search','/dev/disk') | list | unique }}"
+ - name: Set_fact build dedicated_devices from resolved symlinks
+ ansible.builtin.set_fact:
+ dedicated_devices: "{{ dedicated_devices_prepare_canonicalize.results | map(attribute='stdout') | reject('search', '/dev/disk') | list | unique }}"
-- name: resolve bluestore_wal_device
+- name: Resolve bluestore_wal_device
when:
- bluestore_wal_devices is defined
- not osd_auto_discovery | default(False) | bool
block:
- - name: resolve bluestore_wal_device link(s)
- command: readlink -f {{ item }}
+ - name: Resolve bluestore_wal_device link(s)
+ ansible.builtin.command: readlink -f {{ item }}
changed_when: false
- check_mode: no
+ check_mode: false
with_items: "{{ bluestore_wal_devices }}"
register: bluestore_wal_devices_prepare_canonicalize
- - name: set_fact build bluestore_wal_devices from resolved symlinks
- set_fact:
- bluestore_wal_devices: "{{ bluestore_wal_devices_prepare_canonicalize.results | map(attribute='stdout') | reject('search','/dev/disk') | list | unique }}"
+ - name: Set_fact build bluestore_wal_devices from resolved symlinks
+ ansible.builtin.set_fact:
+ bluestore_wal_devices: "{{ bluestore_wal_devices_prepare_canonicalize.results | map(attribute='stdout') | reject('search', '/dev/disk') | list | unique }}"
-- name: set_fact devices generate device list when osd_auto_discovery
+- name: Set_fact devices generate device list when osd_auto_discovery
vars:
device: "{{ item.key | regex_replace('^', '/dev/') }}"
- set_fact:
+ ansible.builtin.set_fact:
devices: "{{ devices | default([]) | union([device]) }}"
with_dict: "{{ ansible_facts['devices'] }}"
when:
---
-- name: check if it is atomic host
- stat:
+- name: Check if it is atomic host
+ ansible.builtin.stat:
path: /run/ostree-booted
register: stat_ostree
-- name: set_fact is_atomic
- set_fact:
+- name: Set_fact is_atomic
+ ansible.builtin.set_fact:
is_atomic: "{{ stat_ostree.stat.exists }}"
-- name: import_tasks container_binary.yml
- import_tasks: container_binary.yml
+- name: Import_tasks container_binary.yml
+ ansible.builtin.import_tasks: container_binary.yml
-- name: set_fact ceph_cmd
- set_fact:
+- name: Set_fact ceph_cmd
+ ansible.builtin.set_fact:
ceph_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:z -v /var/run/ceph:/var/run/ceph:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }}"
# In case ansible_python_interpreter is set by the user,
# ansible will not discover python and discovered_interpreter_python
# will not be set
-- name: set_fact discovered_interpreter_python
- set_fact:
+- name: Set_fact discovered_interpreter_python
+ ansible.builtin.set_fact:
discovered_interpreter_python: "{{ ansible_python_interpreter }}"
when: ansible_python_interpreter is defined
# If ansible_python_interpreter is not defined, this can prevent the
# discovered_interpreter_python fact from being set. That causes failures later in this
# playbook and elsewhere where the fact is used.
-- name: set_fact discovered_interpreter_python if not previously set
- set_fact:
+- name: Set_fact discovered_interpreter_python if not previously set
+ ansible.builtin.set_fact:
discovered_interpreter_python: "{{ ansible_facts['discovered_interpreter_python'] }}"
when:
- discovered_interpreter_python is not defined
- ansible_facts['discovered_interpreter_python'] is defined
# Set ceph_release to ceph_stable by default
-- name: set_fact ceph_release ceph_stable_release
- set_fact:
+- name: Set_fact ceph_release ceph_stable_release
+ ansible.builtin.set_fact:
ceph_release: "{{ ceph_stable_release }}"
-- name: set_fact monitor_name ansible_facts['hostname']
- set_fact:
+- name: Set_fact monitor_name ansible_facts['hostname']
+ ansible.builtin.set_fact:
monitor_name: "{{ hostvars[item]['ansible_facts']['hostname'] }}"
delegate_to: "{{ item }}"
delegate_facts: true
run_once: true
when: groups.get(mon_group_name, []) | length > 0
-- name: find a running monitor
+- name: Find a running monitor
when: groups.get(mon_group_name, []) | length > 0
block:
- - name: set_fact container_exec_cmd
- set_fact:
+ - name: Set_fact container_exec_cmd
+ ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] if not rolling_update | bool else hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_facts']['hostname'] }}"
when:
- containerized_deployment | bool
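# Illustrative only: with a containerized deployment this resolves to something
# like "podman exec ceph-mon-mon0" (container engine and hostname are examples).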
- - name: find a running mon container
- command: "{{ container_binary }} ps -q --filter name=ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}"
+ - name: Find a running mon container
+ ansible.builtin.command: "{{ container_binary }} ps -q --filter name=ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}"
register: find_running_mon_container
failed_when: false
run_once: true
delegate_to: "{{ item }}"
with_items: "{{ groups.get(mon_group_name, []) }}"
+ changed_when: false
when:
- containerized_deployment | bool
- - name: check for a ceph mon socket
- shell: stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-mon*.asok
+ - name: Check for a ceph mon socket
+ ansible.builtin.shell: stat --printf=%n {{ rbd_client_admin_socket_path }}/{{ cluster }}-mon*.asok
changed_when: false
failed_when: false
- check_mode: no
+ check_mode: false
register: mon_socket_stat
run_once: true
delegate_to: "{{ item }}"
when:
- not containerized_deployment | bool
- - name: check if the ceph mon socket is in-use
- command: grep -q {{ item.stdout }} /proc/net/unix
+ - name: Check if the ceph mon socket is in-use
+ ansible.builtin.command: grep -q {{ item.stdout }} /proc/net/unix
changed_when: false
failed_when: false
- check_mode: no
+ check_mode: false
register: mon_socket
run_once: true
delegate_to: "{{ hostvars[item.item]['inventory_hostname'] }}"
- not containerized_deployment | bool
- item.rc == 0
- - name: set_fact running_mon - non_container
- set_fact:
+ - name: Set_fact running_mon - non_container
+ ansible.builtin.set_fact:
running_mon: "{{ hostvars[item.item.item]['inventory_hostname'] }}"
with_items: "{{ mon_socket.results }}"
run_once: true
- item.rc is defined
- item.rc == 0
- - name: set_fact running_mon - container
- set_fact:
+ - name: Set_fact running_mon - container
+ ansible.builtin.set_fact:
running_mon: "{{ item.item }}"
run_once: true
with_items: "{{ find_running_mon_container.results }}"
- containerized_deployment | bool
- item.stdout_lines | default([]) | length > 0
- - name: set_fact _container_exec_cmd
- set_fact:
+ - name: Set_fact _container_exec_cmd
+ ansible.builtin.set_fact:
_container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0] if running_mon is undefined else running_mon]['ansible_facts']['hostname'] }}"
when:
- containerized_deployment | bool
# this task shouldn't run in a rolling_update situation
# because it blindly picks a mon, which may be down because
# of the rolling update
- - name: get current fsid if cluster is already running
- command: "{{ timeout_command }} {{ _container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fsid"
+ - name: Get current fsid if cluster is already running
+ ansible.builtin.command: "{{ timeout_command }} {{ _container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fsid"
changed_when: false
failed_when: false
- check_mode: no
+ check_mode: false
register: current_fsid
run_once: true
delegate_to: "{{ groups[mon_group_name][0] if running_mon is undefined else running_mon }}"
# set this as a default when performing a rolling_update
# so the rest of the tasks here will succeed
-- name: set_fact current_fsid rc 1
- set_fact:
+- name: Set_fact current_fsid rc 1
+ ansible.builtin.set_fact:
current_fsid:
rc: 1
when: rolling_update | bool or groups.get(mon_group_name, []) | length == 0
-- name: get current fsid
- command: "{{ timeout_command }} {{ container_exec_cmd }} ceph --admin-daemon /var/run/ceph/{{ cluster }}-mon.{{ hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_facts']['hostname'] }}.asok config get fsid"
+- name: Get current fsid
+ ansible.builtin.command: "{{ timeout_command }} {{ container_exec_cmd }} ceph --admin-daemon /var/run/ceph/{{ cluster }}-mon.{{ hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_facts']['hostname'] }}.asok config get fsid"
register: rolling_update_fsid
delegate_to: "{{ mon_host | default(groups[mon_group_name][0]) }}"
until: rolling_update_fsid is succeeded
+ changed_when: false
when:
- rolling_update | bool
- groups.get(mon_group_name, []) | length > 0
-- name: set_fact fsid
- set_fact:
+- name: Set_fact fsid
+ ansible.builtin.set_fact:
fsid: "{{ (rolling_update_fsid.stdout | from_json).fsid }}"
when:
- rolling_update | bool
- groups.get(mon_group_name, []) | length > 0
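# For context, the admin socket command above returns JSON such as
# {"fsid": "40358bcc-5bbd-4c31-9a17-ed957e9c38cf"} (value is illustrative),
# which is why the fact is extracted with "from_json" here.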
-- name: set_fact fsid from current_fsid
- set_fact:
+- name: Set_fact fsid from current_fsid
+ ansible.builtin.set_fact:
fsid: "{{ current_fsid.stdout }}"
run_once: true
when: current_fsid.rc == 0
-- name: fsid related tasks
+- name: Fsid related tasks
when:
- generate_fsid | bool
- current_fsid.rc != 0
- not rolling_update | bool
block:
- - name: generate cluster fsid
- command: "{{ hostvars[groups[mon_group_name][0]]['discovered_interpreter_python'] }} -c 'import uuid; print(str(uuid.uuid4()))'"
+ - name: Generate cluster fsid
+ ansible.builtin.command: "{{ hostvars[groups[mon_group_name][0]]['discovered_interpreter_python'] }} -c 'import uuid; print(str(uuid.uuid4()))'"
register: cluster_uuid
delegate_to: "{{ groups[mon_group_name][0] }}"
+ changed_when: false
run_once: true
- - name: set_fact fsid
- set_fact:
+ - name: Set_fact fsid
+ ansible.builtin.set_fact:
fsid: "{{ cluster_uuid.stdout }}"
-- name: import_tasks devices.yml
- import_tasks: devices.yml
+- name: Import_tasks devices.yml
+ ansible.builtin.import_tasks: devices.yml
when: inventory_hostname in groups.get(osd_group_name, [])
-- name: check if the ceph conf exists
- stat:
+- name: Check if the ceph conf exists
+ ansible.builtin.stat:
path: '/etc/ceph/{{ cluster }}.conf'
register: ceph_conf
-- name: set default osd_pool_default_crush_rule fact
- set_fact:
+- name: Set default osd_pool_default_crush_rule fact
+ ansible.builtin.set_fact:
osd_pool_default_crush_rule: "{{ ceph_osd_pool_default_crush_rule }}"
-- name: get default crush rule value from ceph configuration
+- name: Get default crush rule value from ceph configuration
+ when: ceph_conf.stat.exists | bool
block:
- &read-osd-pool-default-crush-rule
- name: read osd pool default crush rule
- command: grep 'osd pool default crush rule' /etc/ceph/{{ cluster }}.conf
+ name: Read osd pool default crush rule
+ ansible.builtin.command: grep 'osd pool default crush rule' /etc/ceph/{{ cluster }}.conf
register: crush_rule_variable
changed_when: false
- check_mode: no
+ check_mode: false
failed_when: crush_rule_variable.rc not in (0, 1)
- &set-osd-pool-default-crush-rule-fact
- name: set osd_pool_default_crush_rule fact
- set_fact:
+ name: Set osd_pool_default_crush_rule fact
+ ansible.builtin.set_fact:
osd_pool_default_crush_rule: "{{ crush_rule_variable.stdout.split(' = ')[1] }}"
when: crush_rule_variable.rc == 0
- when: ceph_conf.stat.exists | bool
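# Worked example with an illustrative ceph.conf line: if the file contains
#   osd pool default crush rule = 1
# the grep returns rc 0 and split(' = ')[1] yields "1", which overrides the
# default fact set earlier; rc 1 (no match) keeps ceph_osd_pool_default_crush_rule.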
-- name: get default crush rule value from running monitor ceph configuration
- block:
- - <<: *read-osd-pool-default-crush-rule
- delegate_to: "{{ running_mon }}"
- - *set-osd-pool-default-crush-rule-fact
+- name: Get default crush rule value from running monitor ceph configuration
when:
- running_mon is defined
- not ceph_conf.stat.exists | bool
+ block:
+ - <<: *read-osd-pool-default-crush-rule # noqa: name[casing]
+ delegate_to: "{{ running_mon }}"
+ - *set-osd-pool-default-crush-rule-fact
-- name: import_tasks set_monitor_address.yml
- import_tasks: set_monitor_address.yml
+- name: Import_tasks set_monitor_address.yml
+ ansible.builtin.import_tasks: set_monitor_address.yml
when: groups.get(mon_group_name, []) | length > 0
-- name: import_tasks set_radosgw_address.yml
- include_tasks: set_radosgw_address.yml
+- name: Import_tasks set_radosgw_address.yml
+ ansible.builtin.include_tasks: set_radosgw_address.yml
when: inventory_hostname in groups.get(rgw_group_name, [])
-- name: set_fact use_new_ceph_iscsi package or old ceph-iscsi-config/cli
- set_fact:
- use_new_ceph_iscsi: "{{ (gateway_ip_list == '0.0.0.0' and gateway_iqn | length == 0 and client_connections | length == 0 and rbd_devices | length == 0) | bool | ternary(true, false) }}"
+- name: Set_fact use_new_ceph_iscsi package or old ceph-iscsi-config/cli
+ ansible.builtin.set_fact:
+ use_new_ceph_iscsi: "{{ (gateway_ip_list == '0.0.0.0' and gateway_iqn | length == 0 and client_connections | length == 0 and rbd_devices | length == 0) | bool | ternary(true, false) }}"
when: iscsi_gw_group_name in group_names
-- name: set_fact ceph_run_cmd
- set_fact:
+- name: Set_fact ceph_run_cmd
+ ansible.builtin.set_fact:
ceph_run_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph/:/var/lib/ceph/:z -v /var/log/ceph/:/var/log/ceph/:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }}"
delegate_to: "{{ item }}"
- delegate_facts: True
- run_once: True
+ delegate_facts: true
+ run_once: true
with_items:
- "{{ groups[mon_group_name] if groups[mon_group_name] | default([]) | length > 0 else [] }}"
- "{{ groups[mds_group_name] if groups[mds_group_name] | default([]) | length > 0 else [] }}"
- "{{ groups[client_group_name] if groups[client_group_name] | default([]) | length > 0 else [] }}"
-- name: set_fact ceph_admin_command
- set_fact:
+- name: Set_fact ceph_admin_command
+ ansible.builtin.set_fact:
ceph_admin_command: "{{ hostvars[item]['ceph_run_cmd'] }} -n client.admin -k /etc/ceph/{{ cluster }}.client.admin.keyring"
delegate_to: "{{ item }}"
- delegate_facts: True
- run_once: True
+ delegate_facts: true
+ run_once: true
with_items:
- "{{ groups[mon_group_name] if groups[mon_group_name] | default([]) | length > 0 else [] }}"
- "{{ groups[mds_group_name] if groups[mds_group_name] | default([]) | length > 0 else [] }}"
---
-- name: get current default crush rule details
+- name: Get current default crush rule details
ceph_crush_rule:
cluster: "{{ cluster }}"
state: info
delegate_to: "{{ delegated_node | default(groups[mon_group_name][0]) }}"
run_once: true
-- name: get current default crush rule name
- set_fact:
+- name: Get current default crush rule name
+ ansible.builtin.set_fact:
ceph_osd_pool_default_crush_rule_name: "{{ item.rule_name }}"
with_items: "{{ default_crush_rule_details.stdout | default('{}', True) | from_json }}"
- run_once: True
+ run_once: true
when: item.rule_id | int == osd_pool_default_crush_rule | int
-- name: set grafana_server_addr fact - ipv4
- set_fact:
+- name: Set grafana_server_addr fact - ipv4
+ ansible.builtin.set_fact:
grafana_server_addr: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(grafana_network.split(',')) | first }}"
when:
- groups.get(monitoring_group_name, []) | length > 0
- dashboard_enabled | bool
- inventory_hostname in groups[monitoring_group_name]
-- name: set grafana_server_addr fact - ipv6
- set_fact:
+- name: Set grafana_server_addr fact - ipv6
+ ansible.builtin.set_fact:
grafana_server_addr: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(grafana_network.split(',')) | last | ansible.utils.ipwrap }}"
when:
- groups.get(monitoring_group_name, []) | length > 0
- dashboard_enabled | bool
- inventory_hostname in groups[monitoring_group_name]
-- name: set grafana_server_addrs fact - ipv4
- set_fact:
+- name: Set grafana_server_addrs fact - ipv4
+ ansible.builtin.set_fact:
grafana_server_addrs: "{{ (grafana_server_addrs | default([]) + [hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(grafana_network.split(',')) | first]) | unique }}"
with_items: "{{ groups.get(monitoring_group_name, []) }}"
when:
- ip_version == 'ipv4'
- dashboard_enabled | bool
-- name: set grafana_server_addrs fact - ipv6
- set_fact:
+- name: Set grafana_server_addrs fact - ipv6
+ ansible.builtin.set_fact:
grafana_server_addrs: "{{ (grafana_server_addrs | default([]) + [hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(grafana_network.split(',')) | last | ansible.utils.ipwrap]) | unique }}"
with_items: "{{ groups.get(monitoring_group_name, []) }}"
when:
---
-- name: include_tasks convert_grafana_server_group_name.yml
- include_tasks: convert_grafana_server_group_name.yml
+- name: Include_tasks convert_grafana_server_group_name.yml
+ ansible.builtin.include_tasks: convert_grafana_server_group_name.yml
when: groups.get((grafana_server_group_name|default('grafana-server')), []) | length > 0
-- name: include facts.yml
- include_tasks: facts.yml
+- name: Include facts.yml
+ ansible.builtin.include_tasks: facts.yml
---
-- name: set_fact _monitor_addresses to monitor_address_block ipv4
- set_fact:
- _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(hostvars[item]['monitor_address_block'].split(',')) | first }] }}"
+- name: Set_fact _monitor_addresses to monitor_address_block ipv4
+ ansible.builtin.set_fact:
+ _monitor_addresses: "{{ _monitor_addresses | default([]) + [{'name': item, 'addr': hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(hostvars[item]['monitor_address_block'].split(',')) | first}] }}"
with_items: "{{ groups.get(mon_group_name, []) }}"
when:
- "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
- hostvars[item]['monitor_address_block'] != 'subnet'
- ip_version == 'ipv4'
-- name: set_fact _monitor_addresses to monitor_address_block ipv6
- set_fact:
- _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(hostvars[item]['monitor_address_block'].split(',')) | last | ansible.utils.ipwrap }] }}"
+- name: Set_fact _monitor_addresses to monitor_address_block ipv6
+ ansible.builtin.set_fact:
+ _monitor_addresses: "{{ _monitor_addresses | default([]) + [{'name': item, 'addr': hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(hostvars[item]['monitor_address_block'].split(',')) | last | ansible.utils.ipwrap}] }}"
with_items: "{{ groups.get(mon_group_name, []) }}"
when:
- "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
- hostvars[item]['monitor_address_block'] != 'subnet'
- ip_version == 'ipv6'
-- name: set_fact _monitor_addresses to monitor_address
- set_fact:
- _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['monitor_address'] | ansible.utils.ipwrap}] }}"
+- name: Set_fact _monitor_addresses to monitor_address
+ ansible.builtin.set_fact:
+ _monitor_addresses: "{{ _monitor_addresses | default([]) + [{'name': item, 'addr': hostvars[item]['monitor_address'] | ansible.utils.ipwrap}] }}"
with_items: "{{ groups.get(mon_group_name, []) }}"
when:
- "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
- hostvars[item]['monitor_address'] is defined
- hostvars[item]['monitor_address'] != 'x.x.x.x'
-- name: set_fact _monitor_addresses to monitor_interface - ipv4
- set_fact:
- _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_facts'][(hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version]['address'] | ansible.utils.ipwrap }] }}"
+- name: Set_fact _monitor_addresses to monitor_interface - ipv4
+ ansible.builtin.set_fact:
+ _monitor_addresses: "{{ _monitor_addresses | default([]) + [{'name': item, 'addr': hostvars[item]['ansible_facts'][(hostvars[item]['monitor_interface'] | replace('-', '_'))][ip_version]['address'] | ansible.utils.ipwrap}] }}"
with_items: "{{ groups.get(mon_group_name, []) }}"
when:
- "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
- hostvars[item]['monitor_address'] | default('x.x.x.x') == 'x.x.x.x'
- hostvars[item]['monitor_interface'] | default('interface') != 'interface'
-- name: set_fact _monitor_addresses to monitor_interface - ipv6
- set_fact:
- _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_facts'][(hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version][0]['address'] | ansible.utils.ipwrap }] }}"
+- name: Set_fact _monitor_addresses to monitor_interface - ipv6
+ ansible.builtin.set_fact:
+ _monitor_addresses: "{{ _monitor_addresses | default([]) + [{'name': item, 'addr': hostvars[item]['ansible_facts'][(hostvars[item]['monitor_interface'] | replace('-', '_'))][ip_version][0]['address'] | ansible.utils.ipwrap}] }}"
with_items: "{{ groups.get(mon_group_name, []) }}"
when:
- "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
- hostvars[item]['monitor_address'] | default('x.x.x.x') == 'x.x.x.x'
- hostvars[item]['monitor_interface'] | default('interface') != 'interface'
-- name: set_fact _current_monitor_address
- set_fact:
+- name: Set_fact _current_monitor_address
+ ansible.builtin.set_fact:
_current_monitor_address: "{{ item.addr }}"
with_items: "{{ _monitor_addresses }}"
when:
- (inventory_hostname == item.name and not rolling_update | default(False) | bool)
- or (rolling_update | default(False) | bool and item.name == groups.get(mon_group_name, [])[0])
\ No newline at end of file
+ or (rolling_update | default(False) | bool and item.name == groups.get(mon_group_name, [])[0])
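# A sketch of the structure built above, with illustrative names and addresses:
#   _monitor_addresses: [{'name': 'mon0', 'addr': '192.168.1.10'},
#                        {'name': 'mon1', 'addr': '192.168.1.11'}]
# _current_monitor_address is then the 'addr' entry matching the current host
# (or the first monitor during a rolling update).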
---
-- name: dashboard related tasks
+- name: Dashboard related tasks
when: ceph_dashboard_call_item is defined
block:
- - name: set current radosgw_address_block, radosgw_address, radosgw_interface from node "{{ ceph_dashboard_call_item }}"
- set_fact:
+ - name: Set current radosgw_address_block, radosgw_address, radosgw_interface from node "{{ ceph_dashboard_call_item }}"
+ ansible.builtin.set_fact:
radosgw_address_block: "{{ hostvars[ceph_dashboard_call_item]['radosgw_address_block'] | default(radosgw_address_block) }}"
radosgw_address: "{{ hostvars[ceph_dashboard_call_item]['radosgw_address'] | default(radosgw_address) }}"
radosgw_interface: "{{ hostvars[ceph_dashboard_call_item]['radosgw_interface'] | default(radosgw_interface) }}"
-- name: set_fact _radosgw_address to radosgw_address_block ipv4
- set_fact:
+- name: Set_fact _radosgw_address to radosgw_address_block ipv4
+ ansible.builtin.set_fact:
_radosgw_address: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['radosgw_address_block'].split(',')) | first }}"
when:
- radosgw_address_block is defined
- radosgw_address_block != 'subnet'
- ip_version == 'ipv4'
-- name: set_fact _radosgw_address to radosgw_address_block ipv6
- set_fact:
+- name: Set_fact _radosgw_address to radosgw_address_block ipv6
+ ansible.builtin.set_fact:
_radosgw_address: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['radosgw_address_block'].split(',')) | last | ansible.utils.ipwrap }}"
when:
- radosgw_address_block is defined
- radosgw_address_block != 'subnet'
- ip_version == 'ipv6'
-- name: set_fact _radosgw_address to radosgw_address
- set_fact:
+- name: Set_fact _radosgw_address to radosgw_address
+ ansible.builtin.set_fact:
_radosgw_address: "{{ radosgw_address | ansible.utils.ipwrap }}"
when:
- radosgw_address is defined
- radosgw_address != 'x.x.x.x'
-- name: tasks for radosgw interface
+- name: Tasks for radosgw interface
when:
- radosgw_address_block == 'subnet'
- radosgw_address == 'x.x.x.x'
- radosgw_interface != 'interface'
block:
- - name: set_fact _interface
- set_fact:
+ - name: Set_fact _interface
+ ansible.builtin.set_fact:
_interface: "{{ (hostvars[item]['radosgw_interface'] | replace('-', '_')) }}"
loop: "{{ groups.get(rgw_group_name, []) }}"
delegate_to: "{{ item }}"
delegate_facts: true
run_once: true
- - name: set_fact _radosgw_address to radosgw_interface - ipv4
- set_fact:
+ - name: Set_fact _radosgw_address to radosgw_interface - ipv4
+ ansible.builtin.set_fact:
_radosgw_address: "{{ hostvars[item]['ansible_facts'][hostvars[item]['_interface']][ip_version]['address'] }}"
loop: "{{ groups.get(rgw_group_name, []) }}"
delegate_to: "{{ item }}"
run_once: true
when: ip_version == 'ipv4'
- - name: set_fact _radosgw_address to radosgw_interface - ipv6
- set_fact:
+ - name: Set_fact _radosgw_address to radosgw_interface - ipv6
+ ansible.builtin.set_fact:
_radosgw_address: "{{ hostvars[item]['ansible_facts'][hostvars[item]['_interface']][ip_version][0]['address'] | ansible.utils.ipwrap }}"
loop: "{{ groups.get(rgw_group_name, []) }}"
delegate_to: "{{ item }}"
run_once: true
when: ip_version == 'ipv6'
-- name: rgw_instances
+- name: Rgw_instances
when:
- ceph_dashboard_call_item is defined or
inventory_hostname in groups.get(rgw_group_name, [])
block:
- - name: reset rgw_instances (workaround)
- set_fact:
+ - name: Reset rgw_instances (workaround)
+ ansible.builtin.set_fact:
rgw_instances: []
- - name: set_fact rgw_instances
- set_fact:
- rgw_instances: "{{ rgw_instances|default([]) | union([{'instance_name': 'rgw' + item|string, 'radosgw_address': hostvars[ceph_dashboard_call_item | default(inventory_hostname)]['_radosgw_address'], 'radosgw_frontend_port': radosgw_frontend_port|int + item|int }]) }}"
- with_sequence: start=0 end={{ radosgw_num_instances|int - 1 }}
+ - name: Set_fact rgw_instances
+ ansible.builtin.set_fact:
+ rgw_instances: "{{ rgw_instances | default([]) | union([{'instance_name': 'rgw' + item | string, 'radosgw_address': hostvars[ceph_dashboard_call_item | default(inventory_hostname)]['_radosgw_address'], 'radosgw_frontend_port': radosgw_frontend_port | int + item | int}]) }}"
+ with_sequence: start=0 end={{ radosgw_num_instances | int - 1 }}
delegate_to: "{{ ceph_dashboard_call_item if ceph_dashboard_call_item is defined else inventory_hostname }}"
- delegate_facts: "{{ true if ceph_dashboard_call_item is defined else false }}"
+ delegate_facts: true
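# A sketch of the result with illustrative values: with radosgw_num_instances=2,
# radosgw_frontend_port=8080 and _radosgw_address=192.168.1.20, rgw_instances becomes
#   [{'instance_name': 'rgw0', 'radosgw_address': '192.168.1.20', 'radosgw_frontend_port': 8080},
#    {'instance_name': 'rgw1', 'radosgw_address': '192.168.1.20', 'radosgw_frontend_port': 8081}]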
dummy:
fetch_directory: fetch/
-
author: Andrew Schoen
description: Fetches ceph keys from monitors.
license: Apache
- min_ansible_version: 2.7
+ min_ansible_version: '2.7'
platforms:
- name: EL
versions:
- - 7
+ - 'all'
galaxy_tags:
- system
dependencies: []
---
-- name: lookup keys in /etc/ceph
- shell: ls -1 /etc/ceph/*.keyring
+- name: Lookup keys in /etc/ceph
+ ansible.builtin.shell: ls -1 /etc/ceph/*.keyring
changed_when: false
register: ceph_keys
-- name: create a local fetch directory if it does not exist
- file:
+- name: Create a local fetch directory if it does not exist
+ ansible.builtin.file:
path: "{{ fetch_directory }}"
state: directory
+ mode: "0755"
delegate_to: localhost
become: false
-- name: "copy ceph user and bootstrap keys to the ansible server in {{ fetch_directory }}/{{ fsid }}/"
- fetch:
+- name: Copy ceph user and bootstrap keys to the ansible server
+ ansible.builtin.fetch:
src: "{{ item }}"
dest: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
- flat: yes
+ flat: true
fail_on_missing: false
run_once: true
with_items:
author: Boris Ranto
description: Configures Grafana for Ceph Dashboard
license: Apache
- min_ansible_version: 2.4
+ min_ansible_version: "2.4"
platforms:
- name: EL
versions:
- - 7
+ - 'all'
galaxy_tags:
- system
dependencies: []
---
-- name: install ceph-grafana-dashboards package on RedHat or SUSE
- package:
+- name: Install ceph-grafana-dashboards package on RedHat or SUSE
+ ansible.builtin.package:
name: ceph-grafana-dashboards
- state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
register: result
until: result is succeeded
when:
- ansible_facts['os_family'] in ['RedHat', 'Suse']
tags: package-install
-- name: make sure grafana is down
- service:
+- name: Make sure grafana is down
+ ansible.builtin.service:
name: grafana-server
state: stopped
-- name: wait for grafana to be stopped
- wait_for:
+- name: Wait for grafana to be stopped
+ ansible.builtin.wait_for:
host: '{{ grafana_server_addr if ip_version == "ipv4" else grafana_server_addr[1:-1] }}'
port: '{{ grafana_port }}'
state: stopped
-- name: make sure grafana configuration directories exist
- file:
+- name: Make sure grafana configuration directories exist
+ ansible.builtin.file:
path: "{{ item }}"
state: directory
- recurse: yes
+ recurse: true
owner: "{{ grafana_uid }}"
group: "{{ grafana_uid }}"
with_items:
- "/etc/grafana/provisioning/dashboards"
- "/etc/grafana/provisioning/notifiers"
-- name: download ceph grafana dashboards
- get_url:
+- name: Download ceph grafana dashboards
+ ansible.builtin.get_url:
url: "https://raw.githubusercontent.com/ceph/ceph/{{ grafana_dashboard_version }}/monitoring/ceph-mixin/dashboards_out/{{ item }}"
dest: "/etc/grafana/dashboards/ceph-dashboard/{{ item }}"
+ mode: "0644"
with_items: "{{ grafana_dashboard_files }}"
when:
- not containerized_deployment | bool
- not ansible_facts['os_family'] in ['RedHat', 'Suse']
-- name: write grafana.ini
+- name: Write grafana.ini
openstack.config_template.config_template:
src: grafana.ini.j2
dest: /etc/grafana/grafana.ini
owner: "{{ grafana_uid }}"
group: "{{ grafana_uid }}"
- mode: 0640
+ mode: "0640"
config_type: ini
config_overrides: "{{ grafana_conf_overrides }}"
-- name: write datasources provisioning config file
- template:
+- name: Write datasources provisioning config file
+ ansible.builtin.template:
src: datasources-ceph-dashboard.yml.j2
dest: /etc/grafana/provisioning/datasources/ceph-dashboard.yml
owner: "{{ grafana_uid }}"
group: "{{ grafana_uid }}"
- mode: 0640
+ mode: "0640"
- name: Write dashboards provisioning config file
- template:
+ ansible.builtin.template:
src: dashboards-ceph-dashboard.yml.j2
dest: /etc/grafana/provisioning/dashboards/ceph-dashboard.yml
owner: "{{ grafana_uid }}"
group: "{{ grafana_uid }}"
- mode: 0640
+ mode: "0640"
when: not containerized_deployment | bool
-- name: copy grafana SSL certificate file
- copy:
+- name: Copy grafana SSL certificate file
+ ansible.builtin.copy:
src: "{{ grafana_crt }}"
dest: "/etc/grafana/ceph-dashboard.crt"
owner: "{{ grafana_uid }}"
group: "{{ grafana_uid }}"
- mode: 0640
+ mode: "0640"
remote_src: "{{ dashboard_tls_external | bool }}"
when:
- grafana_crt | length > 0
- dashboard_protocol == "https"
-- name: copy grafana SSL certificate key
- copy:
+- name: Copy grafana SSL certificate key
+ ansible.builtin.copy:
src: "{{ grafana_key }}"
dest: "/etc/grafana/ceph-dashboard.key"
owner: "{{ grafana_uid }}"
group: "{{ grafana_uid }}"
- mode: 0440
+ mode: "0440"
remote_src: "{{ dashboard_tls_external | bool }}"
when:
- grafana_key | length > 0
- dashboard_protocol == "https"
-- name: generate a Self Signed OpenSSL certificate for dashboard
- shell: |
+- name: Generate a Self Signed OpenSSL certificate for dashboard
+ ansible.builtin.shell: |
test -f /etc/grafana/ceph-dashboard.key -a -f /etc/grafana/ceph-dashboard.crt || \
(openssl req -new -nodes -x509 -subj '/O=IT/CN=ceph-grafana' -days 3650 -keyout /etc/grafana/ceph-dashboard.key -out /etc/grafana/ceph-dashboard.crt -extensions v3_ca && \
chown {{ grafana_uid }}:{{ grafana_uid }} /etc/grafana/ceph-dashboard.key /etc/grafana/ceph-dashboard.crt)
+ changed_when: false
when:
- dashboard_protocol == "https"
- grafana_key | length == 0 or grafana_crt | length == 0
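# The `test -f ... ||` guard keeps the shell task idempotent: the key and certificate
# are only generated when either of the two files is missing. A command such as
#   openssl x509 -in /etc/grafana/ceph-dashboard.crt -noout -subject -enddate
# can be used to inspect the resulting self-signed certificate (illustrative only).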
-- name: enable and start grafana
- service:
+- name: Enable and start grafana
+ ansible.builtin.service:
name: grafana-server
state: restarted
enabled: true
-- name: wait for grafana to start
- wait_for:
+- name: Wait for grafana to start
+ ansible.builtin.wait_for:
host: '{{ grafana_server_addr if ip_version == "ipv4" else grafana_server_addr[1:-1] }}'
port: '{{ grafana_port }}'
---
-- name: include setup_container.yml
- include_tasks: setup_container.yml
+- name: Include setup_container.yml
+ ansible.builtin.include_tasks: setup_container.yml
-- name: include configure_grafana.yml
- include_tasks: configure_grafana.yml
+- name: Include configure_grafana.yml
+ ansible.builtin.include_tasks: configure_grafana.yml
---
-- name: create /etc/grafana and /var/lib/grafana
- file:
+- name: Create /etc/grafana and /var/lib/grafana
+ ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: "{{ grafana_uid }}"
- /etc/grafana
- /var/lib/grafana
-- name: include_tasks systemd.yml
- include_tasks: systemd.yml
+- name: Include_tasks systemd.yml
+ ansible.builtin.include_tasks: systemd.yml
-- name: start the grafana-server service
- systemd:
+- name: Start the grafana-server service
+ ansible.builtin.systemd:
name: grafana-server
state: started
- enabled: yes
- daemon_reload: yes
+ enabled: true
+ daemon_reload: true
failed_when: false
---
-- name: ship systemd service
- template:
+- name: Ship systemd service
+ ansible.builtin.template:
src: grafana-server.service.j2
dest: "/etc/systemd/system/grafana-server.service"
owner: root
group: root
- mode: 0644
+ mode: "0644"
---
-- name: handlers
+- name: Handlers
when:
- - not rolling_update | bool
- - not docker2podman | default(False) | bool
+ - not rolling_update | bool
+ - not docker2podman | default(False) | bool
block:
- - name: make tempdir for scripts
- tempfile:
+ - name: Make tempdir for scripts
+ ansible.builtin.tempfile:
state: directory
prefix: ceph_ansible
listen:
- - "restart ceph mons"
- - "restart ceph osds"
- - "restart ceph mdss"
- - "restart ceph rgws"
- - "restart ceph nfss"
- - "restart ceph rbdmirrors"
- - "restart ceph mgrs"
+ - "Restart ceph mons"
+ - "Restart ceph osds"
+ - "Restart ceph mdss"
+ - "Restart ceph rgws"
+ - "Restart ceph nfss"
+ - "Restart ceph rbdmirrors"
+ - "Restart ceph mgrs"
register: tmpdirpath
when: tmpdirpath is not defined or tmpdirpath.path is not defined or tmpdirpath.state=="absent"
- - name: mons handler
- include_tasks: handler_mons.yml
+ - name: Mons handler
+ ansible.builtin.include_tasks: handler_mons.yml
when: mon_group_name in group_names
- listen: "restart ceph mons"
+ listen: "Restart ceph mons"
- - name: osds handler
- include_tasks: handler_osds.yml
+ - name: Osds handler
+ ansible.builtin.include_tasks: handler_osds.yml
when: osd_group_name in group_names
- listen: "restart ceph osds"
+ listen: "Restart ceph osds"
- - name: mdss handler
- include_tasks: handler_mdss.yml
+ - name: Mdss handler
+ ansible.builtin.include_tasks: handler_mdss.yml
when: mds_group_name in group_names
- listen: "restart ceph mdss"
+ listen: "Restart ceph mdss"
- - name: rgws handler
- include_tasks: handler_rgws.yml
+ - name: Rgws handler
+ ansible.builtin.include_tasks: handler_rgws.yml
when: rgw_group_name in group_names
- listen: "restart ceph rgws"
+ listen: "Restart ceph rgws"
- - name: nfss handler
- include_tasks: handler_nfss.yml
+ - name: Nfss handler
+ ansible.builtin.include_tasks: handler_nfss.yml
when: nfs_group_name in group_names
- listen: "restart ceph nfss"
+ listen: "Restart ceph nfss"
- - name: rbdmirrors handler
- include_tasks: handler_rbdmirrors.yml
+ - name: Rbdmirrors handler
+ ansible.builtin.include_tasks: handler_rbdmirrors.yml
when: rbdmirror_group_name in group_names
- listen: "restart ceph rbdmirrors"
+ listen: "Restart ceph rbdmirrors"
- - name: mgrs handler
- include_tasks: handler_mgrs.yml
+ - name: Mgrs handler
+ ansible.builtin.include_tasks: handler_mgrs.yml
when: mgr_group_name in group_names
- listen: "restart ceph mgrs"
+ listen: "Restart ceph mgrs"
- - name: tcmu-runner handler
- include_tasks: handler_tcmu_runner.yml
+ - name: Tcmu-runner handler
+ ansible.builtin.include_tasks: handler_tcmu_runner.yml
when: iscsi_gw_group_name in group_names
- listen: "restart ceph tcmu-runner"
+ listen: "Restart ceph tcmu-runner"
- - name: rbd-target-api and rbd-target-gw handler
- include_tasks: handler_rbd_target_api_gw.yml
+ - name: Rbd-target-api and rbd-target-gw handler
+ ansible.builtin.include_tasks: handler_rbd_target_api_gw.yml
when: iscsi_gw_group_name in group_names
- listen: "restart ceph rbd-target-api-gw"
+ listen: "Restart ceph rbd-target-api-gw"
- - name: ceph crash handler
- include_tasks: handler_crash.yml
- listen: "restart ceph crash"
+ - name: Ceph crash handler
+ ansible.builtin.include_tasks: handler_crash.yml
+ listen: "Restart ceph crash"
when:
- inventory_hostname in groups.get(mon_group_name, [])
or inventory_hostname in groups.get(mgr_group_name, [])
or inventory_hostname in groups.get(rgw_group_name, [])
or inventory_hostname in groups.get(rbdmirror_group_name, [])
- - name: remove tempdir for scripts
- file:
+ - name: Remove tempdir for scripts
+ ansible.builtin.file:
path: "{{ tmpdirpath.path }}"
state: absent
listen:
- - "restart ceph mons"
- - "restart ceph osds"
- - "restart ceph mdss"
- - "restart ceph rgws"
- - "restart ceph nfss"
- - "restart ceph rbdmirrors"
- - "restart ceph mgrs"
+ - "Restart ceph mons"
+ - "Restart ceph osds"
+ - "Restart ceph mdss"
+ - "Restart ceph rgws"
+ - "Restart ceph nfss"
+ - "Restart ceph rbdmirrors"
+ - "Restart ceph mgrs"
register: tmpdirpath
when: tmpdirpath.path is defined
author: Sébastien Han
description: Contains handlers for Ceph services
license: Apache
- min_ansible_version: 2.7
+ min_ansible_version: '2.7'
platforms:
- name: EL
versions:
- - 7
+ - 'all'
galaxy_tags:
- system
dependencies: []
---
-- name: include check_running_containers.yml
- include_tasks: check_running_containers.yml
+- name: Include check_running_containers.yml
+ ansible.builtin.include_tasks: check_running_containers.yml
when: containerized_deployment | bool
-- name: include check_socket_non_container.yml
- include_tasks: check_socket_non_container.yml
+- name: Include check_socket_non_container.yml
+ ansible.builtin.include_tasks: check_socket_non_container.yml
when: not containerized_deployment | bool
---
-- name: check for a mon container
- command: "{{ container_binary }} ps -q --filter='name=ceph-mon-{{ ansible_facts['hostname'] }}'"
+- name: Check for a mon container
+ ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-mon-{{ ansible_facts['hostname'] }}'"
register: ceph_mon_container_stat
changed_when: false
failed_when: false
- check_mode: no
+ check_mode: false
when: inventory_hostname in groups.get(mon_group_name, [])
-- name: check for an osd container
- command: "{{ container_binary }} ps -q --filter='name=ceph-osd'"
+- name: Check for an osd container
+ ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-osd'"
register: ceph_osd_container_stat
changed_when: false
failed_when: false
- check_mode: no
+ check_mode: false
when: inventory_hostname in groups.get(osd_group_name, [])
-- name: check for a mds container
- command: "{{ container_binary }} ps -q --filter='name=ceph-mds-{{ ansible_facts['hostname'] }}'"
+- name: Check for a mds container
+ ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-mds-{{ ansible_facts['hostname'] }}'"
register: ceph_mds_container_stat
changed_when: false
failed_when: false
- check_mode: no
+ check_mode: false
when: inventory_hostname in groups.get(mds_group_name, [])
-- name: check for a rgw container
- command: "{{ container_binary }} ps -q --filter='name=ceph-rgw-{{ ansible_facts['hostname'] }}'"
+- name: Check for a rgw container
+ ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-rgw-{{ ansible_facts['hostname'] }}'"
register: ceph_rgw_container_stat
changed_when: false
failed_when: false
- check_mode: no
+ check_mode: false
when: inventory_hostname in groups.get(rgw_group_name, [])
-- name: check for a mgr container
- command: "{{ container_binary }} ps -q --filter='name=ceph-mgr-{{ ansible_facts['hostname'] }}'"
+- name: Check for a mgr container
+ ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-mgr-{{ ansible_facts['hostname'] }}'"
register: ceph_mgr_container_stat
changed_when: false
failed_when: false
- check_mode: no
+ check_mode: false
when: inventory_hostname in groups.get(mgr_group_name, [])
-- name: check for a rbd mirror container
- command: "{{ container_binary }} ps -q --filter='name=ceph-rbd-mirror-{{ ansible_facts['hostname'] }}'"
+- name: Check for a rbd mirror container
+ ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-rbd-mirror-{{ ansible_facts['hostname'] }}'"
register: ceph_rbd_mirror_container_stat
changed_when: false
failed_when: false
- check_mode: no
+ check_mode: false
when: inventory_hostname in groups.get(rbdmirror_group_name, [])
-- name: check for a nfs container
- command: "{{ container_binary }} ps -q --filter='name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}'"
+- name: Check for a nfs container
+ ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}'"
register: ceph_nfs_container_stat
changed_when: false
failed_when: false
- check_mode: no
+ check_mode: false
when: inventory_hostname in groups.get(nfs_group_name, [])
-- name: check for a tcmu-runner container
- command: "{{ container_binary }} ps -q --filter='name=tcmu-runner'"
+- name: Check for a tcmu-runner container
+ ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=tcmu-runner'"
register: ceph_tcmu_runner_stat
changed_when: false
failed_when: false
- check_mode: no
+ check_mode: false
when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
-- name: check for a rbd-target-api container
- command: "{{ container_binary }} ps -q --filter='name=rbd-target-api'"
+- name: Check for a rbd-target-api container
+ ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=rbd-target-api'"
register: ceph_rbd_target_api_stat
changed_when: false
failed_when: false
- check_mode: no
+ check_mode: false
when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
-- name: check for a rbd-target-gw container
- command: "{{ container_binary }} ps -q --filter='name=rbd-target-gw'"
+- name: Check for a rbd-target-gw container
+ ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=rbd-target-gw'"
register: ceph_rbd_target_gw_stat
changed_when: false
failed_when: false
- check_mode: no
+ check_mode: false
when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
-- name: check for a ceph-crash container
- command: "{{ container_binary }} ps -q --filter='name=ceph-crash-{{ ansible_facts['hostname'] }}'"
+- name: Check for a ceph-crash container
+ ansible.builtin.command: "{{ container_binary }} ps -q --filter='name=ceph-crash-{{ ansible_facts['hostname'] }}'"
register: ceph_crash_container_stat
changed_when: false
failed_when: false
- check_mode: no
+ check_mode: false
when:
- inventory_hostname in groups.get(mon_group_name, [])
or inventory_hostname in groups.get(mgr_group_name, [])
or inventory_hostname in groups.get(osd_group_name, [])
or inventory_hostname in groups.get(mds_group_name, [])
or inventory_hostname in groups.get(rgw_group_name, [])
- or inventory_hostname in groups.get(rbdmirror_group_name, [])
\ No newline at end of file
+ or inventory_hostname in groups.get(rbdmirror_group_name, [])
---
-- name: find ceph mon socket
- find:
+- name: Find ceph mon socket
+ ansible.builtin.find:
paths: ["{{ rbd_client_admin_socket_path }}"]
- recurse: yes
+ recurse: true
file_type: any
patterns: "{{ cluster }}-mon*.asok"
- use_regex: no
+ use_regex: false
register: mon_socket_stat
when: inventory_hostname in groups.get(mon_group_name, [])
-- name: check if the ceph mon socket is in-use
- command: grep -q {{ item.path }} /proc/net/unix
+- name: Check if the ceph mon socket is in-use
+ ansible.builtin.command: grep -q {{ item.path }} /proc/net/unix
changed_when: false
failed_when: false
- check_mode: no
+ check_mode: false
register: mon_socket
with_items: "{{ mon_socket_stat.files }}"
when:
- inventory_hostname in groups.get(mon_group_name, [])
- mon_socket_stat.files | length > 0
-- name: remove ceph mon socket if exists and not used by a process
- file:
+- name: Remove ceph mon socket if exists and not used by a process
+ ansible.builtin.file:
name: "{{ item.0.path }}"
state: absent
with_together:
- mon_socket_stat.files | length > 0
- item.1.rc == 1
-- name: find ceph osd socket
- find:
+- name: Find ceph osd socket
+ ansible.builtin.find:
paths: ["{{ rbd_client_admin_socket_path }}"]
- recurse: yes
+ recurse: true
file_type: any
patterns: "{{ cluster }}-osd.*.asok"
- use_regex: no
+ use_regex: false
register: osd_socket_stat
when: inventory_hostname in groups.get(osd_group_name, [])
-- name: check if the ceph osd socket is in-use
- command: grep -q {{ item.path }} /proc/net/unix
+- name: Check if the ceph osd socket is in-use
+ ansible.builtin.command: grep -q {{ item.path }} /proc/net/unix
changed_when: false
failed_when: false
- check_mode: no
+ check_mode: false
register: osd_socket
with_items: "{{ osd_socket_stat.files }}"
when:
- inventory_hostname in groups.get(osd_group_name, [])
- osd_socket_stat.files | length > 0
-- name: remove ceph osd socket if exists and not used by a process
- file:
+- name: Remove ceph osd socket if exists and not used by a process
+ ansible.builtin.file:
name: "{{ item.0.path }}"
state: absent
with_together:
- osd_socket_stat.files | length > 0
- item.1.rc == 1
-- name: find ceph osd socket
- find:
+- name: Find ceph osd socket
+ ansible.builtin.find:
paths: ["{{ rbd_client_admin_socket_path }}"]
- recurse: yes
+ recurse: true
file_type: any
patterns: "{{ cluster }}-mds*.asok"
- use_regex: no
+ use_regex: false
register: mds_socket_stat
when: inventory_hostname in groups.get(mds_group_name, [])
-- name: check if the ceph mds socket is in-use
- command: grep -q {{ item.path }} /proc/net/unix
+- name: Check if the ceph mds socket is in-use
+ ansible.builtin.command: grep -q {{ item.path }} /proc/net/unix
changed_when: false
failed_when: false
- check_mode: no
+ check_mode: false
register: mds_socket
with_items: "{{ mds_socket_stat.files }}"
when:
- inventory_hostname in groups.get(mds_group_name, [])
- mds_socket_stat.files | length > 0
-- name: remove ceph mds socket if exists and not used by a process
- file:
+- name: Remove ceph mds socket if exists and not used by a process
+ ansible.builtin.file:
name: "{{ item.0.path }}"
state: absent
with_together:
- mds_socket_stat.files | length > 0
- item.1.rc == 1
-- name: find ceph rgw socket
- find:
+- name: Find ceph rgw socket
+ ansible.builtin.find:
paths: ["{{ rbd_client_admin_socket_path }}"]
- recurse: yes
+ recurse: true
file_type: any
patterns: "{{ cluster }}-client.rgw*.asok"
- use_regex: no
+ use_regex: false
register: rgw_socket_stat
when: inventory_hostname in groups.get(rgw_group_name, [])
-- name: check if the ceph rgw socket is in-use
- command: grep -q {{ item.path }} /proc/net/unix
+- name: Check if the ceph rgw socket is in-use
+ ansible.builtin.command: grep -q {{ item.path }} /proc/net/unix
changed_when: false
failed_when: false
- check_mode: no
+ check_mode: false
register: rgw_socket
with_items: "{{ rgw_socket_stat.files }}"
when:
- inventory_hostname in groups.get(rgw_group_name, [])
- rgw_socket_stat.files | length > 0
-- name: remove ceph rgw socket if exists and not used by a process
- file:
+- name: Remove ceph rgw socket if exists and not used by a process
+ ansible.builtin.file:
name: "{{ item.0.path }}"
state: absent
with_together:
- rgw_socket_stat.files | length > 0
- item.1.rc == 1
-- name: find ceph mgr socket
- find:
+- name: Find ceph mgr socket
+ ansible.builtin.find:
paths: ["{{ rbd_client_admin_socket_path }}"]
- recurse: yes
+ recurse: true
file_type: any
patterns: "{{ cluster }}-mgr*.asok"
- use_regex: no
+ use_regex: false
register: mgr_socket_stat
when: inventory_hostname in groups.get(mgr_group_name, [])
-- name: check if the ceph mgr socket is in-use
- command: grep -q {{ item.path }} /proc/net/unix
+- name: Check if the ceph mgr socket is in-use
+ ansible.builtin.command: grep -q {{ item.path }} /proc/net/unix
changed_when: false
failed_when: false
- check_mode: no
+ check_mode: false
register: mgr_socket
with_items: "{{ mgr_socket_stat.files }}"
when:
- inventory_hostname in groups.get(mgr_group_name, [])
- mgr_socket_stat.files | length > 0
-- name: remove ceph mgr socket if exists and not used by a process
- file:
+- name: Remove ceph mgr socket if exists and not used by a process
+ ansible.builtin.file:
name: "{{ item.0.path }}"
state: absent
with_together:
- mgr_socket_stat.files | length > 0
- item.1.rc == 1
-- name: find ceph rbd mirror socket
- find:
+- name: Find ceph rbd mirror socket
+ ansible.builtin.find:
paths: ["{{ rbd_client_admin_socket_path }}"]
- recurse: yes
+ recurse: true
file_type: any
patterns: "{{ cluster }}-client.rbd-mirror*.asok"
- use_regex: no
+ use_regex: false
register: rbd_mirror_socket_stat
when: inventory_hostname in groups.get(rbdmirror_group_name, [])
-- name: check if the ceph rbd mirror socket is in-use
- command: grep -q {{ item.path }} /proc/net/unix
+- name: Check if the ceph rbd mirror socket is in-use
+ ansible.builtin.command: grep -q {{ item.path }} /proc/net/unix
changed_when: false
failed_when: false
- check_mode: no
+ check_mode: false
register: rbd_mirror_socket
with_items: "{{ rbd_mirror_socket_stat.files }}"
when:
- inventory_hostname in groups.get(rbdmirror_group_name, [])
- rbd_mirror_socket_stat.files | length > 0
-- name: remove ceph rbd mirror socket if exists and not used by a process
- file:
+- name: Remove ceph rbd mirror socket if exists and not used by a process
+ ansible.builtin.file:
name: "{{ item.0.path }}"
state: absent
with_together:
- rbd_mirror_socket_stat.files | length > 0
- item.1.rc == 1
-- name: check for a nfs ganesha pid
- command: "pgrep ganesha.nfsd"
+- name: Check for a nfs ganesha pid
+ ansible.builtin.command: "pgrep ganesha.nfsd"
register: nfs_process
changed_when: false
failed_when: false
- check_mode: no
+ check_mode: false
when: inventory_hostname in groups.get(nfs_group_name, [])
-- name: check for a tcmu-runner
- command: "pgrep tcmu-runner"
+- name: Check for a tcmu-runner
+ ansible.builtin.command: "pgrep tcmu-runner"
register: ceph_tcmu_runner_stat
changed_when: false
failed_when: false
- check_mode: no
+ check_mode: false
when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
-- name: check for a rbd-target-api
- command: "pgrep rbd-target-api"
+- name: Check for a rbd-target-api
+ ansible.builtin.command: "pgrep rbd-target-api"
register: ceph_rbd_target_api_stat
changed_when: false
failed_when: false
- check_mode: no
+ check_mode: false
when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
-- name: check for a rbd-target-gw
- command: "pgrep name=rbd-target-gw"
+- name: Check for a rbd-target-gw
+ ansible.builtin.command: "pgrep name=rbd-target-gw"
register: ceph_rbd_target_gw_stat
changed_when: false
failed_when: false
- check_mode: no
+ check_mode: false
when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
-- name: check for a ceph-crash process
- command: pgrep ceph-crash
+- name: Check for a ceph-crash process
+ ansible.builtin.command: pgrep ceph-crash
changed_when: false
failed_when: false
- check_mode: no
+ check_mode: false
register: crash_process
when:
- inventory_hostname in groups.get(mon_group_name, [])
or inventory_hostname in groups.get(osd_group_name, [])
or inventory_hostname in groups.get(mds_group_name, [])
or inventory_hostname in groups.get(rgw_group_name, [])
- or inventory_hostname in groups.get(rbdmirror_group_name, [])
\ No newline at end of file
+ or inventory_hostname in groups.get(rbdmirror_group_name, [])
---
-- name: set _crash_handler_called before restart
- set_fact:
- _crash_handler_called: True
+- name: Set _crash_handler_called before restart
+ ansible.builtin.set_fact:
+ _crash_handler_called: true
-- name: restart the ceph-crash service
- systemd:
+- name: Restart the ceph-crash service # noqa: ignore-errors
+ ansible.builtin.systemd:
name: ceph-crash@{{ ansible_facts['hostname'] }}
state: restarted
- enabled: yes
- masked: no
- daemon_reload: yes
+ enabled: true
+ masked: false
+ daemon_reload: true
ignore_errors: true
when: hostvars[inventory_hostname]['_crash_handler_called'] | default(False) | bool
-- name: set _crash_handler_called after restart
- set_fact:
- _crash_handler_called: False
+- name: Set _crash_handler_called after restart
+ ansible.builtin.set_fact:
+ _crash_handler_called: false
---
-- name: set _mds_handler_called before restart
- set_fact:
- _mds_handler_called: True
+- name: Set _mds_handler_called before restart
+ ansible.builtin.set_fact:
+ _mds_handler_called: true
-- name: copy mds restart script
- template:
+- name: Copy mds restart script
+ ansible.builtin.template:
src: restart_mds_daemon.sh.j2
dest: "{{ tmpdirpath.path }}/restart_mds_daemon.sh"
owner: root
group: root
- mode: 0750
+ mode: "0750"
when: tmpdirpath.path is defined
-- name: restart ceph mds daemon(s)
- command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_mds_daemon.sh
+- name: Restart ceph mds daemon(s)
+ ansible.builtin.command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_mds_daemon.sh
when:
- hostvars[item]['handler_mds_status'] | default(False) | bool
- hostvars[item]['_mds_handler_called'] | default(False) | bool
- hostvars[item].tmpdirpath.path is defined
with_items: "{{ groups[mds_group_name] }}"
delegate_to: "{{ item }}"
- run_once: True
+ changed_when: false
+ run_once: true
-- name: set _mds_handler_called after restart
- set_fact:
- _mds_handler_called: False
+- name: Set _mds_handler_called after restart
+ ansible.builtin.set_fact:
+ _mds_handler_called: false
---
-- name: set _mgr_handler_called before restart
- set_fact:
- _mgr_handler_called: True
+- name: Set _mgr_handler_called before restart
+ ansible.builtin.set_fact:
+ _mgr_handler_called: true
-- name: copy mgr restart script
- template:
+- name: Copy mgr restart script
+ ansible.builtin.template:
src: restart_mgr_daemon.sh.j2
dest: "{{ tmpdirpath.path }}/restart_mgr_daemon.sh"
owner: root
group: root
- mode: 0750
+ mode: "0750"
when: tmpdirpath.path is defined
-- name: restart ceph mgr daemon(s)
- command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_mgr_daemon.sh
+- name: Restart ceph mgr daemon(s)
+ ansible.builtin.command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_mgr_daemon.sh
when:
- hostvars[item]['handler_mgr_status'] | default(False) | bool
- hostvars[item]['_mgr_handler_called'] | default(False) | bool
- hostvars[item].tmpdirpath.path is defined
with_items: "{{ groups[mgr_group_name] }}"
delegate_to: "{{ item }}"
- run_once: True
+ changed_when: false
+ run_once: true
-- name: set _mgr_handler_called after restart
- set_fact:
- _mgr_handler_called: False
+- name: Set _mgr_handler_called after restart
+ ansible.builtin.set_fact:
+ _mgr_handler_called: false
# We only want to restart on hosts that have called the handler.
# This var is set when the handler is called, and unset after the
# restart to ensure only the correct hosts are restarted.
-- name: set _mon_handler_called before restart
- set_fact:
- _mon_handler_called: True
+- name: Set _mon_handler_called before restart
+ ansible.builtin.set_fact:
+ _mon_handler_called: true
-- name: copy mon restart script
- template:
+- name: Copy mon restart script
+ ansible.builtin.template:
src: restart_mon_daemon.sh.j2
dest: "{{ tmpdirpath.path }}/restart_mon_daemon.sh"
owner: root
group: root
- mode: 0750
+ mode: "0750"
when: tmpdirpath.path is defined
-- name: restart ceph mon daemon(s)
- command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_mon_daemon.sh
+- name: Restart ceph mon daemon(s)
+ ansible.builtin.command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_mon_daemon.sh
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- hostvars[item]['handler_mon_status'] | default(False) | bool
- hostvars[item].tmpdirpath.path is defined
with_items: "{{ groups[mon_group_name] }}"
delegate_to: "{{ item }}"
- run_once: True
+ changed_when: false
+ run_once: true
-- name: set _mon_handler_called after restart
- set_fact:
- _mon_handler_called: False
+- name: Set _mon_handler_called after restart
+ ansible.builtin.set_fact:
+ _mon_handler_called: false
---
-- name: set _nfs_handler_called before restart
- set_fact:
- _nfs_handler_called: True
+- name: Set _nfs_handler_called before restart
+ ansible.builtin.set_fact:
+ _nfs_handler_called: true
-- name: copy nfs restart script
- template:
+- name: Copy nfs restart script
+ ansible.builtin.template:
src: restart_nfs_daemon.sh.j2
dest: "{{ tmpdirpath.path }}/restart_nfs_daemon.sh"
owner: root
group: root
- mode: 0750
+ mode: "0750"
when: tmpdirpath.path is defined
-- name: restart ceph nfs daemon(s)
- command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_nfs_daemon.sh
+- name: Restart ceph nfs daemon(s)
+ ansible.builtin.command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_nfs_daemon.sh
when:
- hostvars[item]['handler_nfs_status'] | default(False) | bool
- hostvars[item]['_nfs_handler_called'] | default(False) | bool
- hostvars[item].tmpdirpath.path is defined
with_items: "{{ groups[nfs_group_name] }}"
delegate_to: "{{ item }}"
- run_once: True
+ changed_when: false
+ run_once: true
-- name: set _nfs_handler_called after restart
- set_fact:
- _nfs_handler_called: False
+- name: Set _nfs_handler_called after restart
+ ansible.builtin.set_fact:
+ _nfs_handler_called: false
---
-- name: set_fact trigger_restart
- set_fact:
+- name: Set_fact trigger_restart
+ ansible.builtin.set_fact:
trigger_restart: true
loop: "{{ groups[osd_group_name] }}"
when: hostvars[item]['handler_osd_status'] | default(False) | bool
run_once: true
-- name: osd handler
+- name: Osd handler
when: trigger_restart | default(False) | bool
block:
- - name: set _osd_handler_called before restart
- set_fact:
- _osd_handler_called: True
+ - name: Set _osd_handler_called before restart
+ ansible.builtin.set_fact:
+ _osd_handler_called: true
- - name: unset noup flag
+ - name: Unset noup flag
ceph_osd_flag:
name: noup
cluster: "{{ cluster }}"
# This does not need to run during a rolling update as the playbook will
# restart all OSDs using the tasks "start ceph osd" or
# "restart containerized ceph osd"
- - name: copy osd restart script
- template:
+ - name: Copy osd restart script
+ ansible.builtin.template:
src: restart_osd_daemon.sh.j2
dest: "{{ tmpdirpath.path }}/restart_osd_daemon.sh"
owner: root
group: root
- mode: 0750
+ mode: "0750"
when: tmpdirpath.path is defined
- - name: get pool list
- command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json"
+ - name: Get pool list
+ ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} osd pool ls detail -f json"
register: pool_list
delegate_to: "{{ groups.get(mon_group_name, [])[0] }}"
run_once: true
changed_when: false
check_mode: false
- - name: get balancer module status
- command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json"
+ - name: Get balancer module status
+ ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer status -f json"
register: balancer_status
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
check_mode: false
- - name: set_fact pools_pgautoscaler_mode
- set_fact:
+ - name: Set_fact pools_pgautoscaler_mode
+ ansible.builtin.set_fact:
pools_pgautoscaler_mode: "{{ pools_pgautoscaler_mode | default([]) | union([{'name': item.pool_name, 'mode': item.pg_autoscale_mode}]) }}"
run_once: true
with_items: "{{ pool_list.stdout | default('{}') | from_json }}"
- - name: disable balancer
- command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off"
+ - name: Disable balancer
+ ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer off"
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
when: (balancer_status.stdout | from_json)['active'] | bool
- - name: disable pg autoscale on pools
+ - name: Disable pg autoscale on pools
ceph_pool:
name: "{{ item.name }}"
cluster: "{{ cluster }}"
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
- - name: restart ceph osds daemon(s)
- command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_osd_daemon.sh
+ - name: Restart ceph osds daemon(s)
+ ansible.builtin.command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_osd_daemon.sh
when:
- hostvars[item]['handler_osd_status'] | default(False) | bool
- handler_health_osd_check | bool
- hostvars[item].tmpdirpath.path is defined
with_items: "{{ groups[osd_group_name] | intersect(ansible_play_batch) }}"
delegate_to: "{{ item }}"
- run_once: True
+ changed_when: false
+ run_once: true
- - name: set _osd_handler_called after restart
- set_fact:
- _osd_handler_called: False
+ - name: Set _osd_handler_called after restart
+ ansible.builtin.set_fact:
+ _osd_handler_called: false
- - name: re-enable pg autoscale on pools
+ - name: Re-enable pg autoscale on pools
ceph_pool:
name: "{{ item.name }}"
cluster: "{{ cluster }}"
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
- - name: re-enable balancer
- command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on"
+ - name: Re-enable balancer
+ ansible.builtin.command: "{{ ceph_cmd }} --cluster {{ cluster }} balancer on"
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
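# For context: `ceph balancer status -f json` is expected to return a JSON document
# along the lines of
#   {"active": true, "mode": "upmap", "plans": [], ...}
# which is why the tasks above parse `balancer_status.stdout | from_json` and key off
# the 'active' field before toggling the balancer off and on around the OSD restarts.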
---
-- name: set _rbd_target_api_handler_called before restart
- set_fact:
- _rbd_target_api_handler_called: True
+- name: Set _rbd_target_api_handler_called before restart
+ ansible.builtin.set_fact:
+ _rbd_target_api_handler_called: true
-- name: restart rbd-target-api
- service:
+- name: Restart rbd-target-api
+ ansible.builtin.service:
name: rbd-target-api
state: restarted
when:
- ceph_rbd_target_api_stat.get('stdout_lines', [])|length != 0
with_items: "{{ groups[iscsi_gw_group_name] }}"
delegate_to: "{{ item }}"
- run_once: True
+ run_once: true
-- name: set _rbd_target_api_handler_called after restart
- set_fact:
- _rbd_target_api_handler_called: False
+- name: Set _rbd_target_api_handler_called after restart
+ ansible.builtin.set_fact:
+ _rbd_target_api_handler_called: false
-- name: set _rbd_target_gw_handler_called before restart
- set_fact:
- _rbd_target_gw_handler_called: True
+- name: Set _rbd_target_gw_handler_called before restart
+ ansible.builtin.set_fact:
+ _rbd_target_gw_handler_called: true
-- name: restart rbd-target-gw
- service:
+- name: Restart rbd-target-gw
+ ansible.builtin.service:
name: rbd-target-gw
state: restarted
when:
- ceph_rbd_target_gw_stat.get('stdout_lines', [])|length != 0
with_items: "{{ groups[iscsi_gw_group_name] }}"
delegate_to: "{{ item }}"
- run_once: True
+ run_once: true
-- name: set _rbd_target_gw_handler_called after restart
- set_fact:
- _rbd_target_gw_handler_called: False
+- name: Set _rbd_target_gw_handler_called after restart
+ ansible.builtin.set_fact:
+ _rbd_target_gw_handler_called: false
---
-- name: set _rbdmirror_handler_called before restart
- set_fact:
- _rbdmirror_handler_called: True
+- name: Set _rbdmirror_handler_called before restart
+ ansible.builtin.set_fact:
+ _rbdmirror_handler_called: true
-- name: copy rbd mirror restart script
- template:
+- name: Copy rbd mirror restart script
+ ansible.builtin.template:
src: restart_rbd_mirror_daemon.sh.j2
dest: "{{ tmpdirpath.path }}/restart_rbd_mirror_daemon.sh"
owner: root
group: root
- mode: 0750
+ mode: "0750"
when: tmpdirpath.path is defined
-- name: restart ceph rbd mirror daemon(s)
- command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_rbd_mirror_daemon.sh
+- name: Restart ceph rbd mirror daemon(s)
+ ansible.builtin.command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_rbd_mirror_daemon.sh
when:
- hostvars[item]['handler_rbd_mirror_status'] | default(False) | bool
- hostvars[item]['_rbdmirror_handler_called'] | default(False) | bool
- hostvars[item].tmpdirpath.path is defined
with_items: "{{ groups[rbdmirror_group_name] }}"
delegate_to: "{{ item }}"
- run_once: True
+ changed_when: false
+ run_once: true
-- name: set _rbdmirror_handler_called after restart
- set_fact:
- _rbdmirror_handler_called: False
+- name: Set _rbdmirror_handler_called after restart
+ ansible.builtin.set_fact:
+ _rbdmirror_handler_called: false
---
-- name: set _rgw_handler_called before restart
- set_fact:
- _rgw_handler_called: True
+- name: Set _rgw_handler_called before restart
+ ansible.builtin.set_fact:
+ _rgw_handler_called: true
-- name: copy rgw restart script
- template:
+- name: Copy rgw restart script
+ ansible.builtin.template:
src: restart_rgw_daemon.sh.j2
dest: "{{ tmpdirpath.path }}/restart_rgw_daemon.sh"
owner: root
group: root
- mode: 0750
+ mode: "0750"
when: tmpdirpath.path is defined
-- name: restart ceph rgw daemon(s)
- command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_rgw_daemon.sh
+- name: Restart ceph rgw daemon(s)
+ ansible.builtin.command: /usr/bin/env bash {{ hostvars[item]['tmpdirpath']['path'] }}/restart_rgw_daemon.sh
when:
- hostvars[item]['handler_rgw_status'] | default(False) | bool
- hostvars[item]['_rgw_handler_called'] | default(False) | bool
- hostvars[item].tmpdirpath.path is defined
with_items: "{{ groups[rgw_group_name] }}"
delegate_to: "{{ item }}"
- run_once: True
+ changed_when: false
+ run_once: true
-- name: set _rgw_handler_called after restart
- set_fact:
- _rgw_handler_called: False
+- name: Set _rgw_handler_called after restart
+ ansible.builtin.set_fact:
+ _rgw_handler_called: false
---
-- name: set _tcmu_runner_handler_called before restart
- set_fact:
- _tcmu_runner_handler_called: True
+- name: Set _tcmu_runner_handler_called before restart
+ ansible.builtin.set_fact:
+ _tcmu_runner_handler_called: true
-- name: restart tcmu-runner
- service:
+- name: Restart tcmu-runner
+ ansible.builtin.service:
name: tcmu-runner
state: restarted
when:
- ceph_tcmu_runner_stat.get('stdout_lines', [])|length != 0
with_items: "{{ groups[iscsi_gw_group_name] }}"
delegate_to: "{{ item }}"
- run_once: True
+ run_once: true
-- name: set _tcmu_runner_handler_called after restart
- set_fact:
- _tcmu_runner_handler_called: False
+- name: Set _tcmu_runner_handler_called after restart
+ ansible.builtin.set_fact:
+ _tcmu_runner_handler_called: false
---
-- name: include check_running_cluster.yml
- include: check_running_cluster.yml
+- name: Include check_running_cluster.yml
+ ansible.builtin.include_tasks: check_running_cluster.yml
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
-- name: set_fact handler_mon_status
- set_fact:
+- name: Set_fact handler_mon_status
+ ansible.builtin.set_fact:
handler_mon_status: "{{ 0 in (mon_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_mon_container_stat.get('rc') == 0 and ceph_mon_container_stat.get('stdout_lines', []) | length != 0) }}"
when: inventory_hostname in groups.get(mon_group_name, [])
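# The same pattern repeats for each daemon below: on non-containerized hosts the
# handler_*_status fact is true when at least one admin socket is in use (a grep
# rc of 0), while on containerized hosts it is true when the container ps check
# returned at least one line of output.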
-- name: set_fact handler_osd_status
- set_fact:
+- name: Set_fact handler_osd_status
+ ansible.builtin.set_fact:
handler_osd_status: "{{ 0 in (osd_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_osd_container_stat.get('rc') == 0 and ceph_osd_container_stat.get('stdout_lines', []) | length != 0) }}"
when: inventory_hostname in groups.get(osd_group_name, [])
-- name: set_fact handler_mds_status
- set_fact:
+- name: Set_fact handler_mds_status
+ ansible.builtin.set_fact:
handler_mds_status: "{{ 0 in (mds_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_mds_container_stat.get('rc') == 0 and ceph_mds_container_stat.get('stdout_lines', []) | length != 0) }}"
when: inventory_hostname in groups.get(mds_group_name, [])
-- name: set_fact handler_rgw_status
- set_fact:
+- name: Set_fact handler_rgw_status
+ ansible.builtin.set_fact:
handler_rgw_status: "{{ 0 in (rgw_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_rgw_container_stat.get('rc') == 0 and ceph_rgw_container_stat.get('stdout_lines', []) | length != 0) }}"
when: inventory_hostname in groups.get(rgw_group_name, [])
-- name: set_fact handler_nfs_status
- set_fact:
+- name: Set_fact handler_nfs_status
+ ansible.builtin.set_fact:
handler_nfs_status: "{{ (nfs_process.get('rc') == 0) if not containerized_deployment | bool else (ceph_nfs_container_stat.get('rc') == 0 and ceph_nfs_container_stat.get('stdout_lines', []) | length != 0) }}"
when: inventory_hostname in groups.get(nfs_group_name, [])
-- name: set_fact handler_rbd_status
- set_fact:
+- name: Set_fact handler_rbd_status
+ ansible.builtin.set_fact:
handler_rbd_mirror_status: "{{ 0 in (rbd_mirror_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_rbd_mirror_container_stat.get('rc') == 0 and ceph_rbd_mirror_container_stat.get('stdout_lines', []) | length != 0) }}"
when: inventory_hostname in groups.get(rbdmirror_group_name, [])
-- name: set_fact handler_mgr_status
- set_fact:
+- name: Set_fact handler_mgr_status
+ ansible.builtin.set_fact:
handler_mgr_status: "{{ 0 in (mgr_socket.results | map(attribute='rc') | list) if not containerized_deployment | bool else (ceph_mgr_container_stat.get('rc') == 0 and ceph_mgr_container_stat.get('stdout_lines', []) | length != 0) }}"
when: inventory_hostname in groups.get(mgr_group_name, [])
-- name: set_fact handler_crash_status
- set_fact:
+- name: Set_fact handler_crash_status
+ ansible.builtin.set_fact:
handler_crash_status: "{{ crash_process.get('rc') == 0 if not containerized_deployment | bool else (ceph_crash_container_stat.get('rc') == 0 and ceph_crash_container_stat.get('stdout_lines', []) | length != 0) }}"
when:
- inventory_hostname in groups.get(mon_group_name, [])
---
-- name: disable ntpd
+- name: Disable ntpd
failed_when: false
- service:
+ ansible.builtin.service:
name: '{{ ntp_service_name }}'
state: stopped
- enabled: no
+ enabled: false
-- name: disable chronyd
+- name: Disable chronyd
failed_when: false
- service:
+ ansible.builtin.service:
name: '{{ chrony_daemon_name }}'
- enabled: no
+ enabled: false
state: stopped
-- name: disable timesyncd
+- name: Disable timesyncd
failed_when: false
- service:
+ ansible.builtin.service:
name: timesyncd
- enabled: no
+ enabled: false
state: stopped
author: Guillaume Abrioux
description: Handles ceph infra requirements (ntp, firewall, ...)
license: Apache
- min_ansible_version: 2.7
+ min_ansible_version: '2.7'
platforms:
- name: EL
versions:
- - 7
+ - 'all'
galaxy_tags:
- system
dependencies: []
---
-- name: check firewalld installation on redhat or SUSE/openSUSE
- command: rpm -q firewalld # noqa [303]
+- name: Check firewalld installation on redhat or SUSE/openSUSE
+ ansible.builtin.command: rpm -q firewalld # noqa command-instead-of-module
register: firewalld_pkg_query
ignore_errors: true
- check_mode: no
+ check_mode: false
changed_when: false
tags: firewall
-- when: (firewalld_pkg_query.get('rc', 1) == 0
- or is_atomic | bool)
+- name: Configure firewalld
+ when: (firewalld_pkg_query.get('rc', 1) == 0
+ or is_atomic | bool)
tags: firewall
block:
- - name: install firewalld python binding
- package:
+ - name: Install firewalld python binding
+ ansible.builtin.package:
name: "python{{ ansible_facts['python']['version']['major'] }}-firewall"
tags: with_pkg
when: not is_atomic | bool
- - name: start firewalld
- service:
+ - name: Start firewalld
+ ansible.builtin.service:
name: firewalld
state: started
- enabled: yes
+ enabled: true
register: result
retries: 5
delay: 3
until: result is succeeded
- - name: open ceph networks on monitor
- firewalld:
+ - name: Open ceph networks on monitor
+ ansible.posix.firewalld:
zone: "{{ ceph_mon_firewall_zone }}"
source: "{{ item }}"
permanent: true
- mon_group_name is defined
- mon_group_name in group_names
- - name: open ceph networks on manager when collocated
- firewalld:
+ - name: Open ceph networks on manager when collocated
+ ansible.posix.firewalld:
zone: "{{ ceph_mgr_firewall_zone }}"
source: "{{ item }}"
permanent: true
- mon_group_name in group_names
- mgr_group_name | length == 0
- - name: open monitor and manager ports
- firewalld:
+ - name: Open monitor and manager ports
+ ansible.posix.firewalld:
service: "{{ item.service }}"
zone: "{{ item.zone }}"
permanent: true
- mon_group_name is defined
- mon_group_name in group_names
- - name: open ceph networks on manager when dedicated
- firewalld:
+ - name: Open ceph networks on manager when dedicated
+ ansible.posix.firewalld:
zone: "{{ ceph_mgr_firewall_zone }}"
source: "{{ item }}"
permanent: true
- mgr_group_name in group_names
- mgr_group_name | length > 0
- - name: open manager ports
- firewalld:
+ - name: Open manager ports
+ ansible.posix.firewalld:
service: ceph
zone: "{{ ceph_mgr_firewall_zone }}"
permanent: true
- mgr_group_name is defined
- mgr_group_name in group_names
- - name: open ceph networks on osd
- firewalld:
+ - name: Open ceph networks on osd
+ ansible.posix.firewalld:
zone: "{{ ceph_osd_firewall_zone }}"
source: "{{ item }}"
permanent: true
- osd_group_name is defined
- osd_group_name in group_names
- - name: open osd ports
- firewalld:
+ - name: Open osd ports
+ ansible.posix.firewalld:
service: ceph
zone: "{{ ceph_osd_firewall_zone }}"
permanent: true
- osd_group_name is defined
- osd_group_name in group_names
- - name: open ceph networks on rgw
- firewalld:
+ - name: Open ceph networks on rgw
+ ansible.posix.firewalld:
zone: "{{ ceph_rgw_firewall_zone }}"
source: "{{ item }}"
permanent: true
- rgw_group_name is defined
- rgw_group_name in group_names
- - name: open rgw ports
- firewalld:
+ - name: Open rgw ports
+ ansible.posix.firewalld:
port: "{{ item.radosgw_frontend_port }}/tcp"
zone: "{{ ceph_rgw_firewall_zone }}"
permanent: true
- rgw_group_name is defined
- rgw_group_name in group_names
- - name: open ceph networks on mds
- firewalld:
+ - name: Open ceph networks on mds
+ ansible.posix.firewalld:
zone: "{{ ceph_mds_firewall_zone }}"
source: "{{ item }}"
permanent: true
- mds_group_name is defined
- mds_group_name in group_names
- - name: open mds ports
- firewalld:
+ - name: Open mds ports
+ ansible.posix.firewalld:
service: ceph
zone: "{{ ceph_mds_firewall_zone }}"
permanent: true
- mds_group_name is defined
- mds_group_name in group_names
- - name: open ceph networks on nfs
- firewalld:
+ - name: Open ceph networks on nfs
+ ansible.posix.firewalld:
zone: "{{ ceph_nfs_firewall_zone }}"
source: "{{ item }}"
permanent: true
- nfs_group_name is defined
- nfs_group_name in group_names
- - name: open nfs ports
- firewalld:
+ - name: Open nfs ports
+ ansible.posix.firewalld:
service: nfs
zone: "{{ ceph_nfs_firewall_zone }}"
permanent: true
- nfs_group_name is defined
- nfs_group_name in group_names
- - name: open nfs ports (portmapper)
- firewalld:
+ - name: Open nfs ports (portmapper)
+ ansible.posix.firewalld:
port: "111/tcp"
zone: "{{ ceph_nfs_firewall_zone }}"
permanent: true
- nfs_group_name is defined
- nfs_group_name in group_names
- - name: open ceph networks on rbdmirror
- firewalld:
+ - name: Open ceph networks on rbdmirror
+ ansible.posix.firewalld:
zone: "{{ ceph_rbdmirror_firewall_zone }}"
source: "{{ item }}"
permanent: true
- rbdmirror_group_name is defined
- rbdmirror_group_name in group_names
- - name: open rbdmirror ports
- firewalld:
+ - name: Open rbdmirror ports
+ ansible.posix.firewalld:
service: ceph
zone: "{{ ceph_rbdmirror_firewall_zone }}"
permanent: true
- rbdmirror_group_name is defined
- rbdmirror_group_name in group_names
- - name: open ceph networks on iscsi
- firewalld:
+ - name: Open ceph networks on iscsi
+ ansible.posix.firewalld:
zone: "{{ ceph_iscsi_firewall_zone }}"
source: "{{ item }}"
permanent: true
- iscsi_gw_group_name is defined
- iscsi_gw_group_name in group_names
- - name: open iscsi target ports
- firewalld:
+ - name: Open iscsi target ports
+ ansible.posix.firewalld:
port: "3260/tcp"
zone: "{{ ceph_iscsi_firewall_zone }}"
permanent: true
- iscsi_gw_group_name is defined
- iscsi_gw_group_name in group_names
- - name: open iscsi api ports
- firewalld:
+ - name: Open iscsi api ports
+ ansible.posix.firewalld:
port: "{{ api_port | default(5000) }}/tcp"
zone: "{{ ceph_iscsi_firewall_zone }}"
permanent: true
- iscsi_gw_group_name is defined
- iscsi_gw_group_name in group_names
- - name: open iscsi/prometheus port
- firewalld:
+ - name: Open iscsi/prometheus port
+ ansible.posix.firewalld:
port: "9287/tcp"
zone: "{{ ceph_iscsi_firewall_zone }}"
permanent: true
- iscsi_gw_group_name is defined
- iscsi_gw_group_name in group_names
- - name: open dashboard ports
- include_tasks: dashboard_firewall.yml
+ - name: Open dashboard ports
+ ansible.builtin.include_tasks: dashboard_firewall.yml
when: dashboard_enabled | bool
- - name: open ceph networks on haproxy
- firewalld:
+ - name: Open ceph networks on haproxy
+ ansible.posix.firewalld:
zone: "{{ ceph_rgwloadbalancer_firewall_zone }}"
source: "{{ item }}"
permanent: true
- rgwloadbalancer_group_name is defined
- rgwloadbalancer_group_name in group_names
- - name: open haproxy ports
- firewalld:
+ - name: Open haproxy ports
+ ansible.posix.firewalld:
port: "{{ haproxy_frontend_port | default(80) }}/tcp"
zone: "{{ ceph_rgwloadbalancer_firewall_zone }}"
permanent: true
- rgwloadbalancer_group_name is defined
- rgwloadbalancer_group_name in group_names
- - name: add rich rule for keepalived vrrp
- firewalld:
+ - name: Add rich rule for keepalived vrrp
+ ansible.posix.firewalld:
rich_rule: 'rule protocol value="vrrp" accept'
permanent: true
immediate: true
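# Where both flags are set on the firewalld tasks above, `permanent: true` records the
# rule in the permanent configuration and `immediate: true` also applies it to the
# running firewall, so no separate reload step is needed.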
---
-- name: open node_exporter port
- firewalld:
+- name: Open node_exporter port
+ ansible.posix.firewalld:
port: "{{ node_exporter_port }}/tcp"
zone: "{{ ceph_dashboard_firewall_zone }}"
permanent: true
immediate: true
state: enabled
-- block:
- - name: open dashboard port
- firewalld:
+- name: Open dashboard port in firewalld
+ when:
+ - mgr_group_name is defined
+ - (groups.get(mgr_group_name,[]) | length > 0 and mgr_group_name in group_names) or
+ (groups.get(mgr_group_name,[]) | length == 0 and mon_group_name in group_names)
+ block:
+ - name: Open dashboard port
+ ansible.posix.firewalld:
port: "{{ dashboard_port }}/tcp"
zone: "{{ ceph_dashboard_firewall_zone }}"
permanent: true
immediate: true
state: enabled
- - name: open mgr/prometheus port
- firewalld:
+ - name: Open mgr/prometheus port
+ ansible.posix.firewalld:
port: "9283/tcp"
zone: "{{ ceph_dashboard_firewall_zone }}"
permanent: true
immediate: true
state: enabled
- when:
- - mgr_group_name is defined
- - (groups.get(mgr_group_name,[]) | length > 0 and mgr_group_name in group_names) or
- (groups.get(mgr_group_name,[]) | length == 0 and mon_group_name in group_names)
-- block:
- - name: open grafana port
- firewalld:
+- name: Open monitoring stack tcp ports in firewalld
+ when:
+ - monitoring_group_name is defined
+ - monitoring_group_name in group_names
+ block:
+ - name: Open grafana port
+ ansible.posix.firewalld:
port: "{{ grafana_port }}/tcp"
zone: "{{ ceph_dashboard_firewall_zone }}"
permanent: true
immediate: true
state: enabled
- - name: open prometheus port
- firewalld:
+ - name: Open prometheus port
+ ansible.posix.firewalld:
port: "{{ prometheus_port }}/tcp"
zone: "{{ ceph_dashboard_firewall_zone }}"
permanent: true
immediate: true
state: enabled
- - name: open alertmanager port
- firewalld:
+ - name: Open alertmanager port
+ ansible.posix.firewalld:
port: "{{ alertmanager_port }}/tcp"
zone: "{{ ceph_dashboard_firewall_zone }}"
permanent: true
immediate: true
state: enabled
- - name: open alertmanager cluster port
- firewalld:
+ - name: Open alertmanager cluster port
+ ansible.posix.firewalld:
port: "{{ alertmanager_cluster_port }}/{{ item }}"
zone: "{{ ceph_dashboard_firewall_zone }}"
permanent: true
with_items:
- "tcp"
- "udp"
- when:
- - monitoring_group_name is defined
- - monitoring_group_name in group_names
---
-- name: update cache for Debian based OSs
- apt:
- update_cache: yes
+- name: Update cache for Debian based OSs
+ ansible.builtin.apt:
+ update_cache: true
when: ansible_facts['os_family'] == "Debian"
register: result
until: result is succeeded
tags: package-install
-- name: include_tasks configure_firewall.yml
- include_tasks: configure_firewall.yml
+- name: Include_tasks configure_firewall.yml
+ ansible.builtin.include_tasks: configure_firewall.yml
when:
- configure_firewall | bool
- ansible_facts['os_family'] in ['RedHat', 'Suse']
tags: configure_firewall
-- name: include_tasks setup_ntp.yml
- include_tasks: setup_ntp.yml
+- name: Include_tasks setup_ntp.yml
+ ansible.builtin.include_tasks: setup_ntp.yml
when: ntp_service_enabled | bool
tags: configure_ntp
-- name: ensure logrotate is installed
- package:
+- name: Ensure logrotate is installed
+ ansible.builtin.package:
name: logrotate
state: present
register: result
inventory_hostname in groups.get(rbdmirror_group_name, []) or
inventory_hostname in groups.get(iscsi_gw_group_name, [])
-- name: add logrotate configuration
- template:
+- name: Add logrotate configuration
+ ansible.builtin.template:
src: logrotate.conf.j2
dest: /etc/logrotate.d/ceph
mode: "0644"
inventory_hostname in groups.get(rgw_group_name, []) or
inventory_hostname in groups.get(mgr_group_name, []) or
inventory_hostname in groups.get(rbdmirror_group_name, []) or
- inventory_hostname in groups.get(iscsi_gw_group_name, [])
\ No newline at end of file
+ inventory_hostname in groups.get(iscsi_gw_group_name, [])
---
-- name: set ntp service and chrony daemon name for Debian family
- set_fact:
+- name: Set ntp service and chrony daemon name for Debian family
+ ansible.builtin.set_fact:
chrony_daemon_name: chrony
ntp_service_name: ntp
when: ansible_facts['os_family'] == 'Debian'
-- name: set ntp service and chrony daemon name for RedHat and Suse family
- set_fact:
+- name: Set ntp service and chrony daemon name for RedHat and Suse family
+ ansible.builtin.set_fact:
chrony_daemon_name: chronyd
ntp_service_name: ntpd
when: ansible_facts['os_family'] in ['RedHat', 'Suse']
# Installation of NTP daemons needs to be a separate task since installations
# can't happen on Atomic
-- name: install the ntp daemon
+- name: Install the ntp daemon
when: not is_atomic | bool
block:
- - name: install ntpd
- package:
+ - name: Install ntpd
+ ansible.builtin.package:
name: ntp
state: present
register: result
until: result is succeeded
when: ntp_daemon_type == "ntpd"
- - name: install chrony
- package:
+ - name: Install chrony
+ ansible.builtin.package:
name: chrony
state: present
register: result
until: result is succeeded
when: ntp_daemon_type == "chronyd"
-- name: enable the ntp daemon and disable the rest
+- name: Enable the ntp daemon and disable the rest
block:
- - name: enable timesyncing on timesyncd
- command: timedatectl set-ntp on
+ - name: Enable timesyncing on timesyncd
+ ansible.builtin.command: timedatectl set-ntp on
notify:
- - disable ntpd
- - disable chronyd
+ - Disable ntpd
+ - Disable chronyd
+ changed_when: false
when: ntp_daemon_type == "timesyncd"
- - name: disable time sync using timesyncd if we are not using it
- command: timedatectl set-ntp no
+ - name: Disable time sync using timesyncd if we are not using it
+ ansible.builtin.command: timedatectl set-ntp no
+ changed_when: false
when: ntp_daemon_type != "timesyncd"
- - name: enable ntpd
- service:
+ - name: Enable ntpd
+ ansible.builtin.service:
name: "{{ ntp_service_name }}"
- enabled: yes
+ enabled: true
state: started
notify:
- - disable chronyd
- - disable timesyncd
+ - Disable chronyd
+ - Disable timesyncd
when: ntp_daemon_type == "ntpd"
- - name: enable chronyd
- service:
+ - name: Enable chronyd
+ ansible.builtin.service:
name: "{{ chrony_daemon_name }}"
- enabled: yes
+ enabled: true
state: started
notify:
- - disable ntpd
- - disable timesyncd
+ - Disable ntpd
+ - Disable timesyncd
when: ntp_daemon_type == "chronyd"
# GENERAL #
###########
# Whether or not to generate a secure certificate for iSCSI gateway nodes
-generate_crt: False
+generate_crt: false
iscsi_conf_overrides: {}
iscsi_pool_name: rbd
-#iscsi_pool_size: 3
+# iscsi_pool_size: 3
-copy_admin_key: True
+copy_admin_key: true
##################
# RBD-TARGET-API #
author: Paul Cuzner
description: Installs Ceph iSCSI Gateways
license: Apache
- min_ansible_version: 2.7
+ min_ansible_version: '2.7'
platforms:
- name: EL
versions:
- - 7
+ - 'all'
galaxy_tags:
- system
dependencies: []
---
-- name: get keys from monitors
+- name: Get keys from monitors
ceph_key:
name: client.admin
cluster: "{{ cluster }}"
- copy_admin_key | bool
no_log: "{{ no_log_on_ceph_key_tasks }}"
-- name: copy ceph key(s) if needed
- copy:
+- name: Copy ceph key(s) if needed
+ ansible.builtin.copy:
dest: "/etc/ceph/{{ cluster }}.client.admin.keyring"
content: "{{ _admin_key.stdout + '\n' }}"
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
- copy_admin_key | bool
no_log: "{{ no_log_on_ceph_key_tasks }}"
-- name: add mgr ip address to trusted list with dashboard - ipv4
- set_fact:
+- name: Add mgr ip address to trusted list with dashboard - ipv4
+ ansible.builtin.set_fact:
trusted_ip_list: '{{ trusted_ip_list | default("") }}{{ "," if trusted_ip_list is defined else "" }}{{ hostvars[item]["ansible_facts"]["all_ipv4_addresses"] | ips_in_ranges(public_network.split(",")) | first }}'
with_items: '{{ groups[mgr_group_name] | default(groups[mon_group_name]) }}'
when:
- dashboard_enabled | bool
- ip_version == 'ipv4'
-- name: add mgr ip address to trusted list with dashboard - ipv6
- set_fact:
+- name: Add mgr ip address to trusted list with dashboard - ipv6
+ ansible.builtin.set_fact:
trusted_ip_list: '{{ trusted_ip_list | default("") }}{{ "," if trusted_ip_list is defined else "" }}{{ hostvars[item]["ansible_facts"]["all_ipv6_addresses"] | ips_in_ranges(public_network.split(",")) | last }}'
with_items: '{{ groups[mgr_group_name] | default(groups[mon_group_name]) }}'
when:
- dashboard_enabled | bool
- ip_version == 'ipv6'
-- name: deploy gateway settings, used by the ceph_iscsi_config modules
+- name: Deploy gateway settings, used by the ceph_iscsi_config modules
openstack.config_template.config_template:
src: "{{ role_path }}/templates/iscsi-gateway.cfg.j2"
dest: /etc/ceph/iscsi-gateway.cfg
config_type: ini
config_overrides: '{{ iscsi_conf_overrides }}'
mode: "0600"
- notify: restart ceph rbd-target-api-gw
+ notify: Restart ceph rbd-target-api-gw
-- name: set_fact container_exec_cmd
- set_fact:
+- name: Set_fact container_exec_cmd
+ ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when: containerized_deployment | bool
-- name: create iscsi pool
+- name: Create iscsi pool
ceph_pool:
name: "{{ iscsi_pool_name }}"
cluster: "{{ cluster }}"
---
-- name: create /var/lib/ceph/iscsi.{{ ansible_facts['hostname'] }}
- file:
+- name: Create /var/lib/ceph/iscsi.{{ ansible_facts['hostname'] }}
+ ansible.builtin.file:
path: "/var/lib/ceph/iscsi.{{ ansible_facts['hostname'] }}"
state: directory
owner: "{{ ceph_uid }}"
group: "{{ ceph_uid }}"
mode: "{{ ceph_directories_mode }}"
-- name: create rbd target log directories
- file:
+- name: Create rbd target log directories
+ ansible.builtin.file:
path: '/var/log/{{ item }}'
state: directory
+ mode: "0755"
with_items:
- rbd-target-api
- rbd-target-gw
- tcmu-runner
-- name: include_tasks systemd.yml
- include_tasks: systemd.yml
+- name: Include_tasks systemd.yml
+ ansible.builtin.include_tasks: systemd.yml
-- name: systemd start tcmu-runner, rbd-target-api and rbd-target-gw containers
- systemd:
+- name: Systemd start tcmu-runner, rbd-target-api and rbd-target-gw containers
+ ansible.builtin.systemd:
name: "{{ item }}"
state: started
- enabled: yes
- masked: no
- daemon_reload: yes
+ enabled: true
+ masked: false
+ daemon_reload: true
with_items:
- tcmu-runner
- rbd-target-gw
---
-- name: create a temporary directory
- tempfile:
+- name: Create a temporary directory
+ ansible.builtin.tempfile:
state: directory
register: iscsi_ssl_tmp_dir
delegate_to: localhost
run_once: true
-- name: set_fact crt_files
- set_fact:
+- name: Set_fact crt_files
+ ansible.builtin.set_fact:
crt_files:
- "iscsi-gateway.crt"
- "iscsi-gateway.key"
- "iscsi-gateway.pem"
- "iscsi-gateway-pub.key"
-- name: check for existing crt file(s) in monitor key/value store
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config get iscsi/ssl/{{ item }}"
+- name: Check for existing crt file(s) in monitor key/value store
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config get iscsi/ssl/{{ item }}"
with_items: "{{ crt_files }}"
changed_when: false
- failed_when: false
+ failed_when: crt_files_exist.rc not in [0, 22]
run_once: true
delegate_to: "{{ groups.get(mon_group_name)[0] }}"
register: crt_files_exist
-- name: set_fact crt_files_missing
- set_fact:
+- name: Set_fact crt_files_missing
+ ansible.builtin.set_fact:
crt_files_missing: "{{ crt_files_exist.results | selectattr('rc', 'equalto', 0) | map(attribute='rc') | list | length != crt_files | length }}"
-- name: generate ssl crt/key files
+- name: Generate ssl crt/key files
+ when: crt_files_missing
block:
- - name: create ssl crt/key files
- command: >
+ - name: Create ssl crt/key files
+ ansible.builtin.command: >
openssl req -newkey rsa:2048 -nodes -keyout {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.key
-x509 -days 365 -out {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.crt
-subj "/C=US/ST=./L=./O=RedHat/OU=Linux/CN={{ ansible_facts['hostname'] }}"
delegate_to: localhost
- run_once: True
+ run_once: true
+ changed_when: false
with_items: "{{ crt_files_exist.results }}"
- - name: create pem
- shell: >
+ - name: Create pem # noqa: no-changed-when
+ ansible.builtin.shell: >
cat {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.crt
{{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.key > {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.pem
delegate_to: localhost
- run_once: True
+ run_once: true
register: pem
with_items: "{{ crt_files_exist.results }}"
- - name: create public key from pem
- shell: >
+ - name: Create public key from pem
+ ansible.builtin.shell: >
openssl x509 -inform pem -in {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.pem
-pubkey -noout > {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway-pub.key
delegate_to: localhost
- run_once: True
+ run_once: true
when: pem.changed
tags: skip_ansible_lint
- - name: slurp ssl crt/key files
- slurp:
+ - name: Slurp ssl crt/key files
+ ansible.builtin.slurp:
src: "{{ iscsi_ssl_tmp_dir.path }}/{{ item }}"
register: iscsi_ssl_files_content
with_items: "{{ crt_files }}"
run_once: true
delegate_to: localhost
- - name: store ssl crt/key files
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config-key put iscsi/ssl/{{ item.item }} {{ item.content }}"
+ - name: Store ssl crt/key files
+ ansible.builtin.command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config-key put iscsi/ssl/{{ item.item }} {{ item.content }}"
run_once: true
delegate_to: "{{ groups.get(mon_group_name)[0] }}"
with_items: "{{ iscsi_ssl_files_content.results }}"
- when: crt_files_missing
+ changed_when: false
-- name: copy crt file(s) to gateway nodes
- copy:
+- name: Copy crt file(s) to gateway nodes
+ ansible.builtin.copy:
content: "{{ item.stdout | b64decode }}"
dest: "/etc/ceph/{{ item.item }}"
owner: root
group: root
- mode: 0400
+ mode: "0400"
changed_when: false
with_items: "{{ crt_files_exist.results if not crt_files_missing else iscsi_ssl_files_content.results }}"
when: not crt_files_missing
-- name: clean temporary directory
- file:
+- name: Clean temporary directory
+ ansible.builtin.file:
path: "{{ iscsi_ssl_tmp_dir.path }}"
- state: absent
\ No newline at end of file
+ state: absent
---
-- name: include common.yml
- include_tasks: common.yml
+- name: Include common.yml
+ ansible.builtin.include_tasks: common.yml
-- name: include non-container/prerequisites.yml
- include_tasks: non-container/prerequisites.yml
+- name: Include non-container/prerequisites.yml
+ ansible.builtin.include_tasks: non-container/prerequisites.yml
when: not containerized_deployment | bool
# deploy_ssl_keys uses the ansible controller to create self-signed crt/key/pub files
# and transfers them to the /etc/ceph directory on each gateway node. SSL certs are used by
# the API for https support.
-- name: include deploy_ssl_keys.yml
- include_tasks: deploy_ssl_keys.yml
+- name: Include deploy_ssl_keys.yml
+ ansible.builtin.include_tasks: deploy_ssl_keys.yml
when: generate_crt | bool
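The comment above describes the self-signed certificate flow. As a minimal sketch (not part of this diff, and using a hypothetical group_vars file name), a deployment opts into it by overriding the generate_crt default shown earlier in this patch:

# group_vars/iscsigws.yml  (hypothetical path)
generate_crt: true  # have the role create crt/key/pub files on the controller and push them to /etc/ceph on the gateways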
-- name: include non-container/configure_iscsi.yml
- include_tasks: non-container/configure_iscsi.yml
+- name: Include non-container/configure_iscsi.yml
+ ansible.builtin.include_tasks: non-container/configure_iscsi.yml
when:
- not containerized_deployment | bool
- not use_new_ceph_iscsi | bool
-- name: include non-container/postrequisites.yml
- include_tasks: non-container/postrequisites.yml
+- name: Include non-container/postrequisites.yml
+ ansible.builtin.include_tasks: non-container/postrequisites.yml
when: not containerized_deployment | bool
-- name: include containerized.yml
- include_tasks: containerized.yml
+- name: Include containerized.yml
+ ansible.builtin.include_tasks: containerized.yml
when: containerized_deployment | bool
---
-- name: igw_gateway (tgt) | configure iscsi target (gateway)
+- name: Igw_gateway (tgt) | configure iscsi target (gateway)
igw_gateway:
mode: "target"
gateway_iqn: "{{ gateway_iqn }}"
gateway_ip_list: "{{ gateway_ip_list }}"
register: target
-- name: igw_lun | configure luns (create/map rbds and add to lio)
+- name: Igw_lun | configure luns (create/map rbds and add to lio)
igw_lun:
pool: "{{ item.pool }}"
image: "{{ item.image }}"
with_items: "{{ rbd_devices }}"
register: images
-- name: igw_gateway (map) | map luns to the iscsi target
+- name: Igw_gateway (map) | map luns to the iscsi target
igw_gateway:
mode: "map"
gateway_iqn: "{{ gateway_iqn }}"
gateway_ip_list: "{{ gateway_ip_list }}"
register: luns
-- name: igw_client | configure client connectivity
+- name: Igw_client | configure client connectivity
igw_client:
client_iqn: "{{ item.client }}"
image_list: "{{ item.image_list }}"
-- name: start rbd-target-api and rbd-target-gw
- service:
+- name: Start rbd-target-api and rbd-target-gw
+ ansible.builtin.systemd:
name: "{{ item }}"
state: started
- enabled: yes
- masked: no
+ enabled: true
+ masked: false
with_items:
- rbd-target-api
- rbd-target-gw
---
-- name: red hat based systems tasks
+- name: Red hat based systems tasks
when: ansible_facts['os_family'] == 'RedHat'
block:
- - name: set_fact common_pkgs
- set_fact:
+ - name: Set_fact common_pkgs
+ ansible.builtin.set_fact:
common_pkgs:
- tcmu-runner
- targetcli
- - name: set_fact base iscsi pkgs if new style ceph-iscsi
- set_fact:
+ - name: Set_fact base iscsi pkgs if new style ceph-iscsi
+ ansible.builtin.set_fact:
iscsi_base:
- ceph-iscsi
when: use_new_ceph_iscsi | bool
- - name: set_fact base iscsi pkgs if using older ceph-iscsi-config
- set_fact:
+ - name: Set_fact base iscsi pkgs if using older ceph-iscsi-config
+ ansible.builtin.set_fact:
iscsi_base:
- ceph-iscsi-cli
- ceph-iscsi-config
when: not use_new_ceph_iscsi | bool
- - name: when ceph_iscsi_config_dev is true
+ - name: When ceph_iscsi_config_dev is true
when:
- ceph_origin == 'repository'
- ceph_repository in ['dev', 'community']
- ceph_iscsi_config_dev | bool
block:
- - name: ceph-iscsi dependency repositories
- get_url:
+ - name: Ceph-iscsi dependency repositories
+ ansible.builtin.get_url:
url: "https://shaman.ceph.com/api/repos/tcmu-runner/main/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_major_version'] }}/repo?arch={{ ansible_facts['architecture'] }}"
dest: '/etc/yum.repos.d/tcmu-runner-dev.repo'
force: true
+ mode: "0644"
register: result
until: result is succeeded
- - name: ceph-iscsi development repository
- get_url:
+ - name: Ceph-iscsi development repository
+ ansible.builtin.get_url:
url: "https://shaman.ceph.com/api/repos/{{ item }}/main/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_major_version'] }}/repo"
dest: '/etc/yum.repos.d/{{ item }}-dev.repo'
force: true
+ mode: "0644"
register: result
until: result is succeeded
with_items: '{{ iscsi_base }}'
when: ceph_repository == 'dev'
- - name: ceph-iscsi stable repository
- get_url:
+ - name: Ceph-iscsi stable repository
+ ansible.builtin.get_url:
url: "https://download.ceph.com/ceph-iscsi/{{ '3' if use_new_ceph_iscsi | bool else '2' }}/rpm/el{{ ansible_facts['distribution_major_version'] }}/ceph-iscsi.repo"
dest: /etc/yum.repos.d/ceph-iscsi.repo
force: true
+ mode: "0644"
register: result
until: result is succeeded
when: ceph_repository == 'community'
- - name: install ceph iscsi package
- package:
+ - name: Install ceph iscsi package
+ ansible.builtin.package:
name: "{{ common_pkgs + iscsi_base }}"
- state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
register: result
until: result is succeeded
-- name: check the status of the target.service override
- stat:
+- name: Check the status of the target.service override
+ ansible.builtin.stat:
path: /etc/systemd/system/target.service
register: target
-- name: mask the target service - preventing manual start
- systemd:
+- name: Mask the target service - preventing manual start
+ ansible.builtin.systemd:
name: target
- masked: yes
- enabled: no
+ masked: true
+ enabled: false
when:
- target.stat.exists
- not target.stat.islnk
# We must start rbd-target-gw/api after configure_iscsi.yml to avoid
# races where they are both trying to setup the same object during
# a rolling update.
-- name: start tcmu-runner
- service:
+- name: Start tcmu-runner
+ ansible.builtin.systemd:
name: tcmu-runner
state: started
- enabled: yes
- masked: no
+ enabled: true
+ masked: false
---
-- name: generate systemd unit files for tcmu-runner, rbd-target-api and rbd-target-gw
- template:
+- name: Generate systemd unit files for tcmu-runner, rbd-target-api and rbd-target-gw
+ ansible.builtin.template:
src: "{{ role_path }}/templates/{{ item }}.service.j2"
dest: /etc/systemd/system/{{ item }}.service
owner: "root"
- rbd-target-gw
- rbd-target-api
notify:
- - restart ceph tcmu-runner
- - restart ceph rbd-target-api-gw
+ - Restart ceph tcmu-runner
+ - Restart ceph rbd-target-api-gw
# ceph_mds_systemd_overrides will override the systemd settings
# for the ceph-mds services.
# For example, to set "PrivateDevices=false" you can specify:
-#ceph_mds_systemd_overrides:
-# Service:
-# PrivateDevices: False
+# ceph_mds_systemd_overrides:
+# Service:
+# PrivateDevices: false
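For context, the override above is consumed later in this diff by the config_template task that writes /etc/systemd/system/ceph-mds@.service.d/ceph-mds-systemd-overrides.conf; under that assumption, the rendered drop-in would look roughly like:

[Service]
PrivateDevices=false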
author: Sébastien Han
description: Installs Ceph Metadata
license: Apache
- min_ansible_version: 2.7
+ min_ansible_version: '2.7'
platforms:
- name: EL
versions:
- - 7
+ - 'all'
galaxy_tags:
- system
dependencies: []
---
-- name: create bootstrap-mds and mds directories
- file:
+- name: Create bootstrap-mds and mds directories
+ ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
- /var/lib/ceph/bootstrap-mds/
- /var/lib/ceph/mds/{{ cluster }}-{{ ansible_facts['hostname'] }}
-- name: get keys from monitors
+- name: Get keys from monitors
ceph_key:
name: "{{ item.name }}"
cluster: "{{ cluster }}"
- item.copy_key | bool
no_log: "{{ no_log_on_ceph_key_tasks }}"
-- name: copy ceph key(s) if needed
- copy:
+- name: Copy ceph key(s) if needed
+ ansible.builtin.copy:
dest: "{{ item.item.path }}"
content: "{{ item.stdout + '\n' }}"
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
- item.item.copy_key | bool
no_log: "{{ no_log_on_ceph_key_tasks }}"
-- name: create mds keyring
+- name: Create mds keyring
ceph_key:
name: "mds.{{ ansible_facts['hostname'] }}"
cluster: "{{ cluster }}"
---
-- name: include_tasks systemd.yml
- include_tasks: systemd.yml
+- name: Include_tasks systemd.yml
+ ansible.builtin.include_tasks: systemd.yml
-- name: enable ceph-mds.target
- service:
+- name: Enable ceph-mds.target
+ ansible.builtin.service:
name: ceph-mds.target
- enabled: yes
- daemon_reload: yes
+ enabled: true
+ daemon_reload: true
when: containerized_deployment | bool
-- name: systemd start mds container
- systemd:
+- name: Systemd start mds container
+ ansible.builtin.systemd:
name: ceph-mds@{{ ansible_facts['hostname'] }}
state: started
- enabled: yes
- masked: no
- daemon_reload: yes
+ enabled: true
+ masked: false
+ daemon_reload: true
-- name: wait for mds socket to exist
- command: "{{ container_binary }} exec ceph-mds-{{ ansible_facts['hostname'] }} sh -c 'stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['hostname'] }}.asok || stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['fqdn'] }}.asok'"
+- name: Wait for mds socket to exist
+ ansible.builtin.command: "{{ container_binary }} exec ceph-mds-{{ ansible_facts['hostname'] }} sh -c 'stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['hostname'] }}.asok || stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['fqdn'] }}.asok'"
changed_when: false
register: multi_mds_socket
retries: 5
---
-- import_role:
+- name: Import ceph-facts role
+ ansible.builtin.import_role:
name: ceph-facts
tasks_from: get_def_crush_rule_name.yml
-- name: create filesystem pools
+- name: Create filesystem pools
ceph_pool:
name: "{{ item.name }}"
cluster: "{{ cluster }}"
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-- name: create ceph filesystem
+- name: Create ceph filesystem
ceph_fs:
name: "{{ cephfs }}"
cluster: "{{ cluster }}"
---
-- name: include create_mds_filesystems.yml
- include_tasks: create_mds_filesystems.yml
+- name: Include create_mds_filesystems.yml
+ ansible.builtin.include_tasks: create_mds_filesystems.yml
when:
- inventory_hostname == groups[mds_group_name] | first
- not rolling_update | bool
-- name: include common.yml
- include_tasks: common.yml
+- name: Include common.yml
+ ansible.builtin.include_tasks: common.yml
-- name: non_containerized.yml
- include_tasks: non_containerized.yml
+- name: Include non_containerized.yml
+ ansible.builtin.include_tasks: non_containerized.yml
when: not containerized_deployment | bool
-- name: containerized.yml
- include_tasks: containerized.yml
+- name: Include containerized.yml
+ ansible.builtin.include_tasks: containerized.yml
when: containerized_deployment | bool
---
-- name: install ceph mds for debian
- apt:
+- name: Install ceph mds for debian
+ ansible.builtin.apt:
name: ceph-mds
- state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else '' }}{{ ansible_facts['distribution_release'] ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
when:
- mds_group_name in group_names
register: result
until: result is succeeded
-- name: install ceph-mds package on redhat or SUSE/openSUSE
- package:
+- name: Install ceph-mds package on redhat or SUSE/openSUSE
+ ansible.builtin.package:
name: "ceph-mds"
- state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
register: result
until: result is succeeded
when:
- mds_group_name in group_names
- ansible_facts['os_family'] in ['Suse', 'RedHat']
-- name: ensure systemd service override directory exists
- file:
+- name: Ensure systemd service override directory exists
+ ansible.builtin.file:
state: directory
path: "/etc/systemd/system/ceph-mds@.service.d/"
+ mode: "0755"
when:
- ceph_mds_systemd_overrides is defined
- ansible_facts['service_mgr'] == 'systemd'
-- name: add ceph-mds systemd service overrides
+- name: Add ceph-mds systemd service overrides
openstack.config_template.config_template:
src: "ceph-mds.service.d-overrides.j2"
dest: "/etc/systemd/system/ceph-mds@.service.d/ceph-mds-systemd-overrides.conf"
- ceph_mds_systemd_overrides is defined
- ansible_facts['service_mgr'] == 'systemd'
-- name: start and add that the metadata service to the init sequence
- service:
+- name: Start and add the metadata service to the init sequence
+ ansible.builtin.systemd:
name: ceph-mds@{{ ansible_facts['hostname'] }}
state: started
- enabled: yes
- masked: no
+ enabled: true
+ masked: false
changed_when: false
---
-- name: generate systemd unit file
- template:
+- name: Generate systemd unit file
+ ansible.builtin.template:
src: "{{ role_path }}/templates/ceph-mds.service.j2"
dest: /etc/systemd/system/ceph-mds@.service
owner: "root"
group: "root"
mode: "0644"
- notify: restart ceph mdss
+ notify: Restart ceph mdss
-- name: generate systemd ceph-mds target file
- copy:
+- name: Generate systemd ceph-mds target file
+ ansible.builtin.copy:
src: ceph-mds.target
dest: /etc/systemd/system/ceph-mds.target
- when: containerized_deployment | bool
\ No newline at end of file
+ mode: "0644"
+ when: containerized_deployment | bool
# ceph_mgr_systemd_overrides will override the systemd settings
# for the ceph-mgr services.
# For example, to set "PrivateDevices=false" you can specify:
-#ceph_mgr_systemd_overrides:
-# Service:
-# PrivateDevices: False
+# ceph_mgr_systemd_overrides:
+# Service:
+# PrivateDevices: false
author: Sébastien Han
description: Installs Ceph Manager
license: Apache
- min_ansible_version: 2.7
+ min_ansible_version: '2.7'
platforms:
- name: EL
versions:
- - 7
+ - 'all'
galaxy_tags:
- system
dependencies: []
---
-- name: create mgr directory
- file:
+- name: Create mgr directory
+ ansible.builtin.file:
path: /var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }}
state: directory
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
mode: "{{ ceph_directories_mode }}"
-- name: fetch ceph mgr keyring
+- name: Fetch ceph mgr keyring
ceph_key:
name: "mgr.{{ ansible_facts['hostname'] }}"
caps:
when: groups.get(mgr_group_name, []) | length == 0 # the key is present already since one of the mons created it in "create ceph mgr keyring(s)"
no_log: "{{ no_log_on_ceph_key_tasks }}"
-- name: create and copy keyrings
+- name: Create and copy keyrings
when: groups.get(mgr_group_name, []) | length > 0
block:
- - name: create ceph mgr keyring(s) on a mon node
+ - name: Create ceph mgr keyring(s) on a mon node
ceph_key:
name: "mgr.{{ hostvars[item]['ansible_facts']['hostname'] }}"
caps:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
with_items: "{{ groups.get(mgr_group_name, []) }}"
- run_once: True
+ run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
no_log: "{{ no_log_on_ceph_key_tasks }}"
- - name: set_fact _mgr_keys
- set_fact:
+ - name: Set_fact _mgr_keys
+ ansible.builtin.set_fact:
_mgr_keys:
- { 'name': 'client.admin', 'path': "/etc/ceph/{{ cluster }}.client.admin.keyring", 'copy_key': "{{ copy_admin_key }}" }
- { 'name': "mgr.{{ ansible_facts['hostname'] }}", 'path': "/var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring", 'copy_key': true }
- - name: get keys from monitors
+ - name: Get keys from monitors
ceph_key:
name: "{{ item.name }}"
cluster: "{{ cluster }}"
- item.copy_key | bool
no_log: "{{ no_log_on_ceph_key_tasks }}"
- - name: copy ceph key(s) if needed
- copy:
+ - name: Copy ceph key(s) if needed
+ ansible.builtin.copy:
dest: "{{ item.item.path }}"
content: "{{ item.stdout + '\n' }}"
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
- item.item.copy_key | bool
no_log: "{{ no_log_on_ceph_key_tasks }}"
-- name: set mgr key permissions
- file:
+- name: Set mgr key permissions
+ ansible.builtin.file:
path: /var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
mode: "{{ ceph_keyring_permissions }}"
when: cephx | bool
-- name: append dashboard modules to ceph_mgr_modules
- set_fact:
+- name: Append dashboard modules to ceph_mgr_modules
+ ansible.builtin.set_fact:
ceph_mgr_modules: "{{ ceph_mgr_modules | union(['dashboard', 'prometheus']) }}"
when: dashboard_enabled | bool
---
-- name: set_fact container_exec_cmd
- set_fact:
+- name: Set_fact container_exec_cmd
+ ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}"
with_items: "{{ groups.get(mon_group_name, []) }}"
delegate_to: "{{ item }}"
run_once: true
when: containerized_deployment | bool
-- name: include common.yml
- include_tasks: common.yml
+- name: Include common.yml
+ ansible.builtin.include_tasks: common.yml
-- name: include pre_requisite.yml
- include_tasks: pre_requisite.yml
+- name: Include pre_requisite.yml
+ ansible.builtin.include_tasks: pre_requisite.yml
when: not containerized_deployment | bool
-- name: include start_mgr.yml
- include_tasks: start_mgr.yml
+- name: Include start_mgr.yml
+ ansible.builtin.include_tasks: start_mgr.yml
-- name: include mgr_modules.yml
- include_tasks: mgr_modules.yml
+- name: Include mgr_modules.yml
+ ansible.builtin.include_tasks: mgr_modules.yml
when:
- ceph_mgr_modules | length > 0
- ((groups[mgr_group_name] | default([]) | length == 0 and inventory_hostname == groups[mon_group_name] | last) or
---
-- name: wait for all mgr to be up
- command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} mgr dump -f json"
+- name: Wait for all mgr to be up
+ ansible.builtin.command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} mgr dump -f json"
register: mgr_dump
retries: 30
delay: 5
- (mgr_dump.stdout | from_json).available | bool
when: not ansible_check_mode
-- name: get enabled modules from ceph-mgr
- command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} --format json mgr module ls"
- check_mode: no
+- name: Get enabled modules from ceph-mgr
+ ansible.builtin.command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} --format json mgr module ls"
+ check_mode: false
changed_when: false
register: _ceph_mgr_modules
delegate_to: "{{ groups[mon_group_name][0] }}"
-- name: set _ceph_mgr_modules fact (convert _ceph_mgr_modules.stdout to a dict)
- set_fact:
+- name: Set _ceph_mgr_modules fact (convert _ceph_mgr_modules.stdout to a dict)
+ ansible.builtin.set_fact:
_ceph_mgr_modules: "{{ _ceph_mgr_modules.get('stdout', '{}') | from_json }}"
-- name: set _disabled_ceph_mgr_modules fact
- set_fact:
+- name: Set _disabled_ceph_mgr_modules fact
+ ansible.builtin.set_fact:
_disabled_ceph_mgr_modules: "{% if _ceph_mgr_modules.disabled_modules | length == 0 %}[]{% elif _ceph_mgr_modules.disabled_modules[0] | type_debug != 'dict' %}{{ _ceph_mgr_modules['disabled_modules'] }}{% else %}{{ _ceph_mgr_modules['disabled_modules'] | map(attribute='name') | list }}{% endif %}"
-- name: disable ceph mgr enabled modules
+- name: Disable ceph mgr enabled modules
ceph_mgr_module:
name: "{{ item }}"
cluster: "{{ cluster }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when: item not in ceph_mgr_modules
-- name: add modules to ceph-mgr
+- name: Add modules to ceph-mgr
ceph_mgr_module:
name: "{{ item }}"
cluster: "{{ cluster }}"
---
-- name: set_fact ceph_mgr_packages for sso
- set_fact:
+- name: Set_fact ceph_mgr_packages for sso
+ ansible.builtin.set_fact:
ceph_mgr_packages: "{{ ceph_mgr_packages | union(['python3-saml' if ansible_facts['distribution_major_version'] | int == 8 else 'python-saml']) }}"
when:
- dashboard_enabled | bool
- ansible_facts['distribution'] == 'RedHat'
-- name: set_fact ceph_mgr_packages for dashboard
- set_fact:
+- name: Set_fact ceph_mgr_packages for dashboard
+ ansible.builtin.set_fact:
ceph_mgr_packages: "{{ ceph_mgr_packages | union(['ceph-mgr-dashboard']) }}"
when: dashboard_enabled | bool
-- name: set_fact ceph_mgr_packages for non el7 distribution
- set_fact:
+- name: Set_fact ceph_mgr_packages for non el7 distribution
+ ansible.builtin.set_fact:
ceph_mgr_packages: "{{ ceph_mgr_packages | union(['ceph-mgr-diskprediction-local']) }}"
when:
- ansible_facts['os_family'] != 'RedHat'
- ansible_facts['distribution_major_version'] | int != 7
-- name: enable crb repository
+- name: Enable crb repository
community.general.dnf_config_manager:
name: crb
state: enabled
- ansible_facts['os_family'] == 'RedHat'
- ansible_facts['distribution_major_version'] | int == 9
-- name: install ceph-mgr packages on RedHat or SUSE
- package:
+- name: Install ceph-mgr packages on RedHat or SUSE
+ ansible.builtin.package:
name: '{{ ceph_mgr_packages }}'
- state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
register: result
until: result is succeeded
when: ansible_facts['os_family'] in ['RedHat', 'Suse']
-- name: install ceph-mgr packages for debian
- apt:
+- name: Install ceph-mgr packages for debian
+ ansible.builtin.apt:
name: '{{ ceph_mgr_packages }}'
- state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else ''}}{{ ansible_facts['distribution_release'] ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
register: result
until: result is succeeded
---
-- name: ensure systemd service override directory exists
- file:
+- name: Ensure systemd service override directory exists
+ ansible.builtin.file:
state: directory
path: "/etc/systemd/system/ceph-mgr@.service.d/"
+ mode: "0755"
when:
- ceph_mgr_systemd_overrides is defined
- ansible_facts['service_mgr'] == 'systemd'
-- name: add ceph-mgr systemd service overrides
+- name: Add ceph-mgr systemd service overrides
openstack.config_template.config_template:
src: "ceph-mgr.service.d-overrides.j2"
dest: "/etc/systemd/system/ceph-mgr@.service.d/ceph-mgr-systemd-overrides.conf"
- ceph_mgr_systemd_overrides is defined
- ansible_facts['service_mgr'] == 'systemd'
-- name: include_tasks systemd.yml
- include_tasks: systemd.yml
+- name: Include_tasks systemd.yml
+ ansible.builtin.include_tasks: systemd.yml
when: containerized_deployment | bool
-- name: enable ceph-mgr.target
- service:
+- name: Enable ceph-mgr.target
+ ansible.builtin.service:
name: ceph-mgr.target
- enabled: yes
- daemon_reload: yes
+ enabled: true
+ daemon_reload: true
when: containerized_deployment | bool
-- name: systemd start mgr
- systemd:
+- name: Systemd start mgr
+ ansible.builtin.systemd:
name: ceph-mgr@{{ ansible_facts['hostname'] }}
state: started
- enabled: yes
- masked: no
- daemon_reload: yes
+ enabled: true
+ masked: false
+ daemon_reload: true
---
-- name: generate systemd unit file
- template:
+- name: Generate systemd unit file
+ ansible.builtin.template:
src: "{{ role_path }}/templates/ceph-mgr.service.j2"
dest: /etc/systemd/system/ceph-mgr@.service
owner: "root"
group: "root"
mode: "0644"
- notify: restart ceph mgrs
+ notify: Restart ceph mgrs
-- name: generate systemd ceph-mgr target file
- copy:
+- name: Generate systemd ceph-mgr target file
+ ansible.builtin.copy:
src: ceph-mgr.target
dest: /etc/systemd/system/ceph-mgr.target
- when: containerized_deployment | bool
\ No newline at end of file
+ mode: "0644"
+ when: containerized_deployment | bool
# ceph_mon_systemd_overrides will override the systemd settings
# for the ceph-mon services.
# For example, to set "PrivateDevices=false" you can specify:
-#ceph_mon_systemd_overrides:
-# Service:
-# PrivateDevices: False
+# ceph_mon_systemd_overrides:
+# Service:
+# PrivateDevices: false
author: Sébastien Han
description: Installs Ceph Monitor
license: Apache
- min_ansible_version: 2.7
+ min_ansible_version: '2.7'
platforms:
- name: EL
versions:
- - 7
+ - 'all'
galaxy_tags:
- system
dependencies: []
---
-- name: waiting for the monitor(s) to form the quorum...
- command: >
+- name: Waiting for the monitor(s) to form the quorum...
+ ansible.builtin.command: >
{{ container_exec_cmd }}
ceph
--cluster {{ cluster }}
changed_when: false
when: not ansible_check_mode
-- name: fetch ceph initial keys
+- name: Fetch ceph initial keys
ceph_key:
state: fetch_initial_keys
cluster: "{{ cluster }}"
---
-- name: cephx related tasks
+- name: Cephx related tasks
when: cephx | bool
block:
- - name: check if monitor initial keyring already exists
+ - name: Check if monitor initial keyring already exists
ceph_key:
name: mon.
cluster: "{{ cluster }}"
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
register: initial_mon_key
- run_once: True
+ run_once: true
delegate_to: "{{ running_mon }}"
failed_when: initial_mon_key.rc not in [0, 2]
no_log: "{{ no_log_on_ceph_key_tasks }}"
when: running_mon is defined
- - name: generate monitor initial keyring
+ - name: Generate monitor initial keyring
ceph_key:
state: generate_secret
register: monitor_keyring
or
initial_mon_key is not succeeded
- - name: set_fact _initial_mon_key_success
- set_fact: # when initial_mon_key is registered above, `rc: 2` is considered success.
+ - name: Set_fact _initial_mon_key_success
+ ansible.builtin.set_fact: # when initial_mon_key is registered above, `rc: 2` is considered success.
_initial_mon_key_success: "{{ initial_mon_key is not skipped and initial_mon_key.rc == 0 }}"
- - name: get initial keyring when it already exists
- set_fact:
+ - name: Get initial keyring when it already exists
+ ansible.builtin.set_fact:
monitor_keyring: "{{ (initial_mon_key.stdout | from_json)[0]['key'] if _initial_mon_key_success | bool else monitor_keyring.stdout }}"
when: initial_mon_key.stdout|default('')|length > 0 or monitor_keyring is not skipped
no_log: "{{ no_log_on_ceph_key_tasks }}"
- - name: create monitor initial keyring
+ - name: Create monitor initial keyring
ceph_key:
name: mon.
dest: "/var/lib/ceph/tmp/"
cluster: "{{ cluster }}"
caps:
mon: allow *
- import_key: False
+ import_key: false
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
mode: "0400"
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
- - name: copy the initial key in /etc/ceph (for containers)
- copy:
+ - name: Copy the initial key in /etc/ceph (for containers)
+ ansible.builtin.copy:
src: /var/lib/ceph/tmp/{{ cluster }}.mon..keyring
dest: /etc/ceph/{{ cluster }}.mon.keyring
remote_src: true
+ mode: "0640"
when: containerized_deployment | bool
-- name: create monitor directory
- file:
+- name: Create monitor directory
+ ansible.builtin.file:
path: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}
state: directory
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
#
# This is only needed when upgrading from older versions of Ceph that used to
# run as `root` (https://github.com/ceph/ceph-ansible/issues/1635).
-- name: recursively fix ownership of monitor directory
- file:
+- name: Recursively fix ownership of monitor directory
+ ansible.builtin.file:
path: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}
state: directory
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
recurse: true
-- name: create admin keyring
+- name: Create admin keyring
ceph_authtool:
name: client.admin
path: /etc/ceph/ceph.client.admin.keyring
mgr: allow *
osd: allow *
mds: allow *
- create_keyring: True
+ create_keyring: true
gen_key: "{{ True if admin_secret == 'admin_secret' else omit }}"
add_key: "{{ admin_secret if admin_secret != 'admin_secret' else omit }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
- cephx | bool
-- name: slurp admin keyring
- slurp:
+- name: Slurp admin keyring
+ ansible.builtin.slurp:
src: "/etc/ceph/{{ cluster }}.client.admin.keyring"
delegate_to: "{{ groups[mon_group_name][0] }}"
- run_once: True
+ run_once: true
register: admin_keyring
-- name: copy admin keyring over to mons
- copy:
+- name: Copy admin keyring over to mons
+ ansible.builtin.copy:
dest: "{{ admin_keyring.source }}"
content: "{{ admin_keyring.content | b64decode }}"
owner: "{{ ceph_uid }}"
delegate_to: "{{ item }}"
loop: "{{ groups[mon_group_name] }}"
-- name: import admin keyring into mon keyring
+- name: Import admin keyring into mon keyring
ceph_authtool:
path: "/var/lib/ceph/tmp/{{ cluster }}.mon..keyring"
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
- no_log: False
+ no_log: false
# no_log: "{{ no_log_on_ceph_key_tasks }}"
when:
- cephx | bool
-- name: set_fact ceph-mon container command
- set_fact:
- ceph_mon_cmd: "{{ container_binary + ' run --rm --net=host -v /var/lib/ceph/:/var/lib/ceph:z -v /etc/ceph/:/etc/ceph/:z --entrypoint=ceph-mon ' + ceph_client_docker_registry + '/' + ceph_client_docker_image + ':' +ceph_client_docker_image_tag if containerized_deployment | bool else 'ceph-mon' }}"
+- name: Set_fact ceph-mon container command
+ ansible.builtin.set_fact:
+ ceph_mon_cmd: "{{ container_binary + ' run --rm --net=host -v /var/lib/ceph/:/var/lib/ceph:z -v /etc/ceph/:/etc/ceph/:z --entrypoint=ceph-mon ' + ceph_client_docker_registry + '/' + ceph_client_docker_image + ':' + ceph_client_docker_image_tag if containerized_deployment | bool else 'ceph-mon' }}"
-- name: set_fact monmaptool container command
- set_fact:
- ceph_monmaptool_cmd: "{{ container_binary + ' run --rm --net=host -v /var/lib/ceph/:/var/lib/ceph:z -v /etc/ceph/:/etc/ceph/:z --entrypoint=monmaptool ' + ceph_client_docker_registry + '/' + ceph_client_docker_image + ':' +ceph_client_docker_image_tag if containerized_deployment | bool else 'monmaptool' }}"
+- name: Set_fact monmaptool container command
+ ansible.builtin.set_fact:
+ ceph_monmaptool_cmd: "{{ container_binary + ' run --rm --net=host -v /var/lib/ceph/:/var/lib/ceph:z -v /etc/ceph/:/etc/ceph/:z --entrypoint=monmaptool ' + ceph_client_docker_registry + '/' + ceph_client_docker_image + ':' + ceph_client_docker_image_tag if containerized_deployment | bool else 'monmaptool' }}"
-- name: generate initial monmap
- command: >
+- name: Generate initial monmap
+ ansible.builtin.command: >
{{ ceph_monmaptool_cmd }}
--create
{% for host in _monitor_addresses -%}
args:
creates: /etc/ceph/monmap
-#[v2:192.168.17.10:3300,v1:192.168.17.10:6789]
-
-- name: ceph monitor mkfs with keyring
- command: >
+- name: Ceph monitor mkfs with keyring
+ ansible.builtin.command: >
{{ ceph_mon_cmd }}
--cluster {{ cluster }}
--setuser "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring
when: cephx | bool
-- name: ceph monitor mkfs without keyring
- command: >
+- name: Ceph monitor mkfs without keyring
+ ansible.builtin.command: >
{{ ceph_mon_cmd }}
--cluster {{ cluster }}
--setuser "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
---
-- name: set_fact container_exec_cmd
- set_fact:
+- name: Set_fact container_exec_cmd
+ ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
when: containerized_deployment | bool
-- name: include deploy_monitors.yml
- include_tasks: deploy_monitors.yml
+- name: Include deploy_monitors.yml
+ ansible.builtin.include_tasks: deploy_monitors.yml
when:
# we test for both container and non-container
- (mon_socket is defined and mon_socket.get('rc') != 0) or (ceph_mon_container_stat is defined and ceph_mon_container_stat.get('stdout_lines', [])|length == 0)
- not switch_to_containers | default(False) | bool
-- name: include start_monitor.yml
- include_tasks: start_monitor.yml
+- name: Include start_monitor.yml
+ ansible.builtin.include_tasks: start_monitor.yml
-- name: include_tasks ceph_keys.yml
- include_tasks: ceph_keys.yml
+- name: Include_tasks ceph_keys.yml
+ ansible.builtin.include_tasks: ceph_keys.yml
when: not switch_to_containers | default(False) | bool
-- name: include secure_cluster.yml
- include_tasks: secure_cluster.yml
+- name: Include secure_cluster.yml
+ ansible.builtin.include_tasks: secure_cluster.yml
when:
- secure_cluster | bool
- inventory_hostname == groups[mon_group_name] | first
---
-- name: collect all the pools
- command: >
+- name: Collect all the pools
+ ansible.builtin.command: >
{{ container_exec_cmd }} rados --cluster {{ cluster }} lspools
changed_when: false
register: ceph_pools
- check_mode: no
+ check_mode: false
-- name: secure the cluster
- command: >
+- name: Secure the cluster
+ ansible.builtin.command: >
{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd pool set {{ item[0] }} {{ item[1] }} true
changed_when: false
with_nested:
- - "{{ ceph_pools.stdout_lines|default([]) }}"
+ - "{{ ceph_pools.stdout_lines | default([]) }}"
- "{{ secure_cluster_flags }}"
---
-- name: ensure systemd service override directory exists
- file:
+- name: Ensure systemd service override directory exists
+ ansible.builtin.file:
state: directory
path: "/etc/systemd/system/ceph-mon@.service.d/"
+ mode: "0755"
when:
- not containerized_deployment | bool
- ceph_mon_systemd_overrides is defined
- ansible_facts['service_mgr'] == 'systemd'
-- name: add ceph-mon systemd service overrides
+- name: Add ceph-mon systemd service overrides
openstack.config_template.config_template:
src: "ceph-mon.service.d-overrides.j2"
dest: "/etc/systemd/system/ceph-mon@.service.d/ceph-mon-systemd-overrides.conf"
- ceph_mon_systemd_overrides is defined
- ansible_facts['service_mgr'] == 'systemd'
-- name: include_tasks systemd.yml
- include_tasks: systemd.yml
+- name: Include_tasks systemd.yml
+ ansible.builtin.include_tasks: systemd.yml
when: containerized_deployment | bool
-- name: start the monitor service
- systemd:
+- name: Start the monitor service
+ ansible.builtin.systemd:
name: ceph-mon@{{ monitor_name if not containerized_deployment | bool else ansible_facts['hostname'] }}
state: started
- enabled: yes
- masked: no
- daemon_reload: yes
+ enabled: true
+ masked: false
+ daemon_reload: true
---
-- name: generate systemd unit file for mon container
- template:
+- name: Generate systemd unit file for mon container
+ ansible.builtin.template:
src: "{{ role_path }}/templates/ceph-mon.service.j2"
dest: /etc/systemd/system/ceph-mon@.service
owner: "root"
group: "root"
mode: "0644"
- notify: restart ceph mons
+ notify: Restart ceph mons
-- name: generate systemd ceph-mon target file
- copy:
+- name: Generate systemd ceph-mon target file
+ ansible.builtin.copy:
src: ceph-mon.target
dest: /etc/systemd/system/ceph-mon.target
+ mode: "0644"
when: containerized_deployment | bool
-- name: enable ceph-mon.target
- service:
+- name: Enable ceph-mon.target
+ ansible.builtin.service:
name: ceph-mon.target
- enabled: yes
- daemon_reload: yes
- when: containerized_deployment | bool
\ No newline at end of file
+ enabled: true
+ daemon_reload: true
+ when: containerized_deployment | bool
ceph_nfs_rgw_sectype: "sys,krb5,krb5i,krb5p"
# Note: keys are optional and can be generated, but not in containerized deployments,
# where they must be configured.
-#ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
-#ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
+# ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
+# ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
rgw_client_name: client.rgw.{{ ansible_facts['hostname'] }}
###################
# https://github.com/nfs-ganesha/nfs-ganesha/blob/next/src/config_samples/ganesha.conf.example
#
# Example:
-#CACHEINODE {
- #Entries_HWMark = 100000;
-#}
+# CACHEINODE {
+ # Entries_HWMark = 100000;
+# }
#
-#ganesha_core_param_overrides:
-#ganesha_ceph_export_overrides:
-#ganesha_rgw_export_overrides:
-#ganesha_rgw_section_overrides:
-#ganesha_log_overrides:
-#ganesha_conf_overrides: |
-# CACHEINODE {
- #Entries_HWMark = 100000;
-# }
+# ganesha_core_param_overrides:
+# ganesha_ceph_export_overrides:
+# ganesha_rgw_export_overrides:
+# ganesha_rgw_section_overrides:
+# ganesha_log_overrides:
+# ganesha_conf_overrides: |
+# CACHEINODE {
+ # Entries_HWMark = 100000;
+# }
##########
# DOCKER #
author: Daniel Gryniewicz
description: Installs Ceph NFS Gateway
license: Apache
- min_ansible_version: 2.7
+ min_ansible_version: '2.7'
platforms:
- name: EL
versions:
- - 7
+ - 'all'
galaxy_tags:
- system
dependencies: []
---
-- name: create rgw nfs user "{{ ceph_nfs_rgw_user }}"
+- name: Create rgw nfs user "{{ ceph_nfs_rgw_user }}"
radosgw_user:
name: "{{ ceph_nfs_rgw_user }}"
cluster: "{{ cluster }}"
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-- name: set_fact ceph_nfs_rgw_access_key and ceph_nfs_rgw_secret_key
- set_fact:
+- name: Set_fact ceph_nfs_rgw_access_key and ceph_nfs_rgw_secret_key
+ ansible.builtin.set_fact:
ceph_nfs_rgw_access_key: "{{ (rgw_nfs_user.stdout | from_json)['keys'][0]['access_key'] }}"
ceph_nfs_rgw_secret_key: "{{ (rgw_nfs_user.stdout | from_json)['keys'][0]['secret_key'] }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
---
# global/common requirement
-- name: stop nfs server service
- systemd:
+- name: Stop nfs server service
+ ansible.builtin.systemd:
name: "{{ 'nfs-server' if ansible_facts['os_family'] == 'RedHat' else 'nfsserver' if ansible_facts['os_family'] == 'Suse' else 'nfs-kernel-server' if ansible_facts['os_family'] == 'Debian' }}"
state: stopped
- enabled: no
+ enabled: false
failed_when: false
-- name: include pre_requisite_non_container.yml
- include_tasks: pre_requisite_non_container.yml
+- name: Include pre_requisite_non_container.yml
+ ansible.builtin.include_tasks: pre_requisite_non_container.yml
when: not containerized_deployment | bool
-- name: include pre_requisite_container.yml
- include_tasks: pre_requisite_container.yml
+- name: Include pre_requisite_container.yml
+ ansible.builtin.include_tasks: pre_requisite_container.yml
when: containerized_deployment | bool
-- name: set_fact _rgw_hostname
- set_fact:
+- name: Set_fact _rgw_hostname
+ ansible.builtin.set_fact:
_rgw_hostname: "{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}"
-- name: set rgw parameter (log file)
+- name: Set rgw parameter (log file)
ceph_config:
action: set
who: "client.rgw.{{ _rgw_hostname }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
loop: "{{ groups.get('nfss', []) }}"
-- name: include create_rgw_nfs_user.yml
- import_tasks: create_rgw_nfs_user.yml
+- name: Include create_rgw_nfs_user.yml
+ ansible.builtin.import_tasks: create_rgw_nfs_user.yml
when: groups.get(mon_group_name, []) | length > 0
-- name: install nfs-ganesha-selinux on RHEL 8
- package:
+- name: Install nfs-ganesha-selinux on RHEL 8
+ ansible.builtin.package:
name: nfs-ganesha-selinux
state: present
register: result
- ansible_facts['distribution_major_version'] == '8'
# NOTE (leseb): workaround for issues with ganesha and librgw
-- name: add ganesha_t to permissive domain
- selinux_permissive:
+- name: Add ganesha_t to permissive domain
+ community.general.selinux_permissive:
name: ganesha_t
permissive: true
failed_when: false
- ansible_facts['os_family'] == 'RedHat'
- ansible_facts['selinux']['status'] == 'enabled'
-- name: nfs with external ceph cluster task related
+- name: Nfs with external ceph cluster related tasks
when:
- groups.get(mon_group_name, []) | length == 0
- ceph_nfs_ceph_user is defined
block:
- - name: create keyring directory
- file:
+ - name: Create keyring directory
+ ansible.builtin.file:
path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ item }}"
state: directory
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
- "{{ ceph_nfs_ceph_user }}"
- "{{ ansible_facts['hostname'] }}"
- - name: set_fact rgw_client_name
- set_fact:
+ - name: Set_fact rgw_client_name
+ ansible.builtin.set_fact:
rgw_client_name: "client.rgw.{{ ceph_nfs_ceph_user }}"
- - name: get client cephx keys
- copy:
+ - name: Get client cephx keys
+ ansible.builtin.copy:
dest: "{{ item.1 }}"
content: "{{ item.0.content | b64decode }}"
mode: "{{ item.0.item.get('mode', '0600') }}"
- item.0.item.name == 'client.' + ceph_nfs_ceph_user or item.0.item.name == rgw_client_name
no_log: "{{ no_log_on_ceph_key_tasks }}"
-- name: include start_nfs.yml
- import_tasks: start_nfs.yml
+- name: Include start_nfs.yml
+ ansible.builtin.import_tasks: start_nfs.yml
---
-- name: keyring related tasks
+- name: Keyring related tasks
when: groups.get(mon_group_name, []) | length > 0
block:
- - name: set_fact container_exec_cmd
- set_fact:
+ - name: Set_fact container_exec_cmd
+ ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}"
with_items: "{{ groups.get(mon_group_name, []) }}"
delegate_to: "{{ item }}"
delegate_facts: true
run_once: true
- - name: "/var/lib/ceph/radosgw/{{ cluster }}-{{ ansible_facts['hostname'] }}"
- file:
+ - name: Create directories
+ ansible.builtin.file:
path: "{{ item.0 }}"
state: "directory"
owner: "{{ ceph_uid }}"
delegate_to: "{{ item.1 }}"
with_nested:
- ["/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}",
- "/var/lib/ceph/radosgw/{{ cluster }}-nfs.{{ ansible_facts['hostname'] }}" ]
- - [ "{{ groups.get(mon_group_name)[0] }}", "{{ inventory_hostname }}" ]
+ "/var/lib/ceph/radosgw/{{ cluster }}-nfs.{{ ansible_facts['hostname'] }}"]
+ - ["{{ groups.get(mon_group_name)[0] }}", "{{ inventory_hostname }}"]
- - name: set_fact keyrings_list
- set_fact:
+ - name: Set_fact keyrings_list
+ ansible.builtin.set_fact:
keyrings_list:
- { name: "client.bootstrap-rgw", path: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: "{{ nfs_obj_gw }}" }
- { name: "client.admin", path: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
- - { name: "client.rgw.{{ ansible_facts['hostname'] }}", create: True, path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring", caps: { "mon": "allow r", "osd": "allow rwx tag rgw *=*"} }
- - { name: "client.nfs.{{ ansible_facts['hostname'] }}", create: True, path: "/var/lib/ceph/radosgw/{{ cluster }}-nfs.{{ ansible_facts['hostname'] }}/keyring", caps: { "mon": "r", "osd": "allow rw pool=.nfs"} }
+ - { name: "client.rgw.{{ ansible_facts['hostname'] }}", create: true, path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring", caps: { "mon": "allow r", "osd": "allow rwx tag rgw *=*"} }
+ - { name: "client.nfs.{{ ansible_facts['hostname'] }}", create: true, path: "/var/lib/ceph/radosgw/{{ cluster }}-nfs.{{ ansible_facts['hostname'] }}/keyring", caps: { "mon": "r", "osd": "allow rw pool=.nfs"} }
- - name: create keyrings from a monitor
+ - name: Create keyrings from a monitor
ceph_key:
name: "{{ item.name }}"
cluster: "{{ cluster }}"
dest: "{{ item.path }}"
caps: "{{ item.caps }}"
- import_key: True
+ import_key: true
owner: "{{ ceph_uid }}"
group: "{{ ceph_uid }}"
mode: "0600"
- cephx | bool
- item.create | default(False) | bool
- - name: get keys from monitors
+ - name: Get keys from monitors
ceph_key:
name: "{{ item.name }}"
cluster: "{{ cluster }}"
- item.copy_key | default(True) | bool
no_log: "{{ no_log_on_ceph_key_tasks }}"
- - name: debug
- debug:
+ - name: Debug
+ ansible.builtin.debug:
msg: "{{ _rgw_keys }}"
- - name: copy ceph key(s) if needed
- copy:
+ - name: Copy ceph key(s) if needed
+ ansible.builtin.copy:
dest: "{{ item.item.path }}"
content: "{{ item.stdout + '\n' }}"
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
- item.item.copy_key | default(True) | bool
no_log: "{{ no_log_on_ceph_key_tasks }}"
- - name: dbus related tasks
+ - name: Dbus related tasks
+ when: ceph_nfs_dynamic_exports | bool
block:
- - name: get file
- command: "{{ container_binary }} run --rm --entrypoint=cat {{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag }} /etc/dbus-1/system.d/org.ganesha.nfsd.conf"
+ - name: Get file
+ ansible.builtin.command: "{{ container_binary }} run --rm --entrypoint=cat {{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag }} /etc/dbus-1/system.d/org.ganesha.nfsd.conf"
register: dbus_ganesha_file
run_once: true
changed_when: false
- - name: create dbus service file
- copy:
+ - name: Create dbus service file
+ ansible.builtin.copy:
content: "{{ dbus_ganesha_file.stdout }}"
dest: /etc/dbus-1/system.d/org.ganesha.nfsd.conf
owner: "root"
group: "root"
mode: "0644"
- - name: reload dbus configuration
- command: "killall -SIGHUP dbus-daemon"
- when: ceph_nfs_dynamic_exports | bool
+ - name: Reload dbus configuration
+ ansible.builtin.command: "killall -SIGHUP dbus-daemon"
+ changed_when: false
---
-- name: include red hat based system related tasks
- include_tasks: pre_requisite_non_container_red_hat.yml
+- name: Include red hat based system related tasks
+ ansible.builtin.include_tasks: pre_requisite_non_container_red_hat.yml
when: ansible_facts['os_family'] == 'RedHat'
-- name: include debian based system related tasks
- include_tasks: pre_requisite_non_container_debian.yml
+- name: Include debian based system related tasks
+ ansible.builtin.include_tasks: pre_requisite_non_container_debian.yml
when: ansible_facts['os_family'] == 'Debian'
-- name: install nfs rgw/cephfs gateway - SUSE/openSUSE
- zypper:
+- name: Install nfs rgw/cephfs gateway - SUSE/openSUSE
+ community.general.zypper:
name: "{{ item.name }}"
- disable_gpg_check: yes
+ disable_gpg_check: true
with_items:
- { name: 'nfs-ganesha-rgw', install: "{{ nfs_obj_gw }}" }
- { name: 'radosgw', install: "{{ nfs_obj_gw }}" }
# NOTE (leseb): we use root:ceph for permissions since ganesha
# does not have the right selinux context to read ceph directories.
-- name: create rados gateway and ganesha directories
- file:
+- name: Create rados gateway and ganesha directories
+ ansible.builtin.file:
path: "{{ item.name }}"
state: directory
owner: "{{ item.owner | default('ceph') }}"
- { name: "/var/run/ceph", create: true }
when: item.create | bool
-- name: cephx related tasks
+- name: Cephx related tasks
when:
- cephx | bool
- groups.get(mon_group_name, []) | length > 0
block:
- - name: get keys from monitors
+ - name: Get keys from monitors
ceph_key:
name: "{{ item.name }}"
cluster: "{{ cluster }}"
- item.copy_key | bool
no_log: "{{ no_log_on_ceph_key_tasks }}"
- - name: copy ceph key(s) if needed
- copy:
+ - name: Copy ceph key(s) if needed
+ ansible.builtin.copy:
dest: "{{ item.item.path }}"
content: "{{ item.stdout + '\n' }}"
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
- item.item.copy_key | bool
no_log: "{{ no_log_on_ceph_key_tasks }}"
- - name: nfs object gateway related tasks
+ - name: Nfs object gateway related tasks
when: nfs_obj_gw | bool
block:
- - name: create rados gateway keyring
+ - name: Create rados gateway keyring
ceph_key:
name: "client.rgw.{{ ansible_facts['hostname'] }}"
cluster: "{{ cluster }}"
owner: ceph
group: ceph
mode: "{{ ceph_keyring_permissions }}"
- no_log: "{{ no_log_on_ceph_key_tasks }}"
\ No newline at end of file
+ no_log: "{{ no_log_on_ceph_key_tasks }}"
---
-- name: debian based systems - repo handling
+- name: Debian based systems - repo handling
when: ceph_origin == 'repository'
block:
- - name: stable repos specific tasks
+ - name: Stable repos specific tasks
when:
- nfs_ganesha_stable | bool
- ceph_repository == 'community'
block:
- - name: add nfs-ganesha stable repository
- apt_repository:
+ - name: Add nfs-ganesha stable repository
+ ansible.builtin.apt_repository:
repo: "deb {{ nfs_ganesha_stable_deb_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main"
state: present
- update_cache: no
+ update_cache: false
register: add_ganesha_apt_repo
- - name: add libntirpc stable repository
- apt_repository:
+ - name: Add libntirpc stable repository
+ ansible.builtin.apt_repository:
repo: "deb {{ libntirpc_stable_deb_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main"
state: present
- update_cache: no
+ update_cache: false
register: add_libntirpc_apt_repo
when: libntirpc_stable_deb_repo is defined
- - name: add nfs-ganesha ppa apt key
- apt_key:
+ - name: Add nfs-ganesha ppa apt key
+ ansible.builtin.apt_key:
keyserver: "{{ nfs_ganesha_apt_keyserver }}"
id: "{{ nfs_ganesha_apt_key_id }}"
when:
- nfs_ganesha_apt_key_id is defined
- nfs_ganesha_apt_keyserver is defined
- - name: update apt cache
- apt:
- update_cache: yes
+ - name: Update apt cache
+ ansible.builtin.apt:
+ update_cache: true
register: update_ganesha_apt_cache
retries: 5
delay: 2
until: update_ganesha_apt_cache is success
when: add_ganesha_apt_repo is changed or add_libntirpc_apt_repo is changed
- - name: debian based systems - dev repos specific tasks
+ - name: Debian based systems - dev repos specific tasks
when:
- nfs_ganesha_dev | bool
- ceph_repository == 'dev'
block:
- - name: fetch nfs-ganesha development repository
- uri:
+ - name: Fetch nfs-ganesha development repository
+ ansible.builtin.uri:
url: "https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_release'] }}/flavors/{{ nfs_ganesha_flavor }}/repo?arch={{ ansible_facts['architecture'] }}"
- return_content: yes
+ return_content: true
register: nfs_ganesha_dev_apt_repo
- - name: add nfs-ganesha development repository
- copy:
+ - name: Add nfs-ganesha development repository
+ ansible.builtin.copy:
content: "{{ nfs_ganesha_dev_apt_repo.content }}"
dest: /etc/apt/sources.list.d/nfs-ganesha-dev.list
owner: root
group: root
- backup: yes
+ backup: true
+ mode: "0644"
-- name: debain based systems - install required packages
+- name: Debian based systems - install required packages
block:
- - name: debian based systems- non-rhcs installation
+ - name: Debian based systems - non-rhcs installation
when:
- (ceph_origin == 'repository' or ceph_origin == 'distro')
- ceph_repository != 'rhcs'
block:
- - name: install nfs rgw/cephfs gateway - debian
- apt:
+ - name: Install nfs rgw/cephfs gateway - debian
+ ansible.builtin.apt:
name: ['nfs-ganesha-rgw', 'radosgw']
- allow_unauthenticated: yes
+ allow_unauthenticated: true
register: result
until: result is succeeded
when: nfs_obj_gw | bool
- - name: install nfs rgw/cephfs gateway - debian
- apt:
+ - name: Install nfs rgw/cephfs gateway - debian
+ ansible.builtin.apt:
name: nfs-ganesha-ceph
- allow_unauthenticated: yes
+ allow_unauthenticated: true
register: result
until: result is succeeded
when: nfs_file_gw | bool
- - name: debian based systems - rhcs installation
+ - name: Debian based systems - rhcs installation
when:
- (ceph_origin == 'repository' or ceph_origin == 'distro')
- ceph_repository == 'rhcs'
block:
- - name: install red hat storage nfs gateway for debian
- apt:
+ - name: Install red hat storage nfs gateway for debian
+ ansible.builtin.apt:
name: nfs-ganesha
- state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
register: result
until: result is succeeded
- - name: install red hat storage nfs file gateway
- apt:
+
+ - name: Install red hat storage nfs file gateway
+ ansible.builtin.apt:
name: nfs-ganesha-ceph
- state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
register: result
until: result is succeeded
when: nfs_file_gw | bool
- - name: install red hat storage nfs obj gateway
- apt:
+
+ - name: Install red hat storage nfs obj gateway
+ ansible.builtin.apt:
name: nfs-ganesha-rgw
- state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
register: result
until: result is succeeded
when: nfs_obj_gw | bool
---
-- name: red hat based systems - repo handling
+- name: Red hat based systems - repo handling
when: ceph_origin == 'repository'
block:
- - name: red hat based systems - stable repo related tasks
+ - name: Red hat based systems - stable repo related tasks
when:
- nfs_ganesha_stable | bool
- ceph_repository == 'community'
block:
- - name: add nfs-ganesha stable repository
- package:
+ - name: Add nfs-ganesha stable repository
+ ansible.builtin.package:
name: "{{ centos_release_nfs }}"
state: present
- - name: red hat based systems - dev repo related tasks
+ - name: Red hat based systems - dev repo related tasks
+ when:
+ - nfs_ganesha_dev | bool
+ - ceph_repository == 'dev'
block:
- - name: add nfs-ganesha dev repo
- get_url:
+ - name: Add nfs-ganesha dev repo
+ ansible.builtin.get_url:
url: "https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_major_version'] }}/flavors/{{ nfs_ganesha_flavor }}/repo?arch={{ ansible_facts['architecture'] }}"
dest: /etc/yum.repos.d/nfs-ganesha-dev.repo
+ mode: "0644"
force: true
- when:
- - nfs_ganesha_dev | bool
- - ceph_repository == 'dev'
-- name: red hat based systems - install nfs packages
+- name: Red hat based systems - install nfs packages
block:
- - name: install nfs cephfs gateway
- package:
+ - name: Install nfs cephfs gateway
+ ansible.builtin.package:
name: ['nfs-ganesha-ceph', 'nfs-ganesha-rados-grace']
- state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
register: result
until: result is succeeded
when: nfs_file_gw | bool
- - name: install redhat nfs-ganesha-rgw and ceph-radosgw packages
- package:
+ - name: Install redhat nfs-ganesha-rgw and ceph-radosgw packages
+ ansible.builtin.package:
name: ['nfs-ganesha-rgw', 'nfs-ganesha-rados-grace', 'nfs-ganesha-rados-urls', 'ceph-radosgw']
- state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
+ state: "{{ (upgrade_ceph_packages | bool) | ternary('latest', 'present') }}"
register: result
until: result is succeeded
when: nfs_obj_gw | bool
---
-- block:
- - name: set_fact exec_cmd_nfs - external
- set_fact:
- exec_cmd_nfs: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph/:/var/lib/ceph/:z -v /var/log/ceph/:/var/log/ceph/:z --entrypoint=rados ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'rados' }} -n client.{{ ceph_nfs_ceph_user }} -k /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ceph_nfs_ceph_user }}/keyring"
- delegate_node: "{{ inventory_hostname }}"
- when: groups.get(mon_group_name, []) | length == 0
+- name: Nfs various pre-requisites tasks
+ block:
+ - name: Set_fact exec_cmd_nfs - external
+ ansible.builtin.set_fact:
+ exec_cmd_nfs: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph/:/var/lib/ceph/:z -v /var/log/ceph/:/var/log/ceph/:z --entrypoint=rados ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'rados' }} -n client.{{ ceph_nfs_ceph_user }} -k /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ceph_nfs_ceph_user }}/keyring"
+ delegate_node: "{{ inventory_hostname }}"
+ when: groups.get(mon_group_name, []) | length == 0
- - name: set_fact exec_cmd_nfs - internal
- set_fact:
- exec_cmd_nfs: "{{ container_binary + ' exec ceph-mon-' + hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] if containerized_deployment | bool else '' }} rados"
- delegate_node: "{{ groups[mon_group_name][0] }}"
- when: groups.get(mon_group_name, []) | length > 0
+ - name: Set_fact exec_cmd_nfs - internal
+ ansible.builtin.set_fact:
+ exec_cmd_nfs: "{{ container_binary + ' exec ceph-mon-' + hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] if containerized_deployment | bool else '' }} rados"
+ delegate_node: "{{ groups[mon_group_name][0] }}"
+ when: groups.get(mon_group_name, []) | length > 0
- - name: check if rados index object exists
- shell: "{{ exec_cmd_nfs | default('') }} -p {{ cephfs_data_pool.name }} --cluster {{ cluster }} ls|grep {{ ceph_nfs_rados_export_index }}"
- changed_when: false
- failed_when: false
- register: rados_index_exists
- check_mode: no
- when: ceph_nfs_rados_backend | bool
- delegate_to: "{{ delegate_node }}"
- run_once: true
+ - name: Check if rados index object exists
+ ansible.builtin.shell: "set -o pipefail && {{ exec_cmd_nfs | default('') }} -p {{ cephfs_data_pool.name }} --cluster {{ cluster }} ls | grep {{ ceph_nfs_rados_export_index }}"
+ changed_when: false
+ failed_when: false
+ register: rados_index_exists
+ check_mode: false
+ when: ceph_nfs_rados_backend | bool
+ delegate_to: "{{ delegate_node }}"
+ run_once: true
- - name: create an empty rados index object
- command: "{{ exec_cmd_nfs | default('') }} -p {{ cephfs_data_pool.name }} --cluster {{ cluster }} put {{ ceph_nfs_rados_export_index }} /dev/null"
- when:
- - ceph_nfs_rados_backend | bool
- - rados_index_exists.rc != 0
- delegate_to: "{{ delegate_node }}"
- run_once: true
+ - name: Create an empty rados index object
+ ansible.builtin.command: "{{ exec_cmd_nfs | default('') }} -p {{ cephfs_data_pool.name }} --cluster {{ cluster }} put {{ ceph_nfs_rados_export_index }} /dev/null"
+ when:
+ - ceph_nfs_rados_backend | bool
+ - rados_index_exists.rc != 0
+ delegate_to: "{{ delegate_node }}"
+ changed_when: false
+ run_once: true
-- name: create /etc/ganesha
- file:
+- name: Create /etc/ganesha
+ ansible.builtin.file:
path: /etc/ganesha
state: directory
owner: root
group: root
mode: "0755"
-- name: generate ganesha configuration file
- template:
+- name: Generate ganesha configuration file
+ ansible.builtin.template:
src: "ganesha.conf.j2"
dest: /etc/ganesha/ganesha.conf
owner: "root"
group: "root"
mode: "0644"
- notify: restart ceph nfss
+ notify: Restart ceph nfss
-- name: generate ganesha idmap.conf file
+- name: Generate ganesha idmap.conf file
openstack.config_template.config_template:
src: "idmap.conf.j2"
dest: "{{ ceph_nfs_idmap_conf }}"
mode: "0644"
config_overrides: "{{ idmap_conf_overrides }}"
config_type: ini
- notify: restart ceph nfss
+ notify: Restart ceph nfss
-- name: create exports directory
- file:
+- name: Create exports directory
+ ansible.builtin.file:
path: /etc/ganesha/export.d
state: directory
owner: "root"
mode: "0755"
when: ceph_nfs_dynamic_exports | bool
-- name: create exports dir index file
- copy:
+- name: Create exports dir index file
+ ansible.builtin.copy:
content: ""
- force: no
+ force: false
dest: /etc/ganesha/export.d/INDEX.conf
owner: "root"
group: "root"
mode: "0644"
when: ceph_nfs_dynamic_exports | bool
-- name: include_tasks systemd.yml
- include_tasks: systemd.yml
+- name: Include_tasks systemd.yml
+ ansible.builtin.include_tasks: systemd.yml
when: containerized_deployment | bool
-- name: systemd start nfs container
- systemd:
+- name: Systemd start nfs container
+ ansible.builtin.systemd:
name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}
state: started
- enabled: yes
- masked: no
- daemon_reload: yes
+ enabled: true
+ masked: false
+ daemon_reload: true
when:
- containerized_deployment | bool
- ceph_nfs_enable_service | bool
-- name: start nfs gateway service
- systemd:
+- name: Start nfs gateway service
+ ansible.builtin.systemd:
name: nfs-ganesha
state: started
- enabled: yes
- masked: no
+ enabled: true
+ masked: false
when:
- not containerized_deployment | bool
- ceph_nfs_enable_service | bool
---
-- name: generate systemd unit file
- template:
+- name: Generate systemd unit file
+ ansible.builtin.template:
src: "{{ role_path }}/templates/ceph-nfs.service.j2"
dest: /etc/systemd/system/ceph-nfs@.service
owner: "root"
group: "root"
mode: "0644"
- notify: restart ceph nfss
\ No newline at end of file
+ notify: Restart ceph nfss
--- /dev/null
+#!/bin/sh
+T=$1
+N=$2
+
+# start nfs-ganesha
+/usr/bin/{{ container_binary }} run --rm --net=host \
+{% if container_binary == 'podman' %}
+ -d --log-driver journald --conmon-pidfile /$T/$N-pid --cidfile /$T/$N-cid \
+{% endif %}
+--pids-limit={{ 0 if container_binary == 'podman' else -1 }} \
+-v /var/lib/ceph:/var/lib/ceph:z \
+-v /etc/ceph:/etc/ceph:z \
+-v /var/lib/nfs/ganesha:/var/lib/nfs/ganesha:z \
+-v /etc/ganesha:/etc/ganesha:z \
+-v /var/run/ceph:/var/run/ceph:z \
+-v /var/log/ceph:/var/log/ceph:z \
+-v /var/log/ganesha:/var/log/ganesha:z \
+{% if ceph_nfs_dynamic_exports | bool %}
+--privileged \
+-v /var/run/dbus/system_bus_socket:/var/run/dbus/system_bus_socket \
+{% endif -%}
+-v /etc/localtime:/etc/localtime:ro \
+{{ ceph_nfs_docker_extra_env }} \
+--entrypoint=/usr/bin/ganesha.nfsd \
+--name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }} \
+{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
+-F -L STDOUT "${GANESHA_EPOCH}"
author: Boris Ranto
description: Configures Prometheus Node Exporter
license: Apache
- min_ansible_version: 2.4
+ min_ansible_version: '2.4'
platforms:
- name: EL
versions:
- - 7
+ - 'all'
galaxy_tags:
- system
dependencies: []
---
-- name: include setup_container.yml
- include_tasks: setup_container.yml
+- name: Include setup_container.yml
+ ansible.builtin.include_tasks: setup_container.yml
---
-- name: include_tasks systemd.yml
- include_tasks: systemd.yml
+- name: Include_tasks systemd.yml
+ ansible.builtin.include_tasks: systemd.yml
-- name: start the node_exporter service
- systemd:
+- name: Start the node_exporter service
+ ansible.builtin.systemd:
name: node_exporter
state: started
- enabled: yes
- daemon_reload: yes
+ enabled: true
+ daemon_reload: true
failed_when: false
---
-- name: ship systemd service
- template:
+- name: Ship systemd service
+ ansible.builtin.template:
src: node_exporter.service.j2
dest: "/etc/systemd/system/node_exporter.service"
owner: root
group: root
- mode: 0644
+ mode: "0644"
# All scenario(except 3rd) inherit from the following device declaration
# Note: This scenario uses the ceph-volume lvm batch method to provision OSDs
-#devices:
-# - /dev/sdb
-# - /dev/sdc
-# - /dev/sdd
-# - /dev/sde
+# devices:
+# - /dev/sdb
+# - /dev/sdc
+# - /dev/sdd
+# - /dev/sde
devices: []
# Declare devices to be used as block.db devices
-#dedicated_devices:
-# - /dev/sdx
-# - /dev/sdy
+# dedicated_devices:
+# - /dev/sdx
+# - /dev/sdy
dedicated_devices: []
# Declare devices to be used as block.wal devices
-#bluestore_wal_devices:
-# - /dev/nvme0n1
-# - /dev/nvme0n2
+# bluestore_wal_devices:
+# - /dev/nvme0n1
+# - /dev/nvme0n2
bluestore_wal_devices: []
-#'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above.
+# 'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above.
# Device discovery is based on the Ansible fact 'ansible_facts["devices"]'
# which reports all the devices on a system. If chosen, all the disks
# found will be passed to ceph-volume lvm batch. You should not be worried on using
# Encrypt your OSD device using dmcrypt
# If set to True, no matter which osd_objecstore you use the data will be encrypted
-dmcrypt: False
+dmcrypt: false
# Use ceph-volume to create OSDs from logical volumes.
# lvm_volumes is a list of dictionaries.
# NUMA node0 CPU(s): 0,2,4,6,8,10,12,14,16
# NUMA node1 CPU(s): 1,3,5,7,9,11,13,15,17
# then, the following would run the OSD on the first NUMA node only.
-#ceph_osd_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16"
-#ceph_osd_docker_cpuset_mems: "0"
+# ceph_osd_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16"
+# ceph_osd_docker_cpuset_mems: "0"
# PREPARE DEVICE
#
# ceph_osd_systemd_overrides will override the systemd settings
# for the ceph-osd services.
# For example,to set "PrivateDevices=false" you can specify:
-#ceph_osd_systemd_overrides:
-# Service:
-# PrivateDevices: False
+# ceph_osd_systemd_overrides:
+# Service:
+# PrivateDevices: false
###########
author: Sébastien Han
description: Installs Ceph Object Storage Daemon
license: Apache
- min_ansible_version: 2.7
+ min_ansible_version: '2.7'
platforms:
- name: EL
versions:
- - 7
+ - 'all'
galaxy_tags:
- system
dependencies: []
---
-- name: create bootstrap-osd and osd directories
- file:
+- name: Create bootstrap-osd and osd directories
+ ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
- /var/lib/ceph/bootstrap-osd/
- /var/lib/ceph/osd/
-- name: get keys from monitors
+- name: Get keys from monitors
ceph_key:
name: "{{ item.name }}"
cluster: "{{ cluster }}"
- cephx | bool
- item.copy_key | bool
-- name: copy ceph key(s) if needed
- copy:
+- name: Copy ceph key(s) if needed
+ ansible.builtin.copy:
dest: "{{ item.item.path }}"
content: "{{ item.stdout + '\n' }}"
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
- item is not skipped
- item.item.copy_key | bool
no_log: "{{ no_log_on_ceph_key_tasks }}"
-
---
-- name: configure crush hierarchy
+- name: Configure crush hierarchy
ceph_crush:
cluster: "{{ cluster }}"
location: "{{ osd_crush_location }}"
- hostvars[groups[mon_group_name][0]]['create_crush_tree'] | default(create_crush_tree) | bool
- osd_crush_location is defined
-- name: create configured crush rules
+- name: Create configured crush rules
ceph_crush_rule:
name: "{{ item.name }}"
cluster: "{{ cluster }}"
delegate_to: '{{ groups[mon_group_name][0] }}'
run_once: true
-- name: get id for new default crush rule
+- name: Get id for new default crush rule
ceph_crush_rule:
name: "{{ item.name }}"
cluster: "{{ cluster }}"
# If multiple rules are set as default (should not be) then the last one is taken as actual default.
# the with_items statement overrides each iteration with the new one.
# NOTE(leseb): we should actually fail if multiple rules are set as default
-- name: set_fact info_ceph_default_crush_rule_yaml, ceph_osd_pool_default_crush_rule_name
- set_fact:
+- name: Set_fact info_ceph_default_crush_rule_yaml, ceph_osd_pool_default_crush_rule_name
+ ansible.builtin.set_fact:
info_ceph_default_crush_rule_yaml: "{{ item.stdout | default('{}', True) | from_json() }}"
ceph_osd_pool_default_crush_rule_name: "{{ (item.stdout | default('{}', True) | from_json).get('rule_name') }}"
with_items: "{{ info_ceph_default_crush_rule.results }}"
run_once: true
when: not item.get('skipped', false)
-- name: insert new default crush rule into daemon to prevent restart
- command: "{{ hostvars[item]['container_exec_cmd'] | default('') }} ceph --admin-daemon /var/run/ceph/{{ cluster }}-mon.{{ hostvars[item]['monitor_name'] }}.asok config set osd_pool_default_crush_rule {{ info_ceph_default_crush_rule_yaml.rule_id }}"
+- name: Insert new default crush rule into daemon to prevent restart
+ ansible.builtin.command: "{{ hostvars[item]['container_exec_cmd'] | default('') }} ceph --admin-daemon /var/run/ceph/{{ cluster }}-mon.{{ hostvars[item]['monitor_name'] }}.asok config set osd_pool_default_crush_rule {{ info_ceph_default_crush_rule_yaml.rule_id }}"
changed_when: false
delegate_to: "{{ item }}"
with_items: "{{ groups[mon_group_name] }}"
when:
- info_ceph_default_crush_rule_yaml | default('') | length > 0
-- name: "add new default crush rule to {{ cluster }}.conf"
- ini_file:
+- name: Add new default crush rule to ceph config file
+ community.general.ini_file:
dest: "/etc/ceph/{{ cluster }}.conf"
section: "global"
option: "osd pool default crush rule"
value: "{{ info_ceph_default_crush_rule_yaml.rule_id }}"
+ mode: "0644"
delegate_to: "{{ item }}"
with_items: "{{ groups[mon_group_name] }}"
run_once: true
---
-- name: set_fact add_osd
- set_fact:
+- name: Set_fact add_osd
+ ansible.builtin.set_fact:
add_osd: "{{ groups[osd_group_name] | length != ansible_play_hosts_all | length }}"
-- name: set_fact container_exec_cmd
- set_fact:
+- name: Set_fact container_exec_cmd
+ ansible.builtin.set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}"
with_items: "{{ groups.get(mon_group_name, []) }}"
delegate_to: "{{ item }}"
run_once: true
when: containerized_deployment | bool
-- name: include_tasks system_tuning.yml
- include_tasks: system_tuning.yml
+- name: Include_tasks system_tuning.yml
+ ansible.builtin.include_tasks: system_tuning.yml
-- name: install dependencies
- package:
+- name: Install dependencies
+ ansible.builtin.package:
name: parted
state: present
register: result
- not containerized_deployment | bool
- ansible_facts['os_family'] != 'ClearLinux'
-- name: install numactl when needed
- package:
+- name: Install numactl when needed
+ ansible.builtin.package:
name: numactl
register: result
until: result is succeeded
- ceph_osd_numactl_opts | length > 0
tags: with_pkg
-- name: include_tasks common.yml
- include_tasks: common.yml
+- name: Include_tasks common.yml
+ ansible.builtin.include_tasks: common.yml
-- name: set noup flag
+- name: Set noup flag
ceph_osd_flag:
name: noup
cluster: "{{ cluster }}"
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
- run_once: True
+ run_once: true
when:
- not rolling_update | default(False) | bool
- not switch_to_containers | default(False) | bool
-- name: include_tasks scenarios/lvm.yml
- include_tasks: scenarios/lvm.yml
+- name: Include_tasks scenarios/lvm.yml
+ ansible.builtin.include_tasks: scenarios/lvm.yml
when:
- lvm_volumes|length > 0
- not rolling_update|default(False) | bool
-- name: include_tasks scenarios/lvm-batch.yml
- include_tasks: scenarios/lvm-batch.yml
+- name: Include_tasks scenarios/lvm-batch.yml
+ ansible.builtin.include_tasks: scenarios/lvm-batch.yml
when:
- devices|length > 0
- not rolling_update|default(False) | bool
-- name: include_tasks start_osds.yml
- include_tasks: start_osds.yml
+- name: Include_tasks start_osds.yml
+ ansible.builtin.include_tasks: start_osds.yml
-- name: unset noup flag
+- name: Unset noup flag
ceph_osd_flag:
name: noup
cluster: "{{ cluster }}"
- not switch_to_containers | default(False) | bool
- inventory_hostname == ansible_play_hosts_all | last
-- name: wait for all osd to be up
- command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd stat -f json"
+- name: Wait for all osd to be up
+ ansible.builtin.command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd stat -f json"
register: wait_for_all_osds_up
retries: "{{ nb_retry_wait_osd_up }}"
delay: "{{ delay_wait_osd_up }}"
- inventory_hostname == ansible_play_hosts_all | last
tags: wait_all_osds_up
-- name: include crush_rules.yml
- include_tasks: crush_rules.yml
+- name: Include crush_rules.yml
+ ansible.builtin.include_tasks: crush_rules.yml
when: hostvars[groups[mon_group_name][0]]['crush_rule_config'] | default(crush_rule_config) | bool
tags: wait_all_osds_up
# Create the pools listed in openstack_pools
-- name: include openstack_config.yml
- include_tasks: openstack_config.yml
+- name: Include openstack_config.yml
+ ansible.builtin.include_tasks: openstack_config.yml
when:
- not add_osd | bool
- not rolling_update | default(False) | bool
---
-- name: pool related tasks
+- name: Pool related tasks
block:
- - name: create openstack pool(s)
+ - name: Create openstack pool(s)
ceph_pool:
name: "{{ item.name }}"
cluster: "{{ cluster }}"
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-- name: create openstack cephx key(s)
+- name: Create openstack cephx key(s)
+ when:
+ - cephx | bool
+ - openstack_config | bool
block:
- - name: generate keys
+ - name: Generate keys
ceph_key:
name: "{{ item.name }}"
caps: "{{ item.caps }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
no_log: "{{ no_log_on_ceph_key_tasks }}"
- - name: get keys from monitors
+ - name: Get keys from monitors
ceph_key:
name: "{{ item.name }}"
cluster: "{{ cluster }}"
delegate_to: "{{ groups.get(mon_group_name)[0] }}"
no_log: "{{ no_log_on_ceph_key_tasks }}"
- - name: copy ceph key(s) if needed
- copy:
+ - name: Copy ceph key(s) if needed
+ ansible.builtin.copy:
dest: "/etc/ceph/{{ cluster }}.{{ item.0.item.name }}.keyring"
content: "{{ item.0.stdout + '\n' }}"
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
- "{{ groups[mon_group_name] }}"
delegate_to: "{{ item.1 }}"
no_log: "{{ no_log_on_ceph_key_tasks }}"
- when:
- - cephx | bool
- - openstack_config | bool
---
-- name: "use ceph-volume lvm batch to create {{ osd_objectstore }} osds"
+- name: Use ceph-volume lvm batch to create osds
ceph_volume:
cluster: "{{ cluster }}"
objectstore: "{{ osd_objectstore }}"
batch_devices: "{{ _devices }}"
- dmcrypt: "{{ dmcrypt|default(omit) }}"
- crush_device_class: "{{ crush_device_class|default(omit) }}"
+ dmcrypt: "{{ dmcrypt | default(omit) }}"
+ crush_device_class: "{{ crush_device_class | default(omit) }}"
osds_per_device: "{{ osds_per_device }}"
block_db_size: "{{ block_db_size }}"
block_db_devices: "{{ dedicated_devices | unique if dedicated_devices | length > 0 else omit }}"
---
-- name: "use ceph-volume to create {{ osd_objectstore }} osds"
+- name: Use ceph-volume to create osds
ceph_volume:
cluster: "{{ cluster }}"
objectstore: "{{ osd_objectstore }}"
data: "{{ item.data }}"
- data_vg: "{{ item.data_vg|default(omit) }}"
- db: "{{ item.db|default(omit) }}"
- db_vg: "{{ item.db_vg|default(omit) }}"
- wal: "{{ item.wal|default(omit) }}"
- wal_vg: "{{ item.wal_vg|default(omit) }}"
+ data_vg: "{{ item.data_vg | default(omit) }}"
+ db: "{{ item.db | default(omit) }}"
+ db_vg: "{{ item.db_vg | default(omit) }}"
+ wal: "{{ item.wal | default(omit) }}"
+ wal_vg: "{{ item.wal_vg | default(omit) }}"
crush_device_class: "{{ item.crush_device_class | default(crush_device_class) | default(omit) }}"
- dmcrypt: "{{ dmcrypt|default(omit) }}"
+ dmcrypt: "{{ dmcrypt | default(omit) }}"
action: "{{ 'prepare' if containerized_deployment | bool else 'create' }}"
environment:
CEPH_VOLUME_DEBUG: "{{ ceph_volume_debug }}"
---
# this is for ceph-disk, the ceph-disk command is gone so we have to list /var/lib/ceph
-- name: get osd ids
- shell: ls /var/lib/ceph/osd/ | sed 's/.*-//' # noqa 306
+- name: Get osd ids
+ ansible.builtin.shell: ls /var/lib/ceph/osd/ | sed 's/.*-//' # noqa risky-shell-pipe
args:
executable: /bin/bash
changed_when: false
failed_when: false
register: osd_ids_non_container
-- name: collect osd ids
+- name: Collect osd ids
ceph_volume:
cluster: "{{ cluster }}"
action: list
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
register: ceph_osd_ids
-- name: include_tasks systemd.yml
- include_tasks: systemd.yml
+- name: Include_tasks systemd.yml
+ ansible.builtin.include_tasks: systemd.yml
when: containerized_deployment | bool
-- name: ensure systemd service override directory exists
- file:
+- name: Ensure systemd service override directory exists
+ ansible.builtin.file:
state: directory
path: "/etc/systemd/system/ceph-osd@.service.d/"
+ mode: "0755"
when:
- ceph_osd_systemd_overrides is defined
- ansible_facts['service_mgr'] == 'systemd'
-- name: add ceph-osd systemd service overrides
+- name: Add ceph-osd systemd service overrides
openstack.config_template.config_template:
src: "ceph-osd.service.d-overrides.j2"
dest: "/etc/systemd/system/ceph-osd@.service.d/ceph-osd-systemd-overrides.conf"
- ceph_osd_systemd_overrides is defined
- ansible_facts['service_mgr'] == 'systemd'
-- name: ensure "/var/lib/ceph/osd/{{ cluster }}-{{ item }}" is present
- file:
+- name: Ensure /var/lib/ceph/osd/<cluster>-<item> is present
+ ansible.builtin.file:
state: directory
path: "/var/lib/ceph/osd/{{ cluster }}-{{ item }}"
mode: "{{ ceph_directories_mode }}"
group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
with_items: "{{ ((ceph_osd_ids.stdout | default('{}', True) | from_json).keys() | list) | union(osd_ids_non_container.stdout_lines | default([])) }}"
-- name: write /var/lib/ceph/osd/{{ cluster }}-{{ osd_id }}/run
- template:
+- name: Write run file in /var/lib/ceph/osd/xxxx/run
+ ansible.builtin.template:
src: systemd-run.j2
dest: "/var/lib/ceph/osd/{{ cluster }}-{{ item }}/run"
mode: "0700"
with_items: "{{ ((ceph_osd_ids.stdout | default('{}', True) | from_json).keys() | list) | union(osd_ids_non_container.stdout_lines | default([])) }}"
when: containerized_deployment | bool
-- name: systemd start osd
- systemd:
+- name: Systemd start osd
+ ansible.builtin.systemd:
name: ceph-osd@{{ item }}
state: started
- enabled: yes
- masked: no
- daemon_reload: yes
+ enabled: true
+ masked: false
+ daemon_reload: true
with_items: "{{ ((ceph_osd_ids.stdout | default('{}', True) | from_json).keys() | list) | union(osd_ids_non_container.stdout_lines | default([])) }}"
---
-- name: create tmpfiles.d directory
- file:
+- name: Create tmpfiles.d directory
+ ansible.builtin.file:
path: "/etc/tmpfiles.d"
state: "directory"
owner: "root"
register: "tmpfiles_d"
when: disable_transparent_hugepage | bool
-- name: disable transparent hugepage
- template:
+- name: Disable transparent hugepage
+ ansible.builtin.template:
src: "tmpfiles_hugepage.j2"
dest: "/etc/tmpfiles.d/ceph_transparent_hugepage.conf"
group: "root"
validate: "systemd-tmpfiles --create %s"
when: disable_transparent_hugepage | bool
-- name: get default vm.min_free_kbytes
- slurp:
+- name: Get default vm.min_free_kbytes
+ ansible.builtin.slurp:
src: /proc/sys/vm/min_free_kbytes
register: default_vm_min_free_kbytes
-- name: set_fact vm_min_free_kbytes
- set_fact:
+- name: Set_fact vm_min_free_kbytes
+ ansible.builtin.set_fact:
vm_min_free_kbytes: "{{ 4194303 if ansible_facts['memtotal_mb'] >= 49152 else default_vm_min_free_kbytes.content | b64decode | trim }}"
-- name: apply operating system tuning
- sysctl:
+- name: Apply operating system tuning
+ ansible.posix.sysctl:
name: "{{ item.name }}"
value: "{{ item.value }}"
state: present
sysctl_file: /etc/sysctl.d/ceph-tuning.conf
- sysctl_set: yes
- ignoreerrors: yes
+ sysctl_set: true
+ ignoreerrors: true
with_items:
- { name: "fs.aio-max-nr", value: "1048576", enable: "{{ osd_objectstore == 'bluestore' }}" }
- "{{ os_tuning_params }}"
---
-- name: generate systemd unit file
- template:
+- name: Generate systemd unit file
+ ansible.builtin.template:
src: "{{ role_path }}/templates/ceph-osd.service.j2"
dest: /etc/systemd/system/ceph-osd@.service
owner: "root"
group: "root"
mode: "0644"
- notify: restart ceph osds
+ notify: Restart ceph osds
-- name: generate systemd ceph-osd target file
- copy:
+- name: Generate systemd ceph-osd target file
+ ansible.builtin.copy:
src: ceph-osd.target
dest: /etc/systemd/system/ceph-osd.target
+ mode: "0644"
when: containerized_deployment | bool
-- name: enable ceph-osd.target
- service:
+- name: Enable ceph-osd.target
+ ansible.builtin.service:
name: ceph-osd.target
- enabled: yes
- daemon_reload: yes
- when: containerized_deployment | bool
\ No newline at end of file
+ enabled: true
+ daemon_reload: true
+ when: containerized_deployment | bool
groups:
-- name: dashboard
- rules:
- - alert: Ceph Health Warning
- expr: ceph_health_status == 1
- for: 1m
- labels:
- severity: page
- annotations:
- summary: "Ceph Health Warning"
- description: "Overall Ceph Health"
- - alert: Ceph Health Error
- expr: ceph_health_status > 1
- for: 1m
- labels:
- severity: page
- annotations:
- summary: "Ceph Health Error"
- description: "The Ceph cluster health is in an error state"
- - alert: Disk(s) Near Full
- expr: (ceph_osd_stat_bytes_used / ceph_osd_stat_bytes) * 100 > 85
- for: 1m
- labels:
- severity: page
- annotations:
- summary: "Disk(s) Near Full"
- description: "This shows how many disks are at or above 85% full. Performance may degrade beyond this threshold on filestore (XFS) backed OSD's."
- - alert: OSD(s) Down
- expr: ceph_osd_up < 0.5
- for: 1m
- labels:
- severity: page
- annotations:
- summary: "OSD(s) Down"
- description: "This indicates that one or more OSDs is currently marked down in the cluster."
- - alert: OSD Host(s) Down
- expr: count by(instance) (ceph_disk_occupation * on(ceph_daemon) group_right(instance) ceph_osd_up == 0) - count by(instance) (ceph_disk_occupation) == 0
- for: 1m
- labels:
- severity: page
- annotations:
- summary: "OSD Host(s) Down"
- description: "This indicates that one or more OSD hosts is currently down in the cluster."
- - alert: PG(s) Stuck
- expr: max(ceph_osd_numpg) > scalar(ceph_pg_active)
- for: 1m
- labels:
- severity: page
- annotations:
- summary: "PG(s) Stuck"
- description: "This indicates there are pg's in a stuck state, manual intervention needed to resolve."
- - alert: OSD Host Loss Check
- expr: max(sum(ceph_osd_stat_bytes - ceph_osd_stat_bytes_used)) * 0.9 < scalar(max(sum by (instance) (ceph_osd_stat_bytes + on (ceph_daemon) group_left (instance) (ceph_disk_occupation*0))))
- for: 1m
- labels:
- severity: page
- annotations:
- summary: "OSD Host Loss Check"
- description: "This indicates that the cluster @ 90% full is not enough to support the loss of the largest OSD host."
- - alert: Slow OSD Responses
- expr: ((irate(node_disk_read_time_seconds_total[5m]) / clamp_min(irate(node_disk_reads_completed_total[5m]), 1) + irate(node_disk_write_time_seconds_total[5m]) / clamp_min(irate(node_disk_writes_completed_total[5m]), 1)) and on (instance, device) ceph_disk_occupation) > 1
- for: 1m
- labels:
- severity: page
- annotations:
- summary: "Slow OSD Responses"
- description: "This indicates that some OSD Latencies are above 1s."
- - alert: Network Errors
- expr: sum by (instance, device) (irate(node_network_receive_drop_total{device=~"(eth|en|bond|ib|mlx|p).*"}[5m]) + irate(node_network_receive_errs_total{device=~"(eth|en|bond|ib|mlx|p).*"}[5m]) + irate(node_network_transmit_drop_total{device=~"(eth|en|bond|ib|mlx|p).*"}[5m]) + irate(node_network_transmit_errs_total{device=~"(eth|en|bond|ib|mlx|p).*"}[5m])) > 10
- for: 1m
- labels:
- severity: page
- annotations:
- summary: "Network Errors"
- description: "This indicates that more than 10 dropped/error packets are seen in a 5m interval"
- - alert: Pool Capacity Low
- expr: (ceph_pool_stored / (ceph_pool_stored + ceph_pool_max_avail) * 100 + on (pool_id) group_left (name) (ceph_pool_metadata*0)) > 85
- for: 1m
- labels:
- severity: page
- annotations:
- summary: "Pool Capacity Low"
- description: "This indicates a low capacity in a pool."
- - alert: MON(s) Down
- expr: ceph_mon_quorum_status != 1
- for: 1m
- labels:
- severity: page
- annotations:
- summary: "MON(s) down"
- description: "This indicates that one or more MON(s) is down."
- - alert: Cluster Capacity Low
- expr: sum(ceph_osd_stat_bytes_used) / sum(ceph_osd_stat_bytes) > 0.85
- for: 1m
- labels:
- severity: page
- annotations:
- summary: "Cluster Capacity Low"
- description: "This indicates raw used space crosses the 85% capacity threshold of the ceph cluster."
- - alert: OSD(s) with High PG Count
- expr: ceph_osd_numpg > 275
- for: 1m
- labels:
- severity: page
- annotations:
- summary: "OSD(s) with High PG Count"
- description: "This indicates there are some OSDs with high PG count (275+)."
- - alert: Slow OSD Ops
- expr: ceph_healthcheck_slow_ops > 0
- for: 1m
- labels:
- severity: page
- annotations:
- summary: "Slow OSD Ops"
- description: "OSD requests are taking too long to process (osd_op_complaint_time exceeded)"
+ - name: Dashboard
+ rules:
+ - alert: Ceph Health Warning
+ expr: ceph_health_status == 1
+ for: 1m
+ labels:
+ severity: page
+ annotations:
+ summary: "Ceph Health Warning"
+ description: "Overall Ceph Health"
+ - alert: Ceph Health Error
+ expr: ceph_health_status > 1
+ for: 1m
+ labels:
+ severity: page
+ annotations:
+ summary: "Ceph Health Error"
+ description: "The Ceph cluster health is in an error state"
+ - alert: Disk(s) Near Full
+ expr: (ceph_osd_stat_bytes_used / ceph_osd_stat_bytes) * 100 > 85
+ for: 1m
+ labels:
+ severity: page
+ annotations:
+ summary: "Disk(s) Near Full"
+ description: "This shows how many disks are at or above 85% full. Performance may degrade beyond this threshold on filestore (XFS) backed OSD's."
+ - alert: OSD(s) Down
+ expr: ceph_osd_up < 0.5
+ for: 1m
+ labels:
+ severity: page
+ annotations:
+ summary: "OSD(s) Down"
+ description: "This indicates that one or more OSDs is currently marked down in the cluster."
+ - alert: OSD Host(s) Down
+ expr: count by(instance) (ceph_disk_occupation * on(ceph_daemon) group_right(instance) ceph_osd_up == 0) - count by(instance) (ceph_disk_occupation) == 0
+ for: 1m
+ labels:
+ severity: page
+ annotations:
+ summary: "OSD Host(s) Down"
+ description: "This indicates that one or more OSD hosts is currently down in the cluster."
+ - alert: PG(s) Stuck
+ expr: max(ceph_osd_numpg) > scalar(ceph_pg_active)
+ for: 1m
+ labels:
+ severity: page
+ annotations:
+ summary: "PG(s) Stuck"
+ description: "This indicates there are pg's in a stuck state, manual intervention needed to resolve."
+ - alert: OSD Host Loss Check
+ expr: max(sum(ceph_osd_stat_bytes - ceph_osd_stat_bytes_used)) * 0.9 < scalar(max(sum by (instance) (ceph_osd_stat_bytes + on (ceph_daemon) group_left (instance) (ceph_disk_occupation*0))))
+ for: 1m
+ labels:
+ severity: page
+ annotations:
+ summary: "OSD Host Loss Check"
+ description: "This indicates that the cluster @ 90% full is not enough to support the loss of the largest OSD host."
+ - alert: Slow OSD Responses
+ expr: ((irate(node_disk_read_time_seconds_total[5m]) / clamp_min(irate(node_disk_reads_completed_total[5m]), 1) + irate(node_disk_write_time_seconds_total[5m]) / clamp_min(irate(node_disk_writes_completed_total[5m]), 1)) and on (instance, device) ceph_disk_occupation) > 1
+ for: 1m
+ labels:
+ severity: page
+ annotations:
+ summary: "Slow OSD Responses"
+ description: "This indicates that some OSD Latencies are above 1s."
+ - alert: Network Errors
+ expr: sum by (instance, device) (irate(node_network_receive_drop_total{device=~"(eth|en|bond|ib|mlx|p).*"}[5m]) + irate(node_network_receive_errs_total{device=~"(eth|en|bond|ib|mlx|p).*"}[5m]) + irate(node_network_transmit_drop_total{device=~"(eth|en|bond|ib|mlx|p).*"}[5m]) + irate(node_network_transmit_errs_total{device=~"(eth|en|bond|ib|mlx|p).*"}[5m])) > 10
+ for: 1m
+ labels:
+ severity: page
+ annotations:
+ summary: "Network Errors"
+ description: "This indicates that more than 10 dropped/error packets are seen in a 5m interval"
+ - alert: Pool Capacity Low
+ expr: (ceph_pool_stored / (ceph_pool_stored + ceph_pool_max_avail) * 100 + on (pool_id) group_left (name) (ceph_pool_metadata*0)) > 85
+ for: 1m
+ labels:
+ severity: page
+ annotations:
+ summary: "Pool Capacity Low"
+ description: "This indicates a low capacity in a pool."
+ - alert: MON(s) Down
+ expr: ceph_mon_quorum_status != 1
+ for: 1m
+ labels:
+ severity: page
+ annotations:
+ summary: "MON(s) down"
+ description: "This indicates that one or more MON(s) is down."
+ - alert: Cluster Capacity Low
+ expr: sum(ceph_osd_stat_bytes_used) / sum(ceph_osd_stat_bytes) > 0.85
+ for: 1m
+ labels:
+ severity: page
+ annotations:
+ summary: "Cluster Capacity Low"
+ description: "This indicates raw used space crosses the 85% capacity threshold of the ceph cluster."
+ - alert: OSD(s) with High PG Count
+ expr: ceph_osd_numpg > 275
+ for: 1m
+ labels:
+ severity: page
+ annotations:
+ summary: "OSD(s) with High PG Count"
+ description: "This indicates there are some OSDs with high PG count (275+)."
+ - alert: Slow OSD Ops
+ expr: ceph_healthcheck_slow_ops > 0
+ for: 1m
+ labels:
+ severity: page
+ annotations:
+ summary: "Slow OSD Ops"
+ description: "OSD requests are taking too long to process (osd_op_complaint_time exceeded)"
---
-- name: service handler
+- name: Service handler
# We use the systemd module here so we can use the daemon_reload feature,
# since we're shipping the .service file ourselves
- systemd:
+ ansible.builtin.systemd:
name: "{{ item }}"
daemon_reload: true
enabled: true
author: Boris Ranto
description: Configures Prometheus for Ceph Dashboard
license: Apache
- min_ansible_version: 2.4
+ min_ansible_version: '2.4'
platforms:
- name: EL
versions:
- - 7
+ - 'all'
galaxy_tags:
- system
dependencies: []
---
-- name: create prometheus directories
- file:
+- name: Create prometheus directories
+ ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: "{{ prometheus_user_id }}"
group: "{{ prometheus_user_id }}"
+ mode: "0755"
with_items:
- - "{{ prometheus_conf_dir }}"
- - "{{ prometheus_data_dir }}"
+ - "{{ prometheus_conf_dir }}"
+ - "{{ prometheus_data_dir }}"
-- name: write prometheus config file
+- name: Write prometheus config file
openstack.config_template.config_template:
src: prometheus.yml.j2
dest: "{{ prometheus_conf_dir }}/prometheus.yml"
owner: "{{ prometheus_user_id }}"
group: "{{ prometheus_user_id }}"
- mode: 0640
+ mode: "0640"
config_type: yaml
config_overrides: "{{ prometheus_conf_overrides }}"
- notify: service handler
+ notify: Service handler
-- name: make sure the alerting rules directory exists
- file:
+- name: Make sure the alerting rules directory exists
+ ansible.builtin.file:
path: "/etc/prometheus/alerting/"
state: directory
owner: "{{ prometheus_user_id }}"
group: "{{ prometheus_user_id }}"
+ mode: "0755"
-- name: copy alerting rules
- copy:
+- name: Copy alerting rules
+ ansible.builtin.copy:
src: "ceph_dashboard.yml"
dest: "/etc/prometheus/alerting/ceph_dashboard.yml"
owner: "{{ prometheus_user_id }}"
group: "{{ prometheus_user_id }}"
- mode: 0644
+ mode: "0644"
-- name: create alertmanager directories
- file:
+- name: Create alertmanager directories
+ ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: "{{ prometheus_user_id }}"
group: "{{ prometheus_user_id }}"
+ mode: "0755"
with_items:
- - "{{ alertmanager_conf_dir }}"
- - "{{ alertmanager_data_dir }}"
+ - "{{ alertmanager_conf_dir }}"
+ - "{{ alertmanager_data_dir }}"
-- name: write alertmanager config file
+- name: Write alertmanager config file
openstack.config_template.config_template:
src: alertmanager.yml.j2
dest: "{{ alertmanager_conf_dir }}/alertmanager.yml"
owner: "{{ prometheus_user_id }}"
group: "{{ prometheus_user_id }}"
- mode: 0640
+ mode: "0640"
config_type: yaml
config_overrides: "{{ alertmanager_conf_overrides }}"
- notify: service handler
+ notify: Service handler
-- name: include setup_container.yml
- include_tasks: setup_container.yml
+- name: Include setup_container.yml
+ ansible.builtin.include_tasks: setup_container.yml
---
-- name: include_tasks systemd.yml
- include_tasks: systemd.yml
+- name: Include_tasks systemd.yml
+ ansible.builtin.include_tasks: systemd.yml
-- name: start prometheus services
- systemd:
+- name: Start prometheus services
+ ansible.builtin.systemd:
name: "{{ item }}"
daemon_reload: true
enabled: true
---
-- name: ship systemd services
- template:
+- name: Ship systemd services
+ ansible.builtin.template:
src: "{{ item }}.j2"
dest: "/etc/systemd/system/{{ item }}"
owner: root
group: root
- mode: 0644
+ mode: "0644"
with_items:
- 'alertmanager.service'
- 'prometheus.service'
- notify: service handler
+ notify: Service handler
# ceph_rbd_mirror_systemd_overrides will override the systemd settings
# for the ceph-rbd-mirror services.
# For example,to set "PrivateDevices=false" you can specify:
-#ceph_rbd_mirror_systemd_overrides:
-# Service:
-# PrivateDevices: False
+# ceph_rbd_mirror_systemd_overrides:
+# Service:
+# PrivateDevices: false
author: Sébastien Han
description: Installs Ceph Mirror Agent
license: Apache
- min_ansible_version: 2.7
+ min_ansible_version: '2.7'
platforms:
- name: EL
versions:
- - 7
+ - 'all'
galaxy_tags:
- system
dependencies: []
---
-- name: cephx tasks
+- name: Cephx tasks
when:
- cephx | bool
block:
- - name: get client.bootstrap-rbd-mirror from ceph monitor
+ - name: Get client.bootstrap-rbd-mirror from ceph monitor
ceph_key:
name: client.bootstrap-rbd-mirror
cluster: "{{ cluster }}"
run_once: true
no_log: "{{ no_log_on_ceph_key_tasks }}"
- - name: ensure /var/lib/ceph/bootstrap-rbd-mirror exists
- file:
+ - name: Ensure /var/lib/ceph/bootstrap-rbd-mirror exists
+ ansible.builtin.file:
path: /var/lib/ceph/bootstrap-rbd-mirror
state: directory
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+ mode: "0750"
- - name: copy ceph key(s)
- copy:
+ - name: Copy ceph key(s)
+ ansible.builtin.copy:
dest: "/var/lib/ceph/bootstrap-rbd-mirror/{{ cluster }}.keyring"
content: "{{ _bootstrap_rbd_mirror_key.stdout + '\n' }}"
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
mode: "{{ ceph_keyring_permissions }}"
no_log: "{{ no_log_on_ceph_key_tasks }}"
- - name: create rbd-mirror keyrings
+ - name: Create rbd-mirror keyrings
ceph_key:
name: "{{ item.name }}"
cluster: "{{ cluster }}"
dest: "/etc/ceph/{{ cluster }}.{{ ceph_rbd_mirror_local_user }}.keyring",
secret: "{{ ceph_rbd_mirror_local_user_secret | default('') }}" }
- - name: get "client.rbd-mirror.{{ ansible_facts['hostname'] }}" from ceph monitor
+ - name: Get client.rbd-mirror keyring from ceph monitor
ceph_key:
name: "client.rbd-mirror.{{ ansible_facts['hostname'] }}"
cluster: "{{ cluster }}"
delegate_to: "{{ groups.get(mon_group_name)[0] }}"
no_log: "{{ no_log_on_ceph_key_tasks }}"
- - name: copy ceph key
- copy:
+ - name: Copy ceph key
+ ansible.builtin.copy:
dest: "/etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_facts['hostname'] }}.keyring"
content: "{{ _rbd_mirror_key.stdout + '\n' }}"
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
mode: "{{ ceph_keyring_permissions }}"
no_log: false
-- name: start and add the rbd-mirror service instance
- service:
+- name: Start and add the rbd-mirror service instance
+ ansible.builtin.service:
name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
state: started
- enabled: yes
- masked: no
+ enabled: true
+ masked: false
changed_when: false
when:
- not containerized_deployment | bool
- ceph_rbd_mirror_remote_user is defined
-- name: set_fact ceph_rbd_mirror_pools
- set_fact:
+- name: Set_fact ceph_rbd_mirror_pools
+ ansible.builtin.set_fact:
ceph_rbd_mirror_pools:
- name: "{{ ceph_rbd_mirror_pool }}"
when: ceph_rbd_mirror_pools is undefined
-- name: create pool if it doesn't exist
+- name: Create pool if it doesn't exist
ceph_pool:
name: "{{ item.name }}"
cluster: "{{ cluster }}"
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-- name: enable mirroring on the pool
- command: "{{ rbd_cmd }} --cluster {{ cluster }} mirror pool enable {{ item.name }} {{ ceph_rbd_mirror_mode }}"
+- name: Enable mirroring on the pool
+ ansible.builtin.command: "{{ rbd_cmd }} --cluster {{ cluster }} mirror pool enable {{ item.name }} {{ ceph_rbd_mirror_mode }}"
register: result
changed_when: false
retries: 60
loop: "{{ ceph_rbd_mirror_pools }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
-- name: add mirroring peer
+- name: Add mirroring peer
when: ceph_rbd_mirror_remote_user is defined
block:
- - name: list mirroring peer
- command: "{{ rbd_cmd }} --cluster {{ cluster }} mirror pool info {{ item.name }}"
+ - name: List mirroring peer
+ ansible.builtin.command: "{{ rbd_cmd }} --cluster {{ cluster }} mirror pool info {{ item.name }}"
changed_when: false
register: mirror_peer
loop: "{{ ceph_rbd_mirror_pools }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
- - name: create a temporary file
- tempfile:
+ - name: Create a temporary file
+ ansible.builtin.tempfile:
path: /etc/ceph
state: file
suffix: _ceph-ansible
register: tmp_file
delegate_to: "{{ groups[mon_group_name][0] }}"
- - name: write secret to temporary file
- copy:
+ - name: Write secret to temporary file
+ ansible.builtin.copy:
dest: "{{ tmp_file.path }}"
content: "{{ ceph_rbd_mirror_remote_key }}"
+ mode: "0644"
delegate_to: "{{ groups[mon_group_name][0] }}"
- - name: add a mirroring peer
- command: "{{ rbd_cmd }} --cluster {{ cluster }} mirror pool peer add {{ item.item.name }} {{ ceph_rbd_mirror_remote_user }}@{{ ceph_rbd_mirror_remote_cluster }} --remote-mon-host {{ ceph_rbd_mirror_remote_mon_hosts }} --remote-key-file {{ tmp_file.path }}"
+ - name: Add a mirroring peer
+ ansible.builtin.command: "{{ rbd_cmd }} --cluster {{ cluster }} mirror pool peer add {{ item.item.name }} {{ ceph_rbd_mirror_remote_user }}@{{ ceph_rbd_mirror_remote_cluster }} --remote-mon-host {{ ceph_rbd_mirror_remote_mon_hosts }} --remote-key-file {{ tmp_file.path }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
loop: "{{ mirror_peer.results }}"
run_once: true
when: ceph_rbd_mirror_remote_user not in item.stdout
- - name: rm temporary file
- file:
+ - name: Rm temporary file
+ ansible.builtin.file:
path: "{{ tmp_file.path }}"
state: absent
delegate_to: "{{ groups[mon_group_name][0] }}"
---
-- name: non-containerized related tasks
+- name: Non-containerized related tasks
when:
- not containerized_deployment | bool
- ceph_rbd_mirror_remote_user is defined
block:
- - name: install dependencies
- package:
+ - name: Install dependencies
+ ansible.builtin.package:
name: rbd-mirror
state: present
register: result
until: result is succeeded
tags: package-install
- - name: ensure systemd service override directory exists
- file:
+ - name: Ensure systemd service override directory exists
+ ansible.builtin.file:
state: directory
path: "/etc/systemd/system/ceph-rbd-mirror@.service.d/"
+ mode: "0755"
when:
- ceph_rbd_mirror_systemd_overrides is defined
- ansible_facts['service_mgr'] == 'systemd'
- - name: add ceph-rbd-mirror systemd service overrides
+ - name: Add ceph-rbd-mirror systemd service overrides
openstack.config_template.config_template:
src: "ceph-rbd-mirror.service.d-overrides.j2"
dest: "/etc/systemd/system/ceph-rbd-mirror@.service.d/ceph-rbd-mirror-systemd-overrides.conf"
- ceph_rbd_mirror_systemd_overrides is defined
- ansible_facts['service_mgr'] == 'systemd'
- - name: enable ceph-rbd-mirror.target
- systemd:
+ - name: Enable ceph-rbd-mirror.target
+ ansible.builtin.systemd:
name: "ceph-rbd-mirror.target"
state: started
- enabled: yes
- masked: no
+ enabled: true
+ masked: false
changed_when: false
-- name: set_fact ceph_cmd
- set_fact:
+- name: Set_fact ceph_cmd
+ ansible.builtin.set_fact:
rbd_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:z -v /var/run/ceph:/var/run/ceph:z --entrypoint=rbd ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'rbd' }}"
-- name: include configure_mirroring.yml
- include_tasks: configure_mirroring.yml
+- name: Include configure_mirroring.yml
+ ansible.builtin.include_tasks: configure_mirroring.yml
-- name: include start_container_rbd_mirror.yml
- include_tasks: start_container_rbd_mirror.yml
+- name: Include start_container_rbd_mirror.yml
+ ansible.builtin.include_tasks: start_container_rbd_mirror.yml
when:
- containerized_deployment | bool
- ceph_rbd_mirror_remote_user is defined
---
# Use systemd to manage container on Atomic host
-- name: include_tasks systemd.yml
- include_tasks: systemd.yml
+- name: Include_tasks systemd.yml
+ ansible.builtin.include_tasks: systemd.yml
-- name: systemd start rbd mirror container
- systemd:
+- name: Systemd start rbd mirror container
+ ansible.builtin.systemd:
name: ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}
state: started
- enabled: yes
- masked: no
- daemon_reload: yes
+ enabled: true
+ masked: false
+ daemon_reload: true
---
-- name: generate systemd unit file
- template:
+- name: Generate systemd unit file
+ ansible.builtin.template:
src: "{{ role_path }}/templates/ceph-rbd-mirror.service.j2"
dest: /etc/systemd/system/ceph-rbd-mirror@.service
owner: "root"
group: "root"
mode: "0644"
- notify: restart ceph rbdmirrors
+ notify: Restart ceph rbdmirrors
-- name: generate systemd ceph-rbd-mirror target file
- copy:
+- name: Generate systemd ceph-rbd-mirror target file
+ ansible.builtin.copy:
src: ceph-rbd-mirror.target
dest: /etc/systemd/system/ceph-rbd-mirror.target
+ mode: "0644"
when: containerized_deployment | bool
-- name: enable ceph-rbd-mirror.target
- service:
+- name: Enable ceph-rbd-mirror.target
+ ansible.builtin.service:
name: ceph-rbd-mirror.target
- enabled: yes
- daemon_reload: yes
- when: containerized_deployment | bool
\ No newline at end of file
+ enabled: true
+ daemon_reload: true
+ when: containerized_deployment | bool
- no-tlsv11
- no-tls-tickets
#
-#virtual_ips:
-# - 192.168.238.250
-# - 192.168.238.251
+# virtual_ips:
+# - 192.168.238.250
+# - 192.168.238.251
#
-#virtual_ip_netmask: 24
-#virtual_ip_interface: ens33
+# virtual_ip_netmask: 24
+# virtual_ip_interface: ens33
---
-- name: restart haproxy
- service:
+- name: Restart haproxy
+ ansible.builtin.service:
name: haproxy
state: restarted
-- name: restart keepalived
- service:
+- name: Restart keepalived
+ ansible.builtin.service:
name: keepalived
state: restarted
author: Gui Hecheng
description: Config HAProxy & Keepalived
license: Apache
- min_ansible_version: 2.8
+ min_ansible_version: '2.8'
platforms:
- name: EL
versions:
- - 7
+ - 'all'
galaxy_tags:
- system
dependencies: []
---
-- name: include_tasks pre_requisite.yml
- include_tasks: pre_requisite.yml
+- name: Include_tasks pre_requisite.yml
+ ansible.builtin.include_tasks: pre_requisite.yml
-- name: include_tasks start_rgw_loadbalancer.yml
- include_tasks: start_rgw_loadbalancer.yml
+- name: Include_tasks start_rgw_loadbalancer.yml
+ ansible.builtin.include_tasks: start_rgw_loadbalancer.yml
---
-- name: install haproxy and keepalived
- package:
+- name: Install haproxy and keepalived
+ ansible.builtin.package:
name: ['haproxy', 'keepalived']
state: present
register: result
until: result is succeeded
-- name: "generate haproxy configuration file: haproxy.cfg"
- template:
+- name: Generate haproxy configuration file haproxy.cfg
+ ansible.builtin.template:
src: haproxy.cfg.j2
dest: /etc/haproxy/haproxy.cfg
owner: "root"
group: "root"
mode: "0644"
validate: "haproxy -f %s -c"
- notify:
- - restart haproxy
+ notify: Restart haproxy
-- name: set_fact vip to vrrp_instance
- set_fact:
- vrrp_instances: "{{ vrrp_instances | default([]) | union([{ 'name': 'VI_' + index|string , 'vip': item, 'master': groups[rgwloadbalancer_group_name][index] }]) }}"
+- name: Set_fact vip to vrrp_instance
+ ansible.builtin.set_fact:
+ vrrp_instances: "{{ vrrp_instances | default([]) | union([{'name': 'VI_' + index | string, 'vip': item, 'master': groups[rgwloadbalancer_group_name][index]}]) }}"
loop: "{{ virtual_ips | flatten(levels=1) }}"
loop_control:
index_var: index
-- name: "generate keepalived: configuration file: keepalived.conf"
- template:
+- name: Generate keepalived configuration file keepalived.conf
+ ansible.builtin.template:
src: keepalived.conf.j2
dest: /etc/keepalived/keepalived.conf
owner: "root"
group: "root"
mode: "0644"
- notify:
- - restart keepalived
+ notify: Restart keepalived
-- name: selinux related tasks
+- name: Selinux related tasks
when:
- ansible_facts['os_family'] == 'RedHat'
- ansible_facts['selinux']['status'] == 'enabled'
block:
- - name: set_fact rgw_ports
- set_fact:
+ - name: Set_fact rgw_ports
+ ansible.builtin.set_fact:
rgw_ports: "{{ rgw_ports | default([]) | union(hostvars[item]['rgw_instances'] | map(attribute='radosgw_frontend_port') | map('string') | list) }}"
with_items: "{{ groups.get(rgw_group_name, []) }}"
- - name: add selinux rules
- seport:
+ - name: Add selinux rules
+ community.general.seport:
ports: "{{ rgw_ports }}"
proto: tcp
setype: http_port_t
---
-- name: start haproxy
- service:
+- name: Start haproxy
+ ansible.builtin.service:
name: haproxy
state: started
- enabled: yes
+ enabled: true
-- name: start keepalived
- service:
+- name: Start keepalived
+ ansible.builtin.service:
name: keepalived
state: started
- enabled: yes
+ enabled: true
# If the key doesn't exist it falls back to the default replicated_rule.
# This only works for replicated pool type not erasure.
-#rgw_create_pools:
-# "{{ rgw_zone }}.rgw.buckets.data":
-# pg_num: 64
-# type: ec
-# ec_profile: myecprofile
-# ec_k: 5
-# ec_m: 3
-# "{{ rgw_zone }}.rgw.buckets.index":
-# pg_num: 16
-# size: 3
-# type: replicated
-# "{{ rgw_zone }}.rgw.meta":
-# pg_num: 8
-# size: 3
-# type: replicated
-# "{{ rgw_zone }}.rgw.log":
-# pg_num: 8
-# size: 3
-# type: replicated
-# "{{ rgw_zone }}.rgw.control":
-# pg_num: 8
-# size: 3
-# type: replicated
-# rule_name: foo
+# rgw_create_pools:
+# "{{ rgw_zone }}.rgw.buckets.data":
+# pg_num: 64
+# type: ec
+# ec_profile: myecprofile
+# ec_k: 5
+# ec_m: 3
+# "{{ rgw_zone }}.rgw.buckets.index":
+# pg_num: 16
+# size: 3
+# type: replicated
+# "{{ rgw_zone }}.rgw.meta":
+# pg_num: 8
+# size: 3
+# type: replicated
+# "{{ rgw_zone }}.rgw.log":
+# pg_num: 8
+# size: 3
+# type: replicated
+# "{{ rgw_zone }}.rgw.control":
+# pg_num: 8
+# size: 3
+# type: replicated
+# rule_name: foo
##########
# These options can be passed using the 'ceph_rgw_docker_extra_env' variable.
ceph_rgw_docker_memory_limit: "4096m"
ceph_rgw_docker_cpu_limit: 8
-#ceph_rgw_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16"
-#ceph_rgw_docker_cpuset_mems: "0"
+# ceph_rgw_docker_cpuset_cpus: "0,2,4,6,8,10,12,14,16"
+# ceph_rgw_docker_cpuset_mems: "0"
ceph_rgw_docker_extra_env:
ceph_config_keys: [] # DON'T TOUCH ME
# ceph_rgw_systemd_overrides will override the systemd settings
# for the ceph-rgw services.
# For example,to set "PrivateDevices=false" you can specify:
-#ceph_rgw_systemd_overrides:
-# Service:
-# PrivateDevices: False
+# ceph_rgw_systemd_overrides:
+# Service:
+# PrivateDevices: false
---
-- name: restart rgw
- service:
+- name: Restart rgw
+ ansible.builtin.service:
name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: restarted
with_items: "{{ rgw_instances }}"
author: Sébastien Han
description: Installs Ceph Rados Gateway
license: Apache
- min_ansible_version: 2.7
+ min_ansible_version: '2.7'
platforms:
- name: EL
versions:
- - 7
+ - 'all'
galaxy_tags:
- system
dependencies: []
---
-- name: create rados gateway directories
- file:
+- name: Create rados gateway directories
+ ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
mode: "{{ ceph_directories_mode }}"
with_items: "{{ rbd_client_admin_socket_path }}"
-- name: get keys from monitors
+- name: Get keys from monitors
ceph_key:
name: "{{ item.name }}"
cluster: "{{ cluster }}"
- item.copy_key | bool
no_log: "{{ no_log_on_ceph_key_tasks }}"
-- name: copy ceph key(s) if needed
- copy:
+- name: Copy ceph key(s) if needed
+ ansible.builtin.copy:
dest: "{{ item.item.path }}"
content: "{{ item.stdout + '\n' }}"
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
- item.item.copy_key | bool
no_log: "{{ no_log_on_ceph_key_tasks }}"
-- name: copy SSL certificate & key data to certificate path
- copy:
+- name: Copy SSL certificate & key data to certificate path
+ ansible.builtin.copy:
content: "{{ radosgw_frontend_ssl_certificate_data }}"
dest: "{{ radosgw_frontend_ssl_certificate }}"
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
- mode: 0440
+ mode: "0440"
when: radosgw_frontend_ssl_certificate | length > 0 and radosgw_frontend_ssl_certificate_data | length > 0
- notify: restart ceph rgws
+ notify: Restart ceph rgws
---
-- name: include common.yml
- include_tasks: common.yml
+- name: Include common.yml
+ ansible.builtin.include_tasks: common.yml
-- name: include_tasks pre_requisite.yml
- include_tasks: pre_requisite.yml
+- name: Include_tasks pre_requisite.yml
+ ansible.builtin.include_tasks: pre_requisite.yml
-- name: rgw pool creation tasks
- include_tasks: rgw_create_pools.yml
+- name: Rgw pool creation tasks
+ ansible.builtin.include_tasks: rgw_create_pools.yml
run_once: true
when: rgw_create_pools is defined
-- name: include_tasks openstack-keystone.yml
- include_tasks: openstack-keystone.yml
+- name: Include_tasks openstack-keystone.yml
+ ansible.builtin.include_tasks: openstack-keystone.yml
when: radosgw_keystone_ssl | bool
-- name: include_tasks start_radosgw.yml
- include_tasks: start_radosgw.yml
+- name: Include_tasks start_radosgw.yml
+ ansible.builtin.include_tasks: start_radosgw.yml
when:
- not containerized_deployment | bool
-- name: include start_docker_rgw.yml
- include_tasks: start_docker_rgw.yml
+- name: Include start_docker_rgw.yml
+ ansible.builtin.include_tasks: start_docker_rgw.yml
when:
- containerized_deployment | bool
---
-- name: install nss-tools on redhat
- package:
+- name: Install nss-tools on redhat
+ ansible.builtin.package:
name: nss-tools
state: present
register: result
until: result is succeeded
when: ansible_facts['pkg_mgr'] == 'yum' or ansible_facts['pkg_mgr'] == 'dnf'
-- name: install libnss3-tools on debian
- package:
+- name: Install libnss3-tools on debian
+ ansible.builtin.package:
name: libnss3-tools
state: present
register: result
until: result is succeeded
when: ansible_facts['pkg_mgr'] == 'apt'
-- name: create nss directory for keystone certificates
- file:
+- name: Create nss directory for keystone certificates
+ ansible.builtin.file:
path: "{{ radosgw_nss_db_path }}"
state: directory
owner: root
group: root
- mode: 0644
+ mode: "0644"
-- name: create nss entries for keystone certificates
- shell: "{{ item }}"
+- name: Create nss entries for keystone certificates
+ ansible.builtin.shell: "{{ item }}"
changed_when: false
with_items:
- "openssl x509 -in /etc/keystone/ssl/certs/ca.pem -pubkey | certutil -d {{ radosgw_nss_db_path }} -A -n ca -t 'TCu,Cu,Tuw'"
---
-- name: set_fact _rgw_hostname
- set_fact:
+- name: Set_fact _rgw_hostname
+ ansible.builtin.set_fact:
_rgw_hostname: "{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}"
-- name: set rgw parameter (log file)
+- name: Set rgw parameter (log file)
ceph_config:
action: set
who: "client.rgw.{{ _rgw_hostname + '.' + item.instance_name }}"
delegate_to: "{{ groups.get(mon_group_name, [])[0] }}"
loop: "{{ hostvars[inventory_hostname]['rgw_instances'] }}"
-- name: set rgw parameter (rgw_frontends)
+- name: Set rgw parameter (rgw_frontends)
ceph_config:
action: set
who: "client.rgw.{{ _rgw_hostname + '.' + item.instance_name }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
delegate_to: "{{ groups.get(mon_group_name, [])[0] }}"
loop: "{{ hostvars[inventory_hostname]['rgw_instances'] }}"
- notify: restart ceph rgws
+ notify: Restart ceph rgws
# rgw_frontends
# {{ 'ssl_' if radosgw_frontend_ssl_certificate else '' }}endpoint={{ _rgw_binding_socket }}{{ ' ssl_certificate='+radosgw_frontend_ssl_certificate if radosgw_frontend_ssl_certificate else '' }}
-- name: create rados gateway directories
- file:
+- name: Create rados gateway directories
+ ansible.builtin.file:
path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: directory
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
loop: "{{ rgw_instances }}"
when: groups.get(mon_group_name, []) | length > 0
-- name: create rgw keyrings
+- name: Create rgw keyrings
ceph_key:
name: "client.rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
cluster: "{{ cluster }}"
group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
mode: "0600"
no_log: "{{ no_log_on_ceph_key_tasks }}"
- delegate_to: "{{ groups[mon_group_name][0] if groups.get(mon_group_name, []) | length > 0 else 'localhost'}}"
+ delegate_to: "{{ groups[mon_group_name][0] if groups.get(mon_group_name, []) | length > 0 else 'localhost' }}"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
with_items: "{{ rgw_instances }}"
when: cephx | bool
-- name: get keys from monitors
+- name: Get keys from monitors
ceph_key:
name: "client.rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
cluster: "{{ cluster }}"
- groups.get(mon_group_name, []) | length > 0
no_log: "{{ no_log_on_ceph_key_tasks }}"
-- name: copy ceph key(s) if needed
- copy:
+- name: Copy ceph key(s) if needed
+ ansible.builtin.copy:
dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.item.instance_name }}/keyring"
content: "{{ item.stdout + '\n' }}"
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
---
-- name: create ec profile
+- name: Create ec profile
ceph_ec_profile:
name: "{{ item.value.ec_profile }}"
cluster: "{{ cluster }}"
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-- name: set crush rule
+- name: Set crush rule
ceph_crush_rule:
name: "{{ item.key }}"
cluster: "{{ cluster }}"
- item.value.type is defined
- item.value.type == 'ec'
-- name: create ec pools for rgw
+- name: Create ec pools for rgw
ceph_pool:
name: "{{ item.key }}"
state: present
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-- name: create replicated pools for rgw
+- name: Create replicated pools for rgw
ceph_pool:
name: "{{ item.key }}"
state: present
---
-- name: include_task systemd.yml
- include_tasks: systemd.yml
+- name: Include_task systemd.yml
+ ansible.builtin.include_tasks: systemd.yml
-- name: systemd start rgw container
- systemd:
+- name: Systemd start rgw container
+ ansible.builtin.systemd:
name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
state: started
- enabled: yes
- masked: no
- daemon_reload: yes
+ enabled: true
+ masked: false
+ daemon_reload: true
with_items: "{{ rgw_instances }}"
---
-- name: ensure systemd service override directory exists
- file:
+- name: Ensure systemd service override directory exists
+ ansible.builtin.file:
state: directory
path: "/etc/systemd/system/ceph-radosgw@.service.d/"
+ mode: "0750"
when: ceph_rgw_systemd_overrides is defined
-- name: add ceph-rgw systemd service overrides
+- name: Add ceph-rgw systemd service overrides
openstack.config_template.config_template:
src: "ceph-rgw.service.d-overrides.j2"
dest: "/etc/systemd/system/ceph-radosgw@.service.d/ceph-radosgw-systemd-overrides.conf"
config_type: "ini"
when: ceph_rgw_systemd_overrides is defined
-- name: start rgw instance
- service:
+- name: Start rgw instance
+ ansible.builtin.systemd:
name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
state: started
- enabled: yes
- masked: no
+ enabled: true
+ masked: false
with_items: "{{ rgw_instances }}"
-- name: enable the ceph-radosgw.target service
- systemd:
+- name: Enable the ceph-radosgw.target service
+ ansible.builtin.systemd:
name: ceph-radosgw.target
- enabled: yes
- masked: no
+ enabled: true
+ masked: false
---
-- name: generate systemd unit file
- template:
+- name: Generate systemd unit file
+ ansible.builtin.template:
src: "{{ role_path }}/templates/ceph-radosgw.service.j2"
dest: /etc/systemd/system/ceph-radosgw@.service
owner: "root"
group: "root"
mode: "0644"
- notify: restart ceph rgws
+ notify: Restart ceph rgws
-- name: generate systemd ceph-radosgw target file
- copy:
+- name: Generate systemd ceph-radosgw target file
+ ansible.builtin.copy:
src: ceph-radosgw.target
dest: /etc/systemd/system/ceph-radosgw.target
+ mode: "0644"
when: containerized_deployment | bool
-- name: enable ceph-radosgw.target
- service:
+- name: Enable ceph-radosgw.target
+ ansible.builtin.service:
name: ceph-radosgw.target
- enabled: yes
- daemon_reload: yes
- when: containerized_deployment | bool
\ No newline at end of file
+ enabled: true
+ daemon_reload: true
+ when: containerized_deployment | bool
author: Andrew Schoen
description: Validates Ceph config options
license: Apache
- min_ansible_version: 2.7
+ min_ansible_version: '2.7'
platforms:
- name: EL
versions:
- - 7
+ - 'all'
galaxy_tags:
- system
dependencies: []
---
-- name: set_fact root_device
- set_fact:
+- name: Set_fact root_device
+ ansible.builtin.set_fact:
root_device: "{{ ansible_facts['mounts'] | selectattr('mount', 'match', '^/$') | map(attribute='device') | first }}"
-- name: lvm_volumes variable's tasks related
+- name: Lvm_volumes variable's tasks related
when:
- lvm_volumes is defined
- lvm_volumes | length > 0
block:
- - name: resolve devices in lvm_volumes
- command: "readlink -f {{ item.data }}"
+ - name: Resolve devices in lvm_volumes
+ ansible.builtin.command: "readlink -f {{ item.data }}"
changed_when: false
register: _lvm_volumes_data_devices
with_items: "{{ lvm_volumes }}"
when: item.data_vg is undefined
- - name: set_fact lvm_volumes_data_devices
- set_fact:
+ - name: Set_fact lvm_volumes_data_devices
+ ansible.builtin.set_fact:
lvm_volumes_data_devices: "{{ lvm_volumes_data_devices | default([]) + [item.stdout] }}"
with_items: "{{ _lvm_volumes_data_devices.results }}"
when: item.skipped is undefined
-- name: fail if root_device is passed in lvm_volumes or devices
- fail:
+- name: Fail if root_device is passed in lvm_volumes or devices
+ ansible.builtin.fail:
msg: "{{ root_device }} found in either lvm_volumes or devices variable"
when: root_device in lvm_volumes_data_devices | default([]) or root_device in devices | default([])
-- name: check devices are block devices
+- name: Check devices are block devices
block:
- - name: get devices information
- parted:
+ - name: Get devices information
+ community.general.parted:
device: "{{ item }}"
unit: MiB
register: devices_parted
- failed_when: False
+ failed_when: false
with_items:
- "{{ devices | default([]) }}"
- "{{ dedicated_devices | default([]) }}"
- "{{ bluestore_wal_devices | default([]) }}"
- "{{ lvm_volumes_data_devices | default([]) }}"
- - name: fail if one of the devices is not a device
- fail:
+ - name: Fail if one of the devices is not a device
+ ansible.builtin.fail:
msg: "{{ item.item }} is not a block special file!"
when: item.rc is defined
with_items: "{{ devices_parted.results }}"
- - name: fail when gpt header found on osd devices
- fail:
+ - name: Fail when gpt header found on osd devices
+ ansible.builtin.fail:
msg: "{{ item.disk.dev }} has gpt header, please remove it."
with_items: "{{ devices_parted.results }}"
when:
- item.disk.table == 'gpt'
- item.partitions | length == 0
-- name: check logical volume in lvm_volumes
+- name: Check logical volume in lvm_volumes
when: lvm_volumes is defined
block:
- - name: check data logical volume
- stat:
+ - name: Check data logical volume
+ ansible.builtin.stat:
path: "/dev/{{ item.data_vg }}/{{ item.data }}"
follow: true
register: lvm_volumes_data
- item.data is defined
- item.data_vg is defined
- - name: fail if one of the data logical volume is not a device or doesn't exist
- fail:
+  - name: Fail if one of the data logical volumes is not a device or doesn't exist
+    ansible.builtin.fail:
      msg: "{{ item.item.data_vg }}/{{ item.item.data }} doesn't exist or isn't a block device"
loop: "{{ lvm_volumes_data.results }}"
when:
- item.skipped is undefined
- not item.stat.exists | bool or not item.stat.isblk | bool
- - name: check bluestore db logical volume
- stat:
+ - name: Check bluestore db logical volume
+ ansible.builtin.stat:
path: "/dev/{{ item.db_vg }}/{{ item.db }}"
follow: true
register: lvm_volumes_db
- item.db is defined
- item.db_vg is defined
- - name: fail if one of the bluestore db logical volume is not a device or doesn't exist
- fail:
+  - name: Fail if one of the bluestore db logical volumes is not a device or doesn't exist
+    ansible.builtin.fail:
      msg: "{{ item.item.db_vg }}/{{ item.item.db }} doesn't exist or isn't a block device"
loop: "{{ lvm_volumes_db.results }}"
when:
- item.skipped is undefined
- not item.stat.exists | bool or not item.stat.isblk | bool
- - name: check bluestore wal logical volume
- stat:
+ - name: Check bluestore wal logical volume
+ ansible.builtin.stat:
path: "/dev/{{ item.wal_vg }}/{{ item.wal }}"
follow: true
register: lvm_volumes_wal
- item.wal is defined
- item.wal_vg is defined
- - name: fail if one of the bluestore wal logical volume is not a device or doesn't exist
- fail:
+  - name: Fail if one of the bluestore wal logical volumes is not a device or doesn't exist
+    ansible.builtin.fail:
      msg: "{{ item.item.wal_vg }}/{{ item.item.wal }} doesn't exist or isn't a block device"
loop: "{{ lvm_volumes_wal.results }}"
when:
- item.skipped is undefined
- not item.stat.exists | bool or not item.stat.isblk | bool
-
---
-- name: "fail if {{ monitor_interface }} does not exist on {{ inventory_hostname }}"
- fail:
+- name: Check if network interface exists
+ ansible.builtin.fail:
msg: "{{ monitor_interface }} does not exist on {{ inventory_hostname }}"
when: monitor_interface not in ansible_facts['interfaces']
-- name: "fail if {{ monitor_interface }} is not active on {{ inventory_hostname }}"
- fail:
+- name: Check if network interface is active
+ ansible.builtin.fail:
msg: "{{ monitor_interface }} is not active on {{ inventory_hostname }}"
when: not hostvars[inventory_hostname]['ansible_facts'][(monitor_interface | replace('-', '_'))]['active']
-- name: "fail if {{ monitor_interface }} does not have any ip v4 address on {{ inventory_hostname }}"
- fail:
+- name: Check if network interface has an IPv4 address
+ ansible.builtin.fail:
msg: "{{ monitor_interface }} does not have any IPv4 address on {{ inventory_hostname }}"
when:
- ip_version == "ipv4"
- hostvars[inventory_hostname]['ansible_facts'][(monitor_interface | replace('-', '_'))]['ipv4'] is not defined
-- name: "fail if {{ monitor_interface }} does not have any ip v6 address on {{ inventory_hostname }}"
- fail:
+- name: Check if network interface has an IPv6 address
+ ansible.builtin.fail:
msg: "{{ monitor_interface }} does not have any IPv6 address on {{ inventory_hostname }}"
when:
- ip_version == "ipv6"
---
-- name: "fail if {{ radosgw_interface }} does not exist on {{ inventory_hostname }}"
- fail:
+- name: Check if network interface exists
+ ansible.builtin.fail:
msg: "{{ radosgw_interface }} does not exist on {{ inventory_hostname }}"
when: radosgw_interface not in ansible_facts['interfaces']
-- name: "fail if {{ radosgw_interface }} is not active on {{ inventory_hostname }}"
- fail:
+- name: Check if network interface is active
+ ansible.builtin.fail:
msg: "{{ radosgw_interface }} is not active on {{ inventory_hostname }}"
when: hostvars[inventory_hostname]['ansible_facts'][(radosgw_interface | replace('-', '_'))]['active'] == "false"
-- name: "fail if {{ radosgw_interface }} does not have any ip v4 address on {{ inventory_hostname }}"
- fail:
+- name: Check if network interface has an IPv4 address
+ ansible.builtin.fail:
msg: "{{ radosgw_interface }} does not have any IPv4 address on {{ inventory_hostname }}"
when:
- ip_version == "ipv4"
- hostvars[inventory_hostname]['ansible_facts'][(radosgw_interface | replace('-', '_'))]['ipv4'] is not defined
-- name: "fail if {{ radosgw_interface }} does not have any ip v6 address on {{ inventory_hostname }}"
- fail:
+- name: Check if network interface has an IPv6 address
+ ansible.builtin.fail:
msg: "{{ radosgw_interface }} does not have any IPv6 address on {{ inventory_hostname }}"
when:
- ip_version == "ipv6"
---
-- name: "fail if {{ inventory_hostname }} does not have any {{ ip_version }} address on {{ monitor_address_block }}"
- fail:
+- name: Check if network interface has an IP address in `monitor_address_block`
+ ansible.builtin.fail:
msg: "{{ inventory_hostname }} does not have any {{ ip_version }} address on {{ monitor_address_block }}"
when: hostvars[inventory_hostname]['ansible_facts']['all_' + ip_version + '_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['monitor_address_block'].split(',')) | length == 0
---
-- name: fail on unsupported distribution for iscsi gateways
- fail:
+- name: Fail on unsupported distribution for iscsi gateways
+ ansible.builtin.fail:
msg: "iSCSI gateways can only be deployed on Red Hat Enterprise Linux, CentOS or Fedora"
when: ansible_facts['distribution'] not in ['RedHat', 'CentOS', 'Fedora', 'AlmaLinux', 'Rocky']
-- name: make sure gateway_ip_list is configured
- fail:
+- name: Make sure gateway_ip_list is configured
+ ansible.builtin.fail:
msg: "you must set a list of IPs (comma separated) for gateway_ip_list"
when:
- gateway_ip_list == '0.0.0.0'
- not containerized_deployment | bool
- not use_new_ceph_iscsi | bool
-- name: make sure gateway_iqn is configured
- fail:
+- name: Make sure gateway_iqn is configured
+ ansible.builtin.fail:
msg: "you must set a iqn for the iSCSI target"
when:
- gateway_iqn | length == 0
- not containerized_deployment | bool
- not use_new_ceph_iscsi | bool
-- name: fail if unsupported chap configuration
- fail:
+- name: Fail if unsupported chap configuration
+ ansible.builtin.fail:
msg: "Mixing clients with CHAP enabled and disabled is not supported."
- with_items: "{{ client_connections }}"
+ with_items: "{{ client_connections }}"
when:
- item.status is defined
- item.status == "present"
- item.chap
- " '' in client_connections | selectattr('status', 'match', 'present') | map(attribute='chap') | list"
-- name: fail on unsupported distribution version for iscsi gateways
- command: "grep -q {{ item }}=m {% if is_atomic|bool %}/usr/lib/ostree-boot{% else %}/boot{% endif %}/config-{{ ansible_facts['kernel'] }}"
+- name: Fail on unsupported distribution version for iscsi gateways
+ ansible.builtin.command: "grep -q {{ item }}=m {% if is_atomic | bool %}/usr/lib/ostree-boot{% else %}/boot{% endif %}/config-{{ ansible_facts['kernel'] }}"
register: iscsi_kernel
changed_when: false
failed_when: iscsi_kernel.rc != 0
---
-- name: fail if ceph_nfs_rgw_access_key or ceph_nfs_rgw_secret_key are undefined (nfs standalone)
- fail:
+- name: Fail if ceph_nfs_rgw_access_key or ceph_nfs_rgw_secret_key are undefined (nfs standalone)
+ ansible.builtin.fail:
msg: "ceph_nfs_rgw_access_key and ceph_nfs_rgw_secret_key must be set if nfs_obj_gw is True"
when:
- nfs_obj_gw | bool
- groups.get(mon_group_name, []) | length == 0
- (ceph_nfs_rgw_access_key is undefined or ceph_nfs_rgw_secret_key is undefined)
-- name: fail on openSUSE Leap 15.x using distro packages
- fail:
+- name: Fail on openSUSE Leap 15.x using distro packages
+ ansible.builtin.fail:
msg: "ceph-nfs packages are not available from openSUSE Leap 15.x repositories (ceph_origin = 'distro')"
when:
- ceph_origin == 'distro'
---
-- name: fail if target_size_ratio is not set when pg_autoscale_mode is True
- fail:
+- name: Fail if target_size_ratio is not set when pg_autoscale_mode is True
+ ansible.builtin.fail:
msg: "You must set a target_size_ratio value on following pool: {{ item.name }}."
with_items:
- "{{ openstack_pools | default([]) }}"
---
-- name: ensure ceph_rbd_mirror_pool is set
- fail:
+- name: Ensure ceph_rbd_mirror_pool is set
+ ansible.builtin.fail:
msg: "ceph_rbd_mirror_pool needs to be provided"
when: ceph_rbd_mirror_pool | default("") | length == 0
-- name: ensure ceph_rbd_mirror_remote_cluster is set
- fail:
+- name: Ensure ceph_rbd_mirror_remote_cluster is set
+ ansible.builtin.fail:
msg: "ceph_rbd_mirror_remote_cluster needs to be provided"
when:
- ceph_rbd_mirror_remote_cluster | default("") | length == 0
- - ceph_rbd_mirror_remote_user | default("") | length > 0
\ No newline at end of file
+ - ceph_rbd_mirror_remote_user | default("") | length > 0
-- name: validate ceph_origin
- fail:
+- name: Validate ceph_origin
+ ansible.builtin.fail:
msg: "ceph_origin must be either 'repository', 'distro' or 'local'"
when: ceph_origin not in ['repository', 'distro', 'local']
-- name: validate ceph_repository
- fail:
+- name: Validate ceph_repository
+ ansible.builtin.fail:
msg: "ceph_repository must be either 'community', 'rhcs', 'obs', 'dev', 'custom' or 'uca'"
when:
- ceph_origin == 'repository'
- ceph_repository not in ['community', 'rhcs', 'obs', 'dev', 'custom', 'uca']
-- name: validate ceph_repository_community
- fail:
+- name: Validate ceph_repository_community
+ ansible.builtin.fail:
msg: "ceph_stable_release must be 'reef'"
when:
- ceph_origin == 'repository'
---
-- name: fail if rgw_zone is default
- fail:
+- name: Fail if rgw_zone is default
+ ansible.builtin.fail:
msg: "rgw_zone cannot be named 'default'"
loop: "{{ rgw_instances }}"
when: item.rgw_zone is undefined or item.rgw_zone == 'default'
-- name: fail if either rgw_zonemaster or rgw_zonesecondary is undefined
- fail:
+- name: Fail if either rgw_zonemaster or rgw_zonesecondary is undefined
+ ansible.builtin.fail:
msg: "rgw_zonemaster and rgw_zonesecondary must be defined"
loop: "{{ rgw_instances }}"
when: item.rgw_zonemaster | default(rgw_zonemaster) is undefined or item.rgw_zonesecondary | default(rgw_zonesecondary) is undefined
-- name: fail if rgw_zonemaster and rgw_zonesecondary are both true
- fail:
+- name: Fail if rgw_zonemaster and rgw_zonesecondary are both true
+ ansible.builtin.fail:
msg: "rgw_zonemaster and rgw_zonesecondary cannot both be true"
loop: "{{ rgw_instances }}"
when:
- item.rgw_zonemaster | default(rgw_zonemaster) | bool
- item.rgw_zonesecondary | default(rgw_zonesecondary) | bool
-- name: fail if rgw_zonegroup is not set
- fail:
+- name: Fail if rgw_zonegroup is not set
+ ansible.builtin.fail:
msg: "rgw_zonegroup has not been set by the user"
loop: "{{ rgw_instances }}"
when: item.rgw_zonegroup is undefined
-- name: fail if rgw_zone_user is not set
- fail:
+- name: Fail if rgw_zone_user is not set
+ ansible.builtin.fail:
msg: "rgw_zone_user has not been set by the user"
loop: "{{ rgw_instances }}"
when: item.rgw_zone_user is undefined
-- name: fail if rgw_zone_user_display_name is not set
- fail:
+- name: Fail if rgw_zone_user_display_name is not set
+ ansible.builtin.fail:
msg: "rgw_zone_user_display_name has not been set by the user"
loop: "{{ rgw_instances }}"
when: item.rgw_zone_user_display_name is undefined
-- name: fail if rgw_realm is not set
- fail:
+- name: Fail if rgw_realm is not set
+ ansible.builtin.fail:
msg: "rgw_realm has not been set by the user"
loop: "{{ rgw_instances }}"
when: item.rgw_realm is undefined
-- name: fail if system_access_key is not set
- fail:
+- name: Fail if system_access_key is not set
+ ansible.builtin.fail:
msg: "system_access_key has not been set by the user"
loop: "{{ rgw_instances }}"
when: item.system_access_key is undefined
-- name: fail if system_secret_key is not set
- fail:
+- name: Fail if system_secret_key is not set
+ ansible.builtin.fail:
msg: "system_secret_key has not been set by the user"
loop: "{{ rgw_instances }}"
when: item.system_secret_key is undefined
-- name: fail if endpoint is not set
- fail:
+- name: Fail if endpoint is not set
+ ansible.builtin.fail:
msg: "endpoint has not been set by the user"
loop: "{{ rgw_instances }}"
when:
- item.rgw_zonesecondary | default(rgw_zonesecondary) | bool
- rgw_pull_port is undefined and rgw_pullhost is undefined and item.rgw_pull_proto | default(rgw_pull_proto) is undefined
- item.endpoint is undefined
-
---
-- name: fail if ec_profile is not set for ec pools
- fail:
+- name: Fail if ec_profile is not set for ec pools
+ ansible.builtin.fail:
msg: "ec_profile must be set for ec pools"
loop: "{{ rgw_create_pools | dict2items }}"
when:
- item.value.type == 'ec'
- item.value.ec_profile is undefined
-- name: fail if ec_k is not set for ec pools
- fail:
+- name: Fail if ec_k is not set for ec pools
+ ansible.builtin.fail:
msg: "ec_k must be set for ec pools"
loop: "{{ rgw_create_pools | dict2items }}"
when:
- item.value.type == 'ec'
- item.value.ec_k is undefined
-- name: fail if ec_m is not set for ec pools
- fail:
+- name: Fail if ec_m is not set for ec pools
+ ansible.builtin.fail:
msg: "ec_m must be set for ec pools"
loop: "{{ rgw_create_pools | dict2items }}"
when:
---
-- name: fail on unsupported ansible version (1.X)
- fail:
+- name: Fail on unsupported ansible version (1.X)
+ ansible.builtin.fail:
msg: "Ansible version must be >= 2.x, please update!"
when: ansible_version.major|int < 2
-- name: fail on unsupported ansible version
- fail:
+- name: Fail on unsupported ansible version
+ ansible.builtin.fail:
msg: "Ansible version must be either 2.15 or 2.16!"
when: ansible_version.minor|int not in [15, 16]
-- name: fail on unsupported system
- fail:
+- name: Fail on unsupported system
+ ansible.builtin.fail:
msg: "System not supported {{ ansible_facts['system'] }}"
when: ansible_facts['system'] not in ['Linux']
-- name: fail on unsupported architecture
- fail:
+- name: Fail on unsupported architecture
+ ansible.builtin.fail:
msg: "Architecture not supported {{ ansible_facts['architecture'] }}"
when: ansible_facts['architecture'] not in ['x86_64', 'ppc64le', 'armv7l', 'aarch64']
-- name: fail on unsupported distribution
- fail:
+- name: Fail on unsupported distribution
+ ansible.builtin.fail:
msg: "Distribution not supported {{ ansible_facts['os_family'] }}"
when: ansible_facts['os_family'] not in ['Debian', 'RedHat', 'ClearLinux', 'Suse']
-- name: fail on unsupported CentOS release
- fail:
+- name: Fail on unsupported CentOS release
+ ansible.builtin.fail:
msg: "CentOS release {{ ansible_facts['distribution_major_version'] }} not supported with dashboard"
when:
- ansible_facts['distribution'] == 'CentOS'
- not containerized_deployment | bool
- dashboard_enabled | bool
-- name: red hat based systems tasks
+- name: Red hat based systems tasks
when:
- ceph_repository == 'rhcs'
- ansible_facts['distribution'] == 'RedHat'
block:
- - name: fail on unsupported distribution for red hat ceph storage
- fail:
+ - name: Fail on unsupported distribution for red hat ceph storage
+ ansible.builtin.fail:
msg: "Distribution not supported {{ ansible_facts['distribution_version'] }} by Red Hat Ceph Storage, only RHEL >= 8.2"
when: ansible_facts['distribution_version'] is version('8.2', '<')
-- name: fail on unsupported distribution for ubuntu cloud archive
- fail:
+- name: Fail on unsupported distribution for ubuntu cloud archive
+ ansible.builtin.fail:
msg: "Distribution not supported by Ubuntu Cloud Archive: {{ ansible_facts['distribution'] }}"
when:
- ceph_repository == 'uca'
- ansible_facts['distribution'] != 'Ubuntu'
-- name: "fail on unsupported SUSE/openSUSE distribution (only 15.x supported)"
- fail:
+- name: Fail on unsupported SUSE/openSUSE distribution (only 15.x supported)
+ ansible.builtin.fail:
msg: "Distribution not supported: {{ ansible_facts['distribution'] }} {{ ansible_facts['distribution_major_version'] }}"
when:
- ansible_facts['distribution'] == 'openSUSE Leap' or ansible_facts['distribution'] == 'SUSE'
- ansible_facts['distribution_major_version'] != '15'
-- name: fail if systemd is not present
- fail:
+- name: Fail if systemd is not present
+ ansible.builtin.fail:
msg: "Systemd must be present"
when: ansible_facts['service_mgr'] != 'systemd'
---
-- name: include check_system.yml
- include_tasks: check_system.yml
+- name: Include check_system.yml
+ ansible.builtin.include_tasks: check_system.yml
-- name: validate repository variables in non-containerized scenario
- include_tasks: check_repository.yml
+- name: Validate repository variables in non-containerized scenario
+ ansible.builtin.include_tasks: check_repository.yml
when: not containerized_deployment | bool
-- name: validate osd_objectstore
- fail:
+- name: Validate osd_objectstore
+ ansible.builtin.fail:
msg: "osd_objectstore must be 'bluestore''"
when: osd_objectstore not in ['bluestore']
-- name: validate monitor network configuration
- fail:
+- name: Validate monitor network configuration
+ ansible.builtin.fail:
msg: "Either monitor_address, monitor_address_block or monitor_interface must be provided"
when:
- mon_group_name in group_names
- monitor_address_block == 'subnet'
- monitor_interface == 'interface'
-- name: validate radosgw network configuration
- fail:
+- name: Validate radosgw network configuration
+ ansible.builtin.fail:
msg: "Either radosgw_address, radosgw_address_block or radosgw_interface must be provided"
when:
- rgw_group_name in group_names
- radosgw_address_block == 'subnet'
- radosgw_interface == 'interface'
-- name: validate osd nodes
+- name: Validate osd nodes
when: osd_group_name in group_names
block:
- - name: validate lvm osd scenario
- fail:
+ - name: Validate lvm osd scenario
+ ansible.builtin.fail:
msg: 'devices or lvm_volumes must be defined for lvm osd scenario'
when:
- not osd_auto_discovery | default(false) | bool
- devices is undefined
- lvm_volumes is undefined
- - name: validate bluestore lvm osd scenario
- fail:
+ - name: Validate bluestore lvm osd scenario
+ ansible.builtin.fail:
msg: 'data key must be defined in lvm_volumes'
when:
- osd_objectstore == 'bluestore'
- item.data is undefined
with_items: '{{ lvm_volumes }}'
-- name: debian based systems tasks
+- name: Debian based systems tasks
when: ansible_facts['os_family'] == 'Debian'
block:
- - name: fail if local scenario is enabled on debian
- fail:
+ - name: Fail if local scenario is enabled on debian
+ ansible.builtin.fail:
msg: "'local' installation scenario not supported on Debian systems"
when: ceph_origin == 'local'
- - name: fail if rhcs repository is enabled on debian
- fail:
+ - name: Fail if rhcs repository is enabled on debian
+ ansible.builtin.fail:
msg: "RHCS isn't supported anymore on Debian distribution"
when:
- ceph_origin == 'repository'
when: ansible_facts['os_family'] == 'Suse'
block:
- name: Check ceph_origin definition on SUSE/openSUSE Leap
- fail:
+ ansible.builtin.fail:
msg: "Unsupported installation method origin:{{ ceph_origin }}"
when: ceph_origin not in ['distro', 'repository']
- name: Check ceph_repository definition on SUSE/openSUSE Leap
- fail:
+ ansible.builtin.fail:
msg: "Unsupported installation method origin:{{ ceph_origin }} repo:{{ ceph_repository }}'
only valid combination is ceph_origin == 'repository' and ceph_repository == 'obs'"
when:
- ceph_origin == 'repository'
- ceph_repository != 'obs'
-- name: validate ntp daemon type
- fail:
+- name: Validate ntp daemon type
+ ansible.builtin.fail:
msg: "ntp_daemon_type must be one of chronyd, ntpd, or timesyncd"
when:
- ntp_service_enabled | bool
- ntp_daemon_type not in ['chronyd', 'ntpd', 'timesyncd']
# Since NTPd can not be installed on Atomic...
-- name: abort if ntp_daemon_type is ntpd on Atomic
- fail:
+- name: Abort if ntp_daemon_type is ntpd on Atomic
+ ansible.builtin.fail:
msg: installation can't happen on Atomic and ntpd needs to be installed
when:
- is_atomic | default(False) | bool
- ansible_facts['os_family'] == 'RedHat'
- ntp_daemon_type == 'ntpd'
-- name: include check_devices.yml
- include_tasks: check_devices.yml
+- name: Include check_devices.yml
+ ansible.builtin.include_tasks: check_devices.yml
when:
- osd_group_name in group_names
- not osd_auto_discovery | default(False) | bool
-- name: include check_eth_mon.yml
- include_tasks: check_eth_mon.yml
+- name: Include check_eth_mon.yml
+ ansible.builtin.include_tasks: check_eth_mon.yml
when:
- mon_group_name in group_names
- monitor_interface != "dummy"
- monitor_address == "x.x.x.x"
- monitor_address_block == "subnet"
-- name: include check_ipaddr_mon.yml
- include_tasks: check_ipaddr_mon.yml
+- name: Include check_ipaddr_mon.yml
+ ansible.builtin.include_tasks: check_ipaddr_mon.yml
when:
- mon_group_name in group_names
- monitor_interface == "interface"
- monitor_address == "x.x.x.x"
- monitor_address_block != "subnet"
-- name: include check_eth_rgw.yml
- include_tasks: check_eth_rgw.yml
+- name: Include check_eth_rgw.yml
+ ansible.builtin.include_tasks: check_eth_rgw.yml
when:
- rgw_group_name in group_names
- radosgw_interface != "dummy"
- radosgw_address == "x.x.x.x"
- radosgw_address_block == "subnet"
-- name: include check_rgw_pools.yml
- include_tasks: check_rgw_pools.yml
+- name: Include check_rgw_pools.yml
+ ansible.builtin.include_tasks: check_rgw_pools.yml
when:
- inventory_hostname in groups.get(rgw_group_name, [])
- rgw_create_pools is defined
-- name: include check_iscsi.yml
- include_tasks: check_iscsi.yml
+- name: Include check_iscsi.yml
+ ansible.builtin.include_tasks: check_iscsi.yml
when: iscsi_gw_group_name in group_names
-- name: include check_nfs.yml
- include_tasks: check_nfs.yml
+- name: Include check_nfs.yml
+ ansible.builtin.include_tasks: check_nfs.yml
when: inventory_hostname in groups.get(nfs_group_name, [])
-- name: include check_rbdmirror.yml
- include_tasks: check_rbdmirror.yml
+- name: Include check_rbdmirror.yml
+ ansible.builtin.include_tasks: check_rbdmirror.yml
when:
- rbdmirror_group_name in group_names
- ceph_rbd_mirror_configure | default(false) | bool
-- block:
- - name: fail if monitoring group doesn't exist
- fail:
+- name: Monitoring related tasks
+ when: dashboard_enabled | bool
+ block:
+ - name: Fail if monitoring group doesn't exist
+ ansible.builtin.fail:
msg: "you must add a monitoring group and add at least one node."
when: groups[monitoring_group_name] is undefined
- - name: fail when monitoring doesn't contain at least one node.
- fail:
+ - name: Fail when monitoring doesn't contain at least one node.
+ ansible.builtin.fail:
msg: "you must add at least one node in the monitoring hosts group"
when: groups[monitoring_group_name] | length < 1
- - name: fail when dashboard_admin_password and/or grafana_admin_password are not set
- fail:
+ - name: Fail when dashboard_admin_password and/or grafana_admin_password are not set
+ ansible.builtin.fail:
msg: "you must set dashboard_admin_password and grafana_admin_password."
when:
- dashboard_admin_password is undefined
or grafana_admin_password is undefined
- when: dashboard_enabled | bool
-- name: validate container registry credentials
- fail:
+- name: Validate container registry credentials
+ ansible.builtin.fail:
msg: 'ceph_docker_registry_username and/or ceph_docker_registry_password variables need to be set'
when:
- ceph_docker_registry_auth | bool
- (ceph_docker_registry_username is not defined or ceph_docker_registry_password is not defined) or
(ceph_docker_registry_username | string | length == 0 or ceph_docker_registry_password | string | length == 0)
-- name: validate container service and container package
- fail:
+- name: Validate container service and container package
+ ansible.builtin.fail:
msg: 'both container_package_name and container_service_name should be defined'
when:
- (container_package_name is undefined and container_service_name is defined) or
(container_package_name is defined and container_service_name is undefined)
-- name: validate openstack_keys key format
- fail:
+- name: Validate openstack_keys key format
+ ansible.builtin.fail:
msg: '{{ item.name }} key format invalid'
with_items: '{{ openstack_keys }}'
when:
- item.key is defined
- item.key is not match("^[a-zA-Z0-9+/]{38}==$")
-- name: validate clients keys key format
- fail:
+- name: Validate clients keys key format
+ ansible.builtin.fail:
msg: '{{ item.name }} key format invalid'
with_items: '{{ keys }}'
when:
- item.key is defined
- item.key is not match("^[a-zA-Z0-9+/]{38}==$")
-- name: validate openstack_keys caps
- fail:
+- name: Validate openstack_keys caps
+ ansible.builtin.fail:
msg: '{{ item.name }} key has no caps defined'
with_items: '{{ openstack_keys }}'
when:
- openstack_keys | length > 0
- item.caps is not defined
-- name: validate clients keys caps
- fail:
+- name: Validate clients keys caps
+ ansible.builtin.fail:
msg: '{{ item.name }} key has no caps defined'
with_items: '{{ keys }}'
when:
- keys | length > 0
- item.caps is not defined
-- name: check virtual_ips is defined
- fail:
+- name: Check virtual_ips is defined
+ ansible.builtin.fail:
msg: "virtual_ips is not defined."
when:
- rgwloadbalancer_group_name in group_names
- groups[rgwloadbalancer_group_name] | length > 0
- virtual_ips is not defined
-- name: validate virtual_ips length
- fail:
+- name: Validate virtual_ips length
+ ansible.builtin.fail:
msg: "There are more virual_ips defined than rgwloadbalancer nodes"
when:
- rgwloadbalancer_group_name in group_names