---
-- include: pre_requisite.yml
+- name: include pre_requisite.yml
+ include: pre_requisite.yml
-- include: start_agent.yml
+- name: include start_agent.yml
+ include: start_agent.yml
---
-- include: pre_requisite.yml
-- include: create_users_keys.yml
+- name: include pre_requisite.yml
+ include: pre_requisite.yml
+
+- name: include create_users_keys.yml
+ include: create_users_keys.yml
when:
- user_config
- global_in_ceph_conf_overrides
- cephx
- copy_admin_key
-- name: check if global key exists in ceph_conf_overrides
+- name: set_fact global_in_ceph_conf_overrides
set_fact:
global_in_ceph_conf_overrides: "{{ 'global' in ceph_conf_overrides }}"
ignore_errors: true
always_run: true
changed_when: false
- when: ansible_os_family == 'Debian'
+ when:
+ - ansible_os_family == 'Debian'
- name: install ntp on debian
package:
ignore_errors: true
always_run: true
changed_when: false
- when: ansible_os_family == 'RedHat'
+ when:
+ - ansible_os_family == 'RedHat'
- name: install ntp on redhat
package:
- name: fail on unsupported system
fail:
msg: "System not supported {{ ansible_system }}"
- when: "'{{ ansible_system }}' not in ['Linux']"
+ when:
+ - ansible_system not in ['Linux']
- name: fail on unsupported architecture
fail:
msg: "Architecture not supported {{ ansible_architecture }}"
- when: "'{{ ansible_architecture }}' not in ['x86_64', 'ppc64le', 'armv7l']"
+ when:
+ - ansible_architecture not in ['x86_64', 'ppc64le', 'armv7l']
- name: fail on unsupported distribution
fail:
msg: "Distribution not supported {{ ansible_os_family }}"
- when: "'{{ ansible_os_family }}' not in ['Debian', 'RedHat', 'ClearLinux']"
+ when:
+ - ansible_os_family not in ['Debian', 'RedHat', 'ClearLinux']
- name: fail on unsupported distribution for red hat ceph storage
fail:
- name: fail if systemd is not present
fail:
msg: "Systemd must be present"
- when: ansible_service_mgr != 'systemd'
+ when:
+ - ansible_service_mgr != 'systemd'
- name: fail on unsupported distribution for iscsi gateways
fail:
insertafter: EOF
create: yes
line: "CLUSTER={{ cluster }}"
- when: ansible_os_family == "RedHat"
+ when:
+ - ansible_os_family == "RedHat"
# NOTE(leseb): we are performing the following check
# to ensure any Jewel installation will not fail.
path: /etc/default/ceph
register: etc_default_ceph
always_run: true
- when: ansible_os_family == "Debian"
+ when:
+ - ansible_os_family == "Debian"
- name: configure cluster name
lineinfile:
with_items:
- "{{ rbd_client_admin_socket_path }}"
- "{{ rbd_client_log_path }}"
- when: rbd_client_directories
+ when:
+ - rbd_client_directories
- name: fail if initial mon keyring found doesn't work
fail:
msg: "Initial mon keyring found doesn't work."
- when: test_initial_monitor_keyring.rc != 0
+ when:
+ - test_initial_monitor_keyring.rc != 0
- name: write initial mon keyring in {{ fetch_directory }}/monitor_keyring.conf if it doesn't exist
local_action: shell echo {{ monitor_keyring.stdout }} | tee {{ fetch_directory }}/monitor_keyring.conf
- name: put initial mon keyring in mon kv store
command: ceph --cluster {{ cluster }} config-key put initial_mon_keyring {{ monitor_keyring.stdout }}
- when: test_initial_monitor_keyring.rc == 0
+ when:
+ - test_initial_monitor_keyring.rc == 0
---
-- name: configure debian community repository
+- name: include debian_community_repository.yml
include: debian_community_repository.yml
when:
- ceph_repository == 'community'
-- name: configure debian rhcs repository
+- name: include debian_rhcs_repository.yml
include: debian_rhcs_repository.yml
when:
- ceph_repository == 'rhcs'
-- name: configure debian dev repository
+- name: include debian_dev_repository.yml
include: debian_dev_repository.yml
when:
- ceph_repository == 'dev'
-- name: configure debian custom repository
+- name: include debian_custom_repository.yml
include: debian_custom_repository.yml
when:
- ceph_repository == 'custom'
-- name: configure debian cloud archive repository
+- name: include debian_uca_repository.yml
include: debian_uca_repository.yml
when:
- ceph_repository == 'uca'
---
-- name: configure redhat community repository
+- name: include redhat_community_repository.yml
include: redhat_community_repository.yml
when:
- ceph_repository == 'community'
-- name: configure redhat rhcs repository
+- name: include redhat_rhcs_repository.yml
include: redhat_rhcs_repository.yml
when:
- ceph_repository == 'rhcs'
-- name: configure redhat dev repository
+- name: include redhat_dev_repository.yml
include: redhat_dev_repository.yml
when:
- ceph_repository == 'dev'
-- name: configure redhat custom repository
+- name: include redhat_custom_repository.yml
include: redhat_custom_repository.yml
when:
- ceph_repository == 'custom'
---
-- include: prerequisite_rhcs_cdn_install_debian.yml
+- name: include prerequisite_rhcs_cdn_install_debian.yml
+ include: prerequisite_rhcs_cdn_install_debian.yml
when:
- ceph_repository_type == 'cdn'
apt:
name: ceph-mon
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
- when: mon_group_name in group_names
+ when:
+ - mon_group_name in group_names
- name: install red hat storage ceph osd for debian
apt:
name: ceph-osd
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
- when: osd_group_name in group_names
+ when:
+ - osd_group_name in group_names
- name: install ceph-test for debian
apt:
name: ceph-test
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
- when: ceph_test
+ when:
+ - ceph_test
- name: install red hat storage radosgw for debian
apt:
name: radosgw
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
- when: rgw_group_name in group_names
+ when:
+ - rgw_group_name in group_names
- name: install red hat storage ceph mds for debian
apt:
pkg: ceph-mds
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
- when: mds_group_name in group_names
+ when:
+ - mds_group_name in group_names
- name: install red hat storage ceph-fuse client for debian
apt:
pkg: ceph-fuse
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
- when: client_group_name in group_names
+ when:
+ - client_group_name in group_names
- name: install red hat storage ceph-common for debian
apt:
pkg: ceph-common
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
- when: client_group_name in group_names
+ when:
+ - client_group_name in group_names
- name: install red hat storage nfs gateway for debian
apt:
name: nfs-ganesha
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
- when: nfs_group_name in group_names
+ when:
+ - nfs_group_name in group_names
- name: install red hat storage nfs file gateway
apt:
name: nfs-ganesha-ceph
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
- when:
+ when:
- nfs_group_name in group_names
- nfs_file_gw
apt:
name: nfs-ganesha-rgw
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
- when:
+ when:
- nfs_group_name in group_names
- nfs_obj_gw
---
-- name: configure debian repository installation
+- name: include configure_debian_repository_installation.yml
include: configure_debian_repository_installation.yml
when:
- ceph_origin == 'repository'
apt:
update_cache: yes
-- name: install ceph debian packages
+- name: include install_debian_packages.yml
include: install_debian_packages.yml
when:
- (ceph_origin == 'repository' or ceph_origin == 'distro')
- ceph_repository != 'rhcs'
-- name: install ceph rhcs debian packages
+- name: include install_debian_rhcs_packages.yml
include: install_debian_rhcs_packages.yml
when:
- (ceph_origin == 'repository' or ceph_origin == 'distro')
---
-- name: configure redhat repository installation
+- name: include configure_redhat_repository_installation.yml
include: configure_redhat_repository_installation.yml
when:
- ceph_origin == 'repository'
-- name: configure redhat local installation
+- name: include configure_redhat_local_installation.yml
include: configure_redhat_local_installation.yml
when:
- ceph_origin == 'local'
-- name: install redhat packages
+- name: include install_redhat_packages.yml
include: install_redhat_packages.yml
when:
- (ceph_origin == 'repository' or ceph_origin == 'distro')
name: "{{ item }}"
state: present
with_items: "{{ redhat_package_dependencies }}"
- when: ansible_distribution == "RedHat"
+ when:
+ - ansible_distribution == 'RedHat'
- name: install centos dependencies
package:
name: "{{ item }}"
state: present
with_items: "{{ centos_package_dependencies }}"
- when: ansible_distribution == "CentOS"
+ when:
+ - ansible_distribution == 'CentOS'
- name: install redhat ceph-common
package:
package:
name: ceph-test
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
- when: ceph_test
+ when:
+ - ceph_test
- name: install redhat ceph-radosgw package
package:
name: ceph-radosgw
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
- when: rgw_group_name in group_names
+ when:
+ - rgw_group_name in group_names
- name: install redhat nfs-ganesha-ceph package
package:
failed_when: false
register: rhcs_mon_repo
always_run: true
- when: mon_group_name in group_names
+ when:
+ - mon_group_name in group_names
- name: enable red hat storage monitor repository
command: subscription-manager repos --enable rhel-7-server-rhceph-{{ ceph_rhcs_version }}-mon-rpms
failed_when: false
register: rhcs_osd_repo
always_run: true
- when: osd_group_name in group_names
+ when:
+ - osd_group_name in group_names
- name: enable red hat storage osd repository
command: subscription-manager repos --enable rhel-7-server-rhceph-{{ ceph_rhcs_version }}-osd-rpms
failed_when: false
register: rhcs_rgw_mds_repo
always_run: true
- when: (rgw_group_name in group_names or mds_group_name in group_names)
+ when:
+ - (rgw_group_name in group_names or mds_group_name in group_names)
- name: enable red hat storage rados gateway / mds repository
command: subscription-manager repos --enable rhel-7-server-rhceph-{{ ceph_rhcs_version }}-tools-rpms
repo: "deb {{ ceph_rhcs_cdn_debian_repo }}/2-updates/MON {{ ceph_stable_distro_source | default(ansible_lsb.codename) }} main"
state: present
changed_when: false
- when: mon_group_name in group_names
+ when:
+ - mon_group_name in group_names
- name: enable red hat storage osd repository for debian systems
apt_repository:
repo: "deb {{ ceph_rhcs_cdn_debian_repo }}/2-updates/OSD {{ ceph_stable_distro_source | default(ansible_lsb.codename) }} main"
state: present
changed_when: false
- when: osd_group_name in group_names
+ when:
+ - osd_group_name in group_names
- name: enable red hat storage rados gateway / mds repository for debian systems
apt_repository:
repo: "deb {{ ceph_rhcs_cdn_debian_repo }}/2-updates/Tools {{ ceph_stable_distro_source | default(ansible_lsb.codename) }} main"
state: present
changed_when: false
- when: (rgw_group_name in group_names or mds_group_name in group_names)
+ when:
+ - (rgw_group_name in group_names or mds_group_name in group_names)
- name: enable red hat storage agent repository for debian systems
apt_repository:
path: "{{ ceph_rhcs_iso_path | dirname }}"
state: directory
recurse: yes
- when: "'{{ ceph_rhcs_iso_path | dirname }}' != '/'"
+ when:
+ - ceph_rhcs_iso_path | dirname != '/'
- name: fetch the red hat storage iso from the ansible server
copy:
---
-- include: prerequisite_rhcs_iso_install.yml
+- name: include prerequisite_rhcs_iso_install.yml
+ include: prerequisite_rhcs_iso_install.yml
when:
- ceph_repository_type == 'iso'
-- include: prerequisite_rhcs_cdn_install.yml
+- name: include prerequisite_rhcs_cdn_install.yml
+ include: prerequisite_rhcs_cdn_install.yml
when:
- ceph_repository_type == 'cdn'
---
-- include: ./checks/check_system.yml
-- include: ./checks/check_mandatory_vars.yml
+- name: include checks/check_system.yml
+ include: checks/check_system.yml
-- include: ./checks/check_firewall.yml
- when: check_firewall
+- name: include checks/check_mandatory_vars.yml
+ include: checks/check_mandatory_vars.yml
+
+- name: include checks/check_firewall.yml
+ include: checks/check_firewall.yml
+ when:
+ - check_firewall
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
-- include: ./misc/system_tuning.yml
- when: osd_group_name in group_names
+- name: include misc/system_tuning.yml
+ include: misc/system_tuning.yml
+ when:
+ - osd_group_name in group_names
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
-- include: ./installs/install_on_redhat.yml
+- name: include installs/install_on_redhat.yml
+ include: installs/install_on_redhat.yml
when:
- ansible_os_family == 'RedHat'
tags:
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
-- include: ./installs/install_on_debian.yml
+- name: include installs/install_on_debian.yml
+ include: installs/install_on_debian.yml
when:
- ansible_os_family == 'Debian'
tags:
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
-- include: ./installs/install_on_clear.yml
- when: ansible_os_family == 'ClearLinux'
+- name: include installs/install_on_clear.yml
+ include: installs/install_on_clear.yml
+ when:
+ - ansible_os_family == 'ClearLinux'
tags:
- package-install
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
-- include: ./misc/ntp_redhat.yml
+- name: include misc/ntp_redhat.yml
+ include: misc/ntp_redhat.yml
when:
- ansible_os_family == 'RedHat'
- ntp_service_enabled
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
-- include: ./misc/ntp_debian.yml
+- name: include misc/ntp_debian.yml
+ include: misc/ntp_debian.yml
when:
- ansible_os_family == 'Debian'
- ntp_service_enabled
always_run: yes
register: ceph_version
-- set_fact:
+- name: set_fact ceph_version
+ set_fact:
ceph_version: "{{ ceph_version.stdout.split(' ')[2] }}"
# override ceph_stable_release for ceph_dev and rhcs installations since ceph_stable_release is not mandatory
-- include: ./release-rhcs.yml
+- name: include release-rhcs.yml
+ include: release-rhcs.yml
when:
- ceph_repository in ['rhcs', 'dev']
tags:
- always
-- include: facts_mon_fsid.yml
+- name: include facts_mon_fsid.yml
+ include: facts_mon_fsid.yml
run_once: true
when:
- cephx
- ceph_current_fsid.rc == 0
- mon_group_name in group_names
-- include: create_ceph_initial_dirs.yml
-- include: create_rbd_client_dir.yml
-- include: configure_cluster_name.yml
+- name: include create_ceph_initial_dirs.yml
+ include: create_ceph_initial_dirs.yml
+
+- name: include create_rbd_client_dir.yml
+ include: create_rbd_client_dir.yml
+
+- name: include configure_cluster_name.yml
+ include: configure_cluster_name.yml
---
-- include: ../checks/check_ntp_atomic.yml
- when: is_atomic
+- name: include ../checks/check_ntp_atomic.yml
+ include: ../checks/check_ntp_atomic.yml
+ when:
+ - is_atomic
- name: start the ntp service
service:
---
-- include: ../checks/check_ntp_debian.yml
- when: ansible_os_family == 'Debian'
+- name: include ../checks/check_ntp_debian.yml
+ include: ../checks/check_ntp_debian.yml
+ when:
+ - ansible_os_family == 'Debian'
- name: start the ntp service
service:
---
-- include: ../checks/check_ntp_redhat.yml
- when: ansible_os_family == 'RedHat'
+- name: include ../checks/check_ntp_redhat.yml
+ include: ../checks/check_ntp_redhat.yml
+ when:
+ - ansible_os_family == 'RedHat'
- name: start the ntp service
service:
command: updatedb -e /var/lib/ceph
changed_when: false
failed_when: false
+ when:
+ - osd_objectstore == 'filestore'
- name: create tmpfiles.d directory
file:
group: "root"
mode: "0755"
register: "tmpfiles_d"
- when: disable_transparent_hugepage
+ when:
+ - disable_transparent_hugepage
- name: disable transparent hugepage
template:
mode: "0644"
force: "yes"
validate: "systemd-tmpfiles --create %s"
- when: disable_transparent_hugepage
+ when:
+ - disable_transparent_hugepage
- name: get default vm.min_free_kbytes
command: sysctl -b vm.min_free_kbytes
always_run: yes
register: default_vm_min_free_kbytes
-- name: define vm.min_free_kbytes
+- name: set_fact vm_min_free_kbytes
set_fact:
vm_min_free_kbytes: "{{ 4194303 if ansible_memtotal_mb >= 49152 else default_vm_min_free_kbytes.stdout }}"
---
-- set_fact:
+- name: set_fact ceph_release jewel
+ set_fact:
ceph_release: jewel
- when: ceph_version.split('.')[0] | version_compare('10', '==')
+ when:
+ - ceph_version.split('.')[0] | version_compare('10', '==')
-- set_fact:
+- name: set_fact ceph_release kraken
+ set_fact:
ceph_release: kraken
- when: ceph_version.split('.')[0] | version_compare('11', '==')
+ when:
+ - ceph_version.split('.')[0] | version_compare('11', '==')
-- set_fact:
+- name: set_fact ceph_release luminous
+ set_fact:
ceph_release: luminous
- when: ceph_version.split('.')[0] | version_compare('12', '==')
+ when:
+ - ceph_version.split('.')[0] | version_compare('12', '==')
-- set_fact:
+- name: set_fact ceph_release mimic
+ set_fact:
ceph_release: mimic
- when: ceph_version.split('.')[0] | version_compare('13', '==')
+ when:
+ - ceph_version.split('.')[0] | version_compare('13', '==')
- restart ceph mdss
- restart ceph rgws
- restart ceph nfss
- when: not containerized_deployment|bool
+ when:
+ - not containerized_deployment|bool
# ceph-docker-common
# only create fetch directory when:
creates="{{ fetch_directory }}/ceph_cluster_uuid.conf"
register: cluster_uuid
become: false
- when: generate_fsid
+ when:
+ - generate_fsid
- name: read cluster uuid if it already exists
local_action: command cat {{ fetch_directory }}/ceph_cluster_uuid.conf
register: cluster_uuid
always_run: true
become: false
- when: generate_fsid
+ when:
+ - generate_fsid
- name: ensure /etc/ceph exists
file:
owner: 'ceph'
group: 'ceph'
mode: 0755
- when: groups.get(mon_group_name, []) | length == 0
+ when:
+ - groups.get(mon_group_name, []) | length == 0
- name: "generate {{ cluster }}.conf configuration file"
action: config_template
- name: set fsid fact when generate_fsid = true
set_fact:
fsid: "{{ cluster_uuid.stdout }}"
- when: generate_fsid
- when: containerized_deployment|bool
+ when:
+ - generate_fsid
+ when:
+ - containerized_deployment|bool
- name: update apt cache
apt:
update-cache: yes
- when: ansible_os_family == 'Debian'
+ when:
+ - ansible_os_family == 'Debian'
- block:
- name: copy mon restart script
---
-- set_fact:
+- name: set_fact monitor_name ansible_hostname
+ set_fact:
monitor_name: "{{ ansible_hostname }}"
- when: not mon_use_fqdn
+ when:
+ - not mon_use_fqdn
-- set_fact:
+- name: set_fact monitor_name ansible_fqdn
+ set_fact:
monitor_name: "{{ ansible_fqdn }}"
- when: mon_use_fqdn
+ when:
+ - mon_use_fqdn
# this task shouldn't run in a rolling_update situation
# because it blindly picks a mon, which may be down because
# set this as a default when performing a rolling_update
# so the rest of the tasks here will succeed
-- set_fact:
+- name: set_fact ceph_current_fsid rc 1
+ set_fact:
ceph_current_fsid:
rc: 1
when:
changed_when: false
become: false
run_once: true
- when: cephx or generate_fsid
+ when:
+ - (cephx or generate_fsid)
-- set_fact:
+- name: set_fact fsid ceph_current_fsid.stdout
+ set_fact:
fsid: "{{ ceph_current_fsid.stdout }}"
when:
- ceph_current_fsid.rc == 0
# Set ceph_release to ceph_stable by default
-- set_fact:
+- name: set_fact ceph_release ceph_stable_release
+ set_fact:
ceph_release: "{{ ceph_stable_release }}"
- name: generate cluster fsid
local_action: shell echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
creates="{{ fetch_directory }}/ceph_cluster_uuid.conf"
become: false
- when: ceph_current_fsid.rc == 0
+ when:
+ - ceph_current_fsid.rc == 0
- name: read cluster fsid if it already exists
local_action: command cat {{ fetch_directory }}/ceph_cluster_uuid.conf
register: cluster_uuid
become: false
always_run: true
- when: generate_fsid
+ when:
+ - generate_fsid
-- name: set fsid fact when generate_fsid = true
+- name: set_fact fsid
set_fact:
fsid: "{{ cluster_uuid.stdout }}"
- when: generate_fsid
+ when:
+ - generate_fsid
-- name: set docker_exec_cmd fact
+- name: set_fact docker_exec_cmd
set_fact:
docker_exec_cmd: "docker exec ceph-mon-{{ ansible_hostname }}"
- when: containerized_deployment
+ when:
+ - containerized_deployment
-- set_fact:
+- name: set_fact mds_name ansible_hostname
+ set_fact:
mds_name: "{{ ansible_hostname }}"
- when: not mds_use_fqdn
+ when:
+ - not mds_use_fqdn
-- set_fact:
+- name: set_fact mds_name ansible_fqdn
+ set_fact:
mds_name: "{{ ansible_fqdn }}"
- when: mds_use_fqdn
+ when:
+ - mds_use_fqdn
-- set_fact:
+- name: set_fact rbd_client_directory_owner ceph
+ set_fact:
rbd_client_directory_owner: ceph
when:
- rbd_client_directory_owner is not defined
or not rbd_client_directory_owner
-- set_fact:
+- name: set_fact rbd_client_directory_group ceph
+ set_fact:
rbd_client_directory_group: ceph
when:
- rbd_client_directory_group is not defined
or not rbd_client_directory_group
-- set_fact:
+- name: set_fact rbd_client_directory_mode 0770
+ set_fact:
rbd_client_directory_mode: "0770"
when:
- rbd_client_directory_mode is not defined
---
-- include: facts.yml
-- include: check_socket.yml
+- name: include facts.yml
+ include: facts.yml
+
+- name: include check_socket.yml
+ include: check_socket.yml
---
-- include: stat_ceph_files.yml
+- name: include stat_ceph_files.yml
+ include: stat_ceph_files.yml
- name: fail if we find existing cluster files
fail:
with_together:
- "{{ ceph_config_keys }}"
- "{{ statconfig.results }}"
- when: item.1.stat.exists == true
+ when:
+ - item.1.stat.exists == true
ignore_errors: true
always_run: true
changed_when: false
- when: ansible_os_family == 'Debian'
+ when:
+ - ansible_os_family == 'Debian'
- name: install ntp on debian
package:
ignore_errors: true
always_run: true
changed_when: false
- when: ansible_os_family == 'RedHat'
+ when:
+ - ansible_os_family == 'RedHat'
- name: install ntp on redhat
package:
---
-- include: stat_ceph_files.yml
+- name: include stat_ceph_files.yml
+ include: stat_ceph_files.yml
- name: try to fetch ceph config and keys
copy:
with_together:
- "{{ ceph_config_keys }}"
- "{{ statconfig.results | default([]) }}"
- when: item.1.stat.exists == true
+ when:
+ - item.1.stat.exists == true
- name: "pull {{ ceph_docker_image }} image"
command: "docker pull {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
changed_when: false
- when: ceph_docker_dev_image is undefined or not ceph_docker_dev_image
+ when:
+ - (ceph_docker_dev_image is undefined or not ceph_docker_dev_image)
# Dev case - export local dev image and send it across
- name: export local ceph dev image
local_action: command docker save -o "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar" "{{ ceph_docker_username }}/{{ ceph_docker_imagename }}:{{ ceph_docker_image_tag }}"
- when: ceph_docker_dev_image is defined and ceph_docker_dev_image
+ when:
+ - (ceph_docker_dev_image is defined and ceph_docker_dev_image)
run_once: true
- name: copy ceph dev image file
copy:
src: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
dest: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
- when: ceph_docker_dev_image is defined and ceph_docker_dev_image
+ when:
+ - (ceph_docker_dev_image is defined and ceph_docker_dev_image)
- name: load ceph dev image
command: "docker load -i /tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
- when: ceph_docker_dev_image is defined and ceph_docker_dev_image
+ when:
+ - (ceph_docker_dev_image is defined and ceph_docker_dev_image)
- name: remove tmp ceph dev image file
command: "rm /tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
- when: ceph_docker_dev_image is defined and ceph_docker_dev_image
+ when:
+ - (ceph_docker_dev_image is defined and ceph_docker_dev_image)
---
-- include: system_checks.yml
+- name: include system_checks.yml
+ include: system_checks.yml
- name: check if it is atomic host
- stat: path=/run/ostree-booted
+ stat:
+ path: /run/ostree-booted
register: stat_ostree
always_run: true
-- name: set fact for using atomic host
+- name: set_fact is_atomic
set_fact:
is_atomic: '{{ stat_ostree.stat.exists }}'
-- include: ./pre_requisites/prerequisites.yml
- when: not is_atomic
+- name: include pre_requisites/prerequisites.yml
+ include: pre_requisites/prerequisites.yml
+ when:
+ - not is_atomic
# NOTE(guits): would be nice to refact this block with L39-45 in roles/ceph-common/tasks/facts.yml
-- set_fact:
+- name: set_fact monitor_name ansible_hostname
+ set_fact:
monitor_name: "{{ ansible_hostname }}"
- when: not mon_use_fqdn
+ when:
+ - not mon_use_fqdn
-- set_fact:
+- name: set_fact monitor_name ansible_fqdn
+ set_fact:
monitor_name: "{{ ansible_fqdn }}"
- when: mon_use_fqdn
+ when:
+ - mon_use_fqdn
- name: get docker version
command: docker --version
always_run: yes
register: docker_version
-- set_fact:
+- name: set_fact docker_version docker_version.stdout.split
+ set_fact:
docker_version: "{{ docker_version.stdout.split(' ')[2] }}"
- name: check if a cluster is already running
# a cluster is not already running,
# AND
# we are not playing rolling-update.yml playbook.
-- include: checks.yml
+- name: include checks.yml
+ include: checks.yml
when:
- (not containerized_deployment_with_kv and
((inventory_hostname in groups.get(mon_group_name, [])) or
- ceph_health.rc != 0
- not "{{ rolling_update | default(false) }}"
-- include: ./misc/ntp_atomic.yml
+- name: include misc/ntp_atomic.yml
+ include: misc/ntp_atomic.yml
when:
- is_atomic
- ansible_os_family == 'RedHat'
- ntp_service_enabled
-- include: ./misc/ntp_redhat.yml
+- name: include misc/ntp_redhat.yml
+ include: misc/ntp_redhat.yml
when:
- not is_atomic
- ansible_os_family == 'RedHat'
- ntp_service_enabled
-- include: ./misc/ntp_debian.yml
+- name: include misc/ntp_debian.yml
+ include: misc/ntp_debian.yml
when:
- ansible_os_family == 'Debian'
- ntp_service_enabled
-- include: fetch_image.yml
+- name: include fetch_image.yml
+ include: fetch_image.yml
- name: get ceph version
command: docker run --entrypoint /usr/bin/ceph {{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} --version
always_run: yes
register: ceph_version
-- set_fact:
+- name: set_fact ceph_version ceph_version.stdout.split
+ set_fact:
ceph_version: "{{ ceph_version.stdout.split(' ')[2] }}"
-- include: release.yml
+- name: include release.yml
+ include: release.yml
# NOTE (jimcurtis): dirs_permissions.yml must precede fetch_configs.yml
# # because it creates the directories needed by the latter.
-- include: dirs_permissions.yml
+- name: include dirs_permissions.yml
+ include: dirs_permissions.yml
-- include: create_configs.yml
+- name: include create_configs.yml
+ include: create_configs.yml
-- include: selinux.yml
- when: ansible_os_family == 'RedHat'
+- name: include selinux.yml
+ include: selinux.yml
+ when:
+ - ansible_os_family == 'RedHat'
---
-- include: ../checks/check_ntp_atomic.yml
+- name: include ../checks/check_ntp_atomic.yml
+ include: ../checks/check_ntp_atomic.yml
when: is_atomic
- name: start the ntp service
---
-- include: ../checks/check_ntp_debian.yml
- when: ansible_os_family == 'Debian'
+- name: include ../checks/check_ntp_debian.yml
+ include: ../checks/check_ntp_debian.yml
+ when:
+ - ansible_os_family == 'Debian'
- name: start the ntp service
service:
---
-- include: ../checks/check_ntp_redhat.yml
- when: ansible_os_family == 'RedHat'
+- name: include ../checks/check_ntp_redhat.yml
+ include: ../checks/check_ntp_redhat.yml
+ when:
+ - ansible_os_family == 'RedHat'
- name: start the ntp service
service:
---
-- include: remove_ceph_udev_rules.yml
+- name: include remove_ceph_udev_rules.yml
+ include: remove_ceph_udev_rules.yml
-- include: debian_prerequisites.yml
- when: ansible_distribution == 'Debian'
+- name: include debian_prerequisites.yml
+ include: debian_prerequisites.yml
+ when:
+ - ansible_distribution == 'Debian'
tags:
with_pkg
name: docker.io
state: present
update_cache: yes
- when: ansible_distribution == 'Ubuntu'
+ when:
+ - ansible_distribution == 'Ubuntu'
tags:
with_pkg
name: python-six
state: present
update_cache: yes
- when: ansible_distribution != 'Debian'
+ when:
+ - ansible_distribution != 'Debian'
tags:
with_pkg
package:
name: python-docker-py
state: present
- when: ansible_os_family == 'RedHat'
+ when:
+ - ansible_os_family == 'RedHat'
tags:
with_pkg
package:
name: python-docker
state: present
- when: ansible_distribution == 'Ubuntu'
+ when:
+ - ansible_distribution == 'Ubuntu'
tags:
with_pkg
package:
name: docker
state: present
- when: ansible_os_family == 'RedHat'
+ when:
+ - ansible_os_family == 'RedHat'
tags:
with_pkg
- name: pause after docker install before starting (on openstack vms)
pause: seconds=5
- when: ceph_docker_on_openstack
+ when:
+ - ceph_docker_on_openstack
tags:
with_pkg
package:
name: ntp
state: present
- when: ntp_service_enabled
+ when:
+ - ntp_service_enabled
tags:
with_pkg
---
-- set_fact:
+- name: set_fact ceph_release jewel
+ set_fact:
ceph_release: jewel
- when: ceph_version.split('.')[0] | version_compare('10', '==')
+ when:
+ - ceph_version.split('.')[0] | version_compare('10', '==')
-- set_fact:
+- name: set_fact ceph_release kraken
+ set_fact:
ceph_release: kraken
- when: ceph_version.split('.')[0] | version_compare('11', '==')
+ when:
+ - ceph_version.split('.')[0] | version_compare('11', '==')
-- set_fact:
+- name: set_fact ceph_release luminous
+ set_fact:
ceph_release: luminous
- when: ceph_version.split('.')[0] | version_compare('12', '==')
+ when:
+ - ceph_version.split('.')[0] | version_compare('12', '==')
-- set_fact:
+- name: set_fact ceph_release mimic
+ set_fact:
ceph_release: mimic
- when: ceph_version.split('.')[0] | version_compare('13', '==')
+ when:
+ - ceph_version.split('.')[0] | version_compare('13', '==')
---
-- name: set config and keys paths
+- name: set_fact ceph_config_keys
set_fact:
ceph_config_keys:
- "{{ ceph_conf_key_directory }}/{{ cluster }}.client.admin.keyring"
- /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
- /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring
-- name: register rbd bootstrap key
+- name: set_fact bootstrap_rbd_keyring
set_fact:
bootstrap_rbd_keyring:
- "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring"
when: ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-- name: merge rbd bootstrap key to config and keys paths
+- name: set_fact ceph_config_keys ceph_config_keys + bootstrap_rbd_keyring
set_fact:
ceph_config_keys: "{{ ceph_config_keys + bootstrap_rbd_keyring }}"
when: ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-- name: add mgr keys to config and keys paths
+- name: set_fact tmp_ceph_mgr_keys add mgr keys to config and keys paths
set_fact:
tmp_ceph_mgr_keys: "{{ ceph_conf_key_directory }}/{{ cluster }}.mgr.{{ hostvars[item]['ansible_hostname'] }}.keyring"
with_items: "{{ groups.get(mgr_group_name, []) }}"
register: tmp_ceph_mgr_keys_result
when: groups.get(mgr_group_name, []) | length > 0
-- name: convert mgr keys to an array
+- name: set_fact ceph_mgr_keys convert mgr keys to an array
set_fact:
ceph_mgr_keys: "{{ tmp_ceph_mgr_keys_result.results | map(attribute='ansible_facts.tmp_ceph_mgr_keys') | list }}"
when: groups.get(mgr_group_name, []) | length > 0
-- name: merge mgr keys to config and keys paths
+- name: set_fact ceph_config_keys merge mgr keys to config and keys paths
set_fact:
ceph_config_keys: "{{ ceph_config_keys + ceph_mgr_keys }}"
when: groups.get(mgr_group_name, []) | length > 0
with_items:
- "{{ ceph_keys.stdout_lines }}"
-- name: register rbd bootstrap key
+- name: set_fact bootstrap_rbd_keyring
set_fact:
bootstrap_rbd_keyring: "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring"
when: ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
---
-- name: set crt path(s)
+- name: set_fact crt_files
set_fact:
crt_files:
- "/etc/ceph/iscsi-gateway.crt"
- "{{ crt_files_exist.results }}"
when: item.1.stat.exists == true
-- include: generate_crt.yml
+- name: include generate_crt.yml
+ include: generate_crt.yml
with_together:
- "{{ crt_files }}"
- "{{ crt_files_exist.results }}"
---
-- include: prerequisites.yml
+- name: include prerequisites.yml
+ include: prerequisites.yml
# deploy_ssl_keys used the ansible controller to create self-signed crt/key/pub files
# and transfers them to /etc/ceph directory on each controller. SSL certs are used by
# the API for https support.
-- include: deploy_ssl_keys.yml
-- include: configure_iscsi.yml
+- name: include deploy_ssl_keys.yml
+ include: deploy_ssl_keys.yml
+
+- name: include configure_iscsi.yml
+ include: configure_iscsi.yml
---
-- name: set config and keys paths
+- name: set_fact ceph_config_keys
set_fact:
ceph_config_keys:
- /etc/ceph/{{ cluster }}.conf
---
-- name: set docker_exec_cmd fact
+- name: set_fact docker_exec_cmd
set_fact:
docker_exec_cmd: "docker exec ceph-mds-{{ ansible_hostname }}"
-- include: copy_configs.yml
-- include: start_docker_mds.yml
-- include: enable_multimds.yml
+- name: include copy_configs.yml
+ include: copy_configs.yml
+
+- name: include start_docker_mds.yml
+ include: start_docker_mds.yml
+
+- name: include enable_multimds.yml
+ include: enable_multimds.yml
---
-- include: pre_requisite.yml
+- name: include pre_requisite.yml
+ include: pre_requisite.yml
when: not containerized_deployment
-- include: ./docker/main.yml
+- name: include docker/main.yml
+ include: docker/main.yml
when: containerized_deployment
---
-- name: set config and keys paths
+- name: set_fact ceph_config_keys
set_fact:
ceph_config_keys:
- /etc/ceph/{{ cluster }}.conf
group: "root"
mode: "0755"
-- include: copy_configs.yml
-- include: start_docker_mgr.yml
+- name: include copy_configs.yml
+ include: copy_configs.yml
+
+- name: include start_docker_mgr.yml
+ include: start_docker_mgr.yml
---
-- include: pre_requisite.yml
+- name: include pre_requisite.yml
+ include: pre_requisite.yml
when: not containerized_deployment
-- include: ./docker/main.yml
+- name: include docker/main.yml
+ include: docker/main.yml
when: containerized_deployment
- ceph_release_num.{{ ceph_release }} > ceph_release_num.jewel
with_items: "{{ groups.get(mgr_group_name, []) }}"
-- include: set_osd_pool_default_pg_num.yml
+- name: include set_osd_pool_default_pg_num.yml
+ include: set_osd_pool_default_pg_num.yml
-- include: crush_rules.yml
+- name: include crush_rules.yml
+ include: crush_rules.yml
when:
- crush_rule_config
failed_when: false
register: rbd_pool_exist
-- include: rbd_pool.yml
+- name: include rbd_pool.yml
+ include: rbd_pool.yml
when: rbd_pool_exist.rc == 0
-- include: rbd_pool_pgs.yml
+- name: include rbd_pool_pgs.yml
+ include: rbd_pool_pgs.yml
when:
- rbd_pool_exist.rc == 0
- global_in_ceph_conf_overrides
- ceph_conf_overrides.global.osd_pool_default_pg_num is defined
-- include: rbd_pool_size.yml
+- name: include rbd_pool_size.yml
+ include: rbd_pool_size.yml
when:
- rbd_pool_exist.rc == 0
- global_in_ceph_conf_overrides
- ceph_conf_overrides.global.osd_pool_default_pg_num is defined
- rbd_pool_exist.rc != 0
-- include: openstack_config.yml
+- name: include openstack_config.yml
+ include: openstack_config.yml
when:
- openstack_config
- - "{{ inventory_hostname == groups[mon_group_name] | last }}"
+ - inventory_hostname == groups[mon_group_name] | last
- name: find ceph keys
shell: ls -1 /etc/ceph/*.keyring
changed_when: false
register: ceph_keys
always_run: true
- when: cephx
+ when:
+ - cephx
- name: set keys permissions
file:
mode: "0600"
with_items:
- "{{ ceph_keys.get('stdout_lines') | default([]) }}"
- when: cephx
+ when:
+ - cephx
-- name: register rbd bootstrap key
+- name: set_fact bootstrap_rbd_keyring
set_fact:
bootstrap_rbd_keyring: "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring"
- when: ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+ when:
+ - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
- name: copy keys to the ansible server
fetch:
- "{{ bootstrap_rbd_keyring | default([]) }}"
when:
- cephx
- - "{{ inventory_hostname == groups[mon_group_name] | last }}"
+ - inventory_hostname == groups[mon_group_name] | last
- name: drop in a motd script to report status when logging in
copy:
owner: root
group: root
mode: 0755
- when: ansible_distribution_release == 'precise'
+ when:
+ - ansible_distribution_release == 'precise'
with_items: "{{ crush_rules }}"
when: item.default
-- name: set crush rule info as fact
+- name: set_fact info_ceph_default_crush_rule_yaml
set_fact:
info_ceph_default_crush_rule_yaml: "{{ info_ceph_default_crush_rule.results[0].stdout|from_json() }}"
when: info_ceph_default_crush_rule.results|length > 0
mode: "0755"
recurse: true
-- set_fact:
+- name: set_fact ceph_authtool_cap >= ceph_release_num.luminous
+ set_fact:
ceph_authtool_cap: "--cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow' --cap mgr 'allow *'"
when:
- ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
- cephx
- admin_secret != 'admin_secret'
-- set_fact:
+- name: set_fact ceph_authtool_cap < ceph_release_num.luminous
+ set_fact:
ceph_authtool_cap: "--cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow'"
when:
- ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
---
-- name: set config and keys paths
+- name: set_fact ceph_config_keys
set_fact:
ceph_config_keys:
- /etc/ceph/{{ cluster }}.conf
- /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
- /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring
-- name: add mgr keys to config and keys paths
+- name: set_fact tmp_ceph_mgr_keys add mgr keys to config and keys paths
set_fact:
tmp_ceph_mgr_keys: /etc/ceph/{{ cluster }}.mgr.{{ hostvars[item]['ansible_hostname'] }}.keyring
with_items: "{{ groups.get(mgr_group_name, []) }}"
register: tmp_ceph_mgr_keys_result
- when: "{{ groups.get(mgr_group_name, []) | length > 0 }}"
+ when:
+ - groups.get(mgr_group_name, []) | length > 0
-- name: convert mgr keys to an array
+- name: set_fact ceph_mgr_keys convert mgr keys to an array
set_fact:
ceph_mgr_keys: "{{ tmp_ceph_mgr_keys_result.results | map(attribute='ansible_facts.tmp_ceph_mgr_keys') | list }}"
- when: "{{ groups.get(mgr_group_name, []) | length > 0 }}"
+ when:
+ - groups.get(mgr_group_name, []) | length > 0
-- name: merge mgr keys to config and keys paths
+- name: set_fact ceph_config_keys merge mgr keys to config and keys paths
set_fact:
ceph_config_keys: "{{ ceph_config_keys + ceph_mgr_keys }}"
- when: "{{ groups.get(mgr_group_name, []) | length > 0 }}"
+ when:
+ - groups.get(mgr_group_name, []) | length > 0
- name: stat for ceph config and keys
local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
with_together:
- "{{ ceph_config_keys }}"
- "{{ statconfig.results }}"
- when: item.1.stat.exists == true
+ when:
+ - item.1.stat.exists == true
- name: set selinux permissions
shell: |
- "{{ ceph_conf_key_directory }}"
- /var/lib/ceph
changed_when: false
- when: sestatus.stdout != 'Disabled'
+ when:
+ - sestatus.stdout != 'Disabled'
---
-- include: copy_configs.yml
+- name: include copy_configs.yml
+ include: copy_configs.yml
when: not containerized_deployment_with_kv
-- include: start_docker_monitor.yml
+- name: include start_docker_monitor.yml
+ include: start_docker_monitor.yml
- name: wait for monitor socket to exist
command: docker exec ceph-mon-{{ ansible_hostname }} stat /var/run/ceph/{{ cluster }}-mon.{{ monitor_name }}.asok
- hostvars[groups[mon_group_name][0]]['monitor_address_block'] is defined
- hostvars[groups[mon_group_name][0]]['monitor_address_block'] | length > 0
-- include: ./fetch_configs.yml
+- name: include fetch_configs.yml
+ include: fetch_configs.yml
when: not containerized_deployment_with_kv
- name: create ceph rest api keyring when mon is containerized
- "{{ inventory_hostname == groups[mon_group_name] | last }}"
- not containerized_deployment_with_kv
-- include: "{{ lookup('env', 'ANSIBLE_ROLES_PATH') | default (playbook_dir + '/roles', true) }}/ceph-mon/tasks/set_osd_pool_default_pg_num.yml"
+- name: include ceph-mon/tasks/set_osd_pool_default_pg_num.yml
+ include: "{{ lookup('env', 'ANSIBLE_ROLES_PATH') | default (playbook_dir + '/roles', true) }}/ceph-mon/tasks/set_osd_pool_default_pg_num.yml"
# create openstack pools only when all mons are up.
-- include: "{{ lookup('env', 'ANSIBLE_ROLES_PATH') | default (playbook_dir + '/roles', true) }}/ceph-mon/tasks/openstack_config.yml"
+- name: include ceph-mon/tasks/openstack_config.yml
+ include: "{{ lookup('env', 'ANSIBLE_ROLES_PATH') | default (playbook_dir + '/roles', true) }}/ceph-mon/tasks/openstack_config.yml"
when:
- openstack_config
- "{{ inventory_hostname == groups[mon_group_name] | last }}"
---
-- include: check_mandatory_vars.yml
+- name: include check_mandatory_vars.yml
+ include: check_mandatory_vars.yml
-- include: deploy_monitors.yml
+- name: include deploy_monitors.yml
+ include: deploy_monitors.yml
when: not containerized_deployment
-- include: start_monitor.yml
+- name: include start_monitor.yml
+ include: start_monitor.yml
when: not containerized_deployment
-- include: ceph_keys.yml
+- name: include ceph_keys.yml
+ include: ceph_keys.yml
when: not containerized_deployment
# this avoids the bug mentioned here: https://github.com/ansible/ansible/issues/18206
static: no
-- include: create_mds_filesystems.yml
+- name: include create_mds_filesystems.yml
+ include: create_mds_filesystems.yml
when:
- not containerized_deployment
- groups[mds_group_name] is defined
- "{{ groups[mds_group_name]|length > 0 }}"
- "{{ inventory_hostname == groups[mon_group_name] | last }}"
-- include: secure_cluster.yml
+- name: include secure_cluster.yml
+ include: secure_cluster.yml
when:
- secure_cluster
- not containerized_deployment
-- include: ./docker/main.yml
+- name: include docker/main.yml
+ include: docker/main.yml
when: containerized_deployment
-- include: calamari.yml
+- name: include calamari.yml
+ include: calamari.yml
when: calamari
register: default_pool_default_pg_num
when: pool_default_pg_num is not defined or not global_in_ceph_conf_overrides
-- set_fact:
+- name: set_fact osd_pool_default_pg_num pool_default_pg_num
+ set_fact:
osd_pool_default_pg_num: "{{ pool_default_pg_num }}"
when: pool_default_pg_num is defined
-- set_fact:
+- name: set_fact osd_pool_default_pg_num default_pool_default_pg_num.stdout
+ set_fact:
osd_pool_default_pg_num: "{{ default_pool_default_pg_num.stdout }}"
when:
- pool_default_pg_num is not defined
- default_pool_default_pg_num.rc == 0
- (osd_pool_default_pg_num_in_overrides is not defined or not osd_pool_default_pg_num_in_overrides)
-- set_fact:
+- name: set_fact osd_pool_default_pg_num ceph_conf_overrides.global.osd_pool_default_pg_num
+ set_fact:
osd_pool_default_pg_num: "{{ ceph_conf_overrides.global.osd_pool_default_pg_num }}"
when:
- global_in_ceph_conf_overrides
---
-# Copy Ceph configs to host
-- include: copy_configs.yml
-# Copy Ganesha Ceph configs to host
-- include: copy_ganesha_configs.yml
+- name: include copy_configs.yml
+ include: copy_configs.yml
+
+- name: include copy_ganesha_configs.yml
+ include: copy_ganesha_configs.yml
when: not containerized_deployment_with_kv
-- include: start_docker_nfs.yml
+- name: include start_docker_nfs.yml
+ include: start_docker_nfs.yml
---
-- include: pre_requisite.yml
+- name: include pre_requisite.yml
+ include: pre_requisite.yml
when: not containerized_deployment
-- include: ./docker/main.yml
+- name: include docker/main.yml
+ include: docker/main.yml
when: containerized_deployment
owner: "ceph"
group: "ceph"
mode: "0600"
- when: cephx
+ when:
+ - cephx
- name: create rgw nfs user
command: radosgw-admin --cluster {{ cluster }} user create --uid={{ ceph_nfs_rgw_user }} --display-name="RGW NFS User"
register: rgwuser
delegate_to: "{{ groups[mon_group_name][0] }}"
- when: nfs_obj_gw
+ when:
+ - nfs_obj_gw
-- name: set access key
+- name: set_fact ceph_nfs_rgw_access_key
set_fact:
ceph_nfs_rgw_access_key: "{{ (rgwuser.stdout | from_json)['keys'][0]['access_key'] }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
- nfs_obj_gw
- ceph_nfs_rgw_access_key is not defined
-- name: set secret key
+- name: set_fact ceph_nfs_rgw_secret_key
set_fact:
ceph_nfs_rgw_secret_key: "{{(rgwuser.stdout | from_json)['keys'][0]['secret_key']}}"
delegate_to: "{{ groups[mon_group_name][0] }}"
# NOTE (leseb): we must do this because of
# https://github.com/ansible/ansible/issues/4297
-- name: combine ispartition results
+- name: set_fact combined_activate_osd_disk_results
set_fact:
combined_activate_osd_disk_results: "{{ activate_osd_disk if osd_scenario != 'collocated' else activate_osd_disk_dmcrypt }}"
---
-- set_fact:
+- name: set_fact ceph_disk_cli_options '--cluster {{ cluster }} --bluestore'
+ set_fact:
ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore"
when:
- osd_objectstore == 'bluestore'
- ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
- not containerized_deployment
-- set_fact:
+- name: set_fact ceph_disk_cli_options '--cluster {{ cluster }} --filestore'
+ set_fact:
ceph_disk_cli_options: "--cluster {{ cluster }} --filestore"
when:
- osd_objectstore == 'filestore'
- ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
- not containerized_deployment
-- set_fact:
+- name: set_fact ceph_disk_cli_options '--cluster {{ cluster }}'
+ set_fact:
ceph_disk_cli_options: "--cluster {{ cluster }}"
when:
- osd_objectstore == 'filestore'
- ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
- not containerized_deployment
-- set_fact:
+- name: set_fact ceph_disk_cli_options '--cluster {{ cluster }} --bluestore --dmcrypt'
+ set_fact:
ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore --dmcrypt"
when:
- osd_objectstore == 'bluestore'
- ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
- not containerized_deployment
-- set_fact:
+- name: set_fact ceph_disk_cli_options '--cluster {{ cluster }} --filestore --dmcrypt'
+ set_fact:
ceph_disk_cli_options: "--cluster {{ cluster }} --filestore --dmcrypt"
when:
- osd_objectstore == 'filestore'
- ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
- not containerized_deployment
-- set_fact:
+- name: set_fact ceph_disk_cli_options '--cluster {{ cluster }} --dmcrypt'
+ set_fact:
ceph_disk_cli_options: "--cluster {{ cluster }} --dmcrypt"
when:
- osd_objectstore == 'filestore'
- ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
- not containerized_deployment
-- set_fact:
+- name: set_fact docker_env_args '-e KV_TYPE={{ kv_type }} -e KV_IP={{ kv_endpoint }} -e KV_PORT={{ kv_port }}'
+ set_fact:
docker_env_args: -e KV_TYPE={{ kv_type }} -e KV_IP={{ kv_endpoint }} -e KV_PORT={{ kv_port }}
when:
- containerized_deployment_with_kv
-- set_fact:
+- name: set_fact docker_env_args '-e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=0'
+ set_fact:
docker_env_args: -e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=0
when:
- containerized_deployment
- osd_objectstore == 'filestore'
- not dmcrypt
-- set_fact:
+- name: set_fact docker_env_args '-e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=1'
+ set_fact:
docker_env_args: -e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=1
when:
- containerized_deployment
- osd_objectstore == 'filestore'
- dmcrypt
-- set_fact:
+- name: set_fact docker_env_args '-e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=0'
+ set_fact:
docker_env_args: -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=0
when:
- containerized_deployment
- osd_objectstore == 'bluestore'
- not dmcrypt
-- set_fact:
+- name: set_fact docker_env_args '-e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=1'
+ set_fact:
docker_env_args: -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=1
when:
- containerized_deployment
# allow 2-digit partition numbers so fast SSDs can be shared by > 9 disks
# for SSD journals.
-- include: ./check_devices_static.yml
+- name: include check_devices_static.yml
+ include: check_devices_static.yml
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
- name: make sure public_network configured
fail:
msg: "public_network must be configured. Ceph public network"
- when: public_network == '0.0.0.0/0'
+ when:
+ - public_network == '0.0.0.0/0'
- name: make sure cluster_network configured
fail:
- "{{ ceph_conf_key_directory }}"
- /var/lib/ceph
changed_when: false
- when: sestatus.stdout != 'Disabled'
+ when:
+ - sestatus.stdout != 'Disabled'
---
-- include: start_docker_osd.yml
+- name: include osd_fragment.yml
+ include: osd_fragment.yml
+ when:
+ - crush_location
+
+- name: include start_docker_osd.yml
+ include: start_docker_osd.yml
src: /dev/vdb
fstype: ext3
state: unmounted
- when: ceph_docker_on_openstack
+ when:
+ - ceph_docker_on_openstack
- name: generate ceph osd docker run script
become: true
---
-- include: check_mandatory_vars.yml
+- name: include check_mandatory_vars.yml
+ include: check_mandatory_vars.yml
-- include: pre_requisite.yml
- when: not containerized_deployment
+- name: include pre_requisite.yml
+ include: pre_requisite.yml
+ when:
+ - not containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
-- include: ceph_disk_cli_options_facts.yml
+- name: include ceph_disk_cli_options_facts.yml
+ include: ceph_disk_cli_options_facts.yml
-- name: generate device list when osd_auto_discovery
+- name: set_fact devices generate device list when osd_auto_discovery
set_fact:
devices: "{{ devices | default([]) + [ item.key | regex_replace('^', '/dev/') ] }}"
with_dict: "{{ ansible_devices }}"
- item.value.holders|count == 0
- osd_auto_discovery
-- include: check_devices.yml
+- name: include check_devices.yml
+ include: check_devices.yml
-- include: copy_configs.yml
+- name: include copy_configs.yml
+ include: copy_configs.yml
when:
- containerized_deployment
- not containerized_deployment_with_kv
-- include: ./scenarios/collocated.yml
+- name: include scenarios/collocated.yml
+ include: scenarios/collocated.yml
when:
- osd_scenario == 'collocated'
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
-- include: ./scenarios/non-collocated.yml
+- name: include scenarios/non-collocated.yml
+ include: scenarios/non-collocated.yml
when:
- osd_scenario == 'non-collocated'
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
-- name: create lvm osds with ceph-volume
- include: ./scenarios/lvm.yml
+- name: include scenarios/lvm.yml
+ include: scenarios/lvm.yml
when:
- osd_scenario == 'lvm'
- not containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
-- include: ./activate_osds.yml
+- name: include activate_osds.yml
+ include: activate_osds.yml
when:
- not containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
-- name: ensure osd daemons are started
+- name: include start_osds.yml
include: start_osds.yml
when:
- not containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
-- include: ./docker/main.yml
- when: containerized_deployment
+- name: include docker/main.yml
+ include: docker/main.yml
+ when:
+ - containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
# NOTE (leseb): we must do this because of
# https://github.com/ansible/ansible/issues/4297
-- name: combine osd_path results
+- name: set_fact combined_osd_id
set_fact:
combined_osd_id: "{{ osd_id_non_dir_scenario }}"
section: extras
option: enabled
value: 1
- when: ansible_distribution == 'CentOS'
+ when:
+ - ansible_distribution == 'CentOS'
- name: install rependencies
package:
name: parted
state: present
- when: ansible_os_family != 'ClearLinux'
+ when:
+ - ansible_os_family != 'ClearLinux'
- name: create bootstrap-osd and osd directories
file:
- /var/lib/ceph/bootstrap-osd/
- /var/lib/ceph/osd/
-- name: copy ceph admin key when using dmcrypt
+- name: set_fact copy_admin_key
set_fact:
copy_admin_key: true
when:
---
-- include: osd_fragment.yml
- when: crush_location
-
- name: get osd id
shell: |
ls /var/lib/ceph/osd/ | sed 's/.*-//'
---
-- name: register rbd bootstrap key
+- name: set_fact bootstrap_rbd_keyring
set_fact:
bootstrap_rbd_keyring: "/var/lib/ceph/bootstrap-rbd/{{ cluster }}.keyring"
- when: ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+ when:
+ - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-- name: set config and keys paths
+- name: set_fact ceph_config_keys
set_fact:
ceph_config_keys:
- /etc/ceph/{{ cluster }}.conf
with_together:
- "{{ ceph_config_keys }}"
- "{{ statconfig.results }}"
- when: item.1.stat.exists == true
+ when:
+ - item.1.stat.exists == true
- name: set selinux permissions
shell: |
- "{{ ceph_conf_key_directory }}"
- /var/lib/ceph
changed_when: false
- when: sestatus.stdout != 'Disabled'
+ when:
+ - sestatus.stdout != 'Disabled'
---
-- include: copy_configs.yml
-- include: start_docker_rbd_mirror.yml
+- name: include copy_configs.yml
+ include: copy_configs.yml
+
+- name: include start_docker_rbd_mirror.yml
+ include: start_docker_rbd_mirror.yml
---
-- include: pre_requisite.yml
- when: not containerized_deployment
+- name: include pre_requisite.yml
+ include: pre_requisite.yml
+ when:
+ - not containerized_deployment
-- include: start_rbd_mirror.yml
- when: not containerized_deployment
+- name: include start_rbd_mirror.yml
+ include: start_rbd_mirror.yml
+ when:
+ - not containerized_deployment
-- include: configure_mirroring.yml
+- name: include configure_mirroring.yml
+ include: configure_mirroring.yml
when:
- ceph_rbd_mirror_configure
- not containerized_deployment
-- include: ./docker/main.yml
- when: containerized_deployment
+- name: include docker/main.yml
+ include: docker/main.yml
+ when:
+ - containerized_deployment
state: started
enabled: yes
changed_when: false
- when: ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
+ when:
+ - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
- name: stop and remove the generic rbd-mirror service instance
service:
state: stopped
enabled: no
changed_when: false
- when: ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+ when:
+ - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
- name: start and add the rbd-mirror service instance
service:
state: started
enabled: yes
changed_when: false
- when: ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+ when:
+ - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
---
-- name: set config and keys paths
+- name: set_fact ceph_config_keys
set_fact:
ceph_config_keys:
- /etc/ceph/{{ cluster }}.conf
with_together:
- "{{ ceph_config_keys }}"
- "{{ statconfig.results }}"
- when: item.1.stat.exists == true
+ when:
+ - item.1.stat.exists == true
- name: set selinux permissions
shell: |
- "{{ ceph_conf_key_directory }}"
- /var/lib/ceph
changed_when: false
- when: sestatus.stdout != 'Disabled'
+ when:
+ - sestatus.stdout != 'Disabled'
---
-- include: copy_configs.yml
-- include: start_docker_restapi.yml
+- name: include copy_configs.yml
+ include: copy_configs.yml
+
+- name: include start_docker_restapi.yml
+ include: start_docker_restapi.yml
---
-- include: pre_requisite.yml
- when: not containerized_deployment
+- name: include pre_requisite.yml
+ include: pre_requisite.yml
+ when:
+ - not containerized_deployment
-- include: start_restapi.yml
- when: not containerized_deployment
+- name: include start_restapi.yml
+ include: start_restapi.yml
+ when:
+ - not containerized_deployment
-- include: ./docker/main.yml
- when: containerized_deployment
+- name: include docker/main.yml
+ include: docker/main.yml
+ when:
+ - containerized_deployment
owner: "ceph"
group: "ceph"
mode: "0600"
- when: cephx
+ when:
+ - cephx
- name: activate ceph rest api with upstart
file:
- done
- upstart
changed_when: false
- when: ansible_distribution == "Ubuntu"
+ when:
+ - ansible_distribution == 'Ubuntu'
- name: activate ceph rest api with sysvinit
file:
with_items:
- done
- sysvinit
- when: ansible_distribution != "Ubuntu"
+ when:
+ - ansible_distribution != 'Ubuntu'
# NOTE (leseb): will uncomment this when this https://github.com/ceph/ceph/pull/4144 lands
#- name: start and add that the Ceph REST API service to the init sequence (Ubuntu)
- name: start ceph rest api
shell: "nohup ceph-rest-api --conf /etc/ceph/{{ cluster }}.conf &"
changed_when: false
- when: restapi_status.rc != 0
+ when:
+ - restapi_status.rc != 0
with_together:
- "{{ ceph_config_keys }}"
- "{{ statconfig.results }}"
- when: item.1.stat.exists == true
+ when:
+ - item.1.stat.exists == true
- name: set selinux permissions
shell: |
- "{{ ceph_conf_key_directory }}"
- /var/lib/ceph
changed_when: false
- when: sestatus.stdout != 'Disabled'
+ when:
+ - sestatus.stdout != 'Disabled'
---
-- name: set config and keys paths
+- name: set_fact rgw_config_keys
set_fact:
rgw_config_keys:
- "/var/lib/ceph/radosgw/{{ ansible_hostname }}/keyring"
- when: nfs_obj_gw
+ when:
+ - nfs_obj_gw
- name: wait for rgw keyring
wait_for:
---
-- include: copy_configs.yml
-- include: start_docker_rgw.yml
+- name: include copy_configs.yml
+ include: copy_configs.yml
-- include: copy_rgw_configs.yml
+- name: include start_docker_rgw.yml
+ include: start_docker_rgw.yml
+
+- name: include copy_rgw_configs.yml
+ include: copy_rgw_configs.yml
---
-- include: pre_requisite.yml
- when: not containerized_deployment
+- name: include pre_requisite.yml
+ include: pre_requisite.yml
+ when:
+ - not containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
-- include: openstack-keystone.yml
+- name: include openstack-keystone.yml
+ include: openstack-keystone.yml
when:
- radosgw_keystone|bool
- radosgw_keystone_ssl|bool
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
-- include: start_radosgw.yml
- when: not containerized_deployment
+- name: include start_radosgw.yml
+ include: start_radosgw.yml
+ when:
+ - not containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
-- include: rgw_pool_pgs.yml
- when: create_pools is defined
+- name: include rgw_pool_pgs.yml
+ include: rgw_pool_pgs.yml
+ when:
+ - create_pools is defined
static: False
-- name: include rgw multisite playbooks
+- name: include multisite/main.yml
include: multisite/main.yml
when:
- rgw_zone is defined
- rgw_multisite
- - ( ceph_release_num.{{ ceph_release }} >= ceph_release_num.jewel )
+ - ceph_release_num.{{ ceph_release }} >= ceph_release_num.jewel
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
-- include: ./docker/main.yml
- when: containerized_deployment
+- name: include docker/main.yml
+ include: docker/main.yml
+ when:
+ - containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
dest: "/etc/ceph/{{ cluster }}.conf"
regexp: "rgw_zone = {{ rgw_zonegroup }}-{{ rgw_zone }}"
state: absent
- when: "rgw_zone is defined and rgw_zonegroup is defined"
+ when:
+ - rgw_zone is defined
+ - rgw_zonegroup is defined
notify:
- restart rgw
# Include the tasks depending on the zone type
- name: include master multisite tasks
include: master.yml
- when: "rgw_zonemaster is defined and rgw_zonemaster"
+ when:
+ - rgw_zonemaster is defined
+ - rgw_zonemaster
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
- name: include secondary multisite tasks
include: secondary.yml
- when: "rgw_zonesecondary is defined and rgw_zonesecondary"
+ when:
+ - rgw_zonesecondary is defined
+ - rgw_zonesecondary
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
- name: create the realm
command: radosgw-admin realm create --rgw-realm={{ rgw_realm }} --default
run_once: true
- when: ("No such file or directory" in realmcheck.stderr) and rgw_zonemaster
+ when:
+ - "'No such file or directory' in realmcheck.stderr"
+ - rgw_zonemaster
notify:
- update period
-- name: create the zonegroup
+- name: create the zonegroup
command: radosgw-admin zonegroup create --rgw-zonegroup={{ rgw_zonegroup }} --endpoints=http://{{ ansible_fqdn }}:{{ radosgw_civetweb_port }} --master --default
run_once: true
- when: ("No such file or directory" in zonegroupcheck.stderr) and rgw_zonemaster
+ when:
+ - "'No such file or directory' in zonegroupcheck.stderr"
+ - rgw_zonemaster
notify:
- update period
- name: create the zone
command: radosgw-admin zone create --rgw-zonegroup={{ rgw_zonegroup }} --rgw-zone={{ rgw_zone }} --endpoints=http://{{ ansible_fqdn }}:{{ radosgw_civetweb_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }} --default --master
run_once: true
- when: ("No such file or directory" in zonecheck.stderr) and rgw_zonemaster
+ when:
+ - "'No such file or directory' in zonecheck.stderr"
+ - rgw_zonemaster
notify:
- update period
- name: create the zone user
command: radosgw-admin user create --uid=zone.user --display-name="Zone User" --access-key={{ system_access_key }} --secret={{ system_secret_key }} --system
run_once: true
- when: "'could not fetch user info: no user info saved' in usercheck.stderr"
+ when:
+ - "'could not fetch user info: no user info saved' in usercheck.stderr"
notify:
- update period
- name: fetch the realm
command: radosgw-admin realm pull --url={{ rgw_pull_proto }}://{{ rgw_pullhost }}:{{ rgw_pull_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }}
run_once: true
- when: ("No such file or directory" in realmcheck.stderr)
+ when:
+ - "'No such file or directory' in realmcheck.stderr"
notify:
- update period
- name: fetch the period
command: radosgw-admin period pull --url={{ rgw_pull_proto }}://{{ rgw_pullhost }}:{{ rgw_pull_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }}
run_once: true
- when: ("No such file or directory" in realmcheck.stderr)
+ when:
+ - "'No such file or directory' in realmcheck.stderr"
notify:
- update period
- name: create the zone
command: radosgw-admin zone create --rgw-zonegroup={{ rgw_zonegroup }} --rgw-zone={{ rgw_zone }} --endpoints=http://{{ ansible_hostname }}:{{ radosgw_civetweb_port }} --access-key={{ system_access_key }} --secret={{ system_secret_key }} --default
run_once: true
- when: ("No such file or directory" in zonecheck.stderr)
+ when:
+ - "'No such file or directory' in zonecheck.stderr"
notify:
- update period
package:
name: nss-tools
state: present
- when: ansible_pkg_mgr == "yum" or ansible_pkg_mgr == "dnf"
+ when:
+ - ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf'
- name: install libnss3-tools on debian
package:
name: libnss3-tools
state: present
- when: ansible_pkg_mgr == 'apt'
+ when:
+ - ansible_pkg_mgr == 'apt'
- name: create nss directory for keystone certificates
file:
command: ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}
changed_when: false
failed_when: false
- when: create_pools is defined
with_dict: "{{ create_pools }}"
+ when:
+ - create_pools is defined