- wait for server to boot
- remove data
when:
- - reboot_osd_node
+ - reboot_osd_node | bool
- remove_osd_mountpoints.failed is defined
- name: wipe table on dm-crypt devices
state: absent
when:
- ansible_pkg_mgr == 'yum'
- - purge_all_packages == true
+ - purge_all_packages | bool
- name: purge remaining ceph packages with dnf
dnf:
state: absent
when:
- ansible_pkg_mgr == 'dnf'
- - purge_all_packages == true
+ - purge_all_packages | bool
- name: purge remaining ceph packages with apt
apt:
state: absent
when:
- ansible_pkg_mgr == 'apt'
- - purge_all_packages == true
+ - purge_all_packages | bool
- name: remove config
file:
file:
path: /etc/profile.d/ceph-aliases.sh
state: absent
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: set mon_host_count
set_fact:
delegate_to: "{{ item }}"
with_items: "{{ groups[mon_group_name] }}"
when:
- - cephx
+ - cephx | bool
- inventory_hostname == groups[mon_group_name][0]
- name: create potentially missing keys (rbd and rbd-mirror)
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
when:
- - cephx
+ - cephx | bool
- inventory_hostname == groups[mon_group_name][0]
# NOTE: we mask the service so the RPM can't restart it
enabled: no
masked: yes
ignore_errors: True
- when: not containerized_deployment
+ when: not containerized_deployment | bool
# NOTE: we mask the service so the RPM can't restart it
# after the package gets upgraded
enabled: no
masked: yes
ignore_errors: True
- when: not containerized_deployment
+ when: not containerized_deployment | bool
# only mask the service for mgr because it must be upgraded
# after ALL monitors, even when collocated
delegate_to: "{{ mon_host }}"
when:
- inventory_hostname == groups[mon_group_name][0]
- - not containerized_deployment
+ - not containerized_deployment | bool
- name: set containerized osd flags
command: >
delegate_to: "{{ mon_host }}"
when:
- inventory_hostname == groups[mon_group_name][0]
- - containerized_deployment
+ - containerized_deployment | bool
- import_role:
name: ceph-handler
- import_role:
name: ceph-common
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- import_role:
name: ceph-container-common
- when: containerized_deployment
+ when: containerized_deployment | bool
- import_role:
name: ceph-config
- import_role:
name: ceph-mon@{{ monitor_name }}
state: started
enabled: yes
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- name: start ceph mgr
systemd:
state: started
enabled: yes
ignore_errors: True # if no mgr collocated with mons
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- name: restart containerized ceph mon
systemd:
state: restarted
enabled: yes
daemon_reload: yes
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: non container | waiting for the monitor to join the quorum...
command: ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" -s --format json
hostvars[inventory_hostname]['ansible_fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
retries: "{{ health_mon_check_retries }}"
delay: "{{ health_mon_check_delay }}"
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- name: container | waiting for the containerized monitor to join the quorum...
command: >
hostvars[inventory_hostname]['ansible_fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
retries: "{{ health_mon_check_retries }}"
delay: "{{ health_mon_check_delay }}"
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: upgrade ceph mgr nodes when implicitly collocated on monitors
vars:
name: ceph-handler
- import_role:
name: ceph-common
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- import_role:
name: ceph-container-common
- when: containerized_deployment
+ when: containerized_deployment | bool
- import_role:
name: ceph-config
- import_role:
name: ceph-handler
- import_role:
name: ceph-common
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- import_role:
name: ceph-container-common
- when: containerized_deployment
+ when: containerized_deployment | bool
- import_role:
name: ceph-config
- import_role:
shell: "if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | sed 's/.*-//' ; fi"
register: osd_ids
changed_when: false
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- name: get osd unit names - container
shell: systemctl list-units | grep -E "loaded * active" | grep -oE "ceph-osd@([a-z0-9]+).service"
register: osd_names
changed_when: false
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: set num_osds for container
set_fact:
num_osds: "{{ osd_names.stdout_lines|default([])|length }}"
- when:
- - containerized_deployment
+ when: containerized_deployment | bool
- name: stop ceph osd
systemd:
enabled: no
masked: yes
with_items: "{{ osd_ids.stdout_lines }}"
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- name: set num_osds for non container
set_fact:
num_osds: "{{ osd_ids.stdout_lines|default([])|length }}"
- when:
- - not containerized_deployment
+ when: not containerized_deployment | bool
- import_role:
name: ceph-defaults
name: ceph-handler
- import_role:
name: ceph-common
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- import_role:
name: ceph-container-common
- when: containerized_deployment
+ when: containerized_deployment | bool
- import_role:
name: ceph-config
- import_role:
shell: "if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | sed 's/.*-//' ; fi"
register: osd_ids
changed_when: false
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- name: start ceph osd
systemd:
enabled: yes
masked: no
with_items: "{{ osd_ids.stdout_lines }}"
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- name: restart containerized ceph osd
systemd:
masked: no
daemon_reload: yes
with_items: "{{ osd_names.stdout_lines }}"
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: scan ceph-disk osds with ceph-volume if deploying nautilus
command: "ceph-volume --cluster={{ cluster }} simple scan"
CEPH_VOLUME_DEBUG: 1
when:
- ceph_release in ["nautilus", "octopus"]
- - not containerized_deployment
+ - not containerized_deployment | bool
- name: activate scanned ceph-disk osds and migrate to ceph-volume if deploying nautilus
command: "ceph-volume --cluster={{ cluster }} simple activate --all"
CEPH_VOLUME_DEBUG: 1
when:
- ceph_release in ["nautilus", "octopus"]
- - not containerized_deployment
+ - not containerized_deployment | bool
- name: set_fact container_exec_cmd_osd
set_fact:
container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: get osd versions
command: "{{ container_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} versions"
- name: set_fact container_exec_cmd_osd
set_fact:
container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: unset osd flags
command: "{{ container_exec_cmd_update_osd|default('') }} ceph osd unset {{ item }} --cluster {{ cluster }}"
state: stopped
enabled: no
masked: yes
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- import_role:
name: ceph-defaults
name: ceph-handler
- import_role:
name: ceph-common
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- import_role:
name: ceph-container-common
- when: containerized_deployment
+ when: containerized_deployment | bool
- import_role:
name: ceph-config
- import_role:
state: started
enabled: yes
masked: no
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- name: restart ceph mds
systemd:
enabled: yes
masked: no
daemon_reload: yes
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: upgrade ceph rgws cluster
enabled: no
masked: yes
with_items: "{{ rgw_instances }}"
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- import_role:
name: ceph-handler
- import_role:
name: ceph-common
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- import_role:
name: ceph-container-common
- when: containerized_deployment
+ when: containerized_deployment | bool
- import_role:
name: ceph-config
- import_role:
masked: no
daemon_reload: yes
with_items: "{{ rgw_instances }}"
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: upgrade ceph rbd mirror node
name: ceph-handler
- import_role:
name: ceph-common
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- import_role:
name: ceph-container-common
- when: containerized_deployment
+ when: containerized_deployment | bool
- import_role:
name: ceph-config
- import_role:
state: started
enabled: yes
masked: no
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- name: restart containerized ceph rbd mirror
systemd:
enabled: yes
masked: no
daemon_reload: yes
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: upgrade ceph nfs node
enabled: no
masked: yes
failed_when: false
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- import_role:
name: ceph-defaults
name: ceph-handler
- import_role:
name: ceph-common
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- import_role:
name: ceph-container-common
- when: containerized_deployment
+ when: containerized_deployment | bool
- import_role:
name: ceph-config
- import_role:
enabled: yes
masked: no
when:
- - not containerized_deployment
- - ceph_nfs_enable_service
+ - not containerized_deployment | bool
+ - ceph_nfs_enable_service | bool
- name: systemd restart nfs container
systemd:
masked: no
daemon_reload: yes
when:
- - ceph_nfs_enable_service
- - containerized_deployment
+ - ceph_nfs_enable_service | bool
+ - containerized_deployment | bool
- name: upgrade ceph iscsi gateway node
- rbd-target-api
- rbd-target-gw
- tcmu-runner
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- import_role:
name: ceph-defaults
name: ceph-handler
- import_role:
name: ceph-common
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- import_role:
name: ceph-container-common
- when: containerized_deployment
+ when: containerized_deployment | bool
- import_role:
name: ceph-config
- import_role:
- tcmu-runner
- rbd-target-api
- rbd-target-gw
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- name: upgrade ceph client node
name: ceph-handler
- import_role:
name: ceph-common
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- import_role:
name: ceph-container-common
- when: containerized_deployment
+ when: containerized_deployment | bool
- import_role:
name: ceph-config
- import_role:
command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph osd require-osd-release nautilus"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: True
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: non container | disallow pre-nautilus OSDs and enable all new nautilus-only functionality
command: "ceph --cluster {{ cluster }} osd require-osd-release nautilus"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: True
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- name: container | enable msgr2 protocol
command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph mon enable-msgr2"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: True
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: non container | enable msgr2 protocol
command: "ceph --cluster {{ cluster }} mon enable-msgr2"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: True
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- import_role:
name: ceph-handler
- name: set_fact container_exec_cmd_status
set_fact:
container_exec_cmd_status: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: show ceph status
command: "{{ container_exec_cmd_status|default('') }} ceph --cluster {{ cluster }} -s"
- name: "set_fact container_exec_cmd build {{ container_binary }} exec command (containerized)"
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }}"
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: exit playbook, if can not connect to the cluster
command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
- name: set_fact container_exec_cmd build docker exec command (containerized)
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: exit playbook, if can not connect to the cluster
command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
with_items:
osds_dir_stat.results
when:
- osds_dir_stat is defined
- item.stat.exists == false
- name: install sgdisk(gdisk)
- name: set_fact container_exec_cmd build docker exec command (containerized)
set_fact:
container_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: exit playbook, if can not connect to the cluster
command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
with_items: "{{ osd_hosts }}"
delegate_to: "{{ item }}"
failed_when: false
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- name: fail when admin key is not present
fail:
msg: "The Ceph admin key is not present on the OSD node, please add it and remove it after the playbook is done."
with_items: "{{ ceph_admin_key.results }}"
when:
- - not containerized_deployment
+ - not containerized_deployment | bool
- item.stat.exists == false
# NOTE(leseb): using '>' is the only way I could have the command working
- "{{ osd_to_replace.split(',') }}"
register: osd_to_replace_disks
delegate_to: "{{ item.0 }}"
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: zapping osd(s) - container
shell: >
- "{{ osd_hosts }}"
- "{{ osd_to_replace_disks.results }}"
delegate_to: "{{ item.0 }}"
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: zapping osd(s) - non container
command: ceph-disk zap --cluster {{ cluster }} {{ item.1 }}
- "{{ osd_hosts }}"
- "{{ osd_to_replace_disks.results }}"
delegate_to: "{{ item.0 }}"
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- name: destroying osd(s)
command: ceph-disk destroy --cluster {{ cluster }} --destroy-by-id {{ item.1 }} --zap
- "{{ osd_hosts }}"
- "{{ osd_to_replace.split(',') }}"
delegate_to: "{{ item.0 }}"
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- name: replace osd(s) - prepare - non container
command: ceph-disk prepare {{ item.1 }} --osd-id {{ item.2 }} --osd-uuid $(uuidgen)
location: "{{ hostvars[item]['osd_crush_location'] }}"
containerized: "{{ container_exec_cmd }}"
with_items: "{{ groups[osd_group_name] }}"
- when: crush_rule_config
+ when: crush_rule_config | bool
'''
RETURN = '''# '''
name: "{{ item }}"
groups: _filtered_clients
with_items: "{{ groups[client_group_name] }}"
- when: (hostvars[item]['ansible_architecture'] == 'x86_64') or (not containerized_deployment)
+ when: (hostvars[item]['ansible_architecture'] == 'x86_64') or (not containerized_deployment | bool)
- name: set_fact delegated_node
set_fact:
- "{{ ceph_nfs_ceph_user | default([]) }}"
delegate_to: "{{ delegated_node }}"
when:
- - cephx
+ - cephx | bool
- keys | length > 0
- inventory_hostname == groups.get('_filtered_clients') | first
register: slurp_client_keys
delegate_to: "{{ delegated_node }}"
when:
- - cephx
+ - cephx | bool
- keys | length > 0
- inventory_hostname == groups.get('_filtered_clients') | first
- name: pool related tasks
when:
- - condition_copy_admin_key
+ - condition_copy_admin_key | bool
- inventory_hostname == groups.get('_filtered_clients', []) | first
block:
- name: list existing pool(s)
- name: include create_users_keys.yml
include_tasks: create_users_keys.yml
- when: user_config
+ when: user_config | bool
group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
mode: "{{ ceph_keyring_permissions }}"
when:
- - cephx
- - copy_admin_key
+ - cephx | bool
+ - copy_admin_key | bool
with_items:
- "{{ rbd_client_admin_socket_path }}"
- "{{ rbd_client_log_path }}"
- when: rbd_client_directories
+ when: rbd_client_directories | bool
file:
path: /tmp
state: directory
- when: use_installer
+ when: use_installer | bool
- name: use mktemp to create name for rundep
command: "mktemp /tmp/rundep.XXXXXXXX"
register: rundep_location
- when: use_installer
+ when: use_installer | bool
- name: copy rundep
copy:
src: "{{ ansible_dir }}/rundep"
dest: "{{ item }}"
with_items: "{{ (rundep_location|default({})).stdout_lines|default([]) }}"
- when: use_installer
+ when: use_installer | bool
- name: install ceph dependencies
script: "{{ ansible_dir }}/rundep_installer.sh {{ item }}"
become: true
with_items: "{{ (rundep_location|default({})).stdout_lines|default([]) }}"
- when: use_installer
+ when: use_installer | bool
- name: ensure rsync is installed
package:
---
- name: include create_ceph_initial_dirs.yml
include_tasks: create_ceph_initial_dirs.yml
- when: containerized_deployment|bool
+ when: containerized_deployment | bool
- name: config file operations related to OSDs
when:
- inventory_hostname in groups.get(osd_group_name, [])
# the rolling_update.yml playbook sets num_osds to the number of currently
# running osds
- - not rolling_update
+ - not rolling_update | bool
block:
- name: count number of osds for lvm scenario
set_fact:
# ceph-common
- name: config file operation for non-containerized scenarios
- when: not containerized_deployment|bool
+ when: not containerized_deployment | bool
block:
- name: create ceph conf directory
file:
state: directory
mode: "0755"
delegate_to: localhost
- when: ceph_conf_local
+ when: ceph_conf_local | bool
- name: "generate {{ cluster }}.conf configuration file locally"
config_template:
config_type: ini
when:
- inventory_hostname in groups[mon_group_name]
- - ceph_conf_local
+ - ceph_conf_local | bool
- name: config file operations for containerized scenarios
- when: containerized_deployment|bool
+ when: containerized_deployment | bool
block:
- name: create a local fetch directory if it does not exist
file:
become: false
run_once: true
when:
- - (cephx or generate_fsid)
+ - cephx | bool or generate_fsid | bool
- ((inventory_hostname in groups.get(mon_group_name, [])) or
(groups.get(nfs_group_name, []) | length > 0) and inventory_hostname == groups.get(nfs_group_name, [])[0])
until: docker_image.rc == 0
retries: "{{ docker_pull_retry }}"
delay: 10
- when: (ceph_docker_dev_image is undefined or not ceph_docker_dev_image)
+ when: (ceph_docker_dev_image is undefined or not ceph_docker_dev_image | bool)
- name: "inspecting {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image after pulling"
command: "{{ container_binary }} inspect {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
copy:
src: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
dest: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
- when: (ceph_docker_dev_image is defined and ceph_docker_dev_image)
+ when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool)
- name: load ceph dev image
command: "{{ container_binary }} load -i /tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
- when: (ceph_docker_dev_image is defined and ceph_docker_dev_image)
+ when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool)
- name: remove tmp ceph dev image file
file:
name: "/tmp/{{ ceph_docker_username }}-{{ ceph_docker_imagename }}-{{ ceph_docker_image_tag }}.tar"
state: absent
- when: (ceph_docker_dev_image is defined and ceph_docker_dev_image)
+ when: (ceph_docker_dev_image is defined and ceph_docker_dev_image | bool)
enabled: yes
when:
- ansible_distribution == 'CentOS'
- - ceph_docker_enable_centos_extra_repo
+ - ceph_docker_enable_centos_extra_repo | bool
tags:
with_pkg
- name: set_fact container_exec_cmd
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: disable SSL for dashboard
command: "{{ container_exec_cmd }} ceph config set mgr mgr/dashboard/ssl false"
group: root
mode: 0440
when:
- - dashboard_crt
+ - dashboard_crt | bool
- dashboard_protocol == "https"
- name: copy dashboard SSL certificate key
group: root
mode: 0440
when:
- - dashboard_key
+ - dashboard_key | bool
- dashboard_protocol == "https"
- name: generate a Self Signed OpenSSL certificate for dashboard
openssl req -new -nodes -x509 -subj '/O=IT/CN=ceph-dashboard' -days 3650 -keyout /etc/ceph/ceph-dashboard.key -out /etc/ceph/ceph-dashboard.crt -extensions v3_ca
when:
- dashboard_protocol == "https"
- - not dashboard_key or not dashboard_crt
+ - not dashboard_key | bool or not dashboard_crt | bool
- name: import dashboard certificate file
command: "{{ container_exec_cmd }} ceph config-key set mgr/dashboard/crt -i /etc/ceph/ceph-dashboard.crt"
command: "{{ container_exec_cmd }} ceph dashboard set-rgw-api-host {{ dashboard_rgw_api_host }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
- when: dashboard_rgw_api_host
+ when: dashboard_rgw_api_host | length > 0
- name: set the rgw port
command: "{{ container_exec_cmd }} ceph dashboard set-rgw-api-port {{ dashboard_rgw_api_port }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
- when: dashboard_rgw_api_port
+ when: dashboard_rgw_api_port | string | length > 0
- name: set the rgw scheme
command: "{{ container_exec_cmd }} ceph dashboard set-rgw-api-scheme {{ dashboard_rgw_api_scheme }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
- when: dashboard_rgw_api_scheme
+ when: dashboard_rgw_api_scheme | length > 0
- name: set the rgw admin resource
command: "{{ container_exec_cmd }} ceph dashboard set-rgw-api-admin-resource {{ dashboard_rgw_api_admin_resource }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
- when: dashboard_rgw_api_admin_resource
+ when: dashboard_rgw_api_admin_resource | length > 0
- name: disable ssl verification for rgw
command: "{{ container_exec_cmd }} ceph dashboard set-rgw-api-ssl-verify False"
- name: set_fact monitor_name ansible_hostname
set_fact:
monitor_name: "{{ ansible_hostname }}"
- when: not mon_use_fqdn
+ when: not mon_use_fqdn | bool
- name: set_fact monitor_name ansible_fqdn
set_fact:
monitor_name: "{{ ansible_fqdn }}"
- when: mon_use_fqdn
+ when: mon_use_fqdn | bool
- name: set_fact container_exec_cmd
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] if not rolling_update else hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_hostname'] }}"
when:
- - containerized_deployment
+ - containerized_deployment | bool
- groups.get(mon_group_name, []) | length > 0
# this task shouldn't run in a rolling_update situation
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- - not rolling_update
+ - not rolling_update | bool
- groups.get(mon_group_name, []) | length > 0
# set this as a default when performing a rolling_update
delegate_to: localhost
changed_when: false
become: false
- when: cephx or generate_fsid
+ when: cephx | bool or generate_fsid | bool
- name: get current fsid
command: "{{ timeout_command }} {{ container_exec_cmd }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_hostname'] }} config get fsid"
register: rolling_update_fsid
delegate_to: "{{ mon_host | default(groups[mon_group_name][0]) }}"
- when: rolling_update
+ when: rolling_update | bool
- name: set_fact fsid
set_fact:
fsid: "{{ (rolling_update_fsid.stdout | from_json).fsid }}"
- when: rolling_update
+ when: rolling_update | bool
- name: set_fact ceph_current_status (convert to json)
set_fact:
ceph_current_status: "{{ ceph_current_status.stdout | from_json }}"
when:
- - not rolling_update
+ - not rolling_update | bool
- ceph_current_status.rc == 0
- name: set_fact fsid from ceph_current_status
- name: fsid realted tasks
when:
- - generate_fsid
+ - generate_fsid | bool
- ceph_current_status.fsid is undefined
- - not rolling_update
+ - not rolling_update | bool
block:
- name: generate cluster fsid
shell: python -c 'import uuid; print(str(uuid.uuid4()))'
- name: set_fact mds_name ansible_hostname
set_fact:
mds_name: "{{ ansible_hostname }}"
- when: not mds_use_fqdn
+ when: not mds_use_fqdn | bool
- name: set_fact mds_name ansible_fqdn
set_fact:
mds_name: "{{ ansible_fqdn }}"
- when: mds_use_fqdn
+ when: mds_use_fqdn | bool
- name: set_fact rbd_client_directory_owner ceph
set_fact:
when:
- devices is defined
- inventory_hostname in groups.get(osd_group_name, [])
- - not osd_auto_discovery|default(False)
+ - not osd_auto_discovery | default(False) | bool
- name: set_fact build devices from resolved symlinks
set_fact:
when:
- devices is defined
- inventory_hostname in groups.get(osd_group_name, [])
- - not osd_auto_discovery|default(False)
+ - not osd_auto_discovery | default(False) | bool
- name: set_fact build final devices list
set_fact:
when:
- devices is defined
- inventory_hostname in groups.get(osd_group_name, [])
- - not osd_auto_discovery|default(False)
+ - not osd_auto_discovery | default(False) | bool
- name: set_fact devices generate device list when osd_auto_discovery
set_fact:
devices: "{{ devices | default([]) + [ item.key | regex_replace('^', '/dev/') ] }}"
with_dict: "{{ ansible_devices }}"
when:
- - osd_auto_discovery|default(False)
+ - osd_auto_discovery | default(False) | bool
- ansible_devices is defined
- item.value.removable == "0"
- item.value.sectors != "0"
set_fact:
ceph_uid: 64045
when:
- - not containerized_deployment
+ - not containerized_deployment | bool
- ansible_os_family == 'Debian'
- name: set_fact ceph_uid for red hat or suse based system - non container
set_fact:
ceph_uid: 167
when:
- - not containerized_deployment
+ - not containerized_deployment | bool
- ansible_os_family in ['RedHat', 'Suse']
- name: set_fact ceph_uid for debian based system - container
set_fact:
ceph_uid: 64045
when:
- - containerized_deployment
+ - containerized_deployment | bool
- ceph_docker_image_tag | string is search("ubuntu")
- name: set_fact ceph_uid for red hat based system - container
set_fact:
ceph_uid: 167
when:
- - containerized_deployment
+ - containerized_deployment | bool
- (ceph_docker_image_tag | string is search("latest") or ceph_docker_image_tag | string is search("centos") or ceph_docker_image_tag | string is search("fedora")
or (ansible_distribution == 'RedHat'))
set_fact:
ceph_uid: 167
when:
- - containerized_deployment
+ - containerized_deployment | bool
- ceph_docker_image is search("rhceph")
- name: set_fact rgw_hostname
dest: "/etc/grafana/ceph-dashboard.crt"
mode: 0640
when:
- - grafana_crt
+ - grafana_crt | bool
- dashboard_protocol == "https"
- name: copy grafana SSL certificate key
dest: "/etc/grafana/ceph-dashboard.key"
mode: 0440
when:
- - grafana_key
+ - grafana_key | bool
- dashboard_protocol == "https"
- name: generate a Self Signed OpenSSL certificate for dashboard
openssl req -new -nodes -x509 -subj '/O=IT/CN=ceph-grafana' -days 3650 -keyout /etc/grafana/ceph-dashboard.key -out /etc/grafana/ceph-dashboard.crt -extensions v3_ca
when:
- dashboard_protocol == "https"
- - not grafana_key or not grafana_crt
+ - not grafana_key | bool or not grafana_crt | bool
- name: set owner/group on /etc/grafana
file:
---
- name: handlers
- when: not rolling_update
+ when: not rolling_update | bool
block:
- name: update apt cache
apt:
listen: "restart ceph mons"
when:
- mon_group_name in group_names
- - not rolling_update
+ - not rolling_update | bool
- name: restart ceph mon daemon(s) - non container
command: /usr/bin/env bash /tmp/restart_mon_daemon.sh
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- mon_group_name in group_names
- - not containerized_deployment
- - hostvars[item]['_mon_handler_called'] | default(False)
+ - not containerized_deployment | bool
+ - hostvars[item]['_mon_handler_called'] | default(False) | bool
- mon_socket_stat.rc == 0
- - not rolling_update
+ - not rolling_update | bool
with_items: "{{ groups[mon_group_name] }}"
delegate_to: "{{ item }}"
run_once: True
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- mon_group_name in group_names
- - containerized_deployment
+ - containerized_deployment | bool
- ceph_mon_container_stat.get('rc') == 0
- - hostvars[item]['_mon_handler_called'] | default(False)
+ - hostvars[item]['_mon_handler_called'] | default(False) | bool
- ceph_mon_container_stat.get('stdout_lines', [])|length != 0
- - not rolling_update
+ - not rolling_update | bool
with_items: "{{ groups[mon_group_name] }}"
delegate_to: "{{ item }}"
run_once: True
listen: "restart ceph osds"
when:
- osd_group_name in group_names
- - not rolling_update
+ - not rolling_update | bool
- name: restart ceph osds daemon(s) - non container
command: /usr/bin/env bash /tmp/restart_osd_daemon.sh
listen: "restart ceph osds"
when:
- osd_group_name in group_names
- - not containerized_deployment
- - not rolling_update
+ - not containerized_deployment | bool
+ - not rolling_update | bool
# We do not want to run these checks on initial deployment (`socket_osd_container.results[n].rc == 0`)
# except when a crush location is specified. ceph-disk will start the osds before the osd crush location is specified
- osd_socket_stat.rc == 0
- ceph_current_status.fsid is defined
- - handler_health_osd_check
- - hostvars[item]['_osd_handler_called'] | default(False)
+ - handler_health_osd_check | bool
+ - hostvars[item]['_osd_handler_called'] | default(False) | bool
with_items: "{{ groups[osd_group_name] }}"
delegate_to: "{{ item }}"
run_once: True
# We do not want to run these checks on initial deployment (`socket_osd_container_stat.results[n].rc == 0`)
# except when a crush location is specified. ceph-disk will start the osds before the osd crush location is specified
- osd_group_name in group_names
- - containerized_deployment
- - not rolling_update
+ - containerized_deployment | bool
+ - not rolling_update | bool
- inventory_hostname == groups.get(osd_group_name) | last
- ceph_osd_container_stat.get('rc') == 0
- ceph_osd_container_stat.get('stdout_lines', [])|length != 0
- - handler_health_osd_check
+ - handler_health_osd_check | bool
- hostvars[item]['_osd_handler_called'] | default(False)
with_items: "{{ groups[osd_group_name] }}"
delegate_to: "{{ item }}"
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- mds_group_name in group_names
- - not containerized_deployment
- - hostvars[item]['_mds_handler_called'] | default(False)
+ - not containerized_deployment | bool
+ - hostvars[item]['_mds_handler_called'] | default(False) | bool
- mds_socket_stat.rc == 0
with_items: "{{ groups[mds_group_name] }}"
delegate_to: "{{ item }}"
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- mds_group_name in group_names
- - containerized_deployment
+ - containerized_deployment | bool
- ceph_mds_container_stat.get('rc') == 0
- - hostvars[item]['_mds_handler_called'] | default(False)
+ - hostvars[item]['_mds_handler_called'] | default(False) | bool
- ceph_mds_container_stat.get('stdout_lines', [])|length != 0
with_items: "{{ groups[mds_group_name] }}"
delegate_to: "{{ item }}"
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- rgw_group_name in group_names
- - not containerized_deployment
- - hostvars[item]['_rgw_handler_called'] | default(False)
+ - not containerized_deployment | bool
+ - hostvars[item]['_rgw_handler_called'] | default(False) | bool
- rgw_socket_stat.rc == 0
with_items: "{{ groups[rgw_group_name] }}"
delegate_to: "{{ item }}"
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- rgw_group_name in group_names
- - containerized_deployment
+ - containerized_deployment | bool
- ceph_rgw_container_stat.get('rc') == 0
- - hostvars[item]['_rgw_handler_called'] | default(False)
+ - hostvars[item]['_rgw_handler_called'] | default(False) | bool
- ceph_rgw_container_stat.get('stdout_lines', [])|length != 0
with_items: "{{ groups[rgw_group_name] }}"
delegate_to: "{{ item }}"
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- nfs_group_name in group_names
- - not containerized_deployment
- - hostvars[item]['_nfs_handler_called'] | default(False)
+ - not containerized_deployment | bool
+ - hostvars[item]['_nfs_handler_called'] | default(False) | bool
- nfs_socket_stat.rc == 0
with_items: "{{ groups[nfs_group_name] }}"
delegate_to: "{{ item }}"
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- nfs_group_name in group_names
- - containerized_deployment
+ - containerized_deployment | bool
- ceph_nfs_container_stat.get('rc') == 0
- - hostvars[item]['_nfs_handler_called'] | default(False)
+ - hostvars[item]['_nfs_handler_called'] | default(False) | bool
- ceph_nfs_container_stat.get('stdout_lines', [])|length != 0
with_items: "{{ groups[nfs_group_name] }}"
delegate_to: "{{ item }}"
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- rbdmirror_group_name in group_names
- - not containerized_deployment
- - hostvars[item]['_rbdmirror_handler_called'] | default(False)
+ - not containerized_deployment | bool
+ - hostvars[item]['_rbdmirror_handler_called'] | default(False) | bool
- rbd_mirror_socket_stat.rc == 0
with_items: "{{ groups[rbdmirror_group_name] }}"
delegate_to: "{{ item }}"
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- rbdmirror_group_name in group_names
- - containerized_deployment
+ - containerized_deployment | bool
- ceph_rbd_mirror_container_stat.get('rc') == 0
- - hostvars[item]['_rbdmirror_handler_called'] | default(False)
+ - hostvars[item]['_rbdmirror_handler_called'] | default(False) | bool
- ceph_rbd_mirror_container_stat.get('stdout_lines', [])|length != 0
with_items: "{{ groups[rbdmirror_group_name] }}"
delegate_to: "{{ item }}"
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- mgr_group_name in group_names
- - not containerized_deployment
- - hostvars[item]['_mgr_handler_called'] | default(False)
+ - not containerized_deployment | bool
+ - hostvars[item]['_mgr_handler_called'] | default(False) | bool
- mgr_socket_stat.rc == 0
- - not rolling_update
+ - not rolling_update | bool
with_items: "{{ groups[mgr_group_name] }}"
delegate_to: "{{ item }}"
run_once: True
when:
# We do not want to run these checks on initial deployment (`socket.rc == 0`)
- mgr_group_name in group_names
- - containerized_deployment
+ - containerized_deployment | bool
- ceph_mgr_container_stat.get('rc') == 0
- - hostvars[item]['_mgr_handler_called'] | default(False)
+ - hostvars[item]['_mgr_handler_called'] | default(False) | bool
- ceph_mgr_container_stat.get('stdout_lines', [])|length != 0
- - not rolling_update
+ - not rolling_update | bool
with_items: "{{ groups[mgr_group_name] }}"
delegate_to: "{{ item }}"
run_once: True
when:
- iscsi_gw_group_name in group_names
- ceph_tcmu_runner_stat.get('rc') == 0
- - hostvars[item]['_tcmu_runner_handler_called'] | default(False)
+ - hostvars[item]['_tcmu_runner_handler_called'] | default(False) | bool
- ceph_tcmu_runner_stat.get('stdout_lines', [])|length != 0
with_items: "{{ groups[iscsi_gw_group_name] }}"
delegate_to: "{{ item }}"
when:
- iscsi_gw_group_name in group_names
- ceph_rbd_target_gw_stat.get('rc') == 0
- - hostvars[item]['_rbd_target_gw_handler_called'] | default(False)
+ - hostvars[item]['_rbd_target_gw_handler_called'] | default(False) | bool
- ceph_rbd_target_gw_stat.get('stdout_lines', [])|length != 0
with_items: "{{ groups[iscsi_gw_group_name] }}"
delegate_to: "{{ item }}"
when:
- iscsi_gw_group_name in group_names
- ceph_rbd_target_api_stat.get('rc') == 0
- - hostvars[item]['_rbd_target_api_handler_called'] | default(False)
+ - hostvars[item]['_rbd_target_api_handler_called'] | default(False) | bool
- ceph_rbd_target_api_stat.get('stdout_lines', [])|length != 0
with_items: "{{ groups[iscsi_gw_group_name] }}"
delegate_to: "{{ item }}"
---
- name: include check_running_containers.yml
include_tasks: check_running_containers.yml
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: include check_socket_non_container.yml
include_tasks: check_socket_non_container.yml
- when: not containerized_deployment
+ when: not containerized_deployment | bool
check_mode: no
changed_when: false
tags: firewall
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- when: (firewalld_pkg_query.get('rc', 1) == 0
- or is_atomic)
+ or is_atomic | bool)
block:
- name: start firewalld
service:
- name: include_tasks configure_firewall.yml
include_tasks: configure_firewall.yml
when:
- - configure_firewall
+ - configure_firewall | bool
- ansible_os_family in ['RedHat', 'Suse']
tags: configure_firewall
- name: include_tasks setup_ntp.yml
include_tasks: setup_ntp.yml
- when: ntp_service_enabled
+ when: ntp_service_enabled | bool
tags: configure_ntp
# Installation of NTP daemons needs to be a separate task since installations
# can't happen on Atomic
- name: install the ntp daemon
- when: not is_atomic
+ when: not is_atomic | bool
block:
- name: install ntpd
package:
owner: "root"
group: "root"
mode: "{{ ceph_keyring_permissions }}"
- when: cephx
+ when: cephx | bool
- name: deploy gateway settings, used by the ceph_iscsi_config modules
template:
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: check if a rbd pool exists
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool ls --format json"
- name: include non-container/prerequisites.yml
include_tasks: non-container/prerequisites.yml
- when: not containerized_deployment
+ when: not containerized_deployment | bool
# deploy_ssl_keys used the ansible controller to create self-signed crt/key/pub files
# and transfers them to /etc/ceph directory on each controller. SSL certs are used by
# the API for https support.
- name: include deploy_ssl_keys.yml
include_tasks: deploy_ssl_keys.yml
- when: generate_crt|bool
+ when: generate_crt | bool
- name: include non-container/configure_iscsi.yml
include_tasks: non-container/configure_iscsi.yml
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- name: include containerized.yml
include_tasks: container/containerized.yml
- when: containerized_deployment
+ when: containerized_deployment | bool
when:
- ceph_origin == 'repository'
- ceph_repository == 'dev'
- - ceph_iscsi_config_dev
+ - ceph_iscsi_config_dev | bool
block:
- name: set_fact ceph_iscsi_repos
set_fact:
include_tasks: create_mds_filesystems.yml
when:
- inventory_hostname == groups[mds_group_name] | first
- - not rolling_update
+ - not rolling_update | bool
- name: set_fact container_exec_cmd
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mds-{{ ansible_hostname }}"
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: include common.yml
include_tasks: common.yml
- name: non_containerized.yml
include_tasks: non_containerized.yml
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- name: containerized.yml
include_tasks: containerized.yml
- when: containerized_deployment
+ when: containerized_deployment | bool
owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
mode: "{{ ceph_keyring_permissions }}"
- when: cephx
+ when: cephx | bool
- name: copy ceph keyring(s) if needed
copy:
group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
mode: "{{ ceph_keyring_permissions }}"
when:
- - cephx
+ - cephx | bool
- groups.get(mgr_group_name, []) | length > 0
- copy_admin_key | bool
owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
mode: "{{ ceph_keyring_permissions }}"
- when: cephx
+ when: cephx | bool
- name: set_fact container_exec_cmd
set_fact:
container_exec_cmd_mgr: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: include common.yml
include_tasks: common.yml
- name: include pre_requisite.yml
include_tasks: pre_requisite.yml
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- name: include start_mgr.yml
include_tasks: start_mgr.yml
owner: "root"
group: "root"
mode: "0644"
- when: containerized_deployment
+ when: containerized_deployment | bool
notify: restart ceph mgrs
- name: systemd start mgr
changed_when: false
- name: tasks for MONs when cephx is enabled
- when: cephx
+ when: cephx | bool
block:
- name: fetch ceph initial keys
ceph_key:
register: config_crush_hierarchy
when:
- inventory_hostname == groups.get(mon_group_name) | last
- - create_crush_tree
+ - create_crush_tree | bool
- hostvars[item]['osd_crush_location'] is defined
- name: create configured crush rules
cp /var/lib/ceph/tmp/{{ cluster }}.mon..keyring
/etc/ceph/{{ cluster }}.mon.keyring
changed_when: false
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: create (and fix ownership of) monitor directory
file:
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
register: create_custom_admin_secret
when:
- - cephx
+ - cephx | bool
- admin_secret != 'admin_secret'
- name: set_fact ceph-authtool container command
/var/lib/ceph/tmp/{{ cluster }}.mon..keyring --import-keyring /etc/ceph/{{ cluster }}.client.admin.keyring
when:
- not create_custom_admin_secret.get('skipped')
- - cephx
+ - cephx | bool
- admin_secret != 'admin_secret'
- name: set_fact ceph-mon container command
--keyring /var/lib/ceph/tmp/{{ cluster }}.mon..keyring
args:
creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring
- when: cephx
+ when: cephx | bool
- name: ceph monitor mkfs without keyring
command: >
--fsid {{ fsid }}
args:
creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/store.db
- when: not cephx
+ when: not cephx | bool
- name: set_fact container_exec_cmd
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: include deploy_monitors.yml
include_tasks: deploy_monitors.yml
when:
# we test for both container and non-container
- (mon_socket_stat is defined and mon_socket_stat.get('rc') != 0) or (ceph_mon_container_stat is defined and ceph_mon_container_stat.get('stdout_lines', [])|length == 0)
- - not switch_to_containers | default(False)
+ - not switch_to_containers | default(False) | bool
- name: include start_monitor.yml
include_tasks: start_monitor.yml
- name: include_tasks ceph_keys.yml
include_tasks: ceph_keys.yml
- when: not switch_to_containers | default(False)
+ when: not switch_to_containers | default(False) | bool
- name: include secure_cluster.yml
include_tasks: secure_cluster.yml
when:
- - secure_cluster
+ - secure_cluster | bool
- inventory_hostname == groups[mon_group_name] | first
- name: crush_rules.yml
include_tasks: crush_rules.yml
- when: crush_rule_config
-
+ when: crush_rule_config | bool
state: directory
path: "/etc/systemd/system/ceph-mon@.service.d/"
when:
- - not containerized_deployment
+ - not containerized_deployment | bool
- ceph_mon_systemd_overrides is defined
- ansible_service_mgr == 'systemd'
config_overrides: "{{ ceph_mon_systemd_overrides | default({}) }}"
config_type: "ini"
when:
- - not containerized_deployment
+ - not containerized_deployment | bool
- ceph_mon_systemd_overrides is defined
- ansible_service_mgr == 'systemd'
group: "root"
mode: "0644"
notify: restart ceph mons
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: start the monitor service
systemd:
- { name: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: true }
- { name: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
when:
- - cephx
- - item.copy_key|bool
+ - cephx | bool
+ - item.copy_key | bool
- groups.get(mon_group_name, []) | length > 0
- name: set_fact container_exec_cmd_nfs
set_fact:
container_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: check if "{{ ceph_nfs_rgw_user }}" exists
command: "{{ container_exec_cmd_nfs | default('') }} radosgw-admin --cluster {{ cluster }} user info --uid={{ ceph_nfs_rgw_user }}"
changed_when: false
failed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
- when: nfs_obj_gw
+ when: nfs_obj_gw | bool
- name: create rgw nfs user "{{ ceph_nfs_rgw_user }}"
command: "{{ container_exec_cmd_nfs | default('') }} radosgw-admin --cluster {{ cluster }} user create --uid={{ ceph_nfs_rgw_user }} --display-name='RGW NFS User'"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- - nfs_obj_gw
+ - nfs_obj_gw | bool
- rgwuser_exists.get('rc', 1) != 0
- name: set_fact ceph_nfs_rgw_access_key
ceph_nfs_rgw_access_key: "{{ (rgwuser.stdout | from_json)['keys'][0]['access_key'] if rgwuser_exists.get('rc', 1) != 0 else (rgwuser_exists.stdout | from_json)['keys'][0]['access_key'] }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- - nfs_obj_gw
+ - nfs_obj_gw | bool
- ceph_nfs_rgw_access_key is not defined
- name: set_fact ceph_nfs_rgw_secret_key
ceph_nfs_rgw_secret_key: "{{ (rgwuser.stdout | from_json)['keys'][0]['secret_key'] if rgwuser_exists.get('rc', 1) != 0 else (rgwuser_exists.stdout | from_json)['keys'][0]['secret_key'] }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
- - nfs_obj_gw
+ - nfs_obj_gw | bool
- ceph_nfs_rgw_secret_key is not defined
- name: set_fact container_exec_cmd
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-nfs-{{ ansible_hostname }}"
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: include common.yml
include_tasks: common.yml
- name: include pre_requisite_non_container.yml
include_tasks: pre_requisite_non_container.yml
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- name: include pre_requisite_container.yml
include_tasks: pre_requisite_container.yml
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: include create_rgw_nfs_user.yml
import_tasks: create_rgw_nfs_user.yml
- name: include ganesha_selinux_fix.yml
import_tasks: ganesha_selinux_fix.yml
when:
- - not containerized_deployment
+ - not containerized_deployment | bool
- ansible_os_family == 'RedHat'
- name: copy rgw keyring when deploying internal ganesha with external ceph cluster
set_fact:
admin_keyring:
- "/etc/ceph/{{ cluster }}.client.admin.keyring"
- when: copy_admin_key
+ when: copy_admin_key | bool
- name: set_fact ceph_config_keys
set_fact:
- name: merge ceph_config_keys and admin_keyring
set_fact:
ceph_config_keys: "{{ ceph_config_keys + admin_keyring }}"
- when: copy_admin_key
+ when: copy_admin_key | bool
- name: stat for config and keys
stat:
- name: reload dbus configuration
command: "killall -SIGHUP dbus-daemon"
- when: ceph_nfs_dynamic_exports
+ when: ceph_nfs_dynamic_exports | bool
- { name: "{{ rbd_client_admin_socket_path }}", create: "{{ nfs_obj_gw }}" }
- { name: "/var/log/ceph", create: true }
- { name: "/var/run/ceph", create: true }
- when: item.create|bool
+ when: item.create | bool
- name: cephx related tasks
when:
- - cephx
+ - cephx | bool
- groups.get(mon_group_name, []) | length > 0
block:
- name: copy bootstrap cephx keys
mode: "0600"
with_items:
- { name: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: "{{ nfs_obj_gw }}" }
- when: item.copy_key|bool
+ when: item.copy_key | bool
- name: nfs object gateway related tasks
- when: nfs_obj_gw
+ when: nfs_obj_gw | bool
block:
- name: create rados gateway keyring
command: ceph --cluster {{ cluster }} --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring auth get-or-create client.rgw.{{ ansible_hostname }} osd 'allow rwx' mon 'allow rw' -o /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/keyring
block:
- name: stable repos specific tasks
when:
- - nfs_ganesha_stable
+ - nfs_ganesha_stable | bool
- ceph_repository == 'community'
block:
- name: add nfs-ganesha stable repository
- name: debian based systems - dev repos specific tasks
when:
- - nfs_ganesha_dev
+ - nfs_ganesha_dev | bool
- ceph_repository == 'dev'
block:
- name: fetch nfs-ganesha development repository
allow_unauthenticated: yes
register: result
until: result is succeeded
- when: nfs_obj_gw
+ when: nfs_obj_gw | bool
- name: install nfs rgw/cephfs gateway - debian
apt:
name: nfs-ganesha-ceph
allow_unauthenticated: yes
register: result
until: result is succeeded
- when: nfs_file_gw
+ when: nfs_file_gw | bool
- name: debian based systems - rhcs installation
when:
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
register: result
until: result is succeeded
- when: nfs_file_gw
+ when: nfs_file_gw | bool
- name: install red hat storage nfs obj gateway
apt:
name: nfs-ganesha-rgw
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
register: result
until: result is succeeded
- when: nfs_obj_gw
+ when: nfs_obj_gw | bool
- name: set_fact container_exec_cmd_nfs
set_fact:
container_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: check if rados index object exists
shell: "{{ container_exec_cmd_nfs | default('') }} rados -p {{ cephfs_data }} --cluster {{ cluster }} ls|grep {{ ceph_nfs_rados_export_index }}"
failed_when: false
register: rados_index_exists
check_mode: no
- when: ceph_nfs_rados_backend
+ when: ceph_nfs_rados_backend | bool
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
- name: create an empty rados index object
command: "{{ container_exec_cmd_nfs | default('') }} rados -p {{ cephfs_data }} --cluster {{ cluster }} put {{ ceph_nfs_rados_export_index }} /dev/null"
when:
- - ceph_nfs_rados_backend
+ - ceph_nfs_rados_backend | bool
- rados_index_exists.rc != 0
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: true
owner: "root"
group: "root"
mode: "0755"
- when: ceph_nfs_dynamic_exports
+ when: ceph_nfs_dynamic_exports | bool
- name: create exports dir index file
copy:
owner: "root"
group: "root"
mode: "0644"
- when: ceph_nfs_dynamic_exports
+ when: ceph_nfs_dynamic_exports | bool
- name: generate systemd unit file
become: true
owner: "root"
group: "root"
mode: "0644"
- when: containerized_deployment
+ when: containerized_deployment | bool
notify: restart ceph nfss
- name: systemd start nfs container
masked: no
daemon_reload: yes
when:
- - containerized_deployment
- - ceph_nfs_enable_service
+ - containerized_deployment | bool
+ - ceph_nfs_enable_service | bool
- name: start nfs gateway service
systemd:
enabled: yes
masked: no
when:
- - not containerized_deployment
- - ceph_nfs_enable_service
+ - not containerized_deployment | bool
+ - ceph_nfs_enable_service | bool
owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
mode: "0755"
- when: cephx
+ when: cephx | bool
with_items:
- /var/lib/ceph/bootstrap-osd/
- /var/lib/ceph/osd/
- { name: "/var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring", copy_key: true }
- { name: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
when:
- - cephx
- - item.copy_key|bool
+ - cephx | bool
+ - item.copy_key | bool
set_fact:
docker_env_args: -e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=0
when:
- - containerized_deployment
+ - containerized_deployment | bool
- osd_objectstore == 'filestore'
- - not dmcrypt
+ - not dmcrypt | bool
- name: set_fact docker_env_args '-e osd_bluestore=0 -e osd_filestore=1 -e osd_dmcrypt=1'
set_fact:
docker_env_args: -e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=1
when:
- - containerized_deployment
+ - containerized_deployment | bool
- osd_objectstore == 'filestore'
- - dmcrypt
+ - dmcrypt | bool
- name: set_fact docker_env_args '-e osd_bluestore=1 -e osd_filestore=0 -e osd_dmcrypt=0'
set_fact:
docker_env_args: -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=0
when:
- - containerized_deployment
+ - containerized_deployment | bool
- osd_objectstore == 'bluestore'
- - not dmcrypt
+ - not dmcrypt | bool
- name: set_fact docker_env_args '-e osd_bluestore=1 -e osd_filestore=0 -e osd_dmcrypt=1'
set_fact:
docker_env_args: -e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=1
when:
- - containerized_deployment
+ - containerized_deployment | bool
- osd_objectstore == 'bluestore'
- - dmcrypt
+ - dmcrypt | bool
register: result
until: result is succeeded
when:
- - not containerized_deployment
+ - not containerized_deployment | bool
- ansible_os_family != 'ClearLinux'
- name: install numactl when needed
register: result
until: result is succeeded
when:
- - containerized_deployment
+ - containerized_deployment | bool
- ceph_osd_numactl_opts != ""
tags: with_pkg
name: lvm2
register: result
until: result is succeeded
- when: not is_atomic
+ when: not is_atomic | bool
tags: with_pkg
- name: include_tasks common.yml
include_tasks: scenarios/lvm.yml
when:
- lvm_volumes|length > 0
- - not rolling_update|default(False)
+ - not rolling_update | default(False) | bool
- name: include_tasks scenarios/lvm-batch.yml
include_tasks: scenarios/lvm-batch.yml
when:
- devices|length > 0
- - not rolling_update|default(False)
+ - not rolling_update | default(False) | bool
- name: include_tasks start_osds.yml
include_tasks: start_osds.yml
openstack_keys_tmp: "{{ openstack_keys_tmp|default([]) + [ { 'key': item.key, 'name': item.name, 'caps': { 'mon': item.mon_cap, 'osd': item.osd_cap|default(''), 'mds': item.mds_cap|default(''), 'mgr': item.mgr_cap|default('') } , 'mode': item.mode } ] }}"
with_items: "{{ openstack_keys }}"
when:
- - not add_osd|default(False)
- - openstack_config
+ - not add_osd | default(False) | bool
+ - openstack_config | bool
- item.get('mon_cap', None)
# it's enough to assume we are running an old-fashionned syntax simply by checking the presence of mon_cap since every key needs this cap
set_fact:
openstack_keys: "{{ openstack_keys_tmp }}"
when:
- - not add_osd|default(False)
+ - not add_osd | default(False) | bool
- openstack_keys_tmp is defined
# Create the pools listed in openstack_pools
- name: include openstack_config.yml
include_tasks: openstack_config.yml
when:
- - not add_osd|default(False)
- - openstack_config
+ - not add_osd | default(False) | bool
+ - openstack_config | bool
- inventory_hostname == groups[osd_group_name] | last
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
with_items: "{{ openstack_keys }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
- when: cephx
+ when: cephx | bool
- name: fetch openstack cephx key(s)
fetch:
- "{{ openstack_keys }}"
delegate_to: "{{ item.0 }}"
when:
- - cephx
- - openstack_config
+ - cephx | bool
+ - openstack_config | bool
- item.0 != groups[mon_group_name]
---
- name: container specific tasks
- when: containerized_deployment
+ when: containerized_deployment | bool
block:
- name: umount ceph disk (if on openstack)
mount:
src: /dev/vdb
fstype: ext3
state: unmounted
- when: ceph_docker_on_openstack
+ when: ceph_docker_on_openstack | bool
- name: generate ceph osd docker run script
become: true
group: "root"
mode: "0644"
notify: restart ceph osds
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: systemd start osd
systemd:
group: "root"
mode: "0755"
register: "tmpfiles_d"
- when: disable_transparent_hugepage
+ when: disable_transparent_hugepage | bool
- name: disable transparent hugepage
template:
mode: "0644"
force: "yes"
validate: "systemd-tmpfiles --create %s"
- when: disable_transparent_hugepage
+ when: disable_transparent_hugepage | bool
- name: get default vm.min_free_kbytes
command: sysctl -b vm.min_free_kbytes
with_items:
- { name: "fs.aio-max-nr", value: "1048576", enable: (osd_objectstore == 'bluestore') }
- "{{ os_tuning_params }}"
- when: item.enable | default(true)
+ when: item.enable | default(true) | bool
-o /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring
args:
creates: /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- name: set rbd-mirror key permissions
file:
owner: "ceph"
group: "ceph"
mode: "{{ ceph_keyring_permissions }}"
- when: not containerized_deployment
+ when: not containerized_deployment | bool
---
- name: include pre_requisite.yml
include_tasks: pre_requisite.yml
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- name: include common.yml
include_tasks: common.yml
- when: cephx
+ when: cephx | bool
- name: tasks for non-containerized deployment
- when: not containerized_deployment
+ when: not containerized_deployment | bool
block:
- name: include start_rbd_mirror.yml
include_tasks: start_rbd_mirror.yml
- name: include configure_mirroring.yml
include_tasks: configure_mirroring.yml
- when: ceph_rbd_mirror_configure
+ when: ceph_rbd_mirror_configure | bool
- name: tasks for containerized deployment
- when: containerized_deployment
+ when: containerized_deployment | bool
block:
- name: set_fact container_exec_cmd
set_fact:
- { name: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring", copy_key: true }
- { name: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
when:
- - cephx
- - item.copy_key|bool
+ - cephx | bool
+ - item.copy_key | bool
- name: include_tasks pre_requisite.yml
include_tasks: pre_requisite.yml
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- name: include_tasks openstack-keystone.yml
include_tasks: openstack-keystone.yml
- when: radosgw_keystone_ssl|bool
+ when: radosgw_keystone_ssl | bool
- name: include_tasks start_radosgw.yml
include_tasks: start_radosgw.yml
- when: not containerized_deployment
+ when: not containerized_deployment | bool
- name: include_tasks docker/main.yml
include_tasks: docker/main.yml
- when: containerized_deployment
+ when: containerized_deployment | bool
- name: include_tasks multisite/main.yml
include_tasks: multisite/main.yml
- when: rgw_multisite
+ when: rgw_multisite | bool
- name: rgw pool related tasks
when: rgw_create_pools is defined
- name: include_tasks master.yml
include_tasks: master.yml
when:
- - rgw_zonemaster
- - not rgw_zonesecondary
+ - rgw_zonemaster | bool
+ - not rgw_zonesecondary | bool
- name: include_tasks secondary.yml
include_tasks: secondary.yml
when:
- - not rgw_zonemaster
- - rgw_zonesecondary
+ - not rgw_zonemaster | bool
+ - rgw_zonesecondary | bool
# Continue with common tasks
- name: add zone to rgw stanza in ceph.conf
creates: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}/keyring
changed_when: false
with_items: "{{ rgw_instances }}"
- when: cephx
+ when: cephx | bool
- name: set rados gateway instance key permissions
file:
group: "ceph"
mode: "0600"
with_items: "{{ rgw_instances }}"
- when: cephx
+ when: cephx | bool
fail:
msg: "fqdn configuration is not supported anymore. Use 'use_fqdn_yes_i_am_sure: true' if you really want to use it. See release notes for more details"
when:
- - mon_use_fqdn or mds_use_fqdn
- - not use_fqdn_yes_i_am_sure
+ - mon_use_fqdn | bool or mds_use_fqdn | bool
+ - not use_fqdn_yes_i_am_sure | bool
- name: debian based systems tasks
when: ansible_os_family == 'Debian'
fail:
msg: "ntp_daemon_type must be one of chronyd, ntpd, or timesyncd"
when:
- - ntp_service_enabled
+ - ntp_service_enabled | bool
- ntp_daemon_type not in ['chronyd', 'ntpd', 'timesyncd']
# Since NTPd can not be installed on Atomic...
fail:
msg: installation can't happen on Atomic and ntpd needs to be installed
when:
- - is_atomic | default(False)
+ - is_atomic | default(False) | bool
- ansible_os_family == 'RedHat'
- ntp_daemon_type == 'ntpd'
include_tasks: check_devices.yml
when:
- osd_group_name in group_names
- - not osd_auto_discovery | default(False)
+ - not osd_auto_discovery | default(False) | bool
- devices|default([])|length > 0
- name: include check_eth_mon.yml
include_tasks: check_rgw_multisite.yml
when:
- inventory_hostname in groups.get(rgw_group_name, [])
- - rgw_multisite
+ - rgw_multisite | bool
- name: include check_iscsi.yml
include_tasks: check_iscsi.yml
name: ceph-container-common
- import_role:
name: ceph-node-exporter
- when: dashboard_enabled
+ when: dashboard_enabled | bool
- import_role:
name: ceph-config
tags: ['ceph_update_config']
name: ceph-container-common
- import_role:
name: ceph-node-exporter
- when: dashboard_enabled
+ when: dashboard_enabled | bool
- import_role:
name: ceph-config
tags: ['ceph_update_config']
name: ceph-container-common
- import_role:
name: ceph-node-exporter
- when: dashboard_enabled
+ when: dashboard_enabled | bool
- import_role:
name: ceph-config
tags: ['ceph_update_config']
name: ceph-container-common
- import_role:
name: ceph-node-exporter
- when: dashboard_enabled
+ when: dashboard_enabled | bool
- import_role:
name: ceph-config
tags: ['ceph_update_config']
name: ceph-container-common
- import_role:
name: ceph-node-exporter
- when: dashboard_enabled
+ when: dashboard_enabled | bool
- import_role:
name: ceph-config
tags: ['ceph_update_config']
name: ceph-container-common
- import_role:
name: ceph-node-exporter
- when: dashboard_enabled
+ when: dashboard_enabled | bool
- import_role:
name: ceph-config
tags: ['ceph_update_config']
name: ceph-container-common
- import_role:
name: ceph-node-exporter
- when: dashboard_enabled
+ when: dashboard_enabled | bool
- import_role:
name: ceph-config
tags: ['ceph_update_config']
when: inventory_hostname == groups.get('clients', ['']) | first
- import_role:
name: ceph-node-exporter
- when: dashboard_enabled
+ when: dashboard_enabled | bool
- import_role:
name: ceph-config
tags: ['ceph_update_config']
name: ceph-container-common
- import_role:
name: ceph-node-exporter
- when: dashboard_enabled
+ when: dashboard_enabled | bool
- import_role:
name: ceph-config
tags: ['ceph_update_config']
name: ceph-prometheus
- import_role:
name: ceph-grafana
- when: dashboard_enabled
+ when: dashboard_enabled | bool
- hosts: '{{ (groups["grafana-server"] | default(groups["mgrs"]) | default(groups["mons"]))[0] | default(omit) }}'
become: true
tags: ['ceph_update_config']
- import_role:
name: ceph-dashboard
- when: dashboard_enabled
+ when: dashboard_enabled | bool
name: ceph-container-engine
- import_role:
name: ceph-node-exporter
- when: dashboard_enabled
+ when: dashboard_enabled | bool
- hosts: grafana-server
name: ceph-prometheus
- import_role:
name: ceph-grafana
- when: dashboard_enabled
+ when: dashboard_enabled | bool
- hosts: '{{ (groups["grafana-server"] | default(groups["mgrs"]) | default(groups["mons"]))[0] | default(omit) }}'
become: true
- import_role:
name: ceph-defaults
tags: ['ceph_update_config']
- when: dashboard_enabled
+ when: dashboard_enabled | bool
- import_role:
name: ceph-dashboard
- when: dashboard_enabled
+ when: dashboard_enabled | bool
state: present
register: result
until: result is succeeded
- when: not is_atomic
+ when: not is_atomic | bool
- name: create physical volume
command: pvcreate /dev/sdb
failed_when: false
state: present
register: result
until: result is succeeded
- when: not is_atomic
+ when: not is_atomic | bool
- name: generate and upload a random 10Mb file - containerized deployment
command: >
docker run --rm --name=rgw_multisite_test --entrypoint=/bin/bash {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} -c 'dd if=/dev/urandom of=/tmp/testinfra.img bs=1M count=10; {{ s3cmd_cmd }} mb s3://testinfra; {{ s3cmd_cmd }} put /tmp/testinfra.img s3://testinfra'
when:
- - rgw_zonemaster
- - containerized_deployment | default(False)
+ - rgw_zonemaster | default(False) | bool
+ - containerized_deployment | default(False) | bool
- name: generate and upload a random 10Mb file - non containerized
shell: >
{{ s3cmd_cmd }} mb s3://testinfra;
{{ s3cmd_cmd }} put /tmp/testinfra.img s3://testinfra
when:
- - rgw_zonemaster | default(False)
- - not containerized_deployment | default(False)
+ - rgw_zonemaster | default(False) | bool
+ - not containerized_deployment | default(False) | bool
- name: get info from replicated file - containerized deployment
command: >
docker run --rm --name=rgw_multisite_test --entrypoint=/bin/bash {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} -c '{{ s3cmd_cmd }} info s3://testinfra/testinfra.img'
register: s3cmd_info_status
when:
- - not rgw_zonemaster | default(False)
- - containerized_deployment | default(False)
+ - not rgw_zonemaster | default(False) | bool
+ - containerized_deployment | default(False) | bool
retries: 10
delay: 2
until: s3cmd_info_status.get('rc', 1) == 0
{{ s3cmd_cmd }} info s3://testinfra/testinfra.img
register: s3cmd_info_status
when:
- - not rgw_zonemaster | default(False)
- - not containerized_deployment | default(False)
+ - not rgw_zonemaster | default(False) | bool
+ - not containerized_deployment | default(False) | bool
retries: 10
delay: 2
until: s3cmd_info_status.get('rc', 1) == 0
dest: /etc/yum.repos.d
owner: root
group: root
- when: not is_atomic
+ when: not is_atomic | bool
- name: enable the rhel-7-extras-nightly repo
command: "yum-config-manager --enable rhel-7-extras-nightly"
baseurl: "{{ repo_url }}/MON/x86_64/os/"
gpgcheck: no
enabled: yes
- when: not is_atomic
+ when: not is_atomic | bool
- hosts: osds
gather_facts: false
baseurl: "{{ repo_url }}/OSD/x86_64/os/"
gpgcheck: no
enabled: yes
- when: not is_atomic
+ when: not is_atomic | bool
- name: set MTU on eth2
command: "ifconfig eth2 mtu 1400 up"
baseurl: "{{ repo_url }}/Tools/x86_64/os/"
gpgcheck: no
enabled: yes
- when: not is_atomic
+ when: not is_atomic | bool
state: present
register: result
until: result is succeeded
- when: not is_atomic
+ when: not is_atomic | bool
- name: centos based systems - configure repos
block:
state: absent
when:
- ansible_distribution == 'CentOS'
- - not is_atomic
+ - not is_atomic | bool
- name: resize logical volume for root partition to fill remaining free space
lvol:
vg: atomicos
size: +100%FREE
resizefs: yes
- when: is_atomic
+ when: is_atomic | bool