#centos_package_dependencies:
# - epel-release
-# - "{{ 'python3-libselinux' if ansible_distribution_major_version | int >= 8 else 'libselinux-python' }}"
+# - "{{ (ansible_facts['distribution_major_version'] is version('8', '>=')) | ternary('python3-libselinux', 'libselinux-python') }}"
#redhat_package_dependencies: []
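Both spellings resolve to the same data once facts are gathered; the ansible_facts dictionary form keeps working when fact-variable injection is disabled (inject_facts_as_vars = False in ansible.cfg). A minimal sketch, assuming facts have been gathered, that prints the selected package (the debug task is illustrative, not part of this change):

    - name: show selected libselinux package (illustrative)
      debug:
        msg: "{{ (ansible_facts['distribution_major_version'] is version('8', '>=')) | ternary('python3-libselinux', 'libselinux-python') }}"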
# Use the option below to specify your applicable package tree, e.g. when using non-LTS Ubuntu versions
# # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
# for more info read: https://github.com/ceph/ceph-ansible/issues/305
-#ceph_stable_distro_source: "{{ ansible_distribution_release }}"
+#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
# REPOSITORY: RHCS VERSION RED HAT STORAGE (from 5.0)
#
#ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
#ceph_stable_openstack_release_uca: queens
-#ceph_stable_release_uca: "{{ ansible_distribution_release }}-updates/{{ ceph_stable_openstack_release_uca }}"
+#ceph_stable_release_uca: "{{ ansible_facts['distribution_release'] }}-updates/{{ ceph_stable_openstack_release_uca }}"
# REPOSITORY: openSUSE OBS
#
# usually has newer Ceph releases than the normal distro repository.
#
#
-#ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_distribution_version }}/"
+#ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_facts['distribution_version'] }}/"
# REPOSITORY: DEV
#
#ceph_conf_key_directory: /etc/ceph
-#ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_os_family == 'Debian' else '167' }}"
+#ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_facts['os_family'] == 'Debian' else '167' }}"
# Permissions for keyring files in /etc/ceph
#ceph_keyring_permissions: '0600'
# global:
# foo: 1234
# bar: 5678
-# "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_hostname'] }}":
+# "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
# rgw_zone: zone1
#
#ceph_conf_overrides: {}
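Uncommented, the override sketched in the comments above would look like this in group_vars (keys and values illustrative):

    ceph_conf_overrides:
      global:
        foo: 1234
        bar: 5678
      "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
        rgw_zone: zone1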
# These options can be passed using the 'ceph_mds_docker_extra_env' variable.
# TCMU_RUNNER resource limitation
-#ceph_tcmu_runner_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+#ceph_tcmu_runner_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_tcmu_runner_docker_cpu_limit: 1
# RBD_TARGET_GW resource limitation
-#ceph_rbd_target_gw_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+#ceph_rbd_target_gw_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_rbd_target_gw_docker_cpu_limit: 1
# RBD_TARGET_API resource limitation
-#ceph_rbd_target_api_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+#ceph_rbd_target_api_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_rbd_target_api_docker_cpu_limit: 1
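To change a limit, set the corresponding variable in group_vars; a minimal sketch (values illustrative):

    ceph_tcmu_runner_docker_memory_limit: "2048m"
    ceph_tcmu_runner_docker_cpu_limit: 2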
# For the whole list of limits you can apply, see: docs.docker.com/engine/admin/resource_constraints
# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_mds_docker_extra_env' variable.
-#ceph_mds_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+#ceph_mds_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_mds_docker_cpu_limit: 4
# we currently force MDS_NAME to hostname because of a bug in ceph-docker
# fix here: https://github.com/ceph/ceph-docker/pull/770
# this will go away soon.
-#ceph_mds_docker_extra_env: -e MDS_NAME={{ ansible_hostname }}
+#ceph_mds_docker_extra_env: -e MDS_NAME={{ ansible_facts['hostname'] }}
#ceph_config_keys: [] # DON'T TOUCH ME
# For the whole list of limits you can apply, see: docs.docker.com/engine/admin/resource_constraints
# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_mgr_docker_extra_env' variable.
-#ceph_mgr_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+#ceph_mgr_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_mgr_docker_cpu_limit: 1
#ceph_mgr_docker_extra_env:
# For the whole list of limits you can apply, see: docs.docker.com/engine/admin/resource_constraints
# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_mon_docker_extra_env' variable.
-#ceph_mon_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+#ceph_mon_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_mon_docker_cpu_limit: 1
#ceph_mon_container_listen_port: 3300
#ceph_nfs_enable_service: true
# ceph-nfs systemd service uses ansible's hostname as an instance id,
-# so service name is ceph-nfs@{{ ansible_hostname }}, this is not
+# so service name is ceph-nfs@{{ ansible_facts['hostname'] }}, this is not
# ideal when ceph-nfs is managed by pacemaker across multiple hosts - in
# such a case it's better to have a constant instance id instead, which
# can be set by 'ceph_nfs_service_suffix'
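A one-line sketch of that override (value illustrative): with

    ceph_nfs_service_suffix: "nfs-ha"

the unit is named ceph-nfs@nfs-ha on every host, so pacemaker can manage a single service name.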
# they must be configured.
#ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
#ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
-#rgw_client_name: client.rgw.{{ ansible_hostname }}
+#rgw_client_name: client.rgw.{{ ansible_facts['hostname'] }}
###################
# CONFIG OVERRIDE #
# For the whole list of limits you can apply, see: docs.docker.com/engine/admin/resource_constraints
# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_osd_docker_extra_env' variable.
-#ceph_osd_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+#ceph_osd_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_osd_docker_cpu_limit: 4
# The next two variables are undefined and thus unused by default.
# For the whole list of limits you can apply, see: docs.docker.com/engine/admin/resource_constraints
# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_rbd_mirror_docker_extra_env' variable.
-#ceph_rbd_mirror_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+#ceph_rbd_mirror_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_rbd_mirror_docker_cpu_limit: 1
#ceph_rbd_mirror_docker_extra_env:
delegate_to: '{{ groups[mon_group_name][0] }}'
- name: manage nodes with cephadm
- command: "{{ ceph_cmd }} orch host add {{ ansible_hostname }} {{ ansible_default_ipv4.address }} {{ group_names | join(' ') }}"
+ command: "{{ ceph_cmd }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['default_ipv4']['address'] }} {{ group_names | join(' ') }}"
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
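Rendered against gathered facts, the task above produces a command of the shape "ceph orch host add <hostname> <ipv4 address> <inventory group names>"; hostname, address, and labels come from each host's facts and group membership (the {{ ceph_cmd }} prefix is resolved elsewhere).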
- name: add ceph label for core component
- command: "{{ ceph_cmd }} orch host label add {{ ansible_hostname }} ceph"
+ command: "{{ ceph_cmd }} orch host label add {{ ansible_facts['hostname'] }} ceph"
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
when: inventory_hostname in groups.get(mon_group_name, []) or
- name: adopt mon daemon
cephadm_adopt:
- name: "mon.{{ ansible_hostname }}"
+ name: "mon.{{ ansible_facts['hostname'] }}"
cluster: "{{ cluster }}"
image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
docker: "{{ true if container_binary == 'docker' else false }}"
firewalld: "{{ true if configure_firewall | bool else false }}"
- name: reset failed ceph-mon systemd unit
- command: 'systemctl reset-failed ceph-mon@{{ ansible_hostname }}' # noqa 303
+ command: "systemctl reset-failed ceph-mon@{{ ansible_facts['hostname'] }}" # noqa 303
changed_when: false
failed_when: false
when: containerized_deployment | bool
changed_when: false
register: ceph_health_raw
until: >
- ansible_hostname in (ceph_health_raw.stdout | from_json)["quorum_names"]
+ ansible_facts['hostname'] in (ceph_health_raw.stdout | from_json)["quorum_names"]
retries: "{{ health_mon_check_retries }}"
delay: "{{ health_mon_check_delay }}"
environment:
- name: adopt mgr daemon
cephadm_adopt:
- name: "mgr.{{ ansible_hostname }}"
+ name: "mgr.{{ ansible_facts['hostname'] }}"
cluster: "{{ cluster }}"
image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
docker: "{{ true if container_binary == 'docker' else false }}"
firewalld: "{{ true if configure_firewall | bool else false }}"
- name: reset failed ceph-mgr systemd unit
- command: 'systemctl reset-failed ceph-mgr@{{ ansible_hostname }}' # noqa 303
+ command: "systemctl reset-failed ceph-mgr@{{ ansible_facts['hostname'] }}" # noqa 303
changed_when: false
failed_when: false
when: containerized_deployment | bool
- name: stop and disable ceph-mds systemd service
service:
- name: 'ceph-mds@{{ ansible_hostname }}'
+ name: "ceph-mds@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: false
failed_when: false
when: not containerized_deployment | bool
- name: reset failed ceph-mds systemd unit
- command: 'systemctl reset-failed ceph-mds@{{ ansible_hostname }}' # noqa 303
+ command: "systemctl reset-failed ceph-mds@{{ ansible_facts['hostname'] }}" # noqa 303
changed_when: false
failed_when: false
when: containerized_deployment | bool
- name: remove legacy ceph mds data
file:
- path: '/var/lib/ceph/mds/{{ cluster }}-{{ ansible_hostname }}'
+ path: "/var/lib/ceph/mds/{{ cluster }}-{{ ansible_facts['hostname'] }}"
state: absent
- name: rgw realm/zonegroup/zone requirements
- name: stop and disable ceph-radosgw systemd service
service:
- name: 'ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}'
+ name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: stopped
enabled: false
failed_when: false
when: not containerized_deployment | bool
- name: reset failed ceph-radosgw systemd unit
- command: 'systemctl reset-failed ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}' # noqa 303
+ command: "systemctl reset-failed ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" # noqa 303
changed_when: false
failed_when: false
loop: '{{ rgw_instances }}'
- name: remove legacy ceph radosgw data
file:
- path: '/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}'
+ path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: absent
loop: '{{ rgw_instances }}'
- name: remove legacy ceph radosgw directory
file:
- path: '/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}'
+ path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}"
state: absent
- name: redeploy rbd-mirror daemons
- name: stop and disable rbd-mirror systemd service
service:
- name: 'ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}'
+ name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
state: stopped
enabled: false
failed_when: false
when: not containerized_deployment | bool
- name: reset failed rbd-mirror systemd unit
- command: 'systemctl reset-failed ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}' # noqa 303
+ command: "systemctl reset-failed ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}" # noqa 303
changed_when: false
failed_when: false
when: containerized_deployment | bool
- name: adopt alertmanager daemon
cephadm_adopt:
- name: "alertmanager.{{ ansible_hostname }}"
+ name: "alertmanager.{{ ansible_facts['hostname'] }}"
cluster: "{{ cluster }}"
image: "{{ alertmanager_container_image }}"
docker: "{{ true if container_binary == 'docker' else false }}"
- name: adopt prometheus daemon
cephadm_adopt:
- name: "prometheus.{{ ansible_hostname }}"
+ name: "prometheus.{{ ansible_facts['hostname'] }}"
cluster: "{{ cluster }}"
image: "{{ prometheus_container_image }}"
docker: "{{ true if container_binary == 'docker' else false }}"
- name: adopt grafana daemon
cephadm_adopt:
- name: "grafana.{{ ansible_hostname }}"
+ name: "grafana.{{ ansible_facts['hostname'] }}"
cluster: "{{ cluster }}"
image: "{{ grafana_container_image }}"
docker: "{{ true if container_binary == 'docker' else false }}"
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: manage nodes with cephadm
- command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host add {{ ansible_hostname }} {{ ansible_default_ipv4.address }} {{ group_names | join(' ') }}"
+ command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['default_ipv4']['address'] }} {{ group_names | join(' ') }}"
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: add ceph label for core component
- command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host label add {{ ansible_hostname }} ceph"
+ command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host label add {{ ansible_facts['hostname'] }} ceph"
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
when: inventory_hostname in groups.get(mon_group_name, []) or
run_once: true
- name: get all nfs-ganesha mount points
- command: grep "{{ hostvars[item]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
+ command: grep "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
register: nfs_ganesha_mount_points
failed_when: false
with_items: "{{ groups[nfs_group_name] }}"
name: nfs-ganesha
state: stopped
failed_when: false
- when: ansible_service_mgr == 'systemd'
+ when: ansible_facts['service_mgr'] == 'systemd'
- name: purge node-exporter
hosts:
- name: stop ceph mdss with systemd
service:
- name: ceph-mds@{{ ansible_hostname }}
+ name: ceph-mds@{{ ansible_facts['hostname'] }}
state: stopped
enabled: no
failed_when: false
- name: stop ceph mgrs with systemd
service:
- name: ceph-mgr@{{ ansible_hostname }}
+ name: ceph-mgr@{{ ansible_facts['hostname'] }}
state: stopped
enabled: no
failed_when: false
- when: ansible_service_mgr == 'systemd'
+ when: ansible_facts['service_mgr'] == 'systemd'
- name: purge rgwloadbalancer cluster
- name: stop ceph rgws with systemd
service:
- name: "ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}"
+ name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: stopped
enabled: no
failed_when: false
- name: stop ceph rbd mirror with systemd
service:
- name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
+ name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
state: stopped
failed_when: false
become: false
wait_for:
port: 22
- host: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
+ host: "{{ hostvars[inventory_hostname]['ansible_facts']['default_ipv4']['address'] }}"
state: started
delay: 10
timeout: 500
state: stopped
enabled: no
with_items: "{{ osd_ids.stdout_lines }}"
- when: ansible_service_mgr == 'systemd'
+ when: ansible_facts['service_mgr'] == 'systemd'
- name: remove ceph udev rules
file:
- name: stop ceph mons with systemd
service:
- name: "ceph-{{ item }}@{{ ansible_hostname }}"
+ name: "ceph-{{ item }}@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
failed_when: false
yum:
name: "{{ ceph_packages }}"
state: absent
- when: ansible_pkg_mgr == 'yum'
+ when: ansible_facts['pkg_mgr'] == 'yum'
- name: purge ceph packages with dnf
dnf:
name: "{{ ceph_packages }}"
state: absent
- when: ansible_pkg_mgr == 'dnf'
+ when: ansible_facts['pkg_mgr'] == 'dnf'
- name: purge ceph packages with apt
apt:
name: "{{ ceph_packages }}"
state: absent
purge: true
- when: ansible_pkg_mgr == 'apt'
+ when: ansible_facts['pkg_mgr'] == 'apt'
- name: purge remaining ceph packages with yum
yum:
name: "{{ ceph_remaining_packages }}"
state: absent
when:
- - ansible_pkg_mgr == 'yum'
+ - ansible_facts['pkg_mgr'] == 'yum'
- purge_all_packages | bool
- name: purge remaining ceph packages with dnf
name: "{{ ceph_remaining_packages }}"
state: absent
when:
- - ansible_pkg_mgr == 'dnf'
+ - ansible_facts['pkg_mgr'] == 'dnf'
- purge_all_packages | bool
- name: purge remaining ceph packages with apt
name: "{{ ceph_remaining_packages }}"
state: absent
when:
- - ansible_pkg_mgr == 'apt'
+ - ansible_facts['pkg_mgr'] == 'apt'
- purge_all_packages | bool
- name: purge extra packages with yum
name: "{{ extra_packages }}"
state: absent
when:
- - ansible_pkg_mgr == 'yum'
+ - ansible_facts['pkg_mgr'] == 'yum'
- purge_all_packages | bool
- name: purge extra packages with dnf
name: "{{ extra_packages }}"
state: absent
when:
- - ansible_pkg_mgr == 'dnf'
+ - ansible_facts['pkg_mgr'] == 'dnf'
- purge_all_packages | bool
- name: purge extra packages with apt
name: "{{ extra_packages }}"
state: absent
when:
- - ansible_pkg_mgr == 'apt'
+ - ansible_facts['pkg_mgr'] == 'apt'
- purge_all_packages | bool
- name: remove config and any ceph socket left
- name: purge dnf cache
command: dnf clean all
- when: ansible_pkg_mgr == 'dnf'
+ when: ansible_facts['pkg_mgr'] == 'dnf'
- name: purge rpm cache in /tmp
file:
- name: clean apt
command: apt-get clean # noqa 303
- when: ansible_pkg_mgr == 'apt'
+ when: ansible_facts['pkg_mgr'] == 'apt'
- name: purge ceph repo file in /etc/yum.repos.d
file:
- ceph-dev
- ceph_stable
- rh_storage
- when: ansible_os_family == 'RedHat'
+ when: ansible_facts['os_family'] == 'RedHat'
- name: check for anything running ceph
command: "ps -u ceph -U ceph"
path: "{{ item.path }}"
state: absent
with_items: "{{ systemd_files.files }}"
- when: ansible_service_mgr == 'systemd'
+ when: ansible_facts['service_mgr'] == 'systemd'
- name: purge fetch directory
run_once: true
- name: get all nfs-ganesha mount points
- command: grep "{{ hostvars[item]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
+ command: grep "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
register: nfs_ganesha_mount_points
failed_when: false
with_items: "{{ groups[nfs_group_name] }}"
- name: disable ceph nfs service
service:
- name: "ceph-nfs@{{ ansible_hostname }}"
+ name: "ceph-nfs@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
ignore_errors: true
path: /etc/systemd/system/ceph-nfs@.service
state: absent
- - name: remove ceph nfs directories for "{{ ansible_hostname }}"
+ - name: remove ceph nfs directories for "{{ ansible_facts['hostname'] }}"
file:
path: "{{ item }}"
state: absent
- name: disable ceph mds service
service:
- name: "ceph-mds@{{ ansible_hostname }}"
+ name: "ceph-mds@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
ignore_errors: true
- name: disable ceph mgr service
service:
- name: "ceph-mgr@{{ ansible_hostname }}"
+ name: "ceph-mgr@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
ignore_errors: true
- name: disable ceph rgw service
service:
- name: "ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}"
+ name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: stopped
enabled: no
failed_when: false
- name: disable ceph rbd-mirror service
service:
- name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
+ name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
ignore_errors: true
enabled: no
ignore_errors: true
with_items:
- - "ceph-mgr@{{ ansible_hostname }}"
- - "ceph-mon@{{ ansible_hostname }}"
+ - "ceph-mgr@{{ ansible_facts['hostname'] }}"
+ - "ceph-mon@{{ ansible_facts['hostname'] }}"
- name: remove ceph mon and mgr service
file:
tasks:
- name: stop ceph-crash container
service:
- name: "ceph-crash@{{ ansible_hostname }}"
+ name: "ceph-crash@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
failed_when: false
state: absent
update_cache: yes
autoremove: yes
- when: ansible_os_family == 'Debian'
+ when: ansible_facts['os_family'] == 'Debian'
- name: red hat based systems tasks
block:
args:
warn: no
when:
- ansible_pkg_mgr == "yum"
+ ansible_facts['pkg_mgr'] == "yum"
- name: dnf related tasks on red hat
block:
args:
warn: no
when:
- ansible_pkg_mgr == "dnf"
+ ansible_facts['pkg_mgr'] == "dnf"
when:
- ansible_os_family == 'RedHat' and
+ ansible_facts['os_family'] == 'RedHat' and
not is_atomic
- name: find any service-cid file left
become: true
tasks:
- - name: purge ceph directories for "{{ ansible_hostname }}" and ceph socket
+ - name: purge ceph directories for "{{ ansible_facts['hostname'] }}" and ceph socket
file:
path: "{{ item }}"
state: absent
- name: set_fact container_exec_cmd
set_fact:
- container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
when: containerized_deployment | bool
- name: get iscsi gateway list
enabled: no
masked: yes
with_items:
- - "{{ ansible_hostname }}"
- - "{{ ansible_fqdn }}"
+ - "{{ ansible_facts['hostname'] }}"
+ - "{{ ansible_facts['fqdn'] }}"
# only mask the service for mgr because it must be upgraded
# after ALL monitors, even when collocated
- name: mask the mgr service
systemd:
- name: ceph-mgr@{{ ansible_hostname }}
+ name: ceph-mgr@{{ ansible_facts['hostname'] }}
masked: yes
when: inventory_hostname in groups[mgr_group_name] | default([])
or groups[mgr_group_name] | default([]) | length == 0
- name: start ceph mgr
systemd:
- name: ceph-mgr@{{ ansible_hostname }}
+ name: ceph-mgr@{{ ansible_facts['hostname'] }}
state: started
enabled: yes
masked: no
register: ceph_health_raw
until:
- ceph_health_raw.rc == 0
- - (hostvars[inventory_hostname]['ansible_hostname'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"] or
- hostvars[inventory_hostname]['ansible_fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
+ - (hostvars[inventory_hostname]['ansible_facts']['hostname'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"] or
+ hostvars[inventory_hostname]['ansible_facts']['fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
retries: "{{ health_mon_check_retries }}"
delay: "{{ health_mon_check_delay }}"
when: not containerized_deployment | bool
- name: container | waiting for the containerized monitor to join the quorum...
command: >
- {{ container_binary }} exec ceph-mon-{{ ansible_hostname }} ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" quorum_status --format json
+ {{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }} ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" quorum_status --format json
register: ceph_health_raw
until:
- ceph_health_raw.rc == 0
- - (hostvars[inventory_hostname]['ansible_hostname'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"] or
- hostvars[inventory_hostname]['ansible_fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
+ - (hostvars[inventory_hostname]['ansible_facts']['hostname'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"] or
+ hostvars[inventory_hostname]['ansible_facts']['fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
retries: "{{ health_mon_check_retries }}"
delay: "{{ health_mon_check_delay }}"
when: containerized_deployment | bool
block:
- name: stop ceph mgr
systemd:
- name: ceph-mgr@{{ ansible_hostname }}
+ name: ceph-mgr@{{ ansible_facts['hostname'] }}
state: stopped
masked: yes
# or if we run a Ceph cluster before Luminous
- name: stop ceph mgr
systemd:
- name: ceph-mgr@{{ ansible_hostname }}
+ name: ceph-mgr@{{ ansible_facts['hostname'] }}
state: stopped
enabled: no
masked: yes
- name: set_fact container_exec_cmd_osd
set_fact:
- container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+ container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
when: containerized_deployment | bool
- name: stop ceph osd
set_fact:
mds_active_host: "{{ [hostvars[item]['inventory_hostname']] }}"
with_items: "{{ groups[mds_group_name] }}"
- when: hostvars[item]['ansible_hostname'] == mds_active_name
+ when: hostvars[item]['ansible_facts']['hostname'] == mds_active_name
- name: create standby_mdss group
add_host:
- name: stop standby ceph mds
systemd:
- name: "ceph-mds@{{ hostvars[item]['ansible_hostname'] }}"
+ name: "ceph-mds@{{ hostvars[item]['ansible_facts']['hostname'] }}"
state: stopped
enabled: no
delegate_to: "{{ item }}"
# somehow, having a single task doesn't work in containerized context
- name: mask systemd units for standby ceph mds
systemd:
- name: "ceph-mds@{{ hostvars[item]['ansible_hostname'] }}"
+ name: "ceph-mds@{{ hostvars[item]['ansible_facts']['hostname'] }}"
masked: yes
delegate_to: "{{ item }}"
with_items: "{{ groups['standby_mdss'] }}"
- name: prevent restart from the packaging
systemd:
- name: ceph-mds@{{ ansible_hostname }}
+ name: ceph-mds@{{ ansible_facts['hostname'] }}
enabled: no
masked: yes
when: not containerized_deployment | bool
- name: restart ceph mds
systemd:
- name: ceph-mds@{{ ansible_hostname }}
+ name: ceph-mds@{{ ansible_facts['hostname'] }}
state: restarted
enabled: yes
masked: no
when: not containerized_deployment | bool
- name: restart active mds
- command: "{{ container_binary }} stop ceph-mds-{{ ansible_hostname }}"
+ command: "{{ container_binary }} stop ceph-mds-{{ ansible_facts['hostname'] }}"
changed_when: false
when: containerized_deployment | bool
- name: prevent restarts from the packaging
systemd:
- name: ceph-mds@{{ ansible_hostname }}
+ name: ceph-mds@{{ ansible_facts['hostname'] }}
enabled: no
masked: yes
when: not containerized_deployment | bool
- name: stop ceph rgw when upgrading from stable-3.2
systemd:
- name: ceph-radosgw@rgw.{{ ansible_hostname }}
+ name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}
state: stopped
enabled: no
masked: yes
- name: stop ceph rgw
systemd:
- name: ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}
+ name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
state: stopped
enabled: no
masked: yes
tasks:
- name: stop ceph rbd mirror
systemd:
- name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
+ name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
masked: yes
- name: systemd stop nfs container
systemd:
- name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_hostname) }}
+ name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}
state: stopped
enabled: no
masked: yes
tasks:
- name: stop the ceph-crash service
systemd:
- name: "{{ 'ceph-crash@' + ansible_hostname if containerized_deployment | bool else 'ceph-crash.service' }}"
+ name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}"
state: stopped
enabled: no
masked: yes
tasks_from: container_binary.yml
- name: container | disallow pre-quincy OSDs and enable all new quincy-only functionality
- command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} osd require-osd-release quincy"
+ command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} osd require-osd-release quincy"
delegate_to: "{{ groups[mon_group_name][0] }}"
run_once: True
when:
- name: set_fact container_exec_cmd_status
set_fact:
- container_exec_cmd_status: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+ container_exec_cmd_status: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
when: containerized_deployment | bool
- name: show ceph status
- name: set_fact container_exec_cmd for mon0
set_fact:
- container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
when: containerized_deployment | bool
- name: exit playbook, if can not connect to the cluster
- name: set_fact mds_to_kill_hostname
set_fact:
- mds_to_kill_hostname: "{{ hostvars[mds_to_kill]['ansible_hostname'] }}"
+ mds_to_kill_hostname: "{{ hostvars[mds_to_kill]['ansible_facts']['hostname'] }}"
tasks:
# get rid of this as soon as "systemctl stop ceph-mds@$HOSTNAME" also
- name: set_fact container_exec_cmd
when: containerized_deployment | bool
set_fact:
- container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
- name: exit playbook, if can not connect to the cluster
command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
- name: set_fact mgr_to_kill_hostname
set_fact:
- mgr_to_kill_hostname: "{{ hostvars[mgr_to_kill]['ansible_hostname'] }}"
+ mgr_to_kill_hostname: "{{ hostvars[mgr_to_kill]['ansible_facts']['hostname'] }}"
tasks:
- name: stop manager services and verify it
- name: "set_fact container_exec_cmd build {{ container_binary }} exec command (containerized)"
set_fact:
- container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }}"
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[mon_host]['ansible_facts']['hostname'] }}"
when: containerized_deployment | bool
- name: exit playbook, if can not connect to the cluster
- name: set_fact mon_to_kill_hostname
set_fact:
- mon_to_kill_hostname: "{{ hostvars[mon_to_kill]['ansible_hostname'] }}"
+ mon_to_kill_hostname: "{{ hostvars[mon_to_kill]['ansible_facts']['hostname'] }}"
- name: stop monitor service(s)
service:
post_tasks:
- name: set_fact container_exec_cmd build docker exec command (containerized)
set_fact:
- container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
when: containerized_deployment | bool
- name: exit playbook, if can not connect to the cluster
with_nested:
- "{{ groups.get(osd_group_name) }}"
- "{{ osd_hosts }}"
- when: hostvars[item.0]['ansible_hostname'] == item.1
+ when: hostvars[item.0]['ansible_facts']['hostname'] == item.1
- name: get ceph-volume lvm list data
ceph_volume:
- name: set_fact container_exec_cmd for mon0
when: containerized_deployment | bool
set_fact:
- container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
- name: exit playbook, if can not connect to the cluster
command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} service dump -f json"
- name: set_fact rbdmirror_to_kill_hostname
set_fact:
- rbdmirror_to_kill_hostname: "{{ hostvars[rbdmirror_to_kill]['ansible_hostname'] }}"
+ rbdmirror_to_kill_hostname: "{{ hostvars[rbdmirror_to_kill]['ansible_facts']['hostname'] }}"
- name: set_fact rbdmirror_gids
set_fact:
- name: set_fact container_exec_cmd for mon0
set_fact:
- container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
when: containerized_deployment | bool
- name: exit playbook, if can not connect to the cluster
set_fact:
rgw_host: '{{ item }}'
with_items: '{{ groups[rgw_group_name] }}'
- when: hostvars[item]['ansible_hostname'] == rgw_to_kill.split('.')[0]
+ when: hostvars[item]['ansible_facts']['hostname'] == rgw_to_kill.split('.')[0]
- name: stop rgw service
service:
- name: stop non-containerized ceph mon
service:
- name: "ceph-mon@{{ ansible_hostname }}"
+ name: "ceph-mon@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
when: ldb_files.rc == 0
- name: copy mon initial keyring in /etc/ceph to satisfy fetch config task in ceph-container-common
- command: cp /var/lib/ceph/mon/{{ cluster }}-{{ ansible_hostname }}/keyring /etc/ceph/{{ cluster }}.mon.keyring
+ command: cp /var/lib/ceph/mon/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring /etc/ceph/{{ cluster }}.mon.keyring
args:
creates: /etc/ceph/{{ cluster }}.mon.keyring
changed_when: false
- name: waiting for the monitor to join the quorum...
command: "{{ container_binary }} run --rm -v /etc/ceph:/etc/ceph:z --entrypoint=ceph {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} --cluster {{ cluster }} quorum_status --format json"
register: ceph_health_raw
- until: ansible_hostname in (ceph_health_raw.stdout | trim | from_json)["quorum_names"]
+ until: ansible_facts['hostname'] in (ceph_health_raw.stdout | trim | from_json)["quorum_names"]
changed_when: false
retries: "{{ health_mon_check_retries }}"
delay: "{{ health_mon_check_delay }}"
# will not exist
- name: stop non-containerized ceph mgr(s)
service:
- name: "ceph-mgr@{{ ansible_hostname }}"
+ name: "ceph-mgr@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
failed_when: false
post_tasks:
- name: container - waiting for clean pgs...
command: >
- {{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} pg stat --format json
+ {{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} pg stat --format json
register: ceph_health_post
until: >
(((ceph_health_post.stdout | from_json).pg_summary.num_pg_by_state | length) > 0)
- name: stop non-containerized ceph mds(s)
service:
- name: "ceph-mds@{{ ansible_hostname }}"
+ name: "ceph-mds@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
tasks:
- name: stop non-containerized ceph rgw(s)
service:
- name: "ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}"
+ name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: stopped
enabled: no
with_items: "{{ rgw_instances }}"
pre_tasks:
- name: stop non-containerized ceph rbd mirror(s)
service:
- name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
+ name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
- name: Check if the node has been migrated already
stat: >
- path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/migration_completed
+ path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/migration_completed
register: migration_completed
failed_when: false
- name: Check for failed run
stat: >
- path=/var/lib/ceph/{{ ansible_hostname }}.tar
+ path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
register: mon_archive_leftover
- fail: msg="Looks like an archive is already there, please remove it!"
when: migration_completed.stat.exists == False and mon_archive_leftover.stat.exists == True
- name: Compress the store as much as possible
- command: ceph tell mon.{{ ansible_hostname }} compact
+ command: ceph tell mon.{{ ansible_facts['hostname'] }} compact
when: migration_completed.stat.exists == False
- name: Check if sysvinit
stat: >
- path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/sysvinit
+ path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/sysvinit
register: monsysvinit
changed_when: False
- name: Check if upstart
stat: >
- path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/upstart
+ path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/upstart
register: monupstart
changed_when: False
service: >
name=ceph-mon
state=restarted
- args=id={{ ansible_hostname }}
+ args=id={{ ansible_facts['hostname'] }}
when: monupstart.stat.exists == True and migration_completed.stat.exists == False
- name: Restart the Monitor after compaction (Sysvinit)
service: >
name=ceph-mon
state=stopped
- args=id={{ ansible_hostname }}
+ args=id={{ ansible_facts['hostname'] }}
when: monupstart.stat.exists == True and migration_completed.stat.exists == False
- name: Stop the monitor (Sysvinit)
# NOTE (leseb): should we convert upstart to sysvinit here already?
- name: Archive monitor stores
shell: >
- tar -cpvzf - --one-file-system . /etc/ceph/* | cat > {{ ansible_hostname }}.tar
+ tar -cpvzf - --one-file-system . /etc/ceph/* | cat > {{ ansible_facts['hostname'] }}.tar
chdir=/var/lib/ceph/
- creates={{ ansible_hostname }}.tar
+ creates={{ ansible_facts['hostname'] }}.tar
when: migration_completed.stat.exists == False
- name: Scp the Monitor store
fetch: >
- src=/var/lib/ceph/{{ ansible_hostname }}.tar
- dest={{ backup_dir }}/monitors-backups/{{ ansible_hostname }}.tar
+ src=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
+ dest={{ backup_dir }}/monitors-backups/{{ ansible_facts['hostname'] }}.tar
flat=yes
when: migration_completed.stat.exists == False
- name: Check if sysvinit
stat: >
- path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/sysvinit
+ path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/sysvinit
register: monsysvinit
changed_when: False
- name: Check if upstart
stat: >
- path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/upstart
+ path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/upstart
register: monupstart
changed_when: False
service: >
name=ceph-mon
state=stopped
- args=id={{ ansible_hostname }}
+ args=id={{ ansible_facts['hostname'] }}
when: monupstart.stat.exists == True and migration_completed.stat.exists == False
- name: Make sure the monitor is stopped (Sysvinit)
# NOTE (leseb): 'creates' was added in Ansible 1.6
- name: Copy and unarchive the monitor store
unarchive: >
- src={{ backup_dir }}/monitors-backups/{{ ansible_hostname }}.tar
+ src={{ backup_dir }}/monitors-backups/{{ ansible_facts['hostname'] }}.tar
dest=/var/lib/ceph/
copy=yes
mode=0600
- name: Waiting for the monitor to join the quorum...
shell: >
- ceph -s | grep monmap | sed 's/.*quorum//' | egrep -q {{ ansible_hostname }}
+ ceph -s | grep monmap | sed 's/.*quorum//' | egrep -q {{ ansible_facts['hostname'] }}
register: result
until: result.rc == 0
retries: 5
- name: Done moving to the next monitor
file: >
- path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/migration_completed
+ path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/migration_completed
state=touch
owner=root
group=root
- name: Check for failed run
stat: >
- path=/var/lib/ceph/{{ ansible_hostname }}.tar
+ path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
register: osd_archive_leftover
- fail: msg="Looks like an archive is already there, please remove it!"
- name: Archive ceph configs
shell: >
- tar -cpvzf - --one-file-system . /etc/ceph/ceph.conf | cat > {{ ansible_hostname }}.tar
+ tar -cpvzf - --one-file-system . /etc/ceph/ceph.conf | cat > {{ ansible_facts['hostname'] }}.tar
chdir=/var/lib/ceph/
- creates={{ ansible_hostname }}.tar
+ creates={{ ansible_facts['hostname'] }}.tar
when: migration_completed.stat.exists == False
- name: Create backup directory
- name: Scp OSDs dirs and configs
fetch: >
- src=/var/lib/ceph/{{ ansible_hostname }}.tar
+ src=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
dest={{ backup_dir }}/osds-backups/
flat=yes
when: migration_completed.stat.exists == False
# NOTE (leseb): 'creates' was added in Ansible 1.6
- name: Copy and unarchive the OSD configs
unarchive: >
- src={{ backup_dir }}/osds-backups/{{ ansible_hostname }}.tar
+ src={{ backup_dir }}/osds-backups/{{ ansible_facts['hostname'] }}.tar
dest=/var/lib/ceph/
copy=yes
mode=0600
- name: Check for failed run
stat: >
- path=/var/lib/ceph/{{ ansible_hostname }}.tar
+ path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
register: rgw_archive_leftover
- fail: msg="Looks like an archive is already there, please remove it!"
- name: Archive rados gateway configs
shell: >
- tar -cpvzf - --one-file-system . /etc/ceph/* | cat > {{ ansible_hostname }}.tar
+ tar -cpvzf - --one-file-system . /etc/ceph/* | cat > {{ ansible_facts['hostname'] }}.tar
chdir=/var/lib/ceph/
- creates={{ ansible_hostname }}.tar
+ creates={{ ansible_facts['hostname'] }}.tar
when: migration_completed.stat.exists == False
- name: Create backup directory
- name: Scp RGWs dirs and configs
fetch: >
- src=/var/lib/ceph/{{ ansible_hostname }}.tar
+ src=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
dest={{ backup_dir }}/rgws-backups/
flat=yes
when: migration_completed.stat.exists == False
# NOTE (leseb): 'creates' was added in Ansible 1.6
- name: Copy and unarchive the RGW configs
unarchive: >
- src={{ backup_dir }}/rgws-backups/{{ ansible_hostname }}.tar
+ src={{ backup_dir }}/rgws-backups/{{ ansible_facts['hostname'] }}.tar
dest=/var/lib/ceph/
copy=yes
mode=0600
include_vars: "{{ item }}"
with_first_found:
- files:
- - "host_vars/{{ ansible_hostname }}.yml"
+ - "host_vars/{{ ansible_facts['hostname'] }}.yml"
- "host_vars/default.yml"
skip: true
- name: exit playbook, if devices not defined
fail:
- msg: "devices must be define in host_vars/default.yml or host_vars/{{ ansible_hostname }}.yml"
+ msg: "devices must be define in host_vars/default.yml or host_vars/{{ ansible_facts['hostname'] }}.yml"
when: devices is not defined
- name: install sgdisk(gdisk)
- set_fact:
owner: 167
group: 167
- when: ansible_os_family == "RedHat"
+ when: ansible_facts['os_family'] == "RedHat"
- set_fact:
owner: 64045
group: 64045
- when: ansible_os_family == "Debian"
+ when: ansible_facts['os_family'] == "Debian"
- name: change partitions ownership
file:
post_tasks:
- name: set_fact container_exec_cmd build docker exec command (containerized)
set_fact:
- container_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+ container_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
when: containerized_deployment | bool
- name: exit playbook, if can not connect to the cluster
ceph_conf_overrides:
- "client.rgw.{{ hostvars[inventory_hostname]['ansible_hostname'] }}":
+ "client.rgw.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
"rgw keystone api version": "2"
"rgw keystone url": "http://192.168.0.1:35357"
"rgw keystone admin token": "password"
"rgw keystone token cache size": "10000"
"rgw keystone revocation interval": "900"
"rgw s3 auth use keystone": "true"
- "nss db path": "/var/lib/ceph/radosgw/ceph-radosgw.{{ ansible_hostname }}/nss"
+ "nss db path": "/var/lib/ceph/radosgw/ceph-radosgw.{{ ansible_facts['hostname'] }}/nss"
# NOTE (leseb): to authenticate with Keystone you have two options:
ceph_conf_overrides:
- "client.rgw.{{ hostvars[inventory_hostname]['ansible_hostname'] }}":
+ "client.rgw.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
"rgw keystone api version": "3"
"rgw keystone url": "http://192.168.0.1:35357"
"rgw keystone admin token": "password"
"rgw keystone token cache size": "10000"
"rgw keystone revocation interval": "900"
"rgw s3 auth use keystone": "true"
- "nss db path": "/var/lib/ceph/radosgw/ceph-radosgw.{{ ansible_hostname }}/nss"
+ "nss db path": "/var/lib/ceph/radosgw/ceph-radosgw.{{ ansible_facts['hostname'] }}/nss"
# NOTE (leseb): to authenticate with Keystone you have two options:
# The double quotes are important, do NOT remove them.
ceph_conf_overrides:
- "client.rgw.{{ hostvars[inventory_hostname]['ansible_hostname'] }}":
+ "client.rgw.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
rgw enable static website = true
rgw dns s3website name = objects-website-region.domain.com
# The double quotes are important, do NOT remove them.
ceph_conf_overrides:
- "client.rgw.{{ hostvars[inventory_hostname]['ansible_hostname'] }}":
+ "client.rgw.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
rgw enable usage log = true
rgw usage log tick interval = 30
rgw usage log flush threshold = 1024
group_by:
key: _filtered_clients
parents: "{{ client_group_name }}"
- when: (ansible_architecture == 'x86_64') or (not containerized_deployment | bool)
+ when: (ansible_facts['architecture'] == 'x86_64') or (not containerized_deployment | bool)
- name: set_fact delegated_node
set_fact:
create: yes
line: "CLUSTER={{ cluster }}"
regexp: "^CLUSTER="
- when: ansible_os_family in ["RedHat", "Suse"]
+ when: ansible_facts['os_family'] in ["RedHat", "Suse"]
# NOTE(leseb): we are performing the following check
# to ensure any Jewel installation will not fail.
# - All previous versions from Canonical
# - Infernalis from ceph.com
- name: debian based systems - configure cluster name
- when: ansible_os_family == "Debian"
+ when: ansible_facts['os_family'] == "Debian"
block:
- name: check /etc/default/ceph exist
stat:
regexp: "^TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES="
line: "TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }}"
when:
- - ansible_os_family == 'Debian'
+ - ansible_facts['os_family'] == 'Debian'
- etc_default_ceph.stat.exists
notify: restart ceph osds
create: yes
regexp: "^TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES="
line: "TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }}"
- when: ansible_os_family == 'RedHat'
+ when: ansible_facts['os_family'] == 'RedHat'
notify: restart ceph osds
args:
warn: no
changed_when: false
- when: ansible_pkg_mgr == 'yum'
+ when: ansible_facts['pkg_mgr'] == 'yum'
- name: configure debian ceph stable community repository
apt_repository:
- repo: "deb {{ ceph_stable_repo }} {{ ceph_stable_distro_source | default(ansible_distribution_release) }} main"
+ repo: "deb {{ ceph_stable_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main"
state: present
update_cache: yes
- name: configure debian custom repository
apt_repository:
- repo: "deb {{ ceph_custom_repo }} {{ ansible_distribution_release }} main"
+ repo: "deb {{ ceph_custom_repo }} {{ ansible_facts['distribution_release'] }} main"
state: present
update_cache: yes
---
- name: fetch ceph debian development repository
uri:
- url: https://shaman.ceph.com/api/repos/ceph/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/{{ ansible_distribution | lower }}/{{ ansible_distribution_release }}/repo
+ url: https://shaman.ceph.com/api/repos/ceph/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_release'] }}/repo
return_content: yes
register: ceph_dev_deb_repo
name: "{{ debian_ceph_pkgs | unique }}"
update_cache: no
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
- default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else '' }}{{ ansible_distribution_release ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
+ default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else '' }}{{ ansible_facts['distribution_release'] ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
register: result
until: result is succeeded
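Rendered, the default_release expression above yields, for example, "focal-backports" on an Ubuntu focal host with ceph_origin == 'distro' and ceph_use_distro_backports enabled, and the UCA release string when ceph_repository == 'uca' (host and values illustrative).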
state: present
register: result
until: result is succeeded
- when: ansible_distribution == 'RedHat'
+ when: ansible_facts['distribution'] == 'RedHat'
- name: install centos dependencies
yum:
state: present
register: result
until: result is succeeded
- when: ansible_distribution == 'CentOS'
+ when: ansible_facts['distribution'] == 'CentOS'
- name: install redhat ceph packages
package:
---
- name: enable red hat storage monitor repository
rhsm_repository:
- name: "rhceph-{{ ceph_rhcs_version }}-mon-for-rhel-8-{{ ansible_architecture }}-rpms"
+ name: "rhceph-{{ ceph_rhcs_version }}-mon-for-rhel-8-{{ ansible_facts['architecture'] }}-rpms"
when: (mon_group_name in group_names or mgr_group_name in group_names)
- name: enable red hat storage osd repository
rhsm_repository:
- name: "rhceph-{{ ceph_rhcs_version }}-osd-for-rhel-8-{{ ansible_architecture }}-rpms"
+ name: "rhceph-{{ ceph_rhcs_version }}-osd-for-rhel-8-{{ ansible_facts['architecture'] }}-rpms"
when: osd_group_name in group_names
- name: enable red hat storage tools repository
rhsm_repository:
- name: "rhceph-{{ ceph_rhcs_version }}-tools-for-rhel-8-{{ ansible_architecture }}-rpms"
+ name: "rhceph-{{ ceph_rhcs_version }}-tools-for-rhel-8-{{ ansible_facts['architecture'] }}-rpms"
when: (mgr_group_name in group_names or rgw_group_name in group_names or mds_group_name in group_names or nfs_group_name in group_names or iscsi_gw_group_name in group_names or client_group_name in group_names or monitoring_group_name in group_names)
register: result
until: result is succeeded
tags: with_pkg
- when: ansible_distribution_major_version | int == 7
+ when: ansible_facts['distribution_major_version'] | int == 7
- name: configure red hat ceph community repository stable key
rpm_key:
gpgcheck: yes
state: present
gpgkey: "{{ ceph_stable_key }}"
- baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_distribution_major_version }}/$basearch"
+ baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_facts['distribution_major_version'] }}/$basearch"
file: ceph_stable
priority: 2
register: result
gpgcheck: yes
state: present
gpgkey: "{{ ceph_stable_key }}"
- baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_distribution_major_version }}/noarch"
+ baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_facts['distribution_major_version'] }}/noarch"
file: ceph_stable
priority: 2
register: result
---
- name: get latest available build
uri:
- url: "https://shaman.ceph.com/api/search/?status=ready&project=ceph&flavor=default&distros=centos/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}&ref={{ ceph_dev_branch }}&sha1={{ ceph_dev_sha1 }}"
+ url: "https://shaman.ceph.com/api/search/?status=ready&project=ceph&flavor=default&distros=centos/{{ ansible_facts['distribution_major_version'] }}/{{ ansible_facts['architecture'] }}&ref={{ ceph_dev_branch }}&sha1={{ ceph_dev_sha1 }}"
return_content: yes
run_once: true
register: latest_build
---
- name: include_tasks installs/install_on_redhat.yml
include_tasks: installs/install_on_redhat.yml
- when: ansible_os_family == 'RedHat'
+ when: ansible_facts['os_family'] == 'RedHat'
tags: package-install
- name: include_tasks installs/install_on_suse.yml
include_tasks: installs/install_on_suse.yml
- when: ansible_os_family == 'Suse'
+ when: ansible_facts['os_family'] == 'Suse'
tags: package-install
- name: include installs/install_on_debian.yml
include_tasks: installs/install_on_debian.yml
tags: package-install
- when: ansible_os_family == 'Debian'
+ when: ansible_facts['os_family'] == 'Debian'
- name: include_tasks installs/install_on_clear.yml
include_tasks: installs/install_on_clear.yml
- when: ansible_os_family == 'ClearLinux'
+ when: ansible_facts['os_family'] == 'ClearLinux'
tags: package-install
- name: get ceph version
---
- name: create rados gateway instance directories
file:
- path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}"
+ path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: directory
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
- name: generate environment file
copy:
- dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}/EnvironmentFile"
+ dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}/EnvironmentFile"
owner: "root"
group: "root"
mode: "0644"
{% if nb_mon > 0 and inventory_hostname in groups.get(mon_group_name, []) %}
mon initial members = {% for host in groups[mon_group_name] %}
- {% if hostvars[host]['ansible_hostname'] is defined -%}
- {{ hostvars[host]['ansible_hostname'] }}
+ {% if hostvars[host]['ansible_facts']['hostname'] is defined -%}
+ {{ hostvars[host]['ansible_facts']['hostname'] }}
{%- endif %}
{%- if not loop.last %},{% endif %}
{% endfor %}
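{# Rendered sketch (hostnames illustrative): with three monitors mon1, mon2 and mon3,
   the loop above emits "mon initial members = mon1,mon2,mon3". #}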
[osd]
{% if is_hci | bool and _num_osds > 0 %}
{# hci_safety_factor is the safety factor for HCI deployments #}
-{% if ansible_memtotal_mb * 1048576 * hci_safety_factor / _num_osds > osd_memory_target %}
-{% set _osd_memory_target = (ansible_memtotal_mb * 1048576 * hci_safety_factor / _num_osds) | int %}
+{% if ansible_facts['memtotal_mb'] * 1048576 * hci_safety_factor / _num_osds > osd_memory_target %}
+{% set _osd_memory_target = (ansible_facts['memtotal_mb'] * 1048576 * hci_safety_factor / _num_osds) | int %}
{% endif %}
{% elif _num_osds > 0 %}
{# non_hci_safety_factor is the safety factor for dedicated nodes #}
-{% if ansible_memtotal_mb * 1048576 * non_hci_safety_factor / _num_osds > osd_memory_target %}
-{% set _osd_memory_target = (ansible_memtotal_mb * 1048576 * non_hci_safety_factor / _num_osds) | int %}
+{% if ansible_facts['memtotal_mb'] * 1048576 * non_hci_safety_factor / _num_osds > osd_memory_target %}
+{% set _osd_memory_target = (ansible_facts['memtotal_mb'] * 1048576 * non_hci_safety_factor / _num_osds) | int %}
{% endif %}
{% endif %}
osd memory target = {{ _osd_memory_target | default(osd_memory_target) }}
{% endif %}
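{# Worked example, assuming the shipped defaults hci_safety_factor=0.2 and
   osd_memory_target=4294967296 (4 GiB): on an HCI node with memtotal_mb=262144
   (256 GiB) and _num_osds=4, 262144 * 1048576 * 0.2 / 4 = 13743895347 bytes
   (~12.8 GiB) > 4294967296, so osd memory target is raised to 13743895347. #}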
{% if inventory_hostname in groups.get(rgw_group_name, []) %}
-{% set _rgw_hostname = hostvars[inventory_hostname]['rgw_hostname'] | default(hostvars[inventory_hostname]['ansible_hostname']) %}
+{% set _rgw_hostname = hostvars[inventory_hostname]['rgw_hostname'] | default(hostvars[inventory_hostname]['ansible_facts']['hostname']) %}
{# {{ hostvars[host]['rgw_hostname'] }} for backward compatibility, fqdn issues. See bz1580408 #}
{% if hostvars[inventory_hostname]['rgw_instances'] is defined %}
{% for instance in hostvars[inventory_hostname]['rgw_instances'] %}
[client.rgw.{{ _rgw_hostname + '.' + instance['instance_name'] }}]
host = {{ _rgw_hostname }}
keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ _rgw_hostname + '.' + instance['instance_name'] }}/keyring
-log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[inventory_hostname]['ansible_hostname'] + '.' + instance['instance_name'] }}.log
+log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] + '.' + instance['instance_name'] }}.log
{% set _rgw_binding_socket = instance['radosgw_address'] | default(_radosgw_address) | string + ':' + instance['radosgw_frontend_port'] | default(radosgw_frontend_port) | string %}
{%- macro frontend_line(frontend_type) -%}
{%- if frontend_type == 'civetweb' -%}
{% if inventory_hostname in groups.get(nfs_group_name, []) and inventory_hostname not in groups.get(rgw_group_name, []) %}
{% for host in groups[nfs_group_name] %}
-{% set _rgw_hostname = hostvars[host]['rgw_hostname'] | default(hostvars[host]['ansible_hostname']) %}
+{% set _rgw_hostname = hostvars[host]['rgw_hostname'] | default(hostvars[host]['ansible_facts']['hostname']) %}
{% if nfs_obj_gw | bool %}
[client.rgw.{{ _rgw_hostname }}]
host = {{ _rgw_hostname }}
keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ _rgw_hostname }}/keyring
-log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[host]['ansible_hostname'] }}.log
+log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[host]['ansible_facts']['hostname'] }}.log
{% endif %}
{% endfor %}
{% endif %}
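{# For a hypothetical standalone NFS node 'nfs0' with nfs_obj_gw enabled, the
   loop above renders:
   [client.rgw.nfs0]
   host = nfs0
   keyring = /var/lib/ceph/radosgw/ceph-rgw.nfs0/keyring
   log file = /var/log/ceph/ceph-rgw-nfs0.log #}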
- name: add docker's gpg key
apt_key:
- url: 'https://download.docker.com/linux/{{ ansible_distribution | lower }}/gpg'
+ url: "https://download.docker.com/linux/{{ ansible_facts['distribution'] | lower }}/gpg"
register: result
until: result is succeeded
when: container_package_name == 'docker-ce'
- name: add docker repository
apt_repository:
- repo: "deb https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} stable"
+ repo: "deb https://download.docker.com/linux/{{ ansible_facts['distribution'] | lower }} {{ ansible_facts['distribution_release'] }} stable"
when: container_package_name == 'docker-ce'
- name: add podman ppa repository
apt_repository:
repo: "ppa:projectatomic/ppa"
when:
- container_package_name == 'podman'
- - ansible_distribution == 'Ubuntu'
+ - ansible_facts['distribution'] == 'Ubuntu'
- name: include specific variables
include_vars: "{{ item }}"
with_first_found:
- - "{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml"
- - "{{ ansible_os_family }}.yml"
+ - "{{ ansible_facts['distribution'] }}-{{ ansible_facts['distribution_major_version'] }}.yml"
+ - "{{ ansible_facts['os_family'] }}.yml"
- name: debian based systems tasks
include_tasks: debian_prerequisites.yml
when:
- - ansible_os_family == 'Debian'
+ - ansible_facts['os_family'] == 'Debian'
tags: with_pkg
- name: install container packages
- name: start the ceph-crash service
systemd:
- name: "{{ 'ceph-crash@' + ansible_hostname if containerized_deployment | bool else 'ceph-crash.service' }}"
+ name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}"
state: started
enabled: yes
masked: no
---
- name: set_fact container_exec_cmd
set_fact:
- container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
when: containerized_deployment | bool
- name: set_fact container_run_cmd
- name: add iscsi gateways - ipv4
command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard iscsi-gateway-add -i -"
args:
- stdin: "{{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}:{{ hostvars[item]['api_port'] | default(5000) }}"
+ stdin: "{{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}:{{ hostvars[item]['api_port'] | default(5000) }}"
stdin_add_newline: no
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
- name: add iscsi gateways - ipv6
command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard iscsi-gateway-add -i -"
args:
- stdin: "{{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap }}:{{ hostvars[item]['api_port'] | default(5000) }}"
+ stdin: "{{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap }}:{{ hostvars[item]['api_port'] | default(5000) }}"
stdin_add_newline: no
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
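# With hypothetical values (api_secure false, default user/password/port, a
# gateway answering on 192.0.2.10), the stdin rendered by the ipv4 task above
# is: http://admin:admin@192.0.2.10:5000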
---
- name: get current mgr backend - ipv4
set_fact:
- mgr_server_addr: "{{ hostvars[dashboard_backend]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}"
+ mgr_server_addr: "{{ hostvars[dashboard_backend]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}"
when: ip_version == 'ipv4'
- name: get current mgr backend - ipv6
set_fact:
- mgr_server_addr: "{{ hostvars[dashboard_backend]['ansible_all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last }}"
+ mgr_server_addr: "{{ hostvars[dashboard_backend]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last }}"
when: ip_version == 'ipv6'
- name: config the current dashboard backend
- command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/{{ hostvars[dashboard_backend]['ansible_hostname'] }}/server_addr {{ mgr_server_addr }}"
+ command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/{{ hostvars[dashboard_backend]['ansible_facts']['hostname'] }}/server_addr {{ mgr_server_addr }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
run_once: true
- name: print dashboard URL
debug:
- msg: "The dashboard has been deployed! You can access your dashboard web UI at {{ dashboard_protocol }}://{{ ansible_fqdn }}:{{ dashboard_port }}/ as an '{{ dashboard_admin_user }}' user with '{{ dashboard_admin_password }}' password."
+ msg: "The dashboard has been deployed! You can access your dashboard web UI at {{ dashboard_protocol }}://{{ ansible_facts['fqdn'] }}:{{ dashboard_port }}/ as an '{{ dashboard_admin_user }}' user with '{{ dashboard_admin_password }}' password."
run_once: true
centos_package_dependencies:
- epel-release
- - "{{ 'python3-libselinux' if ansible_distribution_major_version | int >= 8 else 'libselinux-python' }}"
+ - "{{ (ansible_facts['distribution_major_version'] is version('8', '>=')) | ternary('python3-libselinux', 'libselinux-python') }}"
redhat_package_dependencies: []
# Use the option below to specify your applicable package tree, e.g. when using non-LTS Ubuntu versions
# for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
# for more info read: https://github.com/ceph/ceph-ansible/issues/305
-#ceph_stable_distro_source: "{{ ansible_distribution_release }}"
+#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
# REPOSITORY: RHCS VERSION RED HAT STORAGE (from 5.0)
#
ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
ceph_stable_openstack_release_uca: queens
-ceph_stable_release_uca: "{{ ansible_distribution_release }}-updates/{{ ceph_stable_openstack_release_uca }}"
+ceph_stable_release_uca: "{{ ansible_facts['distribution_release'] }}-updates/{{ ceph_stable_openstack_release_uca }}"
# REPOSITORY: openSUSE OBS
#
# usually has newer Ceph releases than the normal distro repository.
#
-ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_distribution_version }}/"
+ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_facts['distribution_version'] }}/"
# REPOSITORY: DEV
#
ceph_conf_key_directory: /etc/ceph
-ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_os_family == 'Debian' else '167' }}"
+ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_facts['os_family'] == 'Debian' else '167' }}"
# Permissions for keyring files in /etc/ceph
ceph_keyring_permissions: '0600'
# global:
# foo: 1234
# bar: 5678
-# "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_hostname'] }}":
+# "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
# rgw_zone: zone1
#
ceph_conf_overrides: {}
container_exec_cmd:
docker: false
-ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}"
\ No newline at end of file
+ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}"
- name: set_fact container_binary
set_fact:
- container_binary: "{{ 'podman' if (podman_binary.stat.exists and ansible_distribution == 'Fedora') or (ansible_os_family == 'RedHat' and ansible_distribution_major_version == '8') else 'docker' }}"
\ No newline at end of file
+ container_binary: "{{ 'podman' if (podman_binary.stat.exists and ansible_facts['distribution'] == 'Fedora') or (ansible_facts['os_family'] == 'RedHat' and ansible_facts['distribution_major_version'] == '8') else 'docker' }}"
\ No newline at end of file
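# Example outcomes (hypothetical hosts): a Fedora host with a podman binary
# present, or any RedHat-family release 8 host, selects 'podman'; a CentOS 7
# host falls back to 'docker'.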
- name: set_fact monitor_name ansible_hostname
set_fact:
- monitor_name: "{{ hostvars[item]['ansible_hostname'] }}"
+ monitor_name: "{{ hostvars[item]['ansible_facts']['hostname'] }}"
delegate_to: "{{ item }}"
delegate_facts: true
with_items: "{{ groups.get(mon_group_name, []) }}"
block:
- name: set_fact container_exec_cmd
set_fact:
- container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] if not rolling_update | bool else hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_hostname'] }}"
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] if not rolling_update | bool else hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_facts']['hostname'] }}"
when:
- containerized_deployment | bool
- name: find a running mon container
- command: "{{ container_binary }} ps -q --filter name=ceph-mon-{{ hostvars[item]['ansible_hostname'] }}"
+ command: "{{ container_binary }} ps -q --filter name=ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}"
register: find_running_mon_container
failed_when: false
run_once: true
- name: set_fact _container_exec_cmd
set_fact:
- _container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0] if running_mon is undefined else running_mon]['ansible_hostname'] }}"
+ _container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0] if running_mon is undefined else running_mon]['ansible_facts']['hostname'] }}"
when:
- containerized_deployment | bool
when: rolling_update | bool or groups.get(mon_group_name, []) | length == 0
- name: get current fsid
- command: "{{ timeout_command }} {{ container_exec_cmd }} ceph --admin-daemon /var/run/ceph/{{ cluster }}-mon.{{ hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_hostname'] }}.asok config get fsid"
+ command: "{{ timeout_command }} {{ container_exec_cmd }} ceph --admin-daemon /var/run/ceph/{{ cluster }}-mon.{{ hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_facts']['hostname'] }}.asok config get fsid"
register: rolling_update_fsid
delegate_to: "{{ mon_host | default(groups[mon_group_name][0]) }}"
until: rolling_update_fsid is succeeded
- name: set_fact devices generate device list when osd_auto_discovery
set_fact:
devices: "{{ (devices | default([]) + [ item.key | regex_replace('^', '/dev/') ]) | unique }}"
- with_dict: "{{ ansible_devices }}"
+ with_dict: "{{ ansible_facts['devices'] }}"
when:
- osd_auto_discovery | default(False) | bool
- inventory_hostname in groups.get(osd_group_name, [])
- - ansible_devices is defined
+ - ansible_facts['devices'] is defined
- item.value.removable == "0"
- item.value.sectors != "0"
- item.value.partitions|count == 0
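# e.g. on a host whose ansible_facts['devices'] lists a bare, non-removable
# disk 'sdb' (hypothetical layout), the regex_replace above adds '/dev/sdb'
# to the devices list.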
- name: set_fact rgw_hostname
set_fact:
- rgw_hostname: "{% set _value = ansible_hostname -%}
+ rgw_hostname: "{% set _value = ansible_facts['hostname'] -%}
{% for key in (ceph_current_status['services']['rgw']['daemons'] | list) -%}
- {% if key == ansible_fqdn -%}
+ {% if key == ansible_facts['fqdn'] -%}
{% set _value = key -%}
{% endif -%}
{% endfor -%}
- name: set grafana_server_addr fact - ipv4
set_fact:
- grafana_server_addr: "{{ hostvars[inventory_hostname]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}"
+ grafana_server_addr: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}"
when:
- groups.get(monitoring_group_name, []) | length > 0
- ip_version == 'ipv4'
- name: set grafana_server_addr fact - ipv6
set_fact:
- grafana_server_addr: "{{ hostvars[inventory_hostname]['ansible_all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap }}"
+ grafana_server_addr: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap }}"
when:
- groups.get(monitoring_group_name, []) | length > 0
- ip_version == 'ipv6'
- name: set grafana_server_addrs fact - ipv4
set_fact:
- grafana_server_addrs: "{{ (grafana_server_addrs | default([]) + [hostvars[item]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first]) | unique }}"
+ grafana_server_addrs: "{{ (grafana_server_addrs | default([]) + [hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first]) | unique }}"
with_items: "{{ groups.get(monitoring_group_name, []) }}"
when:
- groups.get(monitoring_group_name, []) | length > 0
- name: set grafana_server_addrs fact - ipv6
set_fact:
- grafana_server_addrs: "{{ (grafana_server_addrs | default([]) + [hostvars[item]['ansible_all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap]) | unique }}"
+ grafana_server_addrs: "{{ (grafana_server_addrs | default([]) + [hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap]) | unique }}"
with_items: "{{ groups.get(monitoring_group_name, []) }}"
when:
- groups.get(monitoring_group_name, []) | length > 0
---
- name: set_fact _monitor_addresses to monitor_address_block ipv4
set_fact:
- _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_all_ipv4_addresses'] | ips_in_ranges(hostvars[item]['monitor_address_block'].split(',')) | first }] }}"
+ _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(hostvars[item]['monitor_address_block'].split(',')) | first }] }}"
with_items: "{{ groups.get(mon_group_name, []) }}"
when:
- "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
- name: set_fact _monitor_addresses to monitor_address_block ipv6
set_fact:
- _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_all_ipv6_addresses'] | ips_in_ranges(hostvars[item]['monitor_address_block'].split(',')) | last | ipwrap }] }}"
+ _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(hostvars[item]['monitor_address_block'].split(',')) | last | ipwrap }] }}"
with_items: "{{ groups.get(mon_group_name, []) }}"
when:
- "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
- name: set_fact _monitor_addresses to monitor_interface - ipv4
set_fact:
- _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_' + (hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version]['address'] | ipwrap }] }}"
+ _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_facts'][(hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version]['address'] | ipwrap }] }}"
with_items: "{{ groups.get(mon_group_name, []) }}"
when:
- "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
- name: set_fact _monitor_addresses to monitor_interface - ipv6
set_fact:
- _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_' + (hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version][0]['address'] | ipwrap }] }}"
+ _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_facts'][(hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version][0]['address'] | ipwrap }] }}"
with_items: "{{ groups.get(mon_group_name, []) }}"
when:
- "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
---
- name: set_fact _radosgw_address to radosgw_address_block ipv4
set_fact:
- _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_all_ipv4_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['radosgw_address_block'].split(',')) | first }}"
+ _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['radosgw_address_block'].split(',')) | first }}"
when:
- radosgw_address_block is defined
- radosgw_address_block != 'subnet'
- name: set_fact _radosgw_address to radosgw_address_block ipv6
set_fact:
- _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_all_ipv6_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['radosgw_address_block'].split(',')) | last | ipwrap }}"
+ _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['radosgw_address_block'].split(',')) | last | ipwrap }}"
when:
- radosgw_address_block is defined
- radosgw_address_block != 'subnet'
block:
- name: set_fact _interface
set_fact:
- _interface: "{{ 'ansible_' + (radosgw_interface | replace('-', '_')) }}"
+ _interface: "{{ (radosgw_interface | replace('-', '_')) }}"
- name: set_fact _radosgw_address to radosgw_interface - ipv4
set_fact:
- _radosgw_address: "{{ hostvars[inventory_hostname][_interface][ip_version]['address'] }}"
+ _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_facts'][_interface][ip_version]['address'] }}"
when: ip_version == 'ipv4'
- name: set_fact _radosgw_address to radosgw_interface - ipv6
set_fact:
- _radosgw_address: "{{ hostvars[inventory_hostname][_interface][ip_version][0]['address'] | ipwrap }}"
+ _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_facts'][_interface][ip_version][0]['address'] | ipwrap }}"
when: ip_version == 'ipv6'
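# e.g. with a hypothetical 'radosgw_interface: eth0', _interface becomes 'eth0'
# and the ipv4 task reads ansible_facts['eth0']['ipv4']['address']; interface
# facts are keyed without the 'ansible_' prefix inside ansible_facts, hence the
# simplified _interface fact above.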
- name: set_fact rgw_instances without rgw multisite
until: result is succeeded
when:
- not containerized_deployment | bool
- - ansible_os_family in ['RedHat', 'Suse']
+ - ansible_facts['os_family'] in ['RedHat', 'Suse']
tags: package-install
- name: make sure grafana is down
with_items: "{{ grafana_dashboard_files }}"
when:
- not containerized_deployment | bool
- - not ansible_os_family in ['RedHat', 'Suse']
+ - not ansible_facts['os_family'] in ['RedHat', 'Suse']
- name: write grafana.ini
config_template:
[server]
cert_file = /etc/grafana/ceph-dashboard.crt
cert_key = /etc/grafana/ceph-dashboard.key
-domain = {{ ansible_fqdn }}
+domain = {{ ansible_facts['fqdn'] }}
protocol = {{ dashboard_protocol }}
http_port = {{ grafana_port }}
http_addr = {{ grafana_server_addr }}
---
- name: check for a mon container
- command: "{{ container_binary }} ps -q --filter='name=ceph-mon-{{ ansible_hostname }}'"
+ command: "{{ container_binary }} ps -q --filter='name=ceph-mon-{{ ansible_facts['hostname'] }}'"
register: ceph_mon_container_stat
changed_when: false
failed_when: false
when: inventory_hostname in groups.get(mon_group_name, [])
- name: check for a mds container
- command: "{{ container_binary }} ps -q --filter='name=ceph-mds-{{ ansible_hostname }}'"
+ command: "{{ container_binary }} ps -q --filter='name=ceph-mds-{{ ansible_facts['hostname'] }}'"
register: ceph_mds_container_stat
changed_when: false
failed_when: false
when: inventory_hostname in groups.get(mds_group_name, [])
- name: check for a rgw container
- command: "{{ container_binary }} ps -q --filter='name=ceph-rgw-{{ ansible_hostname }}'"
+ command: "{{ container_binary }} ps -q --filter='name=ceph-rgw-{{ ansible_facts['hostname'] }}'"
register: ceph_rgw_container_stat
changed_when: false
failed_when: false
when: inventory_hostname in groups.get(rgw_group_name, [])
- name: check for a mgr container
- command: "{{ container_binary }} ps -q --filter='name=ceph-mgr-{{ ansible_hostname }}'"
+ command: "{{ container_binary }} ps -q --filter='name=ceph-mgr-{{ ansible_facts['hostname'] }}'"
register: ceph_mgr_container_stat
changed_when: false
failed_when: false
when: inventory_hostname in groups.get(mgr_group_name, [])
- name: check for a rbd mirror container
- command: "{{ container_binary }} ps -q --filter='name=ceph-rbd-mirror-{{ ansible_hostname }}'"
+ command: "{{ container_binary }} ps -q --filter='name=ceph-rbd-mirror-{{ ansible_facts['hostname'] }}'"
register: ceph_rbd_mirror_container_stat
changed_when: false
failed_when: false
when: inventory_hostname in groups.get(rbdmirror_group_name, [])
- name: check for a nfs container
- command: "{{ container_binary }} ps -q --filter='name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_hostname) }}'"
+ command: "{{ container_binary }} ps -q --filter='name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}'"
register: ceph_nfs_container_stat
changed_when: false
failed_when: false
when: inventory_hostname in groups.get(nfs_group_name, [])
- name: check for a ceph-crash container
- command: "{{ container_binary }} ps -q --filter='name=ceph-crash-{{ ansible_hostname }}'"
+ command: "{{ container_binary }} ps -q --filter='name=ceph-crash-{{ ansible_facts['hostname'] }}'"
register: ceph_crash_container_stat
changed_when: false
failed_when: false
- name: restart the ceph-crash service
systemd:
- name: ceph-crash@{{ ansible_hostname }}
+ name: ceph-crash@{{ ansible_facts['hostname'] }}
state: restarted
enabled: yes
masked: no
RETRIES="{{ handler_health_mds_check_retries }}"
DELAY="{{ handler_health_mds_check_delay }}"
-MDS_NAME="{{ ansible_hostname }}"
+MDS_NAME="{{ ansible_facts['hostname'] }}"
{% if containerized_deployment | bool %}
-DOCKER_EXEC="{{ container_binary }} exec ceph-mds-{{ ansible_hostname }}"
+DOCKER_EXEC="{{ container_binary }} exec ceph-mds-{{ ansible_facts['hostname'] }}"
{% endif %}
# Backward compatibility
-$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mds.{{ ansible_fqdn }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mds.{{ ansible_fqdn }}.asok
-$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mds.{{ ansible_hostname }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mds.{{ ansible_hostname }}.asok
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['fqdn'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['fqdn'] }}.asok
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['hostname'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['hostname'] }}.asok
# First, restart the daemon
systemctl restart ceph-mds@${MDS_NAME}
RETRIES="{{ handler_health_mgr_check_retries }}"
DELAY="{{ handler_health_mgr_check_delay }}"
-MGR_NAME="{{ ansible_hostname }}"
+MGR_NAME="{{ ansible_facts['hostname'] }}"
{% if containerized_deployment | bool %}
-DOCKER_EXEC="{{ container_binary }} exec ceph-mgr-{{ ansible_hostname }}"
+DOCKER_EXEC="{{ container_binary }} exec ceph-mgr-{{ ansible_facts['hostname'] }}"
{% endif %}
# Backward compatibility
-$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mgr.{{ ansible_fqdn }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mgr.{{ ansible_fqdn }}.asok
-$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mgr.{{ ansible_hostname }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mgr.{{ ansible_hostname }}.asok
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mgr.{{ ansible_facts['fqdn'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mgr.{{ ansible_facts['fqdn'] }}.asok
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mgr.{{ ansible_facts['hostname'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mgr.{{ ansible_facts['hostname'] }}.asok
systemctl reset-failed ceph-mgr@${MGR_NAME}
# First, restart the daemon
DELAY="{{ handler_health_mon_check_delay }}"
MONITOR_NAME="{{ monitor_name }}"
{% if containerized_deployment | bool %}
-DOCKER_EXEC="{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
+DOCKER_EXEC="{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
{% endif %}
# if daemon is uninstalled, no restarting is needed; so exit with success
-systemctl status ceph-mon@{{ ansible_hostname }} > /dev/null
+systemctl status ceph-mon@{{ ansible_facts['hostname'] }} > /dev/null
if [[ $? -ne 0 ]]; then
exit 0
fi
# Backward compatibility
-$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mon.{{ ansible_fqdn }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mon.{{ ansible_fqdn }}.asok
-$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mon.{{ ansible_hostname }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mon.{{ ansible_hostname }}.asok
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mon.{{ ansible_facts['fqdn'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mon.{{ ansible_facts['fqdn'] }}.asok
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mon.{{ ansible_facts['hostname'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mon.{{ ansible_facts['hostname'] }}.asok
check_quorum() {
while [ $RETRIES -ne 0 ]; do
}
# First, restart the daemon
-systemctl restart ceph-mon@{{ ansible_hostname }}
+systemctl restart ceph-mon@{{ ansible_facts['hostname'] }}
COUNT=10
# Wait and ensure the socket exists after restarting the daemon
done
# If we reach this point, it means the socket is not present.
echo "Socket file ${SOCKET} could not be found, which means the monitor is not running. Showing ceph-mon unit logs now:"
-journalctl -u ceph-mon@{{ ansible_hostname }}
+journalctl -u ceph-mon@{{ ansible_facts['hostname'] }}
exit 1
RETRIES="{{ handler_health_nfs_check_retries }}"
DELAY="{{ handler_health_nfs_check_delay }}"
-NFS_NAME="ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_hostname) }}"
+NFS_NAME="ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}"
PID=/var/run/ganesha.pid
{% if containerized_deployment | bool %}
-DOCKER_EXEC="{{ container_binary }} exec ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_hostname) }}"
+DOCKER_EXEC="{{ container_binary }} exec ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}"
{% endif %}
# First, restart the daemon
RETRIES="{{ handler_health_rbd_mirror_check_retries }}"
DELAY="{{ handler_health_rbd_mirror_check_delay }}"
-RBD_MIRROR_NAME="{{ ansible_hostname }}"
+RBD_MIRROR_NAME="{{ ansible_facts['hostname'] }}"
{% if containerized_deployment | bool %}
-DOCKER_EXEC="{{ container_binary }} exec ceph-rbd-mirror-{{ ansible_hostname }}"
+DOCKER_EXEC="{{ container_binary }} exec ceph-rbd-mirror-{{ ansible_facts['hostname'] }}"
{% endif %}
# Backward compatibility
-$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_fqdn }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_fqdn }}.asok
-$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_hostname }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_hostname }}.asok
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_facts['fqdn'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_facts['fqdn'] }}.asok
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_facts['hostname'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_facts['hostname'] }}.asok
# First, restart the daemon
systemctl restart ceph-rbd-mirror@rbd-mirror.${RBD_MIRROR_NAME}
RETRIES="{{ handler_health_rgw_check_retries }}"
DELAY="{{ handler_health_rgw_check_delay }}"
-HOST_NAME="{{ ansible_hostname }}"
+HOST_NAME="{{ ansible_facts['hostname'] }}"
RGW_NUMS={{ rgw_instances | length | int }}
RGW_FRONTEND_SSL_CERT={{ radosgw_frontend_ssl_certificate }}
if [ -n "$RGW_FRONTEND_SSL_CERT" ]; then
block:
- name: install firewalld python binding
package:
- name: "python{{ ansible_python.version.major }}-firewall"
+ name: "python{{ ansible_facts['python']['version']['major'] }}-firewall"
tags: with_pkg
when: not is_atomic | bool
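# e.g. on a host where ansible_facts['python']['version']['major'] is 3, the
# package name above resolves to python3-firewall.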
- name: update cache for Debian based OSs
apt:
update_cache: yes
- when: ansible_os_family == "Debian"
+ when: ansible_facts['os_family'] == "Debian"
register: result
until: result is succeeded
include_tasks: configure_firewall.yml
when:
- configure_firewall | bool
- - ansible_os_family in ['RedHat', 'Suse']
+ - ansible_facts['os_family'] in ['RedHat', 'Suse']
tags: configure_firewall
- name: include_tasks setup_ntp.yml
set_fact:
chrony_daemon_name: chrony
ntp_service_name: ntp
- when: ansible_os_family == 'Debian'
+ when: ansible_facts['os_family'] == 'Debian'
- name: set ntp service and chrony daemon name for RedHat and Suse family
set_fact:
chrony_daemon_name: chronyd
ntp_service_name: ntpd
- when: ansible_os_family in ['RedHat', 'Suse']
+ when: ansible_facts['os_family'] in ['RedHat', 'Suse']
# Installation of NTP daemons needs to be a separate task since installations
# can't happen on Atomic
# These options can be passed using the 'ceph_mds_docker_extra_env' variable.
# TCMU_RUNNER resource limitation
-ceph_tcmu_runner_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+ceph_tcmu_runner_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
ceph_tcmu_runner_docker_cpu_limit: 1
# RBD_TARGET_GW resource limitation
-ceph_rbd_target_gw_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+ceph_rbd_target_gw_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
ceph_rbd_target_gw_docker_cpu_limit: 1
# RBD_TARGET_API resource limitation
-ceph_rbd_target_api_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+ceph_rbd_target_api_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
ceph_rbd_target_api_docker_cpu_limit: 1
- name: add mgr ip address to trusted list with dashboard - ipv4
set_fact:
- trusted_ip_list: '{{ trusted_ip_list }},{{ hostvars[item]["ansible_all_ipv4_addresses"] | ips_in_ranges(public_network.split(",")) | first }}'
+ trusted_ip_list: '{{ trusted_ip_list }},{{ hostvars[item]["ansible_facts"]["all_ipv4_addresses"] | ips_in_ranges(public_network.split(",")) | first }}'
with_items: '{{ groups[mgr_group_name] | default(groups[mon_group_name]) }}'
when:
- dashboard_enabled | bool
- name: add mgr ip address to trusted list with dashboard - ipv6
set_fact:
- trusted_ip_list: '{{ trusted_ip_list }},{{ hostvars[item]["ansible_all_ipv6_addresses"] | ips_in_ranges(public_network.split(",")) | last }}'
+ trusted_ip_list: '{{ trusted_ip_list }},{{ hostvars[item]["ansible_facts"]["all_ipv6_addresses"] | ips_in_ranges(public_network.split(",")) | last }}'
with_items: '{{ groups[mgr_group_name] | default(groups[mon_group_name]) }}'
when:
- dashboard_enabled | bool
- name: set_fact container_exec_cmd
set_fact:
- container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when: containerized_deployment | bool
delegate_to: "{{ groups[mon_group_name][0] }}"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
- CEPH_CONTAINER_BINARY: "{{ container_binary }}"
\ No newline at end of file
+ CEPH_CONTAINER_BINARY: "{{ container_binary }}"
command: >
openssl req -newkey rsa:2048 -nodes -keyout {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.key
-x509 -days 365 -out {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.crt
- -subj "/C=US/ST=./L=./O=RedHat/OU=Linux/CN={{ ansible_hostname }}"
+ -subj "/C=US/ST=./L=./O=RedHat/OU=Linux/CN={{ ansible_facts['hostname'] }}"
delegate_to: localhost
run_once: True
with_items: "{{ crt_files_exist.results }}"
---
- name: red hat based systems tasks
- when: ansible_os_family == 'RedHat'
+ when: ansible_facts['os_family'] == 'RedHat'
block:
- name: set_fact common pkgs and repos
set_fact:
block:
- name: ceph-iscsi dependency repositories
get_url:
- url: 'https://shaman.ceph.com/api/repos/{{ item }}/master/latest/{{ ansible_distribution | lower }}/{{ ansible_distribution_major_version }}/repo'
+ url: "https://shaman.ceph.com/api/repos/{{ item }}/master/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_major_version'] }}/repo"
dest: '/etc/yum.repos.d/{{ item }}-dev.repo'
force: true
register: result
- name: ceph-iscsi development repository
get_url:
- url: 'https://shaman.ceph.com/api/repos/{{ item }}/master/latest/{{ ansible_distribution | lower }}/{{ ansible_distribution_major_version }}/repo'
+ url: "https://shaman.ceph.com/api/repos/{{ item }}/master/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_major_version'] }}/repo"
dest: '/etc/yum.repos.d/{{ item }}-dev.repo'
force: true
register: result
- name: ceph-iscsi stable repository
get_url:
- url: 'https://download.ceph.com/ceph-iscsi/{{ "3" if use_new_ceph_iscsi | bool else "2" }}/rpm/el{{ ansible_distribution_major_version }}/ceph-iscsi.repo'
+ url: "https://download.ceph.com/ceph-iscsi/{{ '3' if use_new_ceph_iscsi | bool else '2' }}/rpm/el{{ ansible_facts['distribution_major_version'] }}/ceph-iscsi.repo"
dest: /etc/yum.repos.d/ceph-iscsi.repo
force: true
register: result
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_mds_docker_extra_env' variable.
-ceph_mds_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+ceph_mds_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
ceph_mds_docker_cpu_limit: 4
# we currently force MDS_NAME to hostname because of a bug in ceph-docker
# fix here: https://github.com/ceph/ceph-docker/pull/770
# this will go away soon.
-ceph_mds_docker_extra_env: -e MDS_NAME={{ ansible_hostname }}
+ceph_mds_docker_extra_env: -e MDS_NAME={{ ansible_facts['hostname'] }}
ceph_config_keys: [] # DON'T TOUCH ME
mode: "{{ ceph_directories_mode }}"
with_items:
- /var/lib/ceph/bootstrap-mds/
- - /var/lib/ceph/mds/{{ cluster }}-{{ ansible_hostname }}
+ - /var/lib/ceph/mds/{{ cluster }}-{{ ansible_facts['hostname'] }}
- name: get keys from monitors
ceph_key:
- name: systemd start mds container
systemd:
- name: ceph-mds@{{ ansible_hostname }}
+ name: ceph-mds@{{ ansible_facts['hostname'] }}
state: started
enabled: yes
masked: no
daemon_reload: yes
- name: wait for mds socket to exist
- command: "{{ container_binary }} exec ceph-mds-{{ ansible_hostname }} sh -c 'stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_hostname }}.asok || stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_fqdn }}.asok'"
+ command: "{{ container_binary }} exec ceph-mds-{{ ansible_facts['hostname'] }} sh -c 'stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['hostname'] }}.asok || stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['fqdn'] }}.asok'"
changed_when: false
register: multi_mds_socket
retries: 5
apt:
name: ceph-mds
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
- default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else '' }}{{ ansible_distribution_release ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
+ default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else '' }}{{ ansible_facts['distribution_release'] ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
when:
- mds_group_name in group_names
- - ansible_os_family == 'Debian'
+ - ansible_facts['os_family'] == 'Debian'
register: result
until: result is succeeded
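# e.g. with ceph_origin 'distro' and ceph_use_distro_backports enabled on a
# hypothetical bionic host, default_release renders to 'bionic-backports';
# with the 'uca' repository it renders to ceph_stable_release_uca instead.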
until: result is succeeded
when:
- mds_group_name in group_names
- - ansible_os_family in ['Suse', 'RedHat']
+ - ansible_facts['os_family'] in ['Suse', 'RedHat']
- name: create mds keyring
ceph_key:
- name: "mds.{{ ansible_hostname }}"
+ name: "mds.{{ ansible_facts['hostname'] }}"
cluster: "{{ cluster }}"
user: client.bootstrap-mds
user_key: "/var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring"
mon: "allow profile mds"
mds: "allow"
osd: "allow rwx"
- dest: "/var/lib/ceph/mds/{{ cluster }}-{{ ansible_hostname }}/keyring"
+ dest: "/var/lib/ceph/mds/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring"
import_key: false
owner: ceph
group: ceph
path: "/etc/systemd/system/ceph-mds@.service.d/"
when:
- ceph_mds_systemd_overrides is defined
- - ansible_service_mgr == 'systemd'
+ - ansible_facts['service_mgr'] == 'systemd'
- name: add ceph-mds systemd service overrides
config_template:
config_type: "ini"
when:
- ceph_mds_systemd_overrides is defined
- - ansible_service_mgr == 'systemd'
+ - ansible_facts['service_mgr'] == 'systemd'
- name: start the metadata service and add it to the init sequence
service:
- name: ceph-mds@{{ ansible_hostname }}
+ name: ceph-mds@{{ ansible_facts['hostname'] }}
state: started
enabled: yes
masked: no
{% else %}
After=network.target
{% endif %}
-{% set cpu_limit = ansible_processor_vcpus|int if ceph_mds_docker_cpu_limit|int > ansible_processor_vcpus|int else ceph_mds_docker_cpu_limit|int %}
+{% set cpu_limit = ansible_facts['processor_vcpus']|int if ceph_mds_docker_cpu_limit|int > ansible_facts['processor_vcpus']|int else ceph_mds_docker_cpu_limit|int %}
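{# e.g. on a hypothetical 2-vCPU host with the default ceph_mds_docker_cpu_limit
   of 4, the clamp above sets cpu_limit to 2 so the unit never requests more
   CPUs than the node actually has. #}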
[Service]
EnvironmentFile=-/etc/environment
{% if container_binary == 'podman' %}
ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
-ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-mds-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-mds-{{ ansible_facts['hostname'] }}
{% else %}
-ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mds-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mds-{{ ansible_facts['hostname'] }}
{% endif %}
-ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mds-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mds-{{ ansible_facts['hostname'] }}
ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
{% if container_binary == 'podman' %}
-d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
-e CEPH_DAEMON=MDS \
-e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
{{ ceph_mds_docker_extra_env }} \
- --name=ceph-mds-{{ ansible_hostname }} \
+ --name=ceph-mds-{{ ansible_facts['hostname'] }} \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
{% if container_binary == 'podman' %}
ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
{% else %}
-ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-mds-{{ ansible_hostname }}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-mds-{{ ansible_facts['hostname'] }}
{% endif %}
KillMode=none
Restart=always
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_mgr_docker_extra_env' variable.
-ceph_mgr_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+ceph_mgr_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
ceph_mgr_docker_cpu_limit: 1
ceph_mgr_docker_extra_env:
---
- name: create mgr directory
file:
- path: /var/lib/ceph/mgr/{{ cluster }}-{{ ansible_hostname }}
+ path: /var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }}
state: directory
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
- name: fetch ceph mgr keyring
ceph_key:
- name: "mgr.{{ ansible_hostname }}"
+ name: "mgr.{{ ansible_facts['hostname'] }}"
caps:
mon: allow profile mgr
osd: allow *
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
mode: "0400"
- dest: "/var/lib/ceph/mgr/{{ cluster }}-{{ ansible_hostname }}/keyring"
+ dest: "/var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
block:
- name: create ceph mgr keyring(s) on a mon node
ceph_key:
- name: "mgr.{{ hostvars[item]['ansible_hostname'] }}"
+ name: "mgr.{{ hostvars[item]['ansible_facts']['hostname'] }}"
caps:
mon: allow profile mgr
osd: allow *
set_fact:
_mgr_keys:
- { 'name': 'client.admin', 'path': "/etc/ceph/{{ cluster }}.client.admin.keyring", 'copy_key': copy_admin_key }
- - { 'name': "mgr.{{ ansible_hostname }}", 'path': "/var/lib/ceph/mgr/{{ cluster }}-{{ ansible_hostname }}/keyring", 'copy_key': true }
+ - { 'name': "mgr.{{ ansible_facts['hostname'] }}", 'path': "/var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring", 'copy_key': true }
- name: get keys from monitors
ceph_key:
- name: set mgr key permissions
file:
- path: /var/lib/ceph/mgr/{{ cluster }}-{{ ansible_hostname }}/keyring
+ path: /var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring
owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
mode: "{{ ceph_keyring_permissions }}"
---
- name: set_fact container_exec_cmd
set_fact:
- container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_hostname'] }}"
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}"
with_items: "{{ groups.get(mon_group_name, []) }}"
delegate_to: "{{ item }}"
delegate_facts: true
---
- name: set_fact ceph_mgr_packages for sso
set_fact:
- ceph_mgr_packages: "{{ ceph_mgr_packages | union(['python3-saml' if ansible_distribution_major_version | int == 8 else 'python-saml']) }}"
+ ceph_mgr_packages: "{{ ceph_mgr_packages | union(['python3-saml' if ansible_facts['distribution_major_version'] | int == 8 else 'python-saml']) }}"
when:
- dashboard_enabled | bool
- - ansible_distribution == 'RedHat'
+ - ansible_facts['distribution'] == 'RedHat'
- name: set_fact ceph_mgr_packages for dashboard
set_fact:
set_fact:
ceph_mgr_packages: "{{ ceph_mgr_packages | union(['ceph-mgr-diskprediction-local']) }}"
when:
- - ansible_os_family != 'RedHat'
- - ansible_distribution_major_version | int != 7
+ - ansible_facts['os_family'] != 'RedHat'
+ - ansible_facts['distribution_major_version'] | int != 7
- name: install ceph-mgr packages on RedHat or SUSE
package:
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
register: result
until: result is succeeded
- when: ansible_os_family in ['RedHat', 'Suse']
+ when: ansible_facts['os_family'] in ['RedHat', 'Suse']
- name: install ceph-mgr packages for debian
apt:
name: '{{ ceph_mgr_packages }}'
state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
- default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else ''}}{{ ansible_distribution_release ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
+ default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else ''}}{{ ansible_facts['distribution_release'] ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
register: result
until: result is succeeded
- when: ansible_os_family == 'Debian'
+ when: ansible_facts['os_family'] == 'Debian'
path: "/etc/systemd/system/ceph-mgr@.service.d/"
when:
- ceph_mgr_systemd_overrides is defined
- - ansible_service_mgr == 'systemd'
+ - ansible_facts['service_mgr'] == 'systemd'
- name: add ceph-mgr systemd service overrides
config_template:
config_type: "ini"
when:
- ceph_mgr_systemd_overrides is defined
- - ansible_service_mgr == 'systemd'
+ - ansible_facts['service_mgr'] == 'systemd'
- name: include_tasks systemd.yml
include_tasks: systemd.yml
- name: systemd start mgr
systemd:
- name: ceph-mgr@{{ ansible_hostname }}
+ name: ceph-mgr@{{ ansible_facts['hostname'] }}
state: started
enabled: yes
masked: no
EnvironmentFile=-/etc/environment
{% if container_binary == 'podman' %}
ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
-ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-mgr-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-mgr-{{ ansible_facts['hostname'] }}
{% else %}
-ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mgr-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mgr-{{ ansible_facts['hostname'] }}
{% endif %}
-ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mgr-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mgr-{{ ansible_facts['hostname'] }}
ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
{% if container_binary == 'podman' %}
-d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
-e CEPH_DAEMON=MGR \
-e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
{{ ceph_mgr_docker_extra_env }} \
- --name=ceph-mgr-{{ ansible_hostname }} \
+ --name=ceph-mgr-{{ ansible_facts['hostname'] }} \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
{% if container_binary == 'podman' %}
ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
{% else %}
-ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-mgr-{{ ansible_hostname }}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-mgr-{{ ansible_facts['hostname'] }}
{% endif %}
KillMode=none
Restart=always
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_mon_docker_extra_env' variable.
-ceph_mon_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+ceph_mon_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
ceph_mon_docker_cpu_limit: 1
ceph_mon_container_listen_port: 3300
{{ container_exec_cmd }}
ceph
--cluster {{ cluster }}
- daemon mon.{{ ansible_hostname }}
+ daemon mon.{{ ansible_facts['hostname'] }}
mon_status
--format json
register: ceph_health_raw
name: mon.
cluster: "{{ cluster }}"
user: mon.
- user_key: "/var/lib/ceph/mon/{{ cluster }}-{{ hostvars[running_mon]['ansible_hostname'] }}/keyring"
+ user_key: "/var/lib/ceph/mon/{{ cluster }}-{{ hostvars[running_mon]['ansible_facts']['hostname'] }}/keyring"
output_format: json
state: info
environment:
---
- name: set_fact container_exec_cmd
set_fact:
- container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
when: containerized_deployment | bool
- name: include deploy_monitors.yml
when:
- not containerized_deployment | bool
- ceph_mon_systemd_overrides is defined
- - ansible_service_mgr == 'systemd'
+ - ansible_facts['service_mgr'] == 'systemd'
- name: add ceph-mon systemd service overrides
config_template:
when:
- not containerized_deployment | bool
- ceph_mon_systemd_overrides is defined
- - ansible_service_mgr == 'systemd'
+ - ansible_facts['service_mgr'] == 'systemd'
- name: include_tasks systemd.yml
include_tasks: systemd.yml
- name: start the monitor service
systemd:
- name: ceph-mon@{{ monitor_name if not containerized_deployment | bool else ansible_hostname }}
+ name: ceph-mon@{{ monitor_name if not containerized_deployment | bool else ansible_facts['hostname'] }}
state: started
enabled: yes
masked: no
-v /var/run/ceph:/var/run/ceph:z \
-v /etc/localtime:/etc/localtime:ro \
-v /var/log/ceph:/var/log/ceph:z \
-{% if ansible_distribution == 'RedHat' -%}
+{% if ansible_facts['distribution'] == 'RedHat' -%}
-v /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:z \
{% endif -%}
{% if mon_docker_privileged | bool -%}
ceph_nfs_enable_service: true
# ceph-nfs systemd service uses ansible's hostname as an instance id,
-# so service name is ceph-nfs@{{ ansible_hostname }}, this is not
+# so service name is ceph-nfs@{{ ansible_facts['hostname'] }}, this is not
# ideal when ceph-nfs is managed by pacemaker across multiple hosts - in
# such a case it's better to use a constant instance id instead, which
# can be set via 'ceph_nfs_service_suffix'
# they must be configured.
#ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
#ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
-rgw_client_name: client.rgw.{{ ansible_hostname }}
+rgw_client_name: client.rgw.{{ ansible_facts['hostname'] }}
###################
# CONFIG OVERRIDE #
---
- name: set_fact container_exec_cmd_nfs
set_fact:
- container_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+ container_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
when: containerized_deployment | bool
- name: create rgw nfs user "{{ ceph_nfs_rgw_user }}"
state: present
register: result
until: result is succeeded
- when: ansible_distribution_major_version == '7'
+ when: ansible_facts['distribution_major_version'] == '7'
- name: install nfs-ganesha-selinux and python3-policycoreutils on RHEL 8
package:
state: present
register: result
until: result is succeeded
- when: ansible_distribution_major_version == '8'
+ when: ansible_facts['distribution_major_version'] == '8'
- name: add ganesha_t to permissive domain
selinux_permissive:
# global/common requirement
- name: stop nfs server service
systemd:
- name: "{{ 'nfs-server' if ansible_os_family == 'RedHat' else 'nfsserver' if ansible_os_family == 'Suse' else 'nfs-kernel-server' if ansible_os_family == 'Debian' }}"
+ name: "{{ 'nfs-server' if ansible_facts['os_family'] == 'RedHat' else 'nfsserver' if ansible_facts['os_family'] == 'Suse' else 'nfs-kernel-server' if ansible_facts['os_family'] == 'Debian' }}"
state: stopped
enabled: no
failed_when: false
import_tasks: ganesha_selinux_fix.yml
when:
- not containerized_deployment | bool
- - ansible_os_family == 'RedHat'
+ - ansible_facts['os_family'] == 'RedHat'
- name: nfs with external ceph cluster task related
when:
mode: "0755"
with_items:
- "{{ ceph_nfs_ceph_user }}"
- - "{{ ansible_hostname }}"
+ - "{{ ansible_facts['hostname'] }}"
- name: set_fact rgw_client_name
set_fact:
group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
with_nested:
- "{{ hostvars[groups['_filtered_clients'][0]]['slurp_client_keys']['results'] | default([]) }}"
- - ['/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ceph_nfs_ceph_user }}/keyring', '/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/keyring']
+ - ['/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ceph_nfs_ceph_user }}/keyring', "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring"]
when:
- not item.0.get('skipped', False)
- item.0.item.name == 'client.' + ceph_nfs_ceph_user or item.0.item.name == rgw_client_name
block:
- name: set_fact container_exec_cmd
set_fact:
- container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_hostname'] }}"
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}"
with_items: "{{ groups.get(mon_group_name, []) }}"
delegate_to: "{{ item }}"
delegate_facts: true
---
- name: include red hat based system related tasks
include_tasks: pre_requisite_non_container_red_hat.yml
- when: ansible_os_family == 'RedHat'
+ when: ansible_facts['os_family'] == 'RedHat'
- name: include debian based system related tasks
include_tasks: pre_requisite_non_container_debian.yml
- when: ansible_os_family == 'Debian'
+ when: ansible_facts['os_family'] == 'Debian'
- name: install nfs rgw/cephfs gateway - SUSE/openSUSE
zypper:
when:
- (ceph_origin == 'repository' or ceph_origin == 'distro')
- ceph_repository != 'rhcs'
- - ansible_os_family == 'Suse'
+ - ansible_facts['os_family'] == 'Suse'
- item.install | bool
register: result
until: result is succeeded
with_items:
- { name: "/var/lib/ceph/bootstrap-rgw", create: "{{ nfs_obj_gw }}" }
- { name: "/var/lib/ceph/radosgw", create: "{{ nfs_obj_gw }}" }
- - { name: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}", create: "{{ nfs_obj_gw }}" }
+ - { name: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}", create: "{{ nfs_obj_gw }}" }
- { name: "{{ rbd_client_admin_socket_path }}", create: "{{ nfs_obj_gw }}" }
- { name: "/var/log/ceph", create: true }
- { name: "/var/log/ganesha", create: true, owner: root, group: root }
block:
- name: create rados gateway keyring
ceph_key:
- name: "client.rgw.{{ ansible_hostname }}"
+ name: "client.rgw.{{ ansible_facts['hostname'] }}"
cluster: "{{ cluster }}"
user: client.bootstrap-rgw
user_key: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring"
caps:
mon: "allow rw"
osd: "allow rwx"
- dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/keyring"
+ dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring"
import_key: false
owner: ceph
group: ceph
block:
- name: add nfs-ganesha stable repository
apt_repository:
- repo: "deb {{ nfs_ganesha_stable_deb_repo }} {{ ceph_stable_distro_source | default(ansible_distribution_release) }} main"
+ repo: "deb {{ nfs_ganesha_stable_deb_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main"
state: present
update_cache: no
register: add_ganesha_apt_repo
block:
- name: fetch nfs-ganesha development repository
uri:
- url: https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_distribution | lower }}/{{ ansible_distribution_release }}/flavors/{{ nfs_ganesha_flavor }}/repo
+ url: https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_release'] }}/flavors/{{ nfs_ganesha_flavor }}/repo
return_content: yes
register: nfs_ganesha_dev_apt_repo
block:
- name: add nfs-ganesha dev repo
get_url:
- url: 'https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_distribution | lower }}/{{ ansible_distribution_major_version }}/flavors/{{ nfs_ganesha_flavor }}/repo'
+ url: "https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_major_version'] }}/flavors/{{ nfs_ganesha_flavor }}/repo"
dest: /etc/yum.repos.d/nfs-ganesha-dev.repo
force: true
when:
- name: set_fact container_exec_cmd_nfs - internal
set_fact:
- exec_cmd_nfs: "{{ container_binary + ' exec ceph-mon-' + hostvars[groups[mon_group_name][0]]['ansible_hostname'] if containerized_deployment | bool else '' }} rados"
+ exec_cmd_nfs: "{{ container_binary + ' exec ceph-mon-' + hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] if containerized_deployment | bool else '' }} rados"
delegate_node: "{{ groups[mon_group_name][0] }}"
when: groups.get(mon_group_name, []) | length > 0
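# Facts gathered on other hosts follow the same rule: they are reached via
# hostvars[host]['ansible_facts'] rather than hostvars[host]['ansible_hostname'].
# A quick way to verify the lookup (this debug task is an illustrative sketch):
# - name: show the first monitor's hostname fact
#   debug:
#     msg: "{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"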
- name: systemd start nfs container
systemd:
- name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_hostname) }}
+ name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}
state: started
enabled: yes
masked: no
ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-nfs-%i
{% endif %}
ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-nfs-%i
-ExecStartPre={{ '/bin/mkdir' if ansible_os_family == 'Debian' else '/usr/bin/mkdir' }} -p /etc/ceph /etc/ganesha /var/lib/nfs/ganesha /var/log/ganesha
+ExecStartPre={{ '/bin/mkdir' if ansible_facts['os_family'] == 'Debian' else '/usr/bin/mkdir' }} -p /etc/ceph /etc/ganesha /var/lib/nfs/ganesha /var/log/ganesha
ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
{% if container_binary == 'podman' %}
-d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
-e CEPH_DAEMON=NFS \
-e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
{{ ceph_nfs_docker_extra_env }} \
- --name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_hostname) }} \
+ --name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }} \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
{% if container_binary == 'podman' %}
ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_osd_docker_extra_env' variable.
-ceph_osd_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+ceph_osd_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
ceph_osd_docker_cpu_limit: 4
# The next two variables are undefined, and thus, unused by default.
- name: set_fact container_exec_cmd
set_fact:
- container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_hostname'] }}"
+ container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}"
with_items: "{{ groups.get(mon_group_name, []) }}"
delegate_to: "{{ item }}"
delegate_facts: true
until: result is succeeded
when:
- not containerized_deployment | bool
- - ansible_os_family != 'ClearLinux'
+ - ansible_facts['os_family'] != 'ClearLinux'
- name: install numactl when needed
package:
path: "/etc/systemd/system/ceph-osd@.service.d/"
when:
- ceph_osd_systemd_overrides is defined
- - ansible_service_mgr == 'systemd'
+ - ansible_facts['service_mgr'] == 'systemd'
- name: add ceph-osd systemd service overrides
config_template:
config_type: "ini"
when:
- ceph_osd_systemd_overrides is defined
- - ansible_service_mgr == 'systemd'
+ - ansible_facts['service_mgr'] == 'systemd'
- name: ensure "/var/lib/ceph/osd/{{ cluster }}-{{ item }}" is present
file:
- name: debian based systems tasks
when:
- osd_objectstore == 'filestore'
- - ansible_os_family == "Debian"
+ - ansible_facts['os_family'] == "Debian"
block:
- name: disable osd directory parsing by updatedb
command: updatedb -e /var/lib/ceph
- name: set_fact vm_min_free_kbytes
set_fact:
- vm_min_free_kbytes: "{{ 4194303 if ansible_memtotal_mb >= 49152 else default_vm_min_free_kbytes.content | b64decode | trim }}"
+ vm_min_free_kbytes: "{{ 4194303 if ansible_facts['memtotal_mb'] >= 49152 else default_vm_min_free_kbytes.content | b64decode | trim }}"
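# Intent of the expression above: hosts with at least 48 GiB of RAM
# (49152 MB) get a fixed vm.min_free_kbytes of roughly 4 GiB (4194303 kB);
# smaller hosts keep the kernel default captured earlier in the registered
# default_vm_min_free_kbytes result.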
- name: apply operating system tuning
sysctl:
{% else %}
After=network.target
{% endif %}
-{% set cpu_limit = ansible_processor_vcpus|int if ceph_osd_docker_cpu_limit|int > ansible_processor_vcpus|int else ceph_osd_docker_cpu_limit|int %}
+{% set cpu_limit = ansible_facts['processor_vcpus']|int if ceph_osd_docker_cpu_limit|int > ansible_facts['processor_vcpus']|int else ceph_osd_docker_cpu_limit|int %}
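# The {% set %} above clamps the configured CPU limit to the vCPUs actually
# present. An equivalent form using Jinja's min filter (illustrative sketch,
# not part of this change):
# {% set cpu_limit = [ansible_facts['processor_vcpus']|int, ceph_osd_docker_cpu_limit|int] | min %}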
[Service]
EnvironmentFile=-/etc/environment
-v /var/run/ceph:/var/run/ceph:z \
-v /var/run/udev/:/var/run/udev/ \
-v /var/log/ceph:/var/log/ceph:z \
- {% if ansible_distribution == 'Ubuntu' -%}
+ {% if ansible_facts['distribution'] == 'Ubuntu' -%}
--security-opt apparmor:unconfined \
{% endif -%}
{{ container_env_args }} \
--cluster.peer={{ peer }}:{{ alertmanager_cluster_port }} \
{% endfor %}
--storage.path=/alertmanager \
- --web.external-url=http://{{ ansible_fqdn }}:{{ alertmanager_port }}/ \
+ --web.external-url=http://{{ ansible_facts['fqdn'] }}:{{ alertmanager_port }}/ \
--web.listen-address={{ grafana_server_addr }}:{{ alertmanager_port }}
{% if container_binary == 'podman' %}
ExecStop=/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
- name: 'ceph-dashboard'
webhook_configs:
{% for host in groups['mgrs'] | default(groups['mons']) %}
- - url: '{{ dashboard_protocol }}://{{ hostvars[host]['ansible_fqdn'] }}:{{ dashboard_port }}/api/prometheus_receiver'
+ - url: '{{ dashboard_protocol }}://{{ hostvars[host]['ansible_facts']['fqdn'] }}:{{ dashboard_port }}/api/prometheus_receiver'
{% endfor %}
{{ prometheus_container_image }} \
--config.file=/etc/prometheus/prometheus.yml \
--storage.tsdb.path=/prometheus \
- --web.external-url=http://{{ ansible_fqdn }}:{{ prometheus_port }}/ \
+ --web.external-url=http://{{ ansible_facts['fqdn'] }}:{{ prometheus_port }}/ \
--web.listen-address={{ grafana_server_addr }}:{{ prometheus_port }}
{% if container_binary == 'podman' %}
ExecStop=/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
{% for host in (groups['all'] | difference(groups[monitoring_group_name] | union(groups.get(client_group_name, [])))) %}
- targets: ['{{ host }}:{{ node_exporter_port }}']
labels:
- instance: "{{ hostvars[host]['ansible_nodename'] }}"
+ instance: "{{ hostvars[host]['ansible_facts']['nodename'] }}"
{% endfor %}
- job_name: 'grafana'
static_configs:
{% for host in groups[monitoring_group_name] %}
- targets: ['{{ host }}:{{ node_exporter_port }}']
labels:
- instance: "{{ hostvars[host]['ansible_nodename'] }}"
+ instance: "{{ hostvars[host]['ansible_facts']['nodename'] }}"
{% endfor %}
{% if iscsi_gw_group_name in groups %}
- job_name: 'iscsi-gws'
{% for host in groups[iscsi_gw_group_name] %}
- targets: ['{{ host }}:9287']
labels:
- instance: "{{ hostvars[host]['ansible_nodename'] }}"
+ instance: "{{ hostvars[host]['ansible_facts']['nodename'] }}"
{% endfor %}
{% endif %}
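# For a host "mon0" whose nodename fact is "mon0.example.com" and with
# node_exporter_port 9100 (all values illustrative), the loops above render:
#   - targets: ['mon0:9100']
#     labels:
#       instance: "mon0.example.com"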
alerting:
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_rbd_mirror_docker_extra_env' variable.
-ceph_rbd_mirror_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+ceph_rbd_mirror_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
ceph_rbd_mirror_docker_cpu_limit: 1
ceph_rbd_mirror_docker_extra_env:
- name: create rbd-mirror keyring
ceph_key:
- name: "client.rbd-mirror.{{ ansible_hostname }}"
+ name: "client.rbd-mirror.{{ ansible_facts['hostname'] }}"
cluster: "{{ cluster }}"
user: client.bootstrap-rbd-mirror
user_key: "/var/lib/ceph/bootstrap-rbd-mirror/{{ cluster }}.keyring"
caps:
mon: "profile rbd-mirror"
osd: "profile rbd"
- dest: "/etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring"
+ dest: "/etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_facts['hostname'] }}.keyring"
import_key: false
owner: ceph
group: ceph
---
- name: enable mirroring on the pool
- command: "{{ container_exec_cmd | default('') }} rbd --cluster {{ cluster }} --keyring /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring --name client.rbd-mirror.{{ ansible_hostname }} mirror pool enable {{ ceph_rbd_mirror_pool }} {{ ceph_rbd_mirror_mode }}"
+ command: "{{ container_exec_cmd | default('') }} rbd --cluster {{ cluster }} --keyring /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_facts['hostname'] }}.keyring --name client.rbd-mirror.{{ ansible_facts['hostname'] }} mirror pool enable {{ ceph_rbd_mirror_pool }} {{ ceph_rbd_mirror_mode }}"
changed_when: false
- name: list mirroring peer
- command: "{{ container_exec_cmd | default('') }} rbd --cluster {{ cluster }} --keyring /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring --name client.rbd-mirror.{{ ansible_hostname }} mirror pool info {{ ceph_rbd_mirror_pool }}"
+ command: "{{ container_exec_cmd | default('') }} rbd --cluster {{ cluster }} --keyring /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_facts['hostname'] }}.keyring --name client.rbd-mirror.{{ ansible_facts['hostname'] }} mirror pool info {{ ceph_rbd_mirror_pool }}"
changed_when: false
register: mirror_peer
- name: add a mirroring peer
- command: "{{ container_exec_cmd | default('') }} rbd --cluster {{ cluster }} --keyring /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring --name client.rbd-mirror.{{ ansible_hostname }} mirror pool peer add {{ ceph_rbd_mirror_pool }} {{ ceph_rbd_mirror_remote_user }}@{{ ceph_rbd_mirror_remote_cluster }}"
+ command: "{{ container_exec_cmd | default('') }} rbd --cluster {{ cluster }} --keyring /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_facts['hostname'] }}.keyring --name client.rbd-mirror.{{ ansible_facts['hostname'] }} mirror pool peer add {{ ceph_rbd_mirror_pool }} {{ ceph_rbd_mirror_remote_user }}@{{ ceph_rbd_mirror_remote_cluster }}"
changed_when: false
when: ceph_rbd_mirror_remote_user not in mirror_peer.stdout
block:
- name: set_fact container_exec_cmd
set_fact:
- container_exec_cmd: "{{ container_binary }} exec ceph-rbd-mirror-{{ ansible_hostname }}"
+ container_exec_cmd: "{{ container_binary }} exec ceph-rbd-mirror-{{ ansible_facts['hostname'] }}"
- name: include start_container_rbd_mirror.yml
include_tasks: start_container_rbd_mirror.yml
- name: systemd start rbd mirror container
systemd:
- name: ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}
+ name: ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}
state: started
enabled: yes
masked: no
path: "/etc/systemd/system/ceph-rbd-mirror@.service.d/"
when:
- ceph_rbd_mirror_systemd_overrides is defined
- - ansible_service_mgr == 'systemd'
+ - ansible_facts['service_mgr'] == 'systemd'
- name: add ceph-rbd-mirror systemd service overrides
config_template:
config_type: "ini"
when:
- ceph_rbd_mirror_systemd_overrides is defined
- - ansible_service_mgr == 'systemd'
+ - ansible_facts['service_mgr'] == 'systemd'
- name: stop and remove the generic rbd-mirror service instance
service:
- name: start and add the rbd-mirror service instance
service:
- name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
+ name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
state: started
enabled: yes
masked: no
EnvironmentFile=-/etc/environment
{% if container_binary == 'podman' %}
ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
-ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-rbd-mirror-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-rbd-mirror-{{ ansible_facts['hostname'] }}
{% else %}
-ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rbd-mirror-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rbd-mirror-{{ ansible_facts['hostname'] }}
{% endif %}
-ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rbd-mirror-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rbd-mirror-{{ ansible_facts['hostname'] }}
ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
{% if container_binary == 'podman' %}
-d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
-e CLUSTER={{ cluster }} \
-e CEPH_DAEMON=RBD_MIRROR \
-e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
- --name=ceph-rbd-mirror-{{ ansible_hostname }} \
+ --name=ceph-rbd-mirror-{{ ansible_facts['hostname'] }} \
{{ ceph_rbd_mirror_docker_extra_env }} \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
{% if container_binary == 'podman' %}
ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
{% else %}
-ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rbd-mirror-{{ ansible_hostname }}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rbd-mirror-{{ ansible_facts['hostname'] }}
{% endif %}
KillMode=none
Restart=always
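# Pattern note for the unit above: under podman the container starts detached
# (-d) with --conmon-pidfile/--cidfile so systemd can supervise it, and
# ExecStop removes it through the recorded cidfile; under docker the
# ExecStartPre/ExecStopPost stop-and-rm lines perform the same cleanup by name.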
option httpchk HEAD /
{% for host in groups[rgw_group_name] %}
{% for instance in hostvars[host]['rgw_instances'] %}
- server {{ 'server-' + hostvars[host]['ansible_hostname'] + '-' + instance['instance_name'] }} {{ instance['radosgw_address'] }}:{{ instance['radosgw_frontend_port'] }} weight 100 check
+ server {{ 'server-' + hostvars[host]['ansible_facts']['hostname'] + '-' + instance['instance_name'] }} {{ instance['radosgw_address'] }}:{{ instance['radosgw_frontend_port'] }} weight 100 check
{% endfor %}
{% endfor %}
---
- name: restart rgw
service:
- name: "ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}"
+ name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: restarted
with_items: "{{ rgw_instances }}"
state: present
register: result
until: result is succeeded
- when: ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf'
+ when: ansible_facts['pkg_mgr'] == 'yum' or ansible_facts['pkg_mgr'] == 'dnf'
- name: install libnss3-tools on debian
package:
state: present
register: result
until: result is succeeded
- when: ansible_pkg_mgr == 'apt'
+ when: ansible_facts['pkg_mgr'] == 'apt'
- name: create nss directory for keystone certificates
file:
---
- name: create rgw keyrings
ceph_key:
- name: "client.rgw.{{ ansible_hostname }}.{{ item.instance_name }}"
+ name: "client.rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
cluster: "{{ cluster }}"
user: "client.bootstrap-rgw"
user_key: /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
- dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}/keyring"
+ dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}/keyring"
caps:
osd: 'allow rwx'
mon: 'allow rw'
- name: systemd start rgw container
systemd:
- name: ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}
+ name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
state: started
enabled: yes
masked: no
- name: start rgw instance
service:
- name: ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}
+ name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
state: started
enabled: yes
masked: no
{% else %}
After=network.target
{% endif %}
-{% set cpu_limit = ansible_processor_vcpus|int if ceph_rgw_docker_cpu_limit|int > ansible_processor_vcpus|int else ceph_rgw_docker_cpu_limit|int %}
+{% set cpu_limit = ansible_facts['processor_vcpus']|int if ceph_rgw_docker_cpu_limit|int > ansible_facts['processor_vcpus']|int else ceph_rgw_docker_cpu_limit|int %}
[Service]
EnvironmentFile=/var/lib/ceph/radosgw/{{ cluster }}-%i/EnvironmentFile
{% if container_binary == 'podman' %}
ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
-ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-rgw-{{ ansible_hostname }}-${INST_NAME}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME}
{% else %}
-ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_hostname }}-${INST_NAME}
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME}
{% endif %}
-ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rgw-{{ ansible_hostname }}-${INST_NAME}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME}
ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
{% if container_binary == 'podman' %}
-d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
-v /var/run/ceph:/var/run/ceph:z \
-v /etc/localtime:/etc/localtime:ro \
-v /var/log/ceph:/var/log/ceph:z \
- {% if ansible_distribution == 'RedHat' -%}
+ {% if ansible_facts['distribution'] == 'RedHat' -%}
-v /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:z \
{% endif -%}
{% if radosgw_frontend_ssl_certificate -%}
{% endif -%}
-e CEPH_DAEMON=RGW \
-e CLUSTER={{ cluster }} \
- -e RGW_NAME={{ ansible_hostname }}.${INST_NAME} \
+ -e RGW_NAME={{ ansible_facts['hostname'] }}.${INST_NAME} \
-e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
- --name=ceph-rgw-{{ ansible_hostname }}-${INST_NAME} \
+ --name=ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME} \
{{ ceph_rgw_docker_extra_env }} \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
{% if container_binary == 'podman' %}
ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
{% else %}
-ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_hostname }}-${INST_NAME}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME}
{% endif %}
KillMode=none
Restart=always
- name: "fail if {{ monitor_interface }} does not exist on {{ inventory_hostname }}"
fail:
msg: "{{ monitor_interface }} does not exist on {{ inventory_hostname }}"
- when: monitor_interface not in ansible_interfaces
+ when: monitor_interface not in ansible_facts['interfaces']
- name: "fail if {{ monitor_interface }} is not active on {{ inventory_hostname }}"
fail:
msg: "{{ monitor_interface }} is not active on {{ inventory_hostname }}"
- when: not hostvars[inventory_hostname]['ansible_' + (monitor_interface | replace('-', '_'))]['active']
+ when: not hostvars[inventory_hostname]['ansible_facts'][(monitor_interface | replace('-', '_'))]['active']
- name: "fail if {{ monitor_interface }} does not have any ip v4 address on {{ inventory_hostname }}"
fail:
msg: "{{ monitor_interface }} does not have any IPv4 address on {{ inventory_hostname }}"
when:
- ip_version == "ipv4"
- - hostvars[inventory_hostname]['ansible_' + (monitor_interface | replace('-', '_'))]['ipv4'] is not defined
+ - hostvars[inventory_hostname]['ansible_facts'][(monitor_interface | replace('-', '_'))]['ipv4'] is not defined
- name: "fail if {{ monitor_interface }} does not have any ip v6 address on {{ inventory_hostname }}"
fail:
msg: "{{ monitor_interface }} does not have any IPv6 address on {{ inventory_hostname }}"
when:
- ip_version == "ipv6"
- - hostvars[inventory_hostname]['ansible_' + (monitor_interface | replace('-', '_'))]['ipv6'] is not defined
+ - hostvars[inventory_hostname]['ansible_facts'][(monitor_interface | replace('-', '_'))]['ipv6'] is not defined
- name: "fail if {{ radosgw_interface }} does not exist on {{ inventory_hostname }}"
fail:
msg: "{{ radosgw_interface }} does not exist on {{ inventory_hostname }}"
- when: radosgw_interface not in ansible_interfaces
+ when: radosgw_interface not in ansible_facts['interfaces']
- name: "fail if {{ radosgw_interface }} is not active on {{ inventory_hostname }}"
fail:
msg: "{{ radosgw_interface }} is not active on {{ inventory_hostname }}"
- when: hostvars[inventory_hostname]['ansible_' + (radosgw_interface | replace('-', '_'))]['active'] == "false"
+ when: not hostvars[inventory_hostname]['ansible_facts'][(radosgw_interface | replace('-', '_'))]['active']
- name: "fail if {{ radosgw_interface }} does not have any ip v4 address on {{ inventory_hostname }}"
fail:
msg: "{{ radosgw_interface }} does not have any IPv4 address on {{ inventory_hostname }}"
when:
- ip_version == "ipv4"
- - hostvars[inventory_hostname]['ansible_' + (radosgw_interface | replace('-', '_'))]['ipv4'] is not defined
+ - hostvars[inventory_hostname]['ansible_facts'][(radosgw_interface | replace('-', '_'))]['ipv4'] is not defined
- name: "fail if {{ radosgw_interface }} does not have any ip v6 address on {{ inventory_hostname }}"
fail:
msg: "{{ radosgw_interface }} does not have any IPv6 address on {{ inventory_hostname }}"
when:
- ip_version == "ipv6"
- - hostvars[inventory_hostname]['ansible_' + (radosgw_interface | replace('-', '_'))]['ipv6'] is not defined
+ - hostvars[inventory_hostname]['ansible_facts'][(radosgw_interface | replace('-', '_'))]['ipv6'] is not defined
- name: "fail if {{ inventory_hostname }} does not have any {{ ip_version }} address on {{ monitor_address_block }}"
fail:
msg: "{{ inventory_hostname }} does not have any {{ ip_version }} address on {{ monitor_address_block }}"
- when: hostvars[inventory_hostname]['ansible_all_' + ip_version + '_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['monitor_address_block'].split(',')) | length == 0
+ when: hostvars[inventory_hostname]['ansible_facts']['all_' + ip_version + '_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['monitor_address_block'].split(',')) | length == 0
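# Because the interface name is only known at runtime, the new form indexes
# ansible_facts with a computed key instead of concatenating a variable name.
# A hedged way to inspect such a lookup ('eth0' is an assumed interface name):
# - name: inspect a per-interface fact
#   debug:
#     var: ansible_facts['eth0']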
- name: fail on unsupported distribution for iscsi gateways
fail:
msg: "iSCSI gateways can only be deployed on Red Hat Enterprise Linux, CentOS or Fedora"
- when: ansible_distribution not in ['RedHat', 'CentOS', 'Fedora']
+ when: ansible_facts['distribution'] not in ['RedHat', 'CentOS', 'Fedora']
- name: make sure gateway_ip_list is configured
fail:
- " '' in client_connections | selectattr('status', 'match', 'present') | map(attribute='chap') | list"
- name: fail on unsupported distribution version for iscsi gateways
- command: 'grep -q {{ item }}=m {% if is_atomic|bool %}/usr/lib/ostree-boot{% else %}/boot{% endif %}/config-{{ ansible_kernel }}'
+ command: "grep -q {{ item }}=m {% if is_atomic|bool %}/usr/lib/ostree-boot{% else %}/boot{% endif %}/config-{{ ansible_facts['kernel'] }}"
register: iscsi_kernel
changed_when: false
failed_when: iscsi_kernel.rc != 0
- CONFIG_TARGET_CORE
- CONFIG_TCM_USER2
- CONFIG_ISCSI_TARGET
- when: ansible_distribution in ['RedHat', 'CentOS']
+ when: ansible_facts['distribution'] in ['RedHat', 'CentOS']
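# The grep above asserts that each iSCSI-related kernel option was built as a
# module (=m) in the running kernel's config; on Atomic hosts the config file
# is read from /usr/lib/ostree-boot instead of /boot.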
msg: "ceph-nfs packages are not available from openSUSE Leap 15.x repositories (ceph_origin = 'distro')"
when:
- ceph_origin == 'distro'
- - ansible_distribution == 'openSUSE Leap'
+ - ansible_facts['distribution'] == 'openSUSE Leap'
- name: fail on unsupported system
fail:
- msg: "System not supported {{ ansible_system }}"
- when: ansible_system not in ['Linux']
+ msg: "System not supported {{ ansible_facts['system'] }}"
+ when: ansible_facts['system'] not in ['Linux']
- name: fail on unsupported architecture
fail:
- msg: "Architecture not supported {{ ansible_architecture }}"
- when: ansible_architecture not in ['x86_64', 'ppc64le', 'armv7l', 'aarch64']
+ msg: "Architecture not supported {{ ansible_facts['architecture'] }}"
+ when: ansible_facts['architecture'] not in ['x86_64', 'ppc64le', 'armv7l', 'aarch64']
- name: fail on unsupported distribution
fail:
- msg: "Distribution not supported {{ ansible_os_family }}"
- when: ansible_os_family not in ['Debian', 'RedHat', 'ClearLinux', 'Suse']
+ msg: "Distribution not supported {{ ansible_facts['os_family'] }}"
+ when: ansible_facts['os_family'] not in ['Debian', 'RedHat', 'ClearLinux', 'Suse']
- name: fail on unsupported CentOS release
fail:
- msg: "CentOS release {{ ansible_distribution_major_version }} not supported with dashboard"
+ msg: "CentOS release {{ ansible_facts['distribution_major_version'] }} not supported with dashboard"
when:
- - ansible_distribution == 'CentOS'
- - ansible_distribution_major_version | int == 7
+ - ansible_facts['distribution'] == 'CentOS'
+ - ansible_facts['distribution_major_version'] | int == 7
- not containerized_deployment | bool
- dashboard_enabled | bool
- name: red hat based systems tasks
when:
- ceph_repository == 'rhcs'
- - ansible_distribution == 'RedHat'
+ - ansible_facts['distribution'] == 'RedHat'
block:
- name: fail on unsupported distribution for red hat ceph storage
fail:
- msg: "Distribution not supported {{ ansible_distribution_version }} by Red Hat Ceph Storage, only RHEL >= 8.2"
- when: ansible_distribution_version is version('8.2', '<')
+ msg: "Distribution not supported {{ ansible_facts['distribution_version'] }} by Red Hat Ceph Storage, only RHEL >= 8.2"
+ when: ansible_facts['distribution_version'] is version('8.2', '<')
- name: subscription manager related tasks
when: ceph_repository_type == 'cdn'
- name: fail on unsupported distribution for ubuntu cloud archive
fail:
- msg: "Distribution not supported by Ubuntu Cloud Archive: {{ ansible_distribution }}"
+ msg: "Distribution not supported by Ubuntu Cloud Archive: {{ ansible_facts['distribution'] }}"
when:
- ceph_repository == 'uca'
- - ansible_distribution != 'Ubuntu'
+ - ansible_facts['distribution'] != 'Ubuntu'
- name: "fail on unsupported SUSE/openSUSE distribution (only 15.x supported)"
fail:
- msg: "Distribution not supported: {{ ansible_distribution }} {{ ansible_distribution_major_version }}"
+ msg: "Distribution not supported: {{ ansible_facts['distribution'] }} {{ ansible_facts['distribution_major_version'] }}"
when:
- - ansible_distribution == 'openSUSE Leap' or ansible_distribution == 'SUSE'
- - ansible_distribution_major_version != '15'
+ - ansible_facts['distribution'] == 'openSUSE Leap' or ansible_facts['distribution'] == 'SUSE'
+ - ansible_facts['distribution_major_version'] != '15'
- name: fail if systemd is not present
fail:
msg: "Systemd must be present"
- when: ansible_service_mgr != 'systemd'
+ when: ansible_facts['service_mgr'] != 'systemd'
with_items: '{{ lvm_volumes }}'
- name: debian based systems tasks
- when: ansible_os_family == 'Debian'
+ when: ansible_facts['os_family'] == 'Debian'
block:
- name: fail if local scenario is enabled on debian
fail:
# - ceph_origin == 'distro'
# - ceph_origin == 'repository' and ceph_repository == 'obs'
- name: SUSE/openSUSE Leap based system tasks
- when: ansible_os_family == 'Suse'
+ when: ansible_facts['os_family'] == 'Suse'
block:
- name: Check ceph_origin definition on SUSE/openSUSE Leap
fail:
msg: installation can't happen on Atomic and ntpd needs to be installed
when:
- is_atomic | default(False) | bool
- - ansible_os_family == 'RedHat'
+ - ansible_facts['os_family'] == 'RedHat'
- ntp_daemon_type == 'ntpd'
- name: make sure journal_size configured
- name: set_fact container_binary
set_fact:
- container_binary: "{{ 'podman' if (podman_binary.stat.exists and ansible_distribution == 'Fedora') or (ansible_os_family == 'RedHat' and ansible_distribution_major_version == '8') else 'docker' }}"
+ container_binary: "{{ 'podman' if (podman_binary.stat.exists and ansible_facts['distribution'] == 'Fedora') or (ansible_facts['os_family'] == 'RedHat' and ansible_facts['distribution_major_version'] == '8') else 'docker' }}"
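# The same selection could also be written with the ternary filter
# (illustrative rewrite, not part of this change):
# container_binary: "{{ ((podman_binary.stat.exists and ansible_facts['distribution'] == 'Fedora') or (ansible_facts['os_family'] == 'RedHat' and ansible_facts['distribution_major_version'] == '8')) | ternary('podman', 'docker') }}"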
- name: get ceph status from the first monitor
command: >
- {{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} -s
+ {{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} -s
register: ceph_status
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
cluster: ceph
public_network: "192.168.55.0/24"
cluster_network: "192.168.56.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
journal_size: 100
osd_objectstore: "bluestore"
copy_admin_key: true
cluster: ceph
public_network: "192.168.53.0/24"
cluster_network: "192.168.54.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
journal_size: 100
osd_objectstore: "bluestore"
copy_admin_key: true
cluster: ceph
public_network: "192.168.67.0/24"
cluster_network: "192.168.68.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
journal_size: 100
osd_objectstore: "bluestore"
copy_admin_key: true
cluster: ceph
public_network: "192.168.65.0/24"
cluster_network: "192.168.66.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
journal_size: 100
osd_objectstore: "bluestore"
copy_admin_key: true
cluster: ceph
public_network: "192.168.71.0/24"
cluster_network: "192.168.72.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
journal_size: 100
osd_objectstore: "bluestore"
copy_admin_key: true
cluster: ceph
public_network: "192.168.69.0/24"
cluster_network: "192.168.70.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
journal_size: 100
osd_objectstore: "bluestore"
copy_admin_key: true
docker: True
containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
radosgw_num_instances: 2
ceph_mon_docker_subnet: "{{ public_network }}"
public_network: "192.168.19.0/24"
containerized_deployment: False
ceph_origin: repository
ceph_repository: community
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
ceph_mon_docker_subnet: "{{ public_network }}"
openstack_config: True
dashboard_enabled: False
docker: True
containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
ceph_mon_docker_subnet: "{{ public_network }}"
public_network: "192.168.17.0/24"
cluster_network: "192.168.18.0/24"
[mons]
mon0 monitor_address=192.168.17.10
-mon1 monitor_interface="{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
mon2 monitor_address=192.168.17.12
[mgrs]
ceph_repository: community
public_network: "192.168.1.0/24"
cluster_network: "192.168.2.0/24"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
ceph_conf_overrides:
global:
mon_allow_pool_size_one: true
[mons]
mon0 monitor_address=192.168.1.10
-mon1 monitor_interface="{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
mon2 monitor_address=192.168.1.12
[mgrs]
[mons]
mon0 monitor_address=192.168.1.10
-mon1 monitor_interface="{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
mon2 monitor_address=192.168.1.12
[mgrs]
docker: True
containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
radosgw_num_instances: 2
ceph_mon_docker_subnet: "{{ public_network }}"
public_network: "192.168.15.0/24"
containerized_deployment: False
ceph_origin: repository
ceph_repository: community
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
ceph_mon_docker_subnet: "{{ public_network }}"
public_network: "192.168.15.0/24"
cluster_network: "192.168.16.0/24"
container_binary: docker
containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
ceph_mon_docker_subnet: "{{ public_network }}"
public_network: "192.168.58.0/24"
cluster_network: "192.168.59.0/24"
docker: True
containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
radosgw_num_instances: 2
ceph_mon_docker_subnet: "{{ public_network }}"
public_network: "192.168.31.0/24"
containerized_deployment: False
ceph_origin: repository
ceph_repository: community
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
ceph_mon_docker_subnet: "{{ public_network }}"
openstack_config: True
dashboard_enabled: False
ceph_repository: community
public_network: "192.168.43.0/24"
cluster_network: "192.168.44.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
journal_size: 2048
copy_admin_key: true
containerized_deployment: true
ceph_repository: community
public_network: "192.168.41.0/24"
cluster_network: "192.168.42.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
journal_size: 2048
copy_admin_key: true
# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
cluster: ceph
public_network: "192.168.39.0/24"
cluster_network: "192.168.40.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
journal_size: 100
osd_objectstore: "bluestore"
crush_device_class: test
cluster: ceph
public_network: "192.168.39.0/24"
cluster_network: "192.168.40.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
osd_objectstore: "bluestore"
crush_device_class: test
copy_admin_key: true
cluster: ceph
public_network: "192.168.39.0/24"
cluster_network: "192.168.40.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
journal_size: 2048
osd_objectstore: "bluestore"
crush_device_class: test
cluster: ceph
public_network: "192.168.39.0/24"
cluster_network: "192.168.40.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
osd_objectstore: "bluestore"
crush_device_class: test
copy_admin_key: true
ceph_repository: community
public_network: "192.168.33.0/24"
cluster_network: "192.168.34.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
journal_size: 100
copy_admin_key: true
containerized_deployment: true
ceph_repository: community
public_network: "192.168.39.0/24"
cluster_network: "192.168.40.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
journal_size: 100
copy_admin_key: true
# test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
docker: True
containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
ceph_mon_docker_subnet: "{{ public_network }}"
public_network: "192.168.30.0/24"
cluster_network: "192.168.31.0/24"
cluster: ceph
public_network: "192.168.105.0/24"
cluster_network: "192.168.106.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
journal_size: 100
osd_objectstore: "bluestore"
copy_admin_key: true
cluster: ceph
public_network: "192.168.107.0/24"
cluster_network: "192.168.108.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
journal_size: 100
osd_objectstore: "bluestore"
copy_admin_key: true
cluster: ceph
public_network: "192.168.101.0/24"
cluster_network: "192.168.102.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
journal_size: 100
osd_objectstore: "bluestore"
copy_admin_key: true
cluster: ceph
public_network: "192.168.103.0/24"
cluster_network: "192.168.104.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
journal_size: 100
osd_objectstore: "bluestore"
copy_admin_key: true
state: present
register: result
until: result is succeeded
- when: ansible_os_family == 'RedHat'
+ when: ansible_facts['os_family'] == 'RedHat'
- name: allow insecure docker registries
lineinfile:
- name: get root mount information
set_fact:
- rootmount: "{{ ansible_mounts|json_query('[?mount==`/`]|[0]') }}"
+ rootmount: "{{ ansible_facts['mounts']|json_query('[?mount==`/`]|[0]') }}"
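# json_query requires the jmespath Python library on the controller; a
# dependency-free equivalent using core Jinja filters (illustrative sketch):
# rootmount: "{{ ansible_facts['mounts'] | selectattr('mount', 'equalto', '/') | first }}"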
# mount -o remount doesn't work on RHEL 8 for now
- name: add mount options to /
mount:
path: '{{ rootmount.mount }}'
src: '{{ rootmount.device }}'
- opts: 'noatime,nodiratime{% if ansible_os_family == "RedHat" and ansible_distribution_major_version | int < 8 %},nobarrier{% endif %}'
+ opts: "noatime,nodiratime{% if ansible_facts['os_family'] == 'RedHat' and ansible_facts['distribution_major_version'] | int < 8 %},nobarrier{% endif %}"
fstype: '{{ rootmount.fstype }}'
state: mounted
option: metalink
state: absent
when:
- - ansible_distribution == 'CentOS'
- - ansible_distribution_major_version | int == 7
+ - ansible_facts['distribution'] == 'CentOS'
+ - ansible_facts['distribution_major_version'] | int == 7
- not is_atomic | bool
- name: resize logical volume for root partition to fill remaining free space
docker: True
containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
ceph_mon_docker_subnet: "{{ public_network }}"
public_network: "192.168.79.0/24"
cluster_network: "192.168.80.0/24"
docker: True
containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
ceph_mon_docker_subnet: "{{ public_network }}"
public_network: "192.168.83.0/24"
cluster_network: "192.168.84.0/24"
docker: True
containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
ceph_mon_docker_subnet: "{{ public_network }}"
public_network: "192.168.17.0/24"
cluster_network: "192.168.18.0/24"
[mons]
mon0 monitor_address=192.168.1.10
-mon1 monitor_interface="{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
mon2 monitor_address=192.168.1.12
[osds]
[mons]
mon0 monitor_address=192.168.1.10
-mon1 monitor_interface="{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
mon2 monitor_address=192.168.1.12
[osds]
docker: True
containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
ceph_mon_docker_subnet: "{{ public_network }}"
public_network: "192.168.73.0/24"
cluster_network: "192.168.74.0/24"
public_network: "192.168.87.0/24"
cluster_network: "192.168.88.0/24"
containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
ceph_mon_docker_subnet: "{{ public_network }}"
ceph_conf_overrides:
global:
containerized_deployment: True
public_network: "192.168.91.0/24"
cluster_network: "192.168.92.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
ceph_mon_docker_subnet: "{{ public_network }}"
ceph_conf_overrides:
global:
ceph_repository: dev
public_network: "192.168.89.0/24"
cluster_network: "192.168.90.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
osd_objectstore: "bluestore"
copy_admin_key: true
ceph_conf_overrides: