# ideal when ceph-nfs is managed by pacemaker across multiple hosts - in
# such a case it's better to have a constant instance id instead, which
# can be set via 'ceph_nfs_service_suffix'
-# ceph_nfs_service_suffix: ansible_hostname
+# ceph_nfs_service_suffix: "{{ ansible_facts['hostname'] }}"
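# A hedged sketch (the value below is illustrative, not from this change):
# pinning the suffix to a fixed string keeps the ganesha instance id stable
# no matter which host pacemaker schedules the service on, e.g.:
# ceph_nfs_service_suffix: "nfs-ha"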
######################
# NFS Ganesha Config #
######################
#bluestore_wal_devices: []
#'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above.
-# Device discovery is based on the Ansible fact 'ansible_devices'
+# Device discovery is based on the Ansible fact 'ansible_facts["devices"]'
# which reports all the devices on a system. If chosen, all the disks
# found will be passed to 'ceph-volume lvm batch'. You should not worry about
# using this option, since ceph-volume has a built-in check that looks for
# empty devices.
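# Illustrative sketch (assumed values, not part of this patch): enabling auto
# discovery means leaving 'devices' unset and letting the fact data drive
# device selection, e.g.:
# osd_auto_discovery: true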
  set_fact:
    ceph_release: "{{ ceph_stable_release }}"

-- name: set_fact monitor_name ansible_hostname
+- name: set_fact monitor_name ansible_facts['hostname']
  set_fact:
    monitor_name: "{{ hostvars[item]['ansible_facts']['hostname'] }}"
  delegate_to: "{{ item }}"
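# Hedged illustration (not part of this change set): the dict lookup and the
# injected variable resolve identically by default, but only the dict form
# keeps working when 'inject_facts_as_vars = False' is set in ansible.cfg:
# - name: show both fact spellings (hypothetical debug task)
#   debug:
#     msg: "{{ ansible_facts['hostname'] }} == {{ ansible_hostname }}"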
# ideal when ceph-nfs is managed by pacemaker across multiple hosts - in
# such a case it's better to have a constant instance id instead, which
# can be set via 'ceph_nfs_service_suffix'
-# ceph_nfs_service_suffix: ansible_hostname
+# ceph_nfs_service_suffix: "{{ ansible_facts['hostname'] }}"
######################
# NFS Ganesha Config #
######################
bluestore_wal_devices: []
#'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above.
-# Device discovery is based on the Ansible fact 'ansible_devices'
+# Device discovery is based on the Ansible fact 'ansible_facts["devices"]'
# which reports all the devices on a system. If chosen, all the disks
# found will be passed to 'ceph-volume lvm batch'. You should not worry about
# using this option, since ceph-volume has a built-in check that looks for
# empty devices.
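# Hedged example (device paths are placeholders, not from this repository):
# dedicated WAL devices are listed separately from the data devices, e.g.:
# devices:
#   - /dev/sdb
#   - /dev/sdc
# bluestore_wal_devices:
#   - /dev/nvme0n1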