git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
Use ansible_facts
author Alex Schultz <aschultz@redhat.com>
Wed, 3 Mar 2021 14:43:50 +0000 (07:43 -0700)
committer Guillaume Abrioux <gabrioux@redhat.com>
Thu, 25 Mar 2021 23:16:58 +0000 (00:16 +0100)
It has come to our attention that using ansible_* vars that are
populated with INJECT_FACTS_AS_VARS=True is not very performant.  In
order to be able to support setting that to off, we need to update the
references to use ansible_facts[<thing>] instead of ansible_<thing>.

Related: ansible#73654
Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1935406
Signed-off-by: Alex Schultz <aschultz@redhat.com>
(cherry picked from commit a7f2fa73e63e69dba2e41aaac9732397eec437c9)

175 files changed:
group_vars/all.yml.sample
group_vars/iscsigws.yml.sample
group_vars/mdss.yml.sample
group_vars/mgrs.yml.sample
group_vars/mons.yml.sample
group_vars/nfss.yml.sample
group_vars/osds.yml.sample
group_vars/rbdmirrors.yml.sample
group_vars/rhcs.yml.sample
infrastructure-playbooks/filestore-to-bluestore.yml
infrastructure-playbooks/purge-cluster.yml
infrastructure-playbooks/purge-container-cluster.yml
infrastructure-playbooks/purge-iscsi-gateways.yml
infrastructure-playbooks/rolling_update.yml
infrastructure-playbooks/shrink-mds.yml
infrastructure-playbooks/shrink-mgr.yml
infrastructure-playbooks/shrink-mon.yml
infrastructure-playbooks/shrink-osd.yml
infrastructure-playbooks/shrink-rbdmirror.yml
infrastructure-playbooks/shrink-rgw.yml
infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml
infrastructure-playbooks/untested-by-ci/cluster-os-migration.yml
infrastructure-playbooks/untested-by-ci/make-osd-partitions.yml
infrastructure-playbooks/untested-by-ci/replace-osd.yml
profiles/rgw-keystone-v2
profiles/rgw-keystone-v3
profiles/rgw-radosgw-static-website
profiles/rgw-usage-log
roles/ceph-client/tasks/create_users_keys.yml
roles/ceph-common/tasks/configure_cluster_name.yml
roles/ceph-common/tasks/configure_memory_allocator.yml
roles/ceph-common/tasks/installs/configure_redhat_repository_installation.yml
roles/ceph-common/tasks/installs/debian_community_repository.yml
roles/ceph-common/tasks/installs/debian_custom_repository.yml
roles/ceph-common/tasks/installs/debian_dev_repository.yml
roles/ceph-common/tasks/installs/install_debian_packages.yml
roles/ceph-common/tasks/installs/install_redhat_packages.yml
roles/ceph-common/tasks/installs/prerequisite_rhcs_cdn_install.yml
roles/ceph-common/tasks/installs/redhat_community_repository.yml
roles/ceph-common/tasks/installs/redhat_dev_repository.yml
roles/ceph-common/tasks/main.yml
roles/ceph-config/tasks/rgw_systemd_environment_file.yml
roles/ceph-config/templates/ceph.conf.j2
roles/ceph-container-engine/tasks/pre_requisites/debian_prerequisites.yml
roles/ceph-container-engine/tasks/pre_requisites/prerequisites.yml
roles/ceph-crash/tasks/main.yml
roles/ceph-dashboard/tasks/configure_dashboard.yml
roles/ceph-dashboard/tasks/configure_dashboard_backends.yml
roles/ceph-dashboard/tasks/main.yml
roles/ceph-defaults/defaults/main.yml
roles/ceph-facts/tasks/container_binary.yml
roles/ceph-facts/tasks/facts.yml
roles/ceph-facts/tasks/grafana.yml
roles/ceph-facts/tasks/set_monitor_address.yml
roles/ceph-facts/tasks/set_radosgw_address.yml
roles/ceph-grafana/tasks/configure_grafana.yml
roles/ceph-grafana/templates/grafana.ini.j2
roles/ceph-handler/tasks/check_running_containers.yml
roles/ceph-handler/tasks/handler_crash.yml
roles/ceph-handler/templates/restart_mds_daemon.sh.j2
roles/ceph-handler/templates/restart_mgr_daemon.sh.j2
roles/ceph-handler/templates/restart_mon_daemon.sh.j2
roles/ceph-handler/templates/restart_nfs_daemon.sh.j2
roles/ceph-handler/templates/restart_rbd_mirror_daemon.sh.j2
roles/ceph-handler/templates/restart_rgw_daemon.sh.j2
roles/ceph-infra/tasks/configure_firewall.yml
roles/ceph-infra/tasks/main.yml
roles/ceph-iscsi-gw/defaults/main.yml
roles/ceph-iscsi-gw/tasks/common.yml
roles/ceph-iscsi-gw/tasks/deploy_ssl_keys.yml
roles/ceph-iscsi-gw/tasks/non-container/prerequisites.yml
roles/ceph-mds/defaults/main.yml
roles/ceph-mds/tasks/containerized.yml
roles/ceph-mds/tasks/non_containerized.yml
roles/ceph-mds/templates/ceph-mds.service.j2
roles/ceph-mgr/defaults/main.yml
roles/ceph-mgr/tasks/common.yml
roles/ceph-mgr/tasks/main.yml
roles/ceph-mgr/tasks/pre_requisite.yml
roles/ceph-mgr/tasks/start_mgr.yml
roles/ceph-mgr/templates/ceph-mgr.service.j2
roles/ceph-mon/defaults/main.yml
roles/ceph-mon/tasks/ceph_keys.yml
roles/ceph-mon/tasks/deploy_monitors.yml
roles/ceph-mon/tasks/main.yml
roles/ceph-mon/tasks/start_monitor.yml
roles/ceph-mon/templates/ceph-mon.service.j2
roles/ceph-nfs/defaults/main.yml
roles/ceph-nfs/tasks/create_rgw_nfs_user.yml
roles/ceph-nfs/tasks/ganesha_selinux_fix.yml
roles/ceph-nfs/tasks/main.yml
roles/ceph-nfs/tasks/pre_requisite_container.yml
roles/ceph-nfs/tasks/pre_requisite_non_container.yml
roles/ceph-nfs/tasks/pre_requisite_non_container_debian.yml
roles/ceph-nfs/tasks/pre_requisite_non_container_red_hat.yml
roles/ceph-nfs/tasks/start_nfs.yml
roles/ceph-nfs/templates/ceph-nfs.service.j2
roles/ceph-osd/defaults/main.yml
roles/ceph-osd/tasks/main.yml
roles/ceph-osd/tasks/start_osds.yml
roles/ceph-osd/tasks/system_tuning.yml
roles/ceph-osd/templates/ceph-osd.service.j2
roles/ceph-prometheus/templates/alertmanager.service.j2
roles/ceph-prometheus/templates/alertmanager.yml.j2
roles/ceph-prometheus/templates/prometheus.service.j2
roles/ceph-prometheus/templates/prometheus.yml.j2
roles/ceph-rbd-mirror/defaults/main.yml
roles/ceph-rbd-mirror/tasks/common.yml
roles/ceph-rbd-mirror/tasks/configure_mirroring.yml
roles/ceph-rbd-mirror/tasks/main.yml
roles/ceph-rbd-mirror/tasks/start_container_rbd_mirror.yml
roles/ceph-rbd-mirror/tasks/start_rbd_mirror.yml
roles/ceph-rbd-mirror/templates/ceph-rbd-mirror.service.j2
roles/ceph-rgw-loadbalancer/templates/haproxy.cfg.j2
roles/ceph-rgw-loadbalancer/templates/keepalived.conf.j2
roles/ceph-rgw/handlers/main.yml
roles/ceph-rgw/tasks/openstack-keystone.yml
roles/ceph-rgw/tasks/pre_requisite.yml
roles/ceph-rgw/tasks/start_docker_rgw.yml
roles/ceph-rgw/tasks/start_radosgw.yml
roles/ceph-rgw/templates/ceph-radosgw.service.j2
roles/ceph-validate/tasks/check_eth_mon.yml
roles/ceph-validate/tasks/check_eth_rgw.yml
roles/ceph-validate/tasks/check_ipaddr_mon.yml
roles/ceph-validate/tasks/check_iscsi.yml
roles/ceph-validate/tasks/check_nfs.yml
roles/ceph-validate/tasks/check_system.yml
roles/ceph-validate/tasks/main.yml
site-container.yml.sample
tests/functional/add-osds/container/group_vars/all
tests/functional/add-osds/group_vars/all
tests/functional/add-rbdmirrors/container/group_vars/all
tests/functional/add-rbdmirrors/group_vars/all
tests/functional/add-rgws/container/group_vars/all
tests/functional/add-rgws/group_vars/all
tests/functional/all-in-one/container/group_vars/all
tests/functional/all-in-one/group_vars/all
tests/functional/all_daemons/container/group_vars/all
tests/functional/all_daemons/container/hosts
tests/functional/all_daemons/container/hosts-ubuntu
tests/functional/all_daemons/container/hosts-upgrade-to-octopus
tests/functional/all_daemons/group_vars/all
tests/functional/all_daemons/hosts
tests/functional/all_daemons/hosts-switch-to-containers
tests/functional/all_daemons/hosts-upgrade-to-octopus
tests/functional/collocation/container/group_vars/all
tests/functional/collocation/group_vars/all
tests/functional/docker2podman/group_vars/all
tests/functional/external_clients/container/inventory/group_vars/all
tests/functional/external_clients/inventory/group_vars/all
tests/functional/filestore-to-bluestore/container/group_vars/all
tests/functional/filestore-to-bluestore/group_vars/all
tests/functional/lvm-auto-discovery/container/group_vars/all
tests/functional/lvm-auto-discovery/group_vars/all
tests/functional/lvm-batch/container/group_vars/all
tests/functional/lvm-batch/group_vars/all
tests/functional/lvm-osds/container/group_vars/all
tests/functional/lvm-osds/group_vars/all
tests/functional/podman/group_vars/all
tests/functional/rgw-multisite/container/group_vars/all
tests/functional/rgw-multisite/container/secondary/group_vars/all
tests/functional/rgw-multisite/group_vars/all
tests/functional/rgw-multisite/secondary/group_vars/all
tests/functional/rhcs_setup.yml
tests/functional/setup.yml
tests/functional/shrink_mds/container/group_vars/all
tests/functional/shrink_mgr/container/group_vars/all
tests/functional/shrink_mon/container/group_vars/all
tests/functional/shrink_mon/hosts
tests/functional/shrink_mon/hosts-switch-to-containers
tests/functional/shrink_mon/hosts-ubuntu
tests/functional/shrink_osd/container/group_vars/all
tests/functional/shrink_rbdmirror/container/group_vars/all
tests/functional/shrink_rgw/container/group_vars/all
tests/functional/shrink_rgw/group_vars/all

index 200e036f13e5bf5be5bf661c07fdb38ecc796f33..a72a7ef2f10d930c9917673fcff8824c745cf5d3 100644 (file)
@@ -80,7 +80,7 @@ dummy:
 
 #centos_package_dependencies:
 #  - epel-release
-#  - "{{ 'python3-libselinux' if ansible_distribution_major_version | int >= 8 else 'libselinux-python' }}"
+#  - "{{ (ansible_facts['distribution_major_version'] is version('8', '>=')) | ternary('python3-libselinux', 'libselinux-python') }}"
 
 #redhat_package_dependencies: []
 
@@ -150,7 +150,7 @@ dummy:
 # Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
 # # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
 # for more info read: https://github.com/ceph/ceph-ansible/issues/305
-#ceph_stable_distro_source: "{{ ansible_distribution_release }}"
+#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
 
 
 # REPOSITORY: RHCS VERSION RED HAT STORAGE (from 4.0)
@@ -178,7 +178,7 @@ dummy:
 #
 #ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
 #ceph_stable_openstack_release_uca: queens
-#ceph_stable_release_uca: "{{ ansible_distribution_release }}-updates/{{ ceph_stable_openstack_release_uca }}"
+#ceph_stable_release_uca: "{{ ansible_facts['distribution_release'] }}-updates/{{ ceph_stable_openstack_release_uca }}"
 
 # REPOSITORY: openSUSE OBS
 #
@@ -188,7 +188,7 @@ dummy:
 # usually has newer Ceph releases than the normal distro repository.
 #
 #
-#ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_distribution_version }}/"
+#ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_facts['distribution_version'] }}/"
 
 # REPOSITORY: DEV
 #
@@ -251,7 +251,7 @@ dummy:
 
 #ceph_conf_key_directory: /etc/ceph
 
-#ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_os_family == 'Debian' else '167' }}"
+#ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_facts['os_family'] == 'Debian' else '167' }}"
 
 # Permissions for keyring files in /etc/ceph
 #ceph_keyring_permissions: '0600'
@@ -549,7 +549,7 @@ dummy:
 #   global:
 #     foo: 1234
 #     bar: 5678
-#   "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_hostname'] }}":
+#   "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
 #     rgw_zone: zone1
 #
 #ceph_conf_overrides: {}
index 8fcfdfbb0a7f4a1cdd0fb0cdefc058d1911364fe..67c1bb26e73563acdbb8152d953e0a9764c199ae 100644 (file)
@@ -43,14 +43,14 @@ dummy:
 # These options can be passed using the 'ceph_mds_docker_extra_env' variable.
 
 # TCMU_RUNNER resource limitation
-#ceph_tcmu_runner_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+#ceph_tcmu_runner_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 #ceph_tcmu_runner_docker_cpu_limit: 1
 
 # RBD_TARGET_GW resource limitation
-#ceph_rbd_target_gw_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+#ceph_rbd_target_gw_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 #ceph_rbd_target_gw_docker_cpu_limit: 1
 
 # RBD_TARGET_API resource limitation
-#ceph_rbd_target_api_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+#ceph_rbd_target_api_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 #ceph_rbd_target_api_docker_cpu_limit: 1
 
index 2412ea730941251631d3e4cc4336465f3ad9bb31..14b1bfb8acd5e0a78a075ff98419c17bf149f1a0 100644 (file)
@@ -27,13 +27,13 @@ dummy:
 # For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
 # Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
 # These options can be passed using the 'ceph_mds_docker_extra_env' variable.
-#ceph_mds_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+#ceph_mds_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 #ceph_mds_docker_cpu_limit: 4
 
 # we currently for MDS_NAME to hostname because of a bug in ceph-docker
 # fix here: https://github.com/ceph/ceph-docker/pull/770
 # this will go away soon.
-#ceph_mds_docker_extra_env: -e MDS_NAME={{ ansible_hostname }}
+#ceph_mds_docker_extra_env: -e MDS_NAME={{ ansible_facts['hostname'] }}
 #ceph_config_keys: [] # DON'T TOUCH ME
 
 
index ab0d1116411b8a3549001b94a2e9ebf669eafc36..55a01f41d172880a48aba882c34b3ca80d77ad32 100644 (file)
@@ -43,7 +43,7 @@ dummy:
 # For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
 # Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
 # These options can be passed using the 'ceph_mgr_docker_extra_env' variable.
-#ceph_mgr_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+#ceph_mgr_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 #ceph_mgr_docker_cpu_limit: 1
 
 #ceph_mgr_docker_extra_env:
index 002d62b220b55ab692b64eb08b0d6b0ec0128360..ad59172b79a49fb70f044530ccb40b897e23676c 100644 (file)
@@ -45,7 +45,7 @@ dummy:
 # For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
 # Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
 # These options can be passed using the 'ceph_mon_docker_extra_env' variable.
-#ceph_mon_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+#ceph_mon_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 #ceph_mon_docker_cpu_limit: 1
 #ceph_mon_container_listen_port: 3300
 
index e3820bdb72f5a764ff851d768acbad031299c016..2abf4554d86dad0a2ce2d76d3122967d8be416c1 100644 (file)
@@ -25,7 +25,7 @@ dummy:
 #ceph_nfs_enable_service: true
 
 # ceph-nfs systemd service uses ansible's hostname as an instance id,
-# so service name is ceph-nfs@{{ ansible_hostname }}, this is not
+# so service name is ceph-nfs@{{ ansible_facts['hostname'] }}, this is not
 # ideal when ceph-nfs is managed by pacemaker across multiple hosts - in
 # such case it's better to have constant instance id instead which
 # can be set by 'ceph_nfs_service_suffix'
@@ -95,7 +95,7 @@ dummy:
 # they must be configered.
 #ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
 #ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
-#rgw_client_name: client.rgw.{{ ansible_hostname }}
+#rgw_client_name: client.rgw.{{ ansible_facts['hostname'] }}
 
 ###################
 # CONFIG OVERRIDE #
index 60b2bfbd21602b3029252278c2f713a369a99761..6e418a7ce695fd175aad5572e886af4cc5f0d111 100644 (file)
@@ -169,7 +169,7 @@ dummy:
 # For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
 # Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
 # These options can be passed using the 'ceph_osd_docker_extra_env' variable.
-#ceph_osd_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+#ceph_osd_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 #ceph_osd_docker_cpu_limit: 4
 
 # The next two variables are undefined, and thus, unused by default.
index 2c565fe01f3f988af0819fd4b3e7becba03f46d5..00984136168c6851a90dab3cca757ee3e6634160 100644 (file)
@@ -50,7 +50,7 @@ dummy:
 # For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
 # Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
 # These options can be passed using the 'ceph_rbd_mirror_docker_extra_env' variable.
-#ceph_rbd_mirror_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+#ceph_rbd_mirror_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 #ceph_rbd_mirror_docker_cpu_limit: 1
 
 #ceph_rbd_mirror_docker_extra_env:
index 8b52434e30d181eb621ef4aa70b5bc18773a1982..3d5f36eccfffa91d49cf1150497ffb6b8a4308f7 100644 (file)
@@ -80,7 +80,7 @@ dummy:
 
 #centos_package_dependencies:
 #  - epel-release
-#  - "{{ 'python3-libselinux' if ansible_distribution_major_version | int >= 8 else 'libselinux-python' }}"
+#  - "{{ (ansible_facts['distribution_major_version'] is version('8', '>=')) | ternary('python3-libselinux', 'libselinux-python') }}"
 
 #redhat_package_dependencies: []
 
@@ -150,7 +150,7 @@ ceph_repository: rhcs
 # Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
 # # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
 # for more info read: https://github.com/ceph/ceph-ansible/issues/305
-#ceph_stable_distro_source: "{{ ansible_distribution_release }}"
+#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
 
 
 # REPOSITORY: RHCS VERSION RED HAT STORAGE (from 4.0)
@@ -178,7 +178,7 @@ ceph_rhcs_version: 4
 #
 #ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
 #ceph_stable_openstack_release_uca: queens
-#ceph_stable_release_uca: "{{ ansible_distribution_release }}-updates/{{ ceph_stable_openstack_release_uca }}"
+#ceph_stable_release_uca: "{{ ansible_facts['distribution_release'] }}-updates/{{ ceph_stable_openstack_release_uca }}"
 
 # REPOSITORY: openSUSE OBS
 #
@@ -188,7 +188,7 @@ ceph_rhcs_version: 4
 # usually has newer Ceph releases than the normal distro repository.
 #
 #
-#ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_distribution_version }}/"
+#ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_facts['distribution_version'] }}/"
 
 # REPOSITORY: DEV
 #
@@ -251,7 +251,7 @@ ceph_iscsi_config_dev: false
 
 #ceph_conf_key_directory: /etc/ceph
 
-#ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_os_family == 'Debian' else '167' }}"
+#ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_facts['os_family'] == 'Debian' else '167' }}"
 
 # Permissions for keyring files in /etc/ceph
 #ceph_keyring_permissions: '0600'
@@ -549,7 +549,7 @@ ceph_iscsi_config_dev: false
 #   global:
 #     foo: 1234
 #     bar: 5678
-#   "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_hostname'] }}":
+#   "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
 #     rgw_zone: zone1
 #
 #ceph_conf_overrides: {}
index b3be2eaec488e80a3fa17a9d5ebbc2ece684df24..149de7a8cccca933019f93856ae53a73a4fcfb68 100644 (file)
@@ -36,7 +36,7 @@
     - name: set_fact container_run_cmd, container_exec_cmd
       set_fact:
         container_run_cmd: "{{ container_binary + ' run --rm --privileged=true --net=host --pid=host --ipc=host -v /dev:/dev -v /etc/ceph:/etc/ceph -v /var/lib/ceph:/var/lib/ceph -v /var/run:/var/run --entrypoint=' if containerized_deployment | bool else '' }}ceph-volume {{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else '' }}"
-        container_exec_cmd: "{{ container_binary + ' exec ceph-mon-' + hostvars[groups[mon_group_name][0]]['ansible_hostname'] if containerized_deployment | bool else '' }}"
+        container_exec_cmd: "{{ container_binary + ' exec ceph-mon-' + hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] if containerized_deployment | bool else '' }}"
 
     - name: get ceph osd tree data
       command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd tree -f json"
index b29dd93a94e86e00d863dcc373b9d213f48cb5cb..5bed86875deeb965d9b6a28df7d5341877d43fa6 100644 (file)
@@ -71,7 +71,7 @@
           run_once: true
 
         - name: get all nfs-ganesha mount points
-          command: grep "{{ hostvars[item]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
+          command: grep "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
           register: nfs_ganesha_mount_points
           failed_when: false
           with_items: "{{ groups[nfs_group_name] }}"
       name: nfs-ganesha
       state: stopped
     failed_when: false
-    when: ansible_service_mgr == 'systemd'
+    when: ansible_facts['service_mgr'] == 'systemd'
 
 - name: purge node-exporter
   hosts:
 
   - name: stop ceph mdss with systemd
     service:
-      name: ceph-mds@{{ ansible_hostname }}
+      name: ceph-mds@{{ ansible_facts['hostname'] }}
       state: stopped
       enabled: no
     failed_when: false
 
   - name: stop ceph mgrs with systemd
     service:
-      name: ceph-mgr@{{ ansible_hostname }}
+      name: ceph-mgr@{{ ansible_facts['hostname'] }}
       state: stopped
       enabled: no
     failed_when: false
-    when: ansible_service_mgr == 'systemd'
+    when: ansible_facts['service_mgr'] == 'systemd'
 
 - name: purge rgwloadbalancer cluster
 
 
     - name: stop ceph rgws with systemd
       service:
-        name: "ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}"
+        name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
         state: stopped
         enabled: no
       failed_when: false
 
   - name: stop ceph rbd mirror with systemd
     service:
-      name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
+      name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
       state: stopped
     failed_when: false
 
     become: false
     wait_for:
       port: 22
-      host: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
+      host: "{{ hostvars[inventory_hostname]['ansible_facts']['default_ipv4']['address'] }}"
       state: started
       delay: 10
       timeout: 500
       state: stopped
       enabled: no
     with_items: "{{ osd_ids.stdout_lines }}"
-    when: ansible_service_mgr == 'systemd'
+    when: ansible_facts['service_mgr'] == 'systemd'
 
   - name: remove ceph udev rules
     file:
 
   - name: stop ceph mons with systemd
     service:
-      name: "ceph-{{ item }}@{{ ansible_hostname }}"
+      name: "ceph-{{ item }}@{{ ansible_facts['hostname'] }}"
       state: stopped
       enabled: no
     failed_when: false
     yum:
       name: "{{ ceph_packages }}"
       state: absent
-    when: ansible_pkg_mgr == 'yum'
+    when: ansible_facts['pkg_mgr'] == 'yum'
 
   - name: purge ceph packages with dnf
     dnf:
       name: "{{ ceph_packages }}"
       state: absent
-    when: ansible_pkg_mgr == 'dnf'
+    when: ansible_facts['pkg_mgr'] == 'dnf'
 
   - name: purge ceph packages with apt
     apt:
       name: "{{ ceph_packages }}"
       state: absent
       purge: true
-    when: ansible_pkg_mgr == 'apt'
+    when: ansible_facts['pkg_mgr'] == 'apt'
 
   - name: purge remaining ceph packages with yum
     yum:
       name: "{{ ceph_remaining_packages }}"
       state: absent
     when:
-      - ansible_pkg_mgr == 'yum'
+      - ansible_facts['pkg_mgr'] == 'yum'
       - purge_all_packages | bool
 
   - name: purge remaining ceph packages with dnf
       name: "{{ ceph_remaining_packages }}"
       state: absent
     when:
-      - ansible_pkg_mgr == 'dnf'
+      - ansible_facts['pkg_mgr'] == 'dnf'
       - purge_all_packages | bool
 
   - name: purge remaining ceph packages with apt
       name: "{{ ceph_remaining_packages }}"
       state: absent
     when:
-      - ansible_pkg_mgr == 'apt'
+      - ansible_facts['pkg_mgr'] == 'apt'
       - purge_all_packages | bool
 
   - name: purge extra packages with yum
       name: "{{ extra_packages }}"
       state: absent
     when:
-      - ansible_pkg_mgr == 'yum'
+      - ansible_facts['pkg_mgr'] == 'yum'
       - purge_all_packages | bool
 
   - name: purge extra packages with dnf
       name: "{{ extra_packages }}"
       state: absent
     when:
-      - ansible_pkg_mgr == 'dnf'
+      - ansible_facts['pkg_mgr'] == 'dnf'
       - purge_all_packages | bool
 
   - name: purge extra packages with apt
       name: "{{ extra_packages }}"
       state: absent
     when:
-      - ansible_pkg_mgr == 'apt'
+      - ansible_facts['pkg_mgr'] == 'apt'
       - purge_all_packages | bool
 
   - name: remove config and any ceph socket left
 
   - name: purge dnf cache
     command: dnf clean all
-    when: ansible_pkg_mgr == 'dnf'
+    when: ansible_facts['pkg_mgr'] == 'dnf'
 
   - name: purge rpm cache in /tmp
     file:
 
   - name: clean apt
     command: apt-get clean  # noqa 303
-    when: ansible_pkg_mgr == 'apt'
+    when: ansible_facts['pkg_mgr'] == 'apt'
 
   - name: purge ceph repo file in /etc/yum.repos.d
     file:
       - ceph-dev
       - ceph_stable
       - rh_storage
-    when: ansible_os_family == 'RedHat'
+    when: ansible_facts['os_family'] == 'RedHat'
 
   - name: check for anything running ceph
     command: "ps -u ceph -U ceph"
       path: "{{ item.path }}"
       state: absent
     with_items: "{{ systemd_files.files }}"
-    when: ansible_service_mgr == 'systemd'
+    when: ansible_facts['service_mgr'] == 'systemd'
 
 
 - name: purge fetch directory
index 358cee7834880de00d889ef4e4cd2df349e90970..0935dd2ea96bae1483a05858d19f04eebc5089df 100644 (file)
@@ -55,7 +55,7 @@
           run_once: true
 
         - name: get all nfs-ganesha mount points
-          command: grep "{{ hostvars[item]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
+          command: grep "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
           register: nfs_ganesha_mount_points
           failed_when: false
           with_items: "{{ groups[nfs_group_name] }}"
 
   - name: disable ceph nfs service
     service:
-      name: "ceph-nfs@{{ ansible_hostname }}"
+      name: "ceph-nfs@{{ ansible_facts['hostname'] }}"
       state: stopped
       enabled: no
     ignore_errors: true
       path: /etc/systemd/system/ceph-nfs@.service
       state: absent
 
-  - name: remove ceph nfs directories for "{{ ansible_hostname }}"
+  - name: remove ceph nfs directories for "{{ ansible_facts['hostname'] }}"
     file:
       path: "{{ item }}"
       state: absent
 
   - name: disable ceph mds service
     service:
-      name: "ceph-mds@{{ ansible_hostname }}"
+      name: "ceph-mds@{{ ansible_facts['hostname'] }}"
       state: stopped
       enabled: no
     ignore_errors: true
 
   - name: disable ceph mgr service
     service:
-      name: "ceph-mgr@{{ ansible_hostname }}"
+      name: "ceph-mgr@{{ ansible_facts['hostname'] }}"
       state: stopped
       enabled: no
     ignore_errors: true
 
     - name: disable ceph rgw service
       service:
-        name: "ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}"
+        name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
         state: stopped
         enabled: no
       failed_when: false
 
   - name: disable ceph rbd-mirror service
     service:
-      name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
+      name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
       state: stopped
       enabled: no
     ignore_errors: true
       enabled: no
     ignore_errors: true
     with_items:
-      - "ceph-mgr@{{ ansible_hostname }}"
-      - "ceph-mon@{{ ansible_hostname }}"
+      - "ceph-mgr@{{ ansible_facts['hostname'] }}"
+      - "ceph-mon@{{ ansible_facts['hostname'] }}"
 
   - name: remove ceph mon and mgr service
     file:
   tasks:
     - name: stop ceph-crash container
       service:
-        name: "ceph-crash@{{ ansible_hostname }}"
+        name: "ceph-crash@{{ ansible_facts['hostname'] }}"
         state: stopped
         enabled: no
       failed_when: false
         state: absent
         update_cache: yes
         autoremove: yes
-      when: ansible_os_family == 'Debian'
+      when: ansible_facts['os_family'] == 'Debian'
 
     - name: red hat based systems tasks
       block:
               args:
                 warn: no
           when:
-            ansible_pkg_mgr == "yum"
+            ansible_facts['pkg_mgr'] == "yum"
 
         - name: dnf related tasks on red hat
           block:
               args:
                 warn: no
           when:
-            ansible_pkg_mgr == "dnf"
+            ansible_facts['pkg_mgr'] == "dnf"
       when:
-        ansible_os_family == 'RedHat' and
+        ansible_facts['os_family'] == 'RedHat' and
         not is_atomic
 
     - name: find any service-cid file left
   become: true
 
   tasks:
-  - name: purge ceph directories for "{{ ansible_hostname }}" and ceph socket
+  - name: purge ceph directories for "{{ ansible_facts['hostname'] }}" and ceph socket
     file:
       path: "{{ item }}"
       state: absent
index 7c2482bd25d71b924da73fd8c7a6ddb651a4edaf..ec2247d2f4679abb6e6478a92721a02cb2807c23 100644 (file)
@@ -83,7 +83,7 @@
 
         - name: set_fact container_exec_cmd
           set_fact:
-            container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
+            container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
           when: containerized_deployment | bool
 
         - name: get iscsi gateway list
index c84397099a61bd63567fea6c2b08c447e587ac89..5627b5af479649f2471eb9dad5ba842946d4870c 100644 (file)
     # after the package gets upgraded
     - name: stop ceph mon - shortname
       systemd:
-        name: ceph-mon@{{ ansible_hostname }}
+        name: ceph-mon@{{ ansible_facts['hostname'] }}
         state: stopped
         enabled: no
         masked: yes
     # after the package gets upgraded
     - name: stop ceph mon - fqdn
       systemd:
-        name: ceph-mon@{{ ansible_fqdn }}
+        name: ceph-mon@{{ ansible_facts['fqdn'] }}
         state: stopped
         enabled: no
         masked: yes
     # after ALL monitors, even when collocated
     - name: mask the mgr service
       systemd:
-        name: ceph-mgr@{{ ansible_hostname }}
+        name: ceph-mgr@{{ ansible_facts['hostname'] }}
         masked: yes
       when: inventory_hostname in groups[mgr_group_name] | default([])
             or groups[mgr_group_name] | default([]) | length == 0
 
     - name: start ceph mgr
       systemd:
-        name: ceph-mgr@{{ ansible_hostname }}
+        name: ceph-mgr@{{ ansible_facts['hostname'] }}
         state: started
         enabled: yes
       ignore_errors: True # if no mgr collocated with mons
       register: ceph_health_raw
       until:
         - ceph_health_raw.rc == 0
-        - (hostvars[inventory_hostname]['ansible_hostname'] in (ceph_health_raw.stdout | default('{}') |  from_json)["quorum_names"] or
-          hostvars[inventory_hostname]['ansible_fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
+        - (hostvars[inventory_hostname]['ansible_facts']['hostname'] in (ceph_health_raw.stdout | default('{}') |  from_json)["quorum_names"] or
+          hostvars[inventory_hostname]['ansible_facts']['fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
       retries: "{{ health_mon_check_retries }}"
       delay: "{{ health_mon_check_delay }}"
       when: not containerized_deployment | bool
 
     - name: container | waiting for the containerized monitor to join the quorum...
       command: >
-        {{ container_binary }} exec ceph-mon-{{ ansible_hostname }} ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" quorum_status --format json
+        {{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }} ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" quorum_status --format json
       register: ceph_health_raw
       until:
         - ceph_health_raw.rc == 0
-        - (hostvars[inventory_hostname]['ansible_hostname'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"] or
-          hostvars[inventory_hostname]['ansible_fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
+        - (hostvars[inventory_hostname]['ansible_facts']['hostname'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"] or
+          hostvars[inventory_hostname]['ansible_facts']['fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
       retries: "{{ health_mon_check_retries }}"
       delay: "{{ health_mon_check_delay }}"
       when: containerized_deployment | bool
       block:
         - name: stop ceph mgr
           systemd:
-            name: ceph-mgr@{{ ansible_hostname }}
+            name: ceph-mgr@{{ ansible_facts['hostname'] }}
             state: stopped
             masked: yes
 
     # or if we run a Ceph cluster before Luminous
     - name: stop ceph mgr
       systemd:
-        name: ceph-mgr@{{ ansible_hostname }}
+        name: ceph-mgr@{{ ansible_facts['hostname'] }}
         state: stopped
         enabled: no
         masked: yes
 
     - name: set_fact container_exec_cmd_osd
       set_fact:
-        container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+        container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
       when: containerized_deployment | bool
 
     - name: stop ceph osd
 
     - name: set_fact container_exec_cmd_osd
       set_fact:
-        container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
+        container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
       when: containerized_deployment | bool
 
     - name: get osd versions
               set_fact:
                 mds_active_host: "{{ [hostvars[item]['inventory_hostname']] }}"
               with_items: "{{ groups[mds_group_name] }}"
-              when: hostvars[item]['ansible_hostname'] == mds_active_name
+              when: hostvars[item]['ansible_facts']['hostname'] == mds_active_name
 
             - name: create standby_mdss group
               add_host:
 
             - name: stop standby ceph mds
               systemd:
-                name: "ceph-mds@{{ hostvars[item]['ansible_hostname'] }}"
+                name: "ceph-mds@{{ hostvars[item]['ansible_facts']['hostname'] }}"
                 state: stopped
                 enabled: no
               delegate_to: "{{ item }}"
             # somehow, having a single task doesn't work in containerized context
             - name: mask systemd units for standby ceph mds
               systemd:
-                name: "ceph-mds@{{ hostvars[item]['ansible_hostname'] }}"
+                name: "ceph-mds@{{ hostvars[item]['ansible_facts']['hostname'] }}"
                 masked: yes
               delegate_to: "{{ item }}"
               with_items: "{{ groups['standby_mdss'] }}"
 
     - name: prevent restart from the packaging
       systemd:
-        name: ceph-mds@{{ ansible_hostname }}
+        name: ceph-mds@{{ ansible_facts['hostname'] }}
         enabled: no
         masked: yes
       when: not containerized_deployment | bool
 
     - name: restart ceph mds
       systemd:
-        name: ceph-mds@{{ ansible_hostname }}
+        name: ceph-mds@{{ ansible_facts['hostname'] }}
         state: restarted
         enabled: yes
         masked: no
       when: not containerized_deployment | bool
 
     - name: restart active mds
-      command: "{{ container_binary }} stop ceph-mds-{{ ansible_hostname }}"
+      command: "{{ container_binary }} stop ceph-mds-{{ ansible_facts['hostname'] }}"
       changed_when: false
       when: containerized_deployment | bool
 
 
     - name: prevent restarts from the packaging
       systemd:
-        name: ceph-mds@{{ ansible_hostname }}
+        name: ceph-mds@{{ ansible_facts['hostname'] }}
         enabled: no
         masked: yes
       when: not containerized_deployment | bool
 
     - name: stop ceph rgw when upgrading from stable-3.2
       systemd:
-        name: ceph-radosgw@rgw.{{ ansible_hostname }}
+        name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}
         state: stopped
         enabled: no
         masked: yes
 
     - name: stop ceph rgw
       systemd:
-        name: ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}
+        name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
         state: stopped
         enabled: no
         masked: yes
   tasks:
     - name: stop ceph rbd mirror
       systemd:
-        name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
+        name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
         state: stopped
         enabled: no
         masked: yes
 
     - name: systemd stop nfs container
       systemd:
-        name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_hostname) }}
+        name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}
         state: stopped
         enabled: no
         masked: yes
         name: ceph-facts
 
     - name: container | disallow pre-nautilus OSDs and enable all new nautilus-only functionality
-      command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} osd require-osd-release nautilus"
+      command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} osd require-osd-release nautilus"
       delegate_to: "{{ groups[mon_group_name][0] }}"
       run_once: True
       when:
         - groups.get(mon_group_name, []) | length > 0
 
     - name: container | enable msgr2 protocol
-      command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} mon enable-msgr2"
+      command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} mon enable-msgr2"
       delegate_to: "{{ groups[mon_group_name][0] }}"
       run_once: True
       when:
 
     - name: set_fact container_exec_cmd_status
       set_fact:
-        container_exec_cmd_status: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+        container_exec_cmd_status: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
       when: containerized_deployment | bool
 
     - name: show ceph status
index 3441e196048a1784d51cdea1732ed4d2916156ba..afbcd0df3ffd15a860125ba69e82684053900049 100644 (file)
@@ -61,7 +61,7 @@
 
     - name: set_fact container_exec_cmd for mon0
       set_fact:
-        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
+        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
       when: containerized_deployment | bool
 
     - name: exit playbook, if can not connect to the cluster
@@ -74,7 +74,7 @@
 
     - name: set_fact mds_to_kill_hostname
       set_fact:
-        mds_to_kill_hostname: "{{ hostvars[mds_to_kill]['ansible_hostname'] }}"
+        mds_to_kill_hostname: "{{ hostvars[mds_to_kill]['ansible_facts']['hostname'] }}"
 
   tasks:
     # get rid of this as soon as "systemctl stop ceph-msd@$HOSTNAME" also
index 02de185ecdeb2ed8f2e0ef28f217db682dd1d927..f99726890ee40829ca778c90c306923bf3a83282 100644 (file)
@@ -39,7 +39,7 @@
     - name: set_fact container_exec_cmd
       when: containerized_deployment | bool
       set_fact:
-        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
+        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
 
     - name: exit playbook, if can not connect to the cluster
       command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
@@ -92,7 +92,7 @@
 
     - name: set_fact mgr_to_kill_hostname
       set_fact:
-        mgr_to_kill_hostname: "{{ hostvars[mgr_to_kill]['ansible_hostname'] }}"
+        mgr_to_kill_hostname: "{{ hostvars[mgr_to_kill]['ansible_facts']['hostname'] }}"
 
   tasks:
     - name: stop manager services and verify it
index df2ae60bd46cf1d15e70a7aff995fa74d16046c9..05d6c2be543e1483cf028d85ec5a0e2ee864f623 100644 (file)
@@ -76,7 +76,7 @@
 
     - name: "set_fact container_exec_cmd build {{ container_binary }} exec command (containerized)"
       set_fact:
-        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }}"
+        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[mon_host]['ansible_facts']['hostname'] }}"
       when: containerized_deployment | bool
 
     - name: exit playbook, if can not connect to the cluster
@@ -90,7 +90,7 @@
 
     - name: set_fact mon_to_kill_hostname
       set_fact:
-        mon_to_kill_hostname: "{{ hostvars[mon_to_kill]['ansible_hostname'] }}"
+        mon_to_kill_hostname: "{{ hostvars[mon_to_kill]['ansible_facts']['hostname'] }}"
 
     - name: stop monitor service(s)
       service:
index 9c0454db6f65f63664524eb855514843307b57d9..6dd00b3b04fbbd55e07b395643ebd4096c0d08d7 100644 (file)
@@ -65,7 +65,7 @@
   post_tasks:
     - name: set_fact container_exec_cmd build docker exec command (containerized)
       set_fact:
-        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
+        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
       when: containerized_deployment | bool
 
     - name: set_fact container_run_cmd
@@ -97,7 +97,7 @@
       with_nested:
         - "{{ groups.get(osd_group_name) }}"
         - "{{ osd_hosts }}"
-      when: hostvars[item.0]['ansible_hostname'] == item.1
+      when: hostvars[item.0]['ansible_facts']['hostname'] == item.1
 
     - name: get ceph-volume lvm list data
       command: "{{ container_run_cmd }} lvm list --format json"
index 3e65393306e666dcfac41c52fae3ed03df9e8eb7..db7cc3a213c810c32ec875fa038a5507b558039a 100644 (file)
@@ -64,7 +64,7 @@
     - name: set_fact container_exec_cmd for mon0
       when: containerized_deployment | bool
       set_fact:
-        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
+        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
 
     - name: exit playbook, if can not connect to the cluster
       command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} service dump -f json"
@@ -76,7 +76,7 @@
 
     - name: set_fact rbdmirror_to_kill_hostname
       set_fact:
-        rbdmirror_to_kill_hostname: "{{ hostvars[rbdmirror_to_kill]['ansible_hostname'] }}"
+        rbdmirror_to_kill_hostname: "{{ hostvars[rbdmirror_to_kill]['ansible_facts']['hostname'] }}"
 
     - name: set_fact rbdmirror_gids
       set_fact:
index 7739628fb393c11ad353c692ab1c2aa3bc691135..b4162897525c10ce4a0ef07786660875c7463f44 100644 (file)
@@ -66,7 +66,7 @@
 
     - name: set_fact container_exec_cmd for mon0
       set_fact:
-        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
+        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
       when: containerized_deployment | bool
 
     - name: exit playbook, if can not connect to the cluster
@@ -95,7 +95,7 @@
       set_fact:
         rgw_host: '{{ item }}'
       with_items: '{{ groups[rgw_group_name] }}'
-      when: hostvars[item]['ansible_hostname'] == rgw_to_kill.split('.')[0]
+      when: hostvars[item]['ansible_facts']['hostname'] == rgw_to_kill.split('.')[0]
 
     - name: stop rgw service
       service:
index 8b0190fa7ab2f75c40e6d32c795f94bb2799c57c..e554b4293aefcb86786cdc124a1352494f167ae2 100644 (file)
@@ -74,7 +74,7 @@
 
     - name: stop non-containerized ceph mon
       service:
-        name: "ceph-mon@{{ ansible_hostname }}"
+        name: "ceph-mon@{{ ansible_facts['hostname'] }}"
         state: stopped
         enabled: no
 
       when: ldb_files.rc == 0
 
     - name: copy mon initial keyring in /etc/ceph to satisfy fetch config task in ceph-container-common
-      command: cp /var/lib/ceph/mon/{{ cluster }}-{{ ansible_hostname }}/keyring /etc/ceph/{{ cluster }}.mon.keyring
+      command: cp /var/lib/ceph/mon/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring /etc/ceph/{{ cluster }}.mon.keyring
       args:
         creates: /etc/ceph/{{ cluster }}.mon.keyring
       changed_when: false
     - name: waiting for the monitor to join the quorum...
       command: "{{ container_binary }} run --rm  -v /etc/ceph:/etc/ceph:z --entrypoint=ceph {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} --cluster {{ cluster }} quorum_status --format json"
       register: ceph_health_raw
-      until: ansible_hostname in (ceph_health_raw.stdout | trim | from_json)["quorum_names"]
       changed_when: false
+      until: hostvars[mon_host]['ansible_facts']['hostname'] in (ceph_health_raw.stdout | from_json)["quorum_names"]
       retries: "{{ health_mon_check_retries }}"
       delay: "{{ health_mon_check_delay }}"
 
     # will not exist
     - name: stop non-containerized ceph mgr(s)
       service:
-        name: "ceph-mgr@{{ ansible_hostname }}"
+        name: "ceph-mgr@{{ ansible_facts['hostname'] }}"
         state: stopped
         enabled: no
       failed_when: false
   post_tasks:
     - name: get num_pgs
       command: >
-        {{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} pg stat --format json
+        {{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} pg stat --format json
       register: ceph_pgs
       delegate_to: "{{ groups[mon_group_name][0] }}"
       changed_when: false
 
     - name: container - waiting for clean pgs...
       command: >
-        {{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} pg stat --format json
+        {{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} pg stat --format json
       register: ceph_health_post
       until: >
         (((ceph_health_post.stdout | from_json).pg_summary.num_pg_by_state | length) > 0)
 
     - name: stop non-containerized ceph mds(s)
       service:
-        name: "ceph-mds@{{ ansible_hostname }}"
+        name: "ceph-mds@{{ ansible_facts['hostname'] }}"
         state: stopped
         enabled: no
 
   tasks:
     - name: stop non-containerized ceph rgw(s)
       service:
-        name: "ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}"
+        name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
         state: stopped
         enabled: no
       with_items: "{{ rgw_instances }}"
   pre_tasks:
     - name: stop non-containerized ceph rbd mirror(s)
       service:
-        name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
+        name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
         state: stopped
         enabled: no
 
index ab92b7351bd401ccd638b68b1ad5904d95a4573f..e7f9485b9bbcf634a4cd5647ee4ac1e4314f0362 100644 (file)
 
     - name: Check if the node has be migrated already
       stat: >
-        path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/migration_completed
+        path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/migration_completed
       register: migration_completed
       failed_when: false
 
     - name: Check for failed run
       stat: >
-        path=/var/lib/ceph/{{ ansible_hostname }}.tar
+        path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
       register: mon_archive_leftover
 
     - fail: msg="Looks like an archive is already there, please remove it!"
       when: migration_completed.stat.exists == False and mon_archive_leftover.stat.exists == True
 
     - name: Compress the store as much as possible
-      command: ceph tell mon.{{ ansible_hostname }} compact
+      command: ceph tell mon.{{ ansible_facts['hostname'] }} compact
       when: migration_completed.stat.exists == False
 
     - name: Check if sysvinit
       stat: >
-        path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/sysvinit
+        path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/sysvinit
       register: monsysvinit
       changed_when: False
 
     - name: Check if upstart
       stat: >
-        path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/upstart
+        path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/upstart
       register: monupstart
       changed_when: False
 
@@ -70,7 +70,7 @@
       service: >
         name=ceph-mon
         state=restarted
-        args=id={{ ansible_hostname }}
+        args=id={{ ansible_facts['hostname'] }}
       when: monupstart.stat.exists == True and migration_completed.stat.exists == False
 
     - name: Restart the Monitor after compaction (Sysvinit)
@@ -92,7 +92,7 @@
       service: >
         name=ceph-mon
         state=stopped
-        args=id={{ ansible_hostname }}
+        args=id={{ ansible_facts['hostname'] }}
       when: monupstart.stat.exists == True and migration_completed.stat.exists == False
 
     - name: Stop the monitor (Sysvinit)
     # NOTE (leseb): should we convert upstart to sysvinit here already?
     - name: Archive monitor stores
       shell: >
-        tar -cpvzf - --one-file-system . /etc/ceph/* | cat > {{ ansible_hostname }}.tar
+        tar -cpvzf - --one-file-system . /etc/ceph/* | cat > {{ ansible_facts['hostname'] }}.tar
         chdir=/var/lib/ceph/
-        creates={{ ansible_hostname }}.tar
+        creates={{ ansible_facts['hostname'] }}.tar
       when: migration_completed.stat.exists == False
 
     - name: Scp the Monitor store
       fetch: >
-        src=/var/lib/ceph/{{ ansible_hostname }}.tar
-        dest={{ backup_dir }}/monitors-backups/{{ ansible_hostname }}.tar
+        src=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
+        dest={{ backup_dir }}/monitors-backups/{{ ansible_facts['hostname'] }}.tar
         flat=yes
       when: migration_completed.stat.exists == False
 
 
     - name: Check if sysvinit
       stat: >
-        path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/sysvinit
+        path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/sysvinit
       register: monsysvinit
       changed_when: False
 
     - name: Check if upstart
       stat: >
-        path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/upstart
+        path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/upstart
       register: monupstart
       changed_when: False
 
       service: >
         name=ceph-mon
         state=stopped
-        args=id={{ ansible_hostname }}
+        args=id={{ ansible_facts['hostname'] }}
       when: monupstart.stat.exists == True and migration_completed.stat.exists == False
 
     - name: Make sure the monitor is stopped (Sysvinit)
     # NOTE (leseb): 'creates' was added in Ansible 1.6
     - name: Copy and unarchive the monitor store
       unarchive: >
-        src={{ backup_dir }}/monitors-backups/{{ ansible_hostname }}.tar
+        src={{ backup_dir }}/monitors-backups/{{ ansible_facts['hostname'] }}.tar
         dest=/var/lib/ceph/
         copy=yes
         mode=0600
 
     - name: Waiting for the monitor to join the quorum...
       shell: >
-        ceph -s | grep monmap | sed 's/.*quorum//' | egrep -q {{ ansible_hostname }}
+        ceph -s | grep monmap | sed 's/.*quorum//' | egrep -q {{ ansible_facts['hostname'] }}
       register: result
       until: result.rc == 0
       retries: 5
 
     - name: Done moving to the next monitor
       file: >
-        path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/migration_completed
+        path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/migration_completed
         state=touch
         owner=root
         group=root
 
     - name: Check for failed run
       stat: >
-        path=/var/lib/ceph/{{ ansible_hostname }}.tar
+        path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
       register: osd_archive_leftover
 
     - fail: msg="Looks like an archive is already there, please remove it!"
 
     - name: Archive ceph configs
       shell: >
-        tar -cpvzf - --one-file-system . /etc/ceph/ceph.conf | cat > {{ ansible_hostname }}.tar
+        tar -cpvzf - --one-file-system . /etc/ceph/ceph.conf | cat > {{ ansible_facts['hostname'] }}.tar
         chdir=/var/lib/ceph/
-        creates={{ ansible_hostname }}.tar
+        creates={{ ansible_facts['hostname'] }}.tar
       when: migration_completed.stat.exists == False
 
     - name: Create backup directory
 
     - name: Scp OSDs dirs and configs
       fetch: >
-        src=/var/lib/ceph/{{ ansible_hostname }}.tar
+        src=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
         dest={{ backup_dir }}/osds-backups/
         flat=yes
       when: migration_completed.stat.exists == False
     # NOTE (leseb): 'creates' was added in Ansible 1.6
     - name: Copy and unarchive the OSD configs
       unarchive: >
-        src={{ backup_dir }}/osds-backups/{{ ansible_hostname }}.tar
+        src={{ backup_dir }}/osds-backups/{{ ansible_facts['hostname'] }}.tar
         dest=/var/lib/ceph/
         copy=yes
         mode=0600
 
     - name: Check for failed run
       stat: >
-        path=/var/lib/ceph/{{ ansible_hostname }}.tar
+        path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
       register: rgw_archive_leftover
 
     - fail: msg="Looks like an archive is already there, please remove it!"
 
     - name: Archive rados gateway configs
       shell: >
-        tar -cpvzf - --one-file-system . /etc/ceph/* | cat > {{ ansible_hostname }}.tar
+        tar -cpvzf - --one-file-system . /etc/ceph/* | cat > {{ ansible_facts['hostname'] }}.tar
         chdir=/var/lib/ceph/
-        creates={{ ansible_hostname }}.tar
+        creates={{ ansible_facts['hostname'] }}.tar
       when: migration_completed.stat.exists == False
 
     - name: Create backup directory
 
     - name: Scp RGWs dirs and configs
       fetch: >
-        src=/var/lib/ceph/{{ ansible_hostname }}.tar
+        src=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
         dest={{ backup_dir }}/rgws-backups/
         flat=yes
       when: migration_completed.stat.exists == False
     # NOTE (leseb): 'creates' was added in Ansible 1.6
     - name: Copy and unarchive the OSD configs
       unarchive: >
-        src={{ backup_dir }}/rgws-backups/{{ ansible_hostname }}.tar
+        src={{ backup_dir }}/rgws-backups/{{ ansible_facts['hostname'] }}.tar
         dest=/var/lib/ceph/
         copy=yes
         mode=0600
index c6aadfe18e461c0663c1f6209e53bb4a0c54245a..29f40433c6a8bc5850e47d73002c433deaa445f1 100644 (file)
     include_vars: "{{ item }}"
     with_first_found:
       - files:
-          - "host_vars/{{ ansible_hostname }}.yml"
+          - "host_vars/{{ ansible_facts['hostname'] }}.yml"
           - "host_vars/default.yml"
         skip: true
 
   - name: exit playbook, if devices not defined
     fail:
-      msg: "devices must be define in host_vars/default.yml or host_vars/{{ ansible_hostname }}.yml"
+      msg: "devices must be define in host_vars/default.yml or host_vars/{{ ansible_facts['hostname'] }}.yml"
     when: devices is not defined
 
   - name: install sgdisk(gdisk)
   - set_fact:
       owner: 167
       group: 167
-    when: ansible_os_family == "RedHat"
+    when: ansible_facts['os_family'] == "RedHat"
   
   - set_fact:
       owner: 64045
       group: 64045
-    when: ansible_os_family == "Debian"
+    when: ansible_facts['os_family'] == "Debian"
 
   - name: change partitions ownership
     file:
index 9184598f94c4bc11eaac95076e24296f9ebdfde0..11f38691487283ca03fcdea22525a85337b24caf 100644 (file)
@@ -60,7 +60,7 @@
   post_tasks:
     - name: set_fact container_exec_cmd build docker exec command (containerized)
       set_fact:
-        container_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+        container_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
       when: containerized_deployment | bool
 
     - name: exit playbook, if can not connect to the cluster
index 6ba9fcf43b2acbc6288f56848a3c46553ba99be4..0d1d0abfa2f27f47ba7955c9be0f5f781dfe3231 100644 (file)
@@ -7,7 +7,7 @@
 
 
 ceph_conf_overrides:
-  "client.rgw.{{ hostvars[inventory_hostname]['ansible_hostname'] }}":
+  "client.rgw.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
     "rgw keystone api version": "2"
     "rgw keystone url": "http://192.168.0.1:35357"
     "rgw keystone admin token": "password"
@@ -16,7 +16,7 @@ ceph_conf_overrides:
     "rgw keystone token cache size": "10000"
     "rgw keystone revocation interval": "900"
     "rgw s3 auth use keystone": "true"
-    "nss db path": "/var/lib/ceph/radosgw/ceph-radosgw.{{ ansible_hostname }}/nss"
+    "nss db path": "/var/lib/ceph/radosgw/ceph-radosgw.{{ ansible_facts['hostname'] }}/nss"
 
 
 # NOTE (leseb): to authentivate with Keystone you have two options:
index 1044e6d446c26f60518b23070156ac0af2c9ecb3..327de816c70db4c613ecc86ad73f63cb01f25221 100644 (file)
@@ -7,7 +7,7 @@
 
 
 ceph_conf_overrides:
-  "client.rgw.{{ hostvars[inventory_hostname]['ansible_hostname'] }}":
+  "client.rgw.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
     "rgw keystone api version": "3"
     "rgw keystone url": "http://192.168.0.1:35357"
     "rgw keystone admin token": "password"
@@ -17,7 +17,7 @@ ceph_conf_overrides:
     "rgw keystone token cache size": "10000"
     "rgw keystone revocation interval": "900"
     "rgw s3 auth use keystone": "true"
-    "nss db path": "/var/lib/ceph/radosgw/ceph-radosgw.{{ ansible_hostname }}/nss"
+    "nss db path": "/var/lib/ceph/radosgw/ceph-radosgw.{{ ansible_facts['hostname'] }}/nss"
 
 
 # NOTE (leseb): to authentivate with Keystone you have two options:
index cf2dd230de0bd3fe6c207fea84823863624bb544..1cd18c260723a0cc59c61c685fdb88fece69c659 100644 (file)
@@ -6,6 +6,6 @@
 # The double quotes are important, do NOT remove them.
 
 ceph_conf_overrides:
-  "client.rgw.{{ hostvars[inventory_hostname]['ansible_hostname'] }}":
+  "client.rgw.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
     rgw enable static website = true
     rgw dns s3website name = objects-website-region.domain.com
index 40645281c68d3dd3984c9b7a9f411d4ee013b207..af704fb7276a94a2ef4bb170378aa22b6afabc61 100644 (file)
@@ -6,7 +6,7 @@
 # The double quotes are important, do NOT remove them.
 
 ceph_conf_overrides:
-  "client.rgw.{{ hostvars[inventory_hostname]['ansible_hostname'] }}":
+  "client.rgw.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
     rgw enable usage log = true
     rgw usage log tick interval = 30
     rgw usage log flush threshold = 1024
index 64d68ab2748775d8179bcb5c56f14c612704c24f..b98565330d1c06abafbb2f72153aabd454d2a0cf 100644 (file)
@@ -20,7 +20,7 @@
     name: "{{ item }}"
     groups: _filtered_clients
   with_items: "{{ groups[client_group_name] | intersect(ansible_play_batch) }}"
-  when: (hostvars[item]['ansible_architecture'] == 'x86_64') or (not containerized_deployment | bool)
+  when: (hostvars[item]['ansible_facts']['architecture'] == 'x86_64') or (not containerized_deployment | bool)
 
 - name: set_fact delegated_node
   set_fact:
index ab7e518535f048dd90bd969d4ed10ef74bb0727e..7b7cb4293057318c2b7f0021aed3f3105c5369d4 100644 (file)
@@ -6,7 +6,7 @@
     create: yes
     line: "CLUSTER={{ cluster }}"
     regexp: "^CLUSTER="
-  when: ansible_os_family in ["RedHat", "Suse"]
+  when: ansible_facts['os_family'] in ["RedHat", "Suse"]
 
 # NOTE(leseb): we are performing the following check
 # to ensure any Jewel installation will not fail.
@@ -19,7 +19,7 @@
 # - All previous versions from Canonical
 # - Infernalis from ceph.com
 - name: debian based systems - configure cluster name
-  when: ansible_os_family == "Debian"
+  when: ansible_facts['os_family'] == "Debian"
   block:
     - name: check /etc/default/ceph exist
       stat:
index b76145777b722048580f8546778c8d76038a5dae..115ecf59ed10e255c58f70aa835cb1b7bc849ddd 100644 (file)
@@ -7,7 +7,7 @@
     regexp: "^TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES="
     line: "TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }}"
   when:
-    - ansible_os_family == 'Debian'
+    - ansible_facts['os_family'] == 'Debian'
     - etc_default_ceph.stat.exists
   notify: restart ceph osds
 
@@ -18,5 +18,5 @@
     create: yes
     regexp: "^TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES="
     line: "TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }}"
-  when: ansible_os_family == 'RedHat'
+  when: ansible_facts['os_family'] == 'RedHat'
   notify: restart ceph osds
index a18801120bfbf6f0f91045d77c813117ddb6746e..a36355a1bb12cbf0e22cf69b9d6363505182dab4 100644 (file)
@@ -21,4 +21,4 @@
   args:
     warn: no
   changed_when: false
-  when: ansible_pkg_mgr == 'yum'
+  when: ansible_facts['pkg_mgr'] == 'yum'
index 0afa23befc8877dfe3c5c905ace656c9f5b65482..6832a3f5b1785e001e5fc9d2089f51feb86185bc 100644 (file)
@@ -15,6 +15,6 @@
 
 - name: configure debian ceph stable community repository
   apt_repository:
-    repo: "deb {{ ceph_stable_repo }} {{ ceph_stable_distro_source | default(ansible_distribution_release) }} main"
+    repo: "deb {{ ceph_stable_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main"
     state: present
     update_cache: yes
index 10c5ea38f94e726d7014b7fd5067f11a59c40a84..607ce0896c3cb8581c1b7b31dae374befb0518b2 100644 (file)
@@ -9,6 +9,6 @@
 
 - name: configure debian custom repository
   apt_repository:
-    repo: "deb {{ ceph_custom_repo }} {{ ansible_distribution_release }} main"
+    repo: "deb {{ ceph_custom_repo }} {{ ansible_facts['distribution_release'] }} main"
     state: present
     update_cache: yes
index 9bde8bd2b527a1b90da5e78474b1f82e4a4cfff6..c5c8c5c3a859daee1650efa36828d22325a11498 100644 (file)
@@ -1,7 +1,7 @@
 ---
 - name: fetch ceph debian development repository
   uri:
-    url: https://shaman.ceph.com/api/repos/ceph/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/{{ ansible_distribution | lower }}/{{ ansible_distribution_release }}/repo
+    url: https://shaman.ceph.com/api/repos/ceph/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_release'] }}/repo
     return_content: yes
   register: ceph_dev_deb_repo
 
index 6bdc03f4ab1c580aa583d485a69c9d23f976ab04..3c6db106f38a0660ce658c60bda9446e51447467 100644 (file)
@@ -4,6 +4,6 @@
     name: "{{ debian_ceph_pkgs | unique }}"
     update_cache: no
     state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
-    default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else '' }}{{ ansible_distribution_release ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports else '' }}"
+    default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else '' }}{{ ansible_facts['distribution_release'] ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
   register: result
   until: result is succeeded
index 28b56fd9ac491b3e6603bbcc30720a3126c151ce..7e9f6d62fd8f1cc5c8c789cc6544fdc15566b1ba 100644 (file)
@@ -5,7 +5,7 @@
     state: present
   register: result
   until: result is succeeded
-  when: ansible_distribution == 'RedHat'
+  when: ansible_facts['distribution'] == 'RedHat'
 
 - name: install centos dependencies
   yum:
@@ -13,7 +13,7 @@
     state: present
   register: result
   until: result is succeeded
-  when: ansible_distribution == 'CentOS'
+  when: ansible_facts['distribution'] == 'CentOS'
 
 - name: install redhat ceph packages
   package:
index 51835a95645b649034971f34f5923cf4949153bb..f53302ef376ba3d2e8db71b04a1f4872058d19a9 100644 (file)
@@ -1,24 +1,24 @@
 ---
 - name: "rhcs {{ ceph_rhcs_version }} on rhel 8"
-  when: ansible_distribution_major_version | int == 8
+  when: ansible_facts['distribution_major_version'] | int == 8
   block:
     - name: enable red hat storage monitor repository
       rhsm_repository:
-        name: "rhceph-{{ ceph_rhcs_version }}-mon-for-rhel-8-{{ ansible_architecture }}-rpms"
+        name: "rhceph-{{ ceph_rhcs_version }}-mon-for-rhel-8-{{ ansible_facts['architecture'] }}-rpms"
       when: (mon_group_name in group_names or mgr_group_name in group_names)
 
     - name: enable red hat storage osd repository
       rhsm_repository:
-        name: "rhceph-{{ ceph_rhcs_version }}-osd-for-rhel-8-{{ ansible_architecture }}-rpms"
+        name: "rhceph-{{ ceph_rhcs_version }}-osd-for-rhel-8-{{ ansible_facts['architecture'] }}-rpms"
       when: osd_group_name in group_names
 
     - name: enable red hat storage tools repository
       rhsm_repository:
-        name: "rhceph-{{ ceph_rhcs_version }}-tools-for-rhel-8-{{ ansible_architecture }}-rpms"
+        name: "rhceph-{{ ceph_rhcs_version }}-tools-for-rhel-8-{{ ansible_facts['architecture'] }}-rpms"
       when: (mgr_group_name in group_names or rgw_group_name in group_names or mds_group_name in group_names or nfs_group_name in group_names or iscsi_gw_group_name in group_names or client_group_name in group_names or monitoring_group_name in group_names)
 
 - name: "rhcs {{ ceph_rhcs_version }} on rhel 7"
-  when: ansible_distribution_major_version | int == 7
+  when: ansible_facts['distribution_major_version'] | int == 7
   block:
     - name: enable red hat storage monitor repository
       rhsm_repository:
index 5fd817d75c97b6baa7911befafe20b95408417c3..4db2737ccd073c8d9b8b815db11986453f961267 100644 (file)
@@ -5,7 +5,7 @@
   register: result
   until: result is succeeded
   tags: with_pkg
-  when: ansible_distribution_major_version | int == 7
+  when: ansible_facts['distribution_major_version'] | int == 7
 
 - name: configure red hat ceph community repository stable key
   rpm_key:
@@ -21,7 +21,7 @@
     gpgcheck: yes
     state: present
     gpgkey: "{{ ceph_stable_key }}"
-    baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_distribution_major_version }}/$basearch"
+    baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_facts['distribution_major_version'] }}/$basearch"
     file: ceph_stable
     priority: 2
   register: result
@@ -34,7 +34,7 @@
     gpgcheck: yes
     state: present
     gpgkey: "{{ ceph_stable_key }}"
-    baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_distribution_major_version }}/noarch"
+    baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_facts['distribution_major_version'] }}/noarch"
     file: ceph_stable
     priority: 2
   register: result
index f6b7a0674044450dc8d301a3e4760be7fcc6717b..1b0af582fa9615ba998f480f7d9901ab487fb31f 100644 (file)
@@ -1,7 +1,7 @@
 ---
 - name: get latest available build
   uri:
-    url: "https://shaman.ceph.com/api/search/?status=ready&project=ceph&flavor=default&distros=centos/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}&ref={{ ceph_dev_branch }}&sha1={{ ceph_dev_sha1 }}"
+    url: "https://shaman.ceph.com/api/search/?status=ready&project=ceph&flavor=default&distros=centos/{{ ansible_facts['distribution_major_version'] }}/{{ ansible_facts['architecture'] }}&ref={{ ceph_dev_branch }}&sha1={{ ceph_dev_sha1 }}"
     return_content: yes
   run_once: true
   register: latest_build
index 2239aad061bb1c5a95601099452a659997e45dd6..31ff66a884bae0e5d098a7f1e6434c15167c8652 100644 (file)
@@ -1,22 +1,22 @@
 ---
 - name: include_tasks installs/install_on_redhat.yml
   include_tasks: installs/install_on_redhat.yml
-  when: ansible_os_family == 'RedHat'
+  when: ansible_facts['os_family'] == 'RedHat'
   tags: package-install
 
 - name: include_tasks installs/install_on_suse.yml
   include_tasks: installs/install_on_suse.yml
-  when: ansible_os_family == 'Suse'
+  when: ansible_facts['os_family'] == 'Suse'
   tags: package-install
 
 - name: include installs/install_on_debian.yml
   include_tasks: installs/install_on_debian.yml
   tags: package-install
-  when: ansible_os_family == 'Debian'
+  when: ansible_facts['os_family'] == 'Debian'
 
 - name: include_tasks installs/install_on_clear.yml
   include_tasks: installs/install_on_clear.yml
-  when: ansible_os_family == 'ClearLinux'
+  when: ansible_facts['os_family'] == 'ClearLinux'
   tags: package-install
 
 - name: get ceph version
index ee310f11084a102e057a693f7744933099da48de..d59ba4593de12b11415b70ec84b3ce422a637c40 100644 (file)
@@ -1,7 +1,7 @@
 ---
 - name: create rados gateway instance directories
   file:
-    path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}"
+    path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
     state: directory
     owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
     group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
@@ -10,7 +10,7 @@
 
 - name: generate environment file
   copy:
-    dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}/EnvironmentFile"
+    dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}/EnvironmentFile"
     owner: "root"
     group: "root"
     mode: "0644"
index 09e3bd785413dc49627d00dbf5e0e08c9fae9b54..fef76271e47f53c49a559f1513b9fa04feca0f0b 100644 (file)
@@ -24,10 +24,10 @@ osd crush chooseleaf type = 0
 
 {% if nb_mon > 0 and inventory_hostname in groups.get(mon_group_name, []) %}
 mon initial members = {% for host in groups[mon_group_name] %}
-      {% if hostvars[host]['ansible_fqdn'] is defined and mon_use_fqdn -%}
-        {{ hostvars[host]['ansible_fqdn'] }}
-      {%- elif hostvars[host]['ansible_hostname'] is defined -%}
-        {{ hostvars[host]['ansible_hostname'] }}
+      {% if hostvars[host]['ansible_facts']['fqdn'] is defined and mon_use_fqdn -%}
+        {{ hostvars[host]['ansible_facts']['fqdn'] }}
+      {%- elif hostvars[host]['ansible_facts']['hostname'] is defined -%}
+        {{ hostvars[host]['ansible_facts']['hostname'] }}
       {%- endif %}
       {%- if not loop.last %},{% endif %}
     {% endfor %}
@@ -92,13 +92,13 @@ filestore xattr use omap = true
 [osd]
 {% if is_hci | bool and _num_osds > 0 %}
 {# hci_safety_factor is the safety factor for HCI deployments #}
-{% if ansible_memtotal_mb * 1048576 * hci_safety_factor / _num_osds > osd_memory_target %}
-{% set _osd_memory_target = (ansible_memtotal_mb * 1048576 * hci_safety_factor / _num_osds) | int %}
+{% if ansible_facts['memtotal_mb'] * 1048576 * hci_safety_factor / _num_osds > osd_memory_target %}
+{% set _osd_memory_target = (ansible_facts['memtotal_mb'] * 1048576 * hci_safety_factor / _num_osds) | int %}
 {% endif %}
 {% elif _num_osds > 0 %}
 {# non_hci_safety_factor is the safety factor for dedicated nodes #}
-{% if ansible_memtotal_mb * 1048576 * non_hci_safety_factor / _num_osds > osd_memory_target %}
-{% set _osd_memory_target = (ansible_memtotal_mb * 1048576 * non_hci_safety_factor / _num_osds) | int %}
+{% if ansible_facts['memtotal_mb'] * 1048576 * non_hci_safety_factor / _num_osds > osd_memory_target %}
+{% set _osd_memory_target = (ansible_facts['memtotal_mb'] * 1048576 * non_hci_safety_factor / _num_osds) | int %}
 {% endif %}
 {% endif %}
 osd memory target = {{ _osd_memory_target | default(osd_memory_target) }}
@@ -106,14 +106,14 @@ osd memory target = {{ _osd_memory_target | default(osd_memory_target) }}
 {% endif %}
 
 {% if inventory_hostname in groups.get(rgw_group_name, []) %}
-{% set _rgw_hostname = hostvars[inventory_hostname]['rgw_hostname'] | default(hostvars[inventory_hostname]['ansible_hostname']) %}
+{% set _rgw_hostname = hostvars[inventory_hostname]['rgw_hostname'] | default(hostvars[inventory_hostname]['ansible_facts']['hostname']) %}
 {# {{ hostvars[host]['rgw_hostname'] }} for backward compatibility, fqdn issues. See bz1580408 #}
 {% if hostvars[inventory_hostname]['rgw_instances'] is defined %}
 {% for instance in hostvars[inventory_hostname]['rgw_instances'] %}
 [client.rgw.{{ _rgw_hostname + '.' + instance['instance_name'] }}]
 host = {{ _rgw_hostname }}
 keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ _rgw_hostname + '.' + instance['instance_name'] }}/keyring
-log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[inventory_hostname]['ansible_hostname'] + '.' + instance['instance_name'] }}.log
+log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] + '.' + instance['instance_name'] }}.log
 {% set _rgw_binding_socket = instance['radosgw_address'] | default(_radosgw_address) | string + ':' + instance['radosgw_frontend_port'] | default(radosgw_frontend_port) | string %}
 {%- macro frontend_line(frontend_type) -%}
 {%- if frontend_type == 'civetweb' -%}
@@ -139,12 +139,12 @@ rgw_zone = {{ instance['rgw_zone'] }}
 
 {% if inventory_hostname in groups.get(nfs_group_name, []) and inventory_hostname not in groups.get(rgw_group_name, []) %}
 {% for host in groups[nfs_group_name] %}
-{% set _rgw_hostname = hostvars[host]['rgw_hostname'] | default(hostvars[host]['ansible_hostname']) %}
-{% if nfs_obj_gw %}
+{% set _rgw_hostname = hostvars[host]['rgw_hostname'] | default(hostvars[host]['ansible_facts']['hostname']) %}
+{% if nfs_obj_gw | bool %}
 [client.rgw.{{ _rgw_hostname }}]
 host = {{ _rgw_hostname }}
 keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ _rgw_hostname }}/keyring
-log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[host]['ansible_hostname'] }}.log
+log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[host]['ansible_facts']['hostname'] }}.log
 {% endif %}
 {% endfor %}
 {% endif %}
index 1adf86ca966aa36afac56f5cd68ac21813bdc8d9..7a965da985426adb7ceecac8b94823ac62c7ae07 100644 (file)
 
 - name: add docker's gpg key
   apt_key:
-    url: 'https://download.docker.com/linux/{{ ansible_distribution | lower }}/gpg'
+    url: "https://download.docker.com/linux/{{ ansible_facts['distribution'] | lower }}/gpg"
   register: result
   until: result is succeeded
   when: container_package_name == 'docker-ce'
 
 - name: add docker repository
   apt_repository:
-    repo: "deb https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} stable"
+    repo: "deb https://download.docker.com/linux/{{ ansible_facts['distribution'] | lower }} {{ ansible_facts['distribution_release'] }} stable"
   when: container_package_name == 'docker-ce'
 
 - name: add podman ppa repository
@@ -29,4 +29,4 @@
     repo: "ppa:projectatomic/ppa"
   when:
     - container_package_name == 'podman'
-    - ansible_distribution == 'Ubuntu'
+    - ansible_facts['distribution'] == 'Ubuntu'
index 5ed0127e7756ecd0ccadad263cfa823da7e6917e..baa373bfce5d4deaa318a72f2314d4d3ade5a138 100644 (file)
@@ -2,13 +2,13 @@
 - name: include specific variables
   include_vars: "{{ item }}"
   with_first_found:
-    - "{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml"
-    - "{{ ansible_os_family }}.yml"
+    - "{{ ansible_facts['distribution'] }}-{{ ansible_facts['distribution_major_version'] }}.yml"
+    - "{{ ansible_facts['os_family'] }}.yml"
 
 - name: debian based systems tasks
   include_tasks: debian_prerequisites.yml
   when:
-    - ansible_os_family == 'Debian'
+    - ansible_facts['os_family'] == 'Debian'
   tags: with_pkg
 
 # ensure extras enabled for docker
@@ -18,7 +18,7 @@
     state: present
     enabled: yes
   when:
-    - ansible_distribution == 'CentOS'
+    - ansible_facts['distribution'] == 'CentOS'
     - ceph_docker_enable_centos_extra_repo | bool
   tags:
     with_pkg
index 4bcb62274b789fd065ef091583506f014ccd47d7..e2e0806ceaf36c994d506491055db17f54b6820d 100644 (file)
@@ -52,7 +52,7 @@
 
 - name: start the ceph-crash service
   systemd:
-    name: "{{ 'ceph-crash@' + ansible_hostname if containerized_deployment | bool else 'ceph-crash.service' }}"
+    name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}"
     state: started
     enabled: yes
     masked: no
index 869b03a69bd29bfeb6452582f057eff47b2c2412..3e4139c37574f4ab29da204d6570c1796455fa15 100644 (file)
@@ -1,7 +1,7 @@
 ---
 - name: set_fact container_exec_cmd
   set_fact:
-    container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+    container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
   when: containerized_deployment | bool
 
 - name: set_fact container_run_cmd
     - name: add iscsi gateways - ipv4
       command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard iscsi-gateway-add -i -"  # noqa 304
       args:
-        stdin: "{{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}:{{ hostvars[item]['api_port'] | default(5000) }}"
+        stdin: "{{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}:{{ hostvars[item]['api_port'] | default(5000) }}"
         stdin_add_newline: no
       changed_when: false
       delegate_to: "{{ groups[mon_group_name][0] }}"
         - dashboard_password_from_stdin | bool
 
     - name: add iscsi gateways - ipv4 (legacy)
-      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard iscsi-gateway-add {{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}:{{ hostvars[item]['api_port'] | default(5000) }}"
+      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard iscsi-gateway-add {{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}:{{ hostvars[item]['api_port'] | default(5000) }}"
       changed_when: false
       delegate_to: "{{ groups[mon_group_name][0] }}"
       with_items: "{{ groups[iscsi_gw_group_name] }}"
     - name: add iscsi gateways - ipv6
       command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard iscsi-gateway-add -i -"  # noqa 304
       args:
-        stdin: "{{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap }}:{{ hostvars[item]['api_port'] | default(5000) }}"
+        stdin: "{{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap }}:{{ hostvars[item]['api_port'] | default(5000) }}"
         stdin_add_newline: no
       changed_when: false
       delegate_to: "{{ groups[mon_group_name][0] }}"
         - dashboard_password_from_stdin | bool
 
     - name: add iscsi gateways - ipv6 (legacy)
-      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard iscsi-gateway-add {{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap }}:{{ hostvars[item]['api_port'] | default(5000) }}"
+      command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard iscsi-gateway-add {{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap }}:{{ hostvars[item]['api_port'] | default(5000) }}"
       changed_when: false
       delegate_to: "{{ groups[mon_group_name][0] }}"
       with_items: "{{ groups[iscsi_gw_group_name] }}"
index 9538142a626193ded50f379eab2ea564f6a947c8..e2f7774b47e07d8afece2d023c2cd2882bbdecc9 100644 (file)
@@ -1,16 +1,16 @@
 ---
 - name: get current mgr backend - ipv4
   set_fact:
-    mgr_server_addr: "{{ hostvars[dashboard_backend]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}"
+    mgr_server_addr: "{{ hostvars[dashboard_backend]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}"
   when: ip_version == 'ipv4'
 
 - name: get current mgr backend - ipv6
   set_fact:
-    mgr_server_addr: "{{ hostvars[dashboard_backend]['ansible_all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last }}"
+    mgr_server_addr: "{{ hostvars[dashboard_backend]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last }}"
   when: ip_version == 'ipv6'
 
 - name: config the current dashboard backend
-  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/{{ hostvars[dashboard_backend]['ansible_hostname'] }}/server_addr {{ mgr_server_addr }}"
+  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/{{ hostvars[dashboard_backend]['ansible_facts']['hostname'] }}/server_addr {{ mgr_server_addr }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   changed_when: false
   run_once: true
index ee3cbc5282c07042da8c855ad098c99b8aeacfe4..723b3164969ae79666cfcb9025e57212cb8a2e69 100644 (file)
@@ -4,5 +4,5 @@
 
 - name: print dashboard URL
   debug:
-    msg: "The dashboard has been deployed! You can access your dashboard web UI at {{ dashboard_protocol }}://{{ ansible_fqdn }}:{{ dashboard_port }}/ as an '{{ dashboard_admin_user }}' user with '{{ dashboard_admin_password }}' password."
+    msg: "The dashboard has been deployed! You can access your dashboard web UI at {{ dashboard_protocol }}://{{ ansible_facts['fqdn'] }}:{{ dashboard_port }}/ as an '{{ dashboard_admin_user }}' user with '{{ dashboard_admin_password }}' password."
   run_once: true
index d16a0f8c05a174e57bfd896f89ef460c45db7b5b..8b86752c48ad3eed7cc7820fe498c87f370a4ccd 100644 (file)
@@ -72,7 +72,7 @@ debian_package_dependencies: []
 
 centos_package_dependencies:
   - epel-release
-  - "{{ 'python3-libselinux' if ansible_distribution_major_version | int >= 8 else 'libselinux-python' }}"
+  - "{{ (ansible_facts['distribution_major_version'] is version('8', '>=')) | ternary('python3-libselinux', 'libselinux-python') }}"
 
 redhat_package_dependencies: []
 
@@ -142,7 +142,7 @@ nfs_ganesha_stable_deb_repo: "{{ ceph_mirror }}/nfs-ganesha/deb-{{ nfs_ganesha_s
 # Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
 # # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
 # for more info read: https://github.com/ceph/ceph-ansible/issues/305
-#ceph_stable_distro_source: "{{ ansible_distribution_release }}"
+#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
 
 
 # REPOSITORY: RHCS VERSION RED HAT STORAGE (from 4.0)
@@ -170,7 +170,7 @@ ceph_rhcs_repository_path: "{{ ceph_stable_rh_storage_repository_path | default(
 #
 ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
 ceph_stable_openstack_release_uca: queens
-ceph_stable_release_uca: "{{ ansible_distribution_release }}-updates/{{ ceph_stable_openstack_release_uca }}"
+ceph_stable_release_uca: "{{ ansible_facts['distribution_release'] }}-updates/{{ ceph_stable_openstack_release_uca }}"
 
 # REPOSITORY: openSUSE OBS
 #
@@ -180,7 +180,7 @@ ceph_stable_release_uca: "{{ ansible_distribution_release }}-updates/{{ ceph_sta
 # usually has newer Ceph releases than the normal distro repository.
 #
 #
-ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_distribution_version }}/"
+ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_facts['distribution_version'] }}/"
 
 # REPOSITORY: DEV
 #
@@ -243,7 +243,7 @@ generate_fsid: true
 
 ceph_conf_key_directory: /etc/ceph
 
-ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_os_family == 'Debian' else '167' }}"
+ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_facts['os_family'] == 'Debian' else '167' }}"
 
 # Permissions for keyring files in /etc/ceph
 ceph_keyring_permissions: '0600'
@@ -541,7 +541,7 @@ rgw_zone: default
 #   global:
 #     foo: 1234
 #     bar: 5678
-#   "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_hostname'] }}":
+#   "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
 #     rgw_zone: zone1
 #
 ceph_conf_overrides: {}
@@ -877,4 +877,4 @@ use_fqdn_yes_i_am_sure: false
 
 container_exec_cmd:
 docker: false
-ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0)  }}"
\ No newline at end of file
+ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0)  }}"
index 8e866bcbf15151b2983450cd8262ba19a7702bbc..44249419f4e3f4125b64723576fa52d1b3b67f41 100644 (file)
@@ -6,4 +6,4 @@
 
 - name: set_fact container_binary
   set_fact:
-    container_binary: "{{ 'podman' if (podman_binary.stat.exists and ansible_distribution == 'Fedora') or (ansible_os_family == 'RedHat' and ansible_distribution_major_version == '8') else 'docker' }}"
\ No newline at end of file
+    container_binary: "{{ 'podman' if (podman_binary.stat.exists and ansible_facts['distribution'] == 'Fedora') or (ansible_facts['os_family'] == 'RedHat' and ansible_facts['distribution_major_version'] == '8') else 'docker' }}"
\ No newline at end of file
index 3bb2f39c54556c12b1aec30f87bf427563e0a0e0..a288b82ac5803d12e1160b9d3a2f56e7eb3b6fb4 100644 (file)
@@ -28,9 +28,9 @@
   set_fact:
     ceph_release: "{{ ceph_stable_release }}"
 
-- name: set_fact monitor_name ansible_hostname
+- name: set_fact monitor_name ansible_facts['hostname']
   set_fact:
-    monitor_name: "{{ hostvars[item]['ansible_hostname'] }}"
+    monitor_name: "{{ hostvars[item]['ansible_facts']['hostname'] }}"
   delegate_to: "{{ item }}"
   delegate_facts: true
   with_items: "{{ groups.get(mon_group_name, []) }}"
@@ -39,9 +39,9 @@
     - groups.get(mon_group_name, []) | length > 0
     - not mon_use_fqdn | bool
 
-- name: set_fact monitor_name ansible_fqdn
+- name: set_fact monitor_name ansible_facts['fqdn']
   set_fact:
-    monitor_name: "{{ hostvars[item]['ansible_fqdn'] }}"
+    monitor_name: "{{ hostvars[item]['ansible_facts']['fqdn'] }}"
   delegate_to: "{{ item }}"
   delegate_facts: true
   with_items: "{{ groups.get(mon_group_name, []) }}"
   block:
     - name: set_fact container_exec_cmd
       set_fact:
-        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] if not rolling_update else hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_hostname'] }}"
+        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] if not rolling_update | bool else hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_facts']['hostname'] }}"
       when:
         - containerized_deployment | bool
 
     - name: find a running mon container
-      command: "{{ container_binary }} ps -q --filter name=ceph-mon-{{ hostvars[item]['ansible_hostname'] }}"
+      command: "{{ container_binary }} ps -q --filter name=ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}"
       register: find_running_mon_container
       failed_when: false
       run_once: true
 
     - name: set_fact _container_exec_cmd
       set_fact:
-        _container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0] if running_mon is undefined else running_mon]['ansible_hostname'] }}"
+        _container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0] if running_mon is undefined else running_mon]['ansible_facts']['hostname'] }}"
       when:
         - containerized_deployment | bool
 
   when: rolling_update or groups.get(mon_group_name, []) | length == 0
 
 - name: get current fsid
-  command: "{{ timeout_command }} {{ container_exec_cmd }} ceph --admin-daemon /var/run/ceph/{{ cluster }}-mon.{{ hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_hostname'] }}.asok config get fsid"
+  command: "{{ timeout_command }} {{ container_exec_cmd }} ceph --admin-daemon /var/run/ceph/{{ cluster }}-mon.{{ hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_facts']['hostname'] }}.asok config get fsid"
   register: rolling_update_fsid
   delegate_to: "{{ mon_host | default(groups[mon_group_name][0]) }}"
   until: rolling_update_fsid is succeeded
       set_fact:
         fsid: "{{ cluster_uuid.stdout }}"
 
-- name: set_fact mds_name ansible_hostname
+- name: set_fact mds_name ansible_facts['hostname']
   set_fact:
-    mds_name: "{{ ansible_hostname }}"
+    mds_name: "{{ ansible_facts['hostname'] }}"
   when: not mds_use_fqdn | bool
 
-- name: set_fact mds_name ansible_fqdn
+- name: set_fact mds_name ansible_facts['fqdn']
   set_fact:
-    mds_name: "{{ ansible_fqdn }}"
+    mds_name: "{{ ansible_facts['fqdn'] }}"
   when: mds_use_fqdn | bool
 
 - name: resolve device link(s)
 - name: set_fact devices generate device list when osd_auto_discovery
   set_fact:
     devices: "{{ (devices | default([]) + [ item.key | regex_replace('^', '/dev/') ]) | unique }}"
-  with_dict: "{{ ansible_devices }}"
+  with_dict: "{{ ansible_facts['devices'] }}"
   when:
     - osd_auto_discovery | default(False) | bool
-    - ansible_devices is defined
+    - ansible_facts['devices'] is defined
     - item.value.removable == "0"
     - item.value.sectors != "0"
     - item.value.partitions|count == 0
 
     - name: set_fact rgw_hostname
       set_fact:
-        rgw_hostname: "{% set _value = ansible_hostname -%}
+        rgw_hostname: "{% set _value = ansible_facts['hostname'] -%}
         {% for key in (ceph_current_status['services']['rgw']['daemons'] | list) -%}
-        {% if key == ansible_fqdn -%}
+        {% if key == ansible_facts['fqdn'] -%}
         {% set _value = key -%}
         {% endif -%}
         {% endfor -%}
   - name: set ntp service name for Debian family
     set_fact:
       ntp_service_name: ntp
-    when: ansible_os_family == 'Debian'
+    when: ansible_facts['os_family'] == 'Debian'
 
   - name: set ntp service name for Red Hat family
     set_fact:
       ntp_service_name: ntpd
-    when: ansible_os_family in ['RedHat', 'Suse']
+    when: ansible_facts['os_family'] in ['RedHat', 'Suse']
 
 - name: set chrony daemon name RedHat and Ubuntu based OSs
   block:
     - name: set chronyd daemon name for RedHat based OSs
       set_fact:
         chrony_daemon_name: chronyd
-      when: ansible_os_family in ["RedHat", "Suse"]
+      when: ansible_facts['os_family'] in ["RedHat", "Suse"]
 
     - name: set chronyd daemon name for Ubuntu based OSs
       set_fact:
         chrony_daemon_name: chrony
-      when: ansible_os_family == "Debian"
+      when: ansible_facts['os_family'] == "Debian"
 
 - name: set_fact use_new_ceph_iscsi package or old ceph-iscsi-config/cli
   set_fact:
index 47bae2eaccd7c7a6d6e80ce3693cce224bf306da..b0f9b7842a9e99a56711d86f6c50226a193d9485 100644 (file)
@@ -1,6 +1,6 @@
 - name: set grafana_server_addr fact - ipv4
   set_fact:
-    grafana_server_addr: "{{ hostvars[inventory_hostname]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}"
+    grafana_server_addr: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}"
   when:
     - groups.get(grafana_server_group_name, []) | length > 0
     - ip_version == 'ipv4'
@@ -9,7 +9,7 @@
 
 - name: set grafana_server_addr fact - ipv6
   set_fact:
-    grafana_server_addr: "{{ hostvars[inventory_hostname]['ansible_all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap }}"
+    grafana_server_addr: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap }}"
   when:
     - groups.get(grafana_server_group_name, []) | length > 0
     - ip_version == 'ipv6'
@@ -18,7 +18,7 @@
 
 - name: set grafana_server_addrs fact - ipv4
   set_fact:
-    grafana_server_addrs: "{{ (grafana_server_addrs | default([]) + [hostvars[item]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first]) | unique }}"
+    grafana_server_addrs: "{{ (grafana_server_addrs | default([]) + [hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first]) | unique }}"
   with_items: "{{ groups.get(grafana_server_group_name, []) }}"
   when:
     - groups.get(grafana_server_group_name, []) | length > 0
@@ -27,7 +27,7 @@
 
 - name: set grafana_server_addrs fact - ipv6
   set_fact:
-    grafana_server_addrs: "{{ (grafana_server_addrs | default([]) + [hostvars[item]['ansible_all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap]) | unique }}"
+    grafana_server_addrs: "{{ (grafana_server_addrs | default([]) + [hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap]) | unique }}"
   with_items: "{{ groups.get(grafana_server_group_name, []) }}"
   when:
     - groups.get(grafana_server_group_name, []) | length > 0
index aee025aab8d130aa5f096293929d1231911f4314..fb8b4690d908b482102bb544f797806890542288 100644 (file)
@@ -1,7 +1,7 @@
 ---
 - name: set_fact _monitor_address to monitor_address_block ipv4
   set_fact:
-    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_all_ipv4_addresses'] | ips_in_ranges(hostvars[item]['monitor_address_block'].split(',')) | first }] }}"
+    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(hostvars[item]['monitor_address_block'].split(',')) | first }] }}"
   with_items: "{{ groups.get(mon_group_name, []) }}"
   when:
     - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') |  map(attribute='name') | list"
@@ -11,7 +11,7 @@
 
 - name: set_fact _monitor_address to monitor_address_block ipv6
   set_fact:
-    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_all_ipv6_addresses'] | ips_in_ranges(hostvars[item]['monitor_address_block'].split(',')) | last | ipwrap }] }}"
+    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(hostvars[item]['monitor_address_block'].split(',')) | last | ipwrap }] }}"
   with_items: "{{ groups.get(mon_group_name, []) }}"
   when:
     - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') |  map(attribute='name') | list"
@@ -30,7 +30,7 @@
 
 - name: set_fact _monitor_address to monitor_interface - ipv4
   set_fact:
-    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_' + (hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version]['address'] | ipwrap }]  }}"
+    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_facts'][(hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version]['address'] | ipwrap }]  }}"
   with_items: "{{ groups.get(mon_group_name, []) }}"
   when:
     - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
@@ -41,7 +41,7 @@
 
 - name: set_fact _monitor_address to monitor_interface - ipv6
   set_fact:
-    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_' + (hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version][0]['address'] | ipwrap }] }}"
+    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_facts'][(hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version][0]['address'] | ipwrap }] }}"
   with_items: "{{ groups.get(mon_group_name, []) }}"
   when:
     - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
index e586a0401c5b08a4643d555b3fd8435f82eb1fbb..b662340b7857d65f663e61f04499003d54b3e0c1 100644 (file)
@@ -1,7 +1,7 @@
 ---
 - name: set_fact _radosgw_address to radosgw_address_block ipv4
   set_fact:
-    _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_all_ipv4_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['radosgw_address_block'].split(',')) | first }}"
+    _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['radosgw_address_block'].split(',')) | first }}"
   when:
     - radosgw_address_block is defined
     - radosgw_address_block != 'subnet'
@@ -9,7 +9,7 @@
 
 - name: set_fact _radosgw_address to radosgw_address_block ipv6
   set_fact:
-    _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_all_ipv6_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['radosgw_address_block'].split(',')) | last | ipwrap }}"
+    _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['radosgw_address_block'].split(',')) | last | ipwrap }}"
   when:
     - radosgw_address_block is defined
     - radosgw_address_block != 'subnet'
   block:
     - name: set_fact _interface
       set_fact:
-        _interface: "{{ 'ansible_' + (radosgw_interface | replace('-', '_')) }}"
+        _interface: "{{ (radosgw_interface | replace('-', '_')) }}"
 
     - name: set_fact _radosgw_address to radosgw_interface - ipv4
       set_fact:
-        _radosgw_address: "{{ hostvars[inventory_hostname][_interface][ip_version]['address'] }}"
+        _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_facts'][_interface][ip_version]['address'] }}"
       when: ip_version == 'ipv4'
 
     - name: set_fact _radosgw_address to radosgw_interface - ipv6
       set_fact:
-        _radosgw_address: "{{ hostvars[inventory_hostname][_interface][ip_version][0]['address'] | ipwrap }}"
+        _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_facts'][_interface][ip_version][0]['address'] | ipwrap }}"
       when: ip_version == 'ipv6'
 
 - name: set_fact rgw_instances without rgw multisite
index 5275b89f9ce0dfa23fbcd8d514088283bd04e20b..57928d3212e0784ed2e653ede3cecc14d191b21f 100644 (file)
@@ -7,7 +7,7 @@
   until: result is succeeded
   when:
     - not containerized_deployment | bool
-    - ansible_os_family in ['RedHat', 'Suse']
+    - ansible_facts['os_family'] in ['RedHat', 'Suse']
   tags: package-install
 
 - name: make sure grafana is down
@@ -41,7 +41,7 @@
   with_items: "{{ grafana_dashboard_files }}"
   when:
     - not containerized_deployment | bool
-    - not ansible_os_family in ['RedHat', 'Suse']
+    - not ansible_facts['os_family'] in ['RedHat', 'Suse']
 
 - name: write grafana.ini
   template:
index 4c897b2dec126f7f36efedcdf0d253ecd8fc0133..61ef46c09294950a674c43c789d1346b0dbf7bca 100644 (file)
@@ -18,7 +18,7 @@ org_role = Viewer
 [server]
 cert_file = /etc/grafana/ceph-dashboard.crt
 cert_key = /etc/grafana/ceph-dashboard.key
-domain = {{ ansible_fqdn }}
+domain = {{ ansible_facts['fqdn'] }}
 protocol = {{ dashboard_protocol }}
 http_port = {{ grafana_port }}
 http_addr = {{ grafana_server_addr }}
index 32d90f4a12612fb604f473a65c3df45b0c2924c7..bcb98f857b9aebb8e05f7de7b84e525f0a1a972b 100644 (file)
@@ -1,6 +1,6 @@
 ---
 - name: check for a mon container
-  command: "{{ container_binary }} ps -q --filter='name=ceph-mon-{{ ansible_hostname }}'"
+  command: "{{ container_binary }} ps -q --filter='name=ceph-mon-{{ ansible_facts['hostname'] }}'"
   register: ceph_mon_container_stat
   changed_when: false
   failed_when: false
@@ -16,7 +16,7 @@
   when: inventory_hostname in groups.get(osd_group_name, [])
 
 - name: check for a mds container
-  command: "{{ container_binary }} ps -q --filter='name=ceph-mds-{{ ansible_hostname }}'"
+  command: "{{ container_binary }} ps -q --filter='name=ceph-mds-{{ ansible_facts['hostname'] }}'"
   register: ceph_mds_container_stat
   changed_when: false
   failed_when: false
@@ -24,7 +24,7 @@
   when: inventory_hostname in groups.get(mds_group_name, [])
 
 - name: check for a rgw container
-  command: "{{ container_binary }} ps -q --filter='name=ceph-rgw-{{ ansible_hostname }}'"
+  command: "{{ container_binary }} ps -q --filter='name=ceph-rgw-{{ ansible_facts['hostname'] }}'"
   register: ceph_rgw_container_stat
   changed_when: false
   failed_when: false
@@ -32,7 +32,7 @@
   when: inventory_hostname in groups.get(rgw_group_name, [])
 
 - name: check for a mgr container
-  command: "{{ container_binary }} ps -q --filter='name=ceph-mgr-{{ ansible_hostname }}'"
+  command: "{{ container_binary }} ps -q --filter='name=ceph-mgr-{{ ansible_facts['hostname'] }}'"
   register: ceph_mgr_container_stat
   changed_when: false
   failed_when: false
@@ -40,7 +40,7 @@
   when: inventory_hostname in groups.get(mgr_group_name, [])
 
 - name: check for a rbd mirror container
-  command: "{{ container_binary }} ps -q --filter='name=ceph-rbd-mirror-{{ ansible_hostname }}'"
+  command: "{{ container_binary }} ps -q --filter='name=ceph-rbd-mirror-{{ ansible_facts['hostname'] }}'"
   register: ceph_rbd_mirror_container_stat
   changed_when: false
   failed_when: false
@@ -48,7 +48,7 @@
   when: inventory_hostname in groups.get(rbdmirror_group_name, [])
 
 - name: check for a nfs container
-  command: "{{ container_binary }} ps -q --filter='name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_hostname) }}'"
+  command: "{{ container_binary }} ps -q --filter='name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}'"
   register: ceph_nfs_container_stat
   changed_when: false
   failed_when: false
@@ -80,7 +80,7 @@
   when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
 
 - name: check for a ceph-crash container
-  command: "{{ container_binary }} ps -q --filter='name=ceph-crash-{{ ansible_hostname }}'"
+  command: "{{ container_binary }} ps -q --filter='name=ceph-crash-{{ ansible_facts['hostname'] }}'"
   register: ceph_crash_container_stat
   changed_when: false
   failed_when: false
index 6187fca79004ae5494a6c436fc68c4a173e7b01a..b4039b10e658a2d2afa7a13bedfa25700f4f3028 100644 (file)
@@ -5,7 +5,7 @@
 
 - name: restart the ceph-crash service
   systemd:
-    name: ceph-crash@{{ ansible_hostname }}
+    name: ceph-crash@{{ ansible_facts['hostname'] }}
     state: restarted
     enabled: yes
     masked: no
index b40d579b27b8caf8d4112929a5878549d0b37712..441ddd8f1d08596ace07388088a0d50cc54d4bbb 100644 (file)
@@ -3,13 +3,13 @@
 RETRIES="{{ handler_health_mds_check_retries }}"
 DELAY="{{ handler_health_mds_check_delay }}"
 MDS_NAME="{{ mds_name }}"
-{% if containerized_deployment %}
-DOCKER_EXEC="{{ container_binary }} exec ceph-mds-{{ ansible_hostname }}"
+{% if containerized_deployment | bool %}
+DOCKER_EXEC="{{ container_binary }} exec ceph-mds-{{ ansible_facts['hostname'] }}"
 {% endif %}
 
 # Backward compatibility
-$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mds.{{ ansible_fqdn }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mds.{{ ansible_fqdn }}.asok
-$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mds.{{ ansible_hostname }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mds.{{ ansible_hostname }}.asok
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['fqdn'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['fqdn'] }}.asok
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['hostname'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['hostname'] }}.asok
 
 # First, restart the daemon
 systemctl restart ceph-mds@${MDS_NAME}
index c9f3554003e7041cca2baf239acff13fef7dabf8..e20c61901231d05f90486537279187f9e5994307 100644 (file)
@@ -2,14 +2,14 @@
 
 RETRIES="{{ handler_health_mgr_check_retries }}"
 DELAY="{{ handler_health_mgr_check_delay }}"
-MGR_NAME="{{ ansible_hostname }}"
-{% if containerized_deployment %}
-DOCKER_EXEC="{{ container_binary }} exec ceph-mgr-{{ ansible_hostname }}"
+MGR_NAME="{{ ansible_facts['hostname'] }}"
+{% if containerized_deployment | bool %}
+DOCKER_EXEC="{{ container_binary }} exec ceph-mgr-{{ ansible_facts['hostname'] }}"
 {% endif %}
 
 # Backward compatibility
-$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mgr.{{ ansible_fqdn }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mgr.{{ ansible_fqdn }}.asok
-$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mgr.{{ ansible_hostname }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mgr.{{ ansible_hostname }}.asok
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mgr.{{ ansible_facts['fqdn'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mgr.{{ ansible_facts['fqdn'] }}.asok
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mgr.{{ ansible_facts['hostname'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mgr.{{ ansible_facts['hostname'] }}.asok
 
 systemctl reset-failed ceph-mgr@${MGR_NAME}
 # First, restart the daemon
index 06383eae1b41172ffd7615b2de3f78c0894a3041..2beade15a9fbd972b3d7b5ec1b474ffdf33493bb 100644 (file)
@@ -3,19 +3,19 @@
 RETRIES="{{ handler_health_mon_check_retries }}"
 DELAY="{{ handler_health_mon_check_delay }}"
 MONITOR_NAME="{{ monitor_name }}"
-{% if containerized_deployment %}
-DOCKER_EXEC="{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
+{% if containerized_deployment | bool %}
+DOCKER_EXEC="{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
 {% endif %}
 
 # if daemon is uninstalled, no restarting is needed; so exit with success
-systemctl status ceph-mon@{{ ansible_hostname }} > /dev/null
+systemctl status ceph-mon@{{ ansible_facts['hostname'] }} > /dev/null
 if [[ $? -ne 0 ]]; then
   exit 0
 fi
 
 # Backward compatibility
-$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mon.{{ ansible_fqdn }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mon.{{ ansible_fqdn }}.asok
-$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mon.{{ ansible_hostname }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mon.{{ ansible_hostname }}.asok
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mon.{{ ansible_facts['fqdn'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mon.{{ ansible_facts['fqdn'] }}.asok
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mon.{{ ansible_facts['hostname'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mon.{{ ansible_facts['hostname'] }}.asok
 
 check_quorum() {
 while [ $RETRIES -ne 0 ]; do
@@ -34,7 +34,7 @@ exit 1
 }
 
 # First, restart the daemon
-systemctl restart ceph-mon@{{ ansible_hostname }}
+systemctl restart ceph-mon@{{ ansible_facts['hostname'] }}
 
 COUNT=10
 # Wait and ensure the socket exists after restarting the daemon
@@ -45,5 +45,5 @@ while [ $COUNT -ne 0 ]; do
 done
 # If we reach this point, it means the socket is not present.
 echo "Socket file ${SOCKET} could not be found, which means the monitor is not running. Showing ceph-mon unit logs now:"
-journalctl -u ceph-mon@{{ ansible_hostname }}
+journalctl -u ceph-mon@{{ ansible_facts['hostname'] }}
 exit 1
index 0fab87cc0900b1e8794d266e16fb2d81f35bd95e..d7432a3a7a3924e92f0fa25e17a9fd2d66051e29 100644 (file)
@@ -2,10 +2,10 @@
 
 RETRIES="{{ handler_health_nfs_check_retries }}"
 DELAY="{{ handler_health_nfs_check_delay }}"
-NFS_NAME="ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_hostname) }}"
+NFS_NAME="ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}"
 PID=/var/run/ganesha.pid
-{% if containerized_deployment %}
-DOCKER_EXEC="{{ container_binary }} exec ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_hostname) }}"
+{% if containerized_deployment | bool %}
+DOCKER_EXEC="{{ container_binary }} exec ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}"
 {% endif %}
 
 # First, restart the daemon
index 5fb23454a861488caa0d0ffd3552259d38fb401f..2493469351f816b5064dc34acdd7dea67acc6ee3 100644 (file)
@@ -2,14 +2,14 @@
 
 RETRIES="{{ handler_health_rbd_mirror_check_retries }}"
 DELAY="{{ handler_health_rbd_mirror_check_delay }}"
-RBD_MIRROR_NAME="{{ ansible_hostname }}"
+RBD_MIRROR_NAME="{{ ansible_facts['hostname'] }}"
 {% if containerized_deployment %}
-DOCKER_EXEC="{{ container_binary }} exec ceph-rbd-mirror-{{ ansible_hostname }}"
+DOCKER_EXEC="{{ container_binary }} exec ceph-rbd-mirror-{{ ansible_facts['hostname'] }}"
 {% endif %}
 
 # Backward compatibility
-$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_fqdn }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_fqdn }}.asok
-$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_hostname }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_hostname }}.asok
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_facts['fqdn'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_facts['fqdn'] }}.asok
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_facts['hostname'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_facts['hostname'] }}.asok
 
 # First, restart the daemon
 systemctl restart ceph-rbd-mirror@rbd-mirror.${RBD_MIRROR_NAME}
index 62a5c31180a760c18181826f501efb8afa6d6882..93eda64b8bb8547f7082116c7427153da88fafbe 100644 (file)
@@ -2,7 +2,7 @@
 
 RETRIES="{{ handler_health_rgw_check_retries }}"
 DELAY="{{ handler_health_rgw_check_delay }}"
-HOST_NAME="{{ ansible_hostname }}"
+HOST_NAME="{{ ansible_facts['hostname'] }}"
 RGW_NUMS={{ rgw_instances | length | int }}
 RGW_FRONTEND_SSL_CERT={{ radosgw_frontend_ssl_certificate }}
 if [ -n "$RGW_FRONTEND_SSL_CERT" ]; then
index c43919c08ad7abb1d70fc75af530e16c3146f621..9be6c670e69182d59cc4e8828c72a672213b6956 100644 (file)
@@ -15,7 +15,7 @@
   block:
     - name: install firewalld python binding
       package:
-        name: "python{{ ansible_python.version.major }}-firewall"
+        name: "python{{ ansible_facts['python']['version']['major'] }}-firewall"
       tags: with_pkg
       when: not is_atomic | bool
 
index 40d5876a19e95034573629877de5d0678d5b6ebe..e9620327b69c5b2857d06608003c279396b4c4ac 100644 (file)
@@ -2,7 +2,7 @@
 - name: update cache for Debian based OSs
   apt:
     update_cache: yes
-  when: ansible_os_family == "Debian"
+  when: ansible_facts['os_family'] == "Debian"
   register: result
   until: result is succeeded
 
@@ -10,7 +10,7 @@
   include_tasks: configure_firewall.yml
   when:
     - configure_firewall | bool
-    - ansible_os_family in ['RedHat', 'Suse']
+    - ansible_facts['os_family'] in ['RedHat', 'Suse']
   tags: configure_firewall
 
 - name: include_tasks setup_ntp.yml
index 50d8c9766e58f2dabfa35dddd44f3192b7a7a743..c161e3647c37dc67c6c569abe0df33323605d7cf 100644 (file)
@@ -35,13 +35,13 @@ trusted_ip_list: 192.168.122.1
 # These options can be passed using the 'ceph_mds_docker_extra_env' variable.
 
 # TCMU_RUNNER resource limitation
-ceph_tcmu_runner_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+ceph_tcmu_runner_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 ceph_tcmu_runner_docker_cpu_limit: 1
 
 # RBD_TARGET_GW resource limitation
-ceph_rbd_target_gw_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+ceph_rbd_target_gw_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 ceph_rbd_target_gw_docker_cpu_limit: 1
 
 # RBD_TARGET_API resource limitation
-ceph_rbd_target_api_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+ceph_rbd_target_api_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 ceph_rbd_target_api_docker_cpu_limit: 1
index e566e16df6949a2a97dc52ee17d216dc4237b82e..292903e35315dcd01382e2d3a8b38fb572d7ee62 100644 (file)
@@ -24,7 +24,7 @@
 
 - name: add mgr ip address to trusted list with dashboard - ipv4
   set_fact:
-    trusted_ip_list: '{{ trusted_ip_list }},{{ hostvars[item]["ansible_all_ipv4_addresses"] | ips_in_ranges(public_network.split(",")) | first }}'
+    trusted_ip_list: '{{ trusted_ip_list }},{{ hostvars[item]["ansible_facts"]["all_ipv4_addresses"] | ips_in_ranges(public_network.split(",")) | first }}'
   with_items: '{{ groups[mgr_group_name] | default(groups[mon_group_name]) }}'
   when:
     - dashboard_enabled | bool
@@ -32,7 +32,7 @@
 
 - name: add mgr ip address to trusted list with dashboard - ipv6
   set_fact:
-    trusted_ip_list: '{{ trusted_ip_list }},{{ hostvars[item]["ansible_all_ipv6_addresses"] | ips_in_ranges(public_network.split(",")) | last }}'
+    trusted_ip_list: '{{ trusted_ip_list }},{{ hostvars[item]["ansible_facts"]["all_ipv6_addresses"] | ips_in_ranges(public_network.split(",")) | last }}'
   with_items: '{{ groups[mgr_group_name] | default(groups[mon_group_name]) }}'
   when:
     - dashboard_enabled | bool
@@ -49,7 +49,7 @@
 
 - name: set_fact container_exec_cmd
   set_fact:
-    container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+    container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   when: containerized_deployment | bool
 
index cd98ff13aa86e60528e11e9ae3a12558b141ae70..83b03e7991ee38542fb159f140498cccac875083 100644 (file)
@@ -33,7 +33,7 @@
       command: >
         openssl req -newkey rsa:2048 -nodes -keyout {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.key
          -x509 -days 365 -out {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.crt
-         -subj "/C=US/ST=./L=./O=RedHat/OU=Linux/CN={{ ansible_hostname }}"
+         -subj "/C=US/ST=./L=./O=RedHat/OU=Linux/CN={{ ansible_facts['hostname'] }}"
       delegate_to: localhost
       run_once: True
       with_items: "{{ crt_files_exist.results }}"
index 0d8296daf1e0c4d7b941f29a786c891df3a47823..0dcad9f23964e46fac2a83439a9c6e7622670670 100644 (file)
@@ -1,6 +1,6 @@
 ---
 - name: red hat based systems tasks
-  when: ansible_os_family == 'RedHat'
+  when: ansible_facts['os_family'] == 'RedHat'
   block:
     - name: set_fact common_pkgs
       set_fact:
@@ -76,7 +76,7 @@
 
         - name: ceph-iscsi stable repository
           get_url:
-            url: 'https://download.ceph.com/ceph-iscsi/{{ "3" if use_new_ceph_iscsi | bool else "2" }}/rpm/el{{ ansible_distribution_major_version }}/ceph-iscsi.repo'
+            url: "https://download.ceph.com/ceph-iscsi/{{ '3' if use_new_ceph_iscsi | bool else '2' }}/rpm/el{{ ansible_facts['distribution_major_version'] }}/ceph-iscsi.repo"
             dest: /etc/yum.repos.d/ceph-iscsi.repo
             force: true
           when: ceph_repository == 'community'
index 0b443ea9a4f703459bde021e3eb118a68b19225a..e249d02eb8bc7d56a8d44b6483413e6f62ba9999 100644 (file)
@@ -19,13 +19,13 @@ copy_admin_key: false
 # For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
 # Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
 # These options can be passed using the 'ceph_mds_docker_extra_env' variable.
-ceph_mds_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+ceph_mds_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 ceph_mds_docker_cpu_limit: 4
 
 # we currently for MDS_NAME to hostname because of a bug in ceph-docker
 # fix here: https://github.com/ceph/ceph-docker/pull/770
 # this will go away soon.
-ceph_mds_docker_extra_env: -e MDS_NAME={{ ansible_hostname }}
+ceph_mds_docker_extra_env: -e MDS_NAME={{ ansible_facts['hostname'] }}
 ceph_config_keys: [] # DON'T TOUCH ME
 
 
index 08c4b0e790e3e44cec9abb34969c353a35d67641..8919366ba234e0750f1fc3772a5f2d7c562be328 100644 (file)
@@ -4,14 +4,14 @@
 
 - name: systemd start mds container
   systemd:
-    name: ceph-mds@{{ ansible_hostname }}
+    name: ceph-mds@{{ ansible_facts['hostname'] }}
     state: started
     enabled: yes
     masked: no
     daemon_reload: yes
 
 - name: wait for mds socket to exist
-  command: "{{ container_binary }} exec ceph-mds-{{ ansible_hostname }} sh -c 'stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_hostname }}.asok || stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_fqdn }}.asok'"
+  command: "{{ container_binary }} exec ceph-mds-{{ ansible_facts['hostname'] }} sh -c 'stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['hostname'] }}.asok || stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['fqdn'] }}.asok'"
   changed_when: false
   register: multi_mds_socket
   retries: 5
index 6f8f15a612d375e382ab8f74fada0f48403823f1..0e2314d7a7268397a57cdce3e78feab5419a3991 100644 (file)
@@ -3,10 +3,10 @@
   apt:
     name: ceph-mds
     state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
-    default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else '' }}{{ ansible_distribution_release ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports else '' }}"
+    default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else '' }}{{ ansible_facts['distribution_release'] ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
   when:
     - mds_group_name in group_names
-    - ansible_os_family == 'Debian'
+    - ansible_facts['os_family'] == 'Debian'
   register: result
   until: result is succeeded
 
@@ -18,7 +18,7 @@
   until: result is succeeded
   when:
     - mds_group_name in group_names
-    - ansible_os_family in ['Suse', 'RedHat']
+    - ansible_facts['os_family'] in ['Suse', 'RedHat']
 
 - name: create mds keyring
   command: ceph --cluster {{ cluster }} --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring auth get-or-create mds.{{ mds_name }} osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/{{ cluster }}-{{ mds_name }}/keyring
@@ -41,7 +41,7 @@
     path: "/etc/systemd/system/ceph-mds@.service.d/"
   when:
     - ceph_mds_systemd_overrides is defined
-    - ansible_service_mgr == 'systemd'
+    - ansible_facts['service_mgr'] == 'systemd'
 
 - name: add ceph-mds systemd service overrides
   config_template:
@@ -51,7 +51,7 @@
     config_type: "ini"
   when:
     - ceph_mds_systemd_overrides is defined
-    - ansible_service_mgr == 'systemd'
+    - ansible_facts['service_mgr'] == 'systemd'
 
 - name: start and add that the metadata service to the init sequence
   service:
index 6d60fd25af3962a0e63c7ce22462147124ceb2e6..27dc48e875b679d35ebaa69fdf308f60511bc3b9 100644 (file)
@@ -6,17 +6,17 @@ Requires=docker.service
 {% else %}
 After=network.target
 {% endif %}
-{% set cpu_limit = ansible_processor_vcpus|int if ceph_mds_docker_cpu_limit|int > ansible_processor_vcpus|int else ceph_mds_docker_cpu_limit|int %}
+{% set cpu_limit = ansible_facts['processor_vcpus']|int if ceph_mds_docker_cpu_limit|int > ansible_facts['processor_vcpus']|int else ceph_mds_docker_cpu_limit|int %}
 
 [Service]
 EnvironmentFile=-/etc/environment
 {% if container_binary == 'podman' %}
 ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
-ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-mds-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-mds-{{ ansible_facts['hostname'] }}
 {% else %}
-ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mds-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mds-{{ ansible_facts['hostname'] }}
 {% endif %}
-ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mds-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mds-{{ ansible_facts['hostname'] }}
 ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
 {% if container_binary == 'podman' %}
   -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
@@ -32,12 +32,12 @@ ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
   -e CEPH_DAEMON=MDS \
   -e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
   {{ ceph_mds_docker_extra_env }} \
-  --name=ceph-mds-{{ ansible_hostname }} \
+  --name=ceph-mds-{{ ansible_facts['hostname'] }} \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
 {% if container_binary == 'podman' %}
 ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
 {% else %}
-ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-mds-{{ ansible_hostname }}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-mds-{{ ansible_facts['hostname'] }}
 {% endif %}
 KillMode=none
 Restart=always
index 053f623d7001a97bdaeabeb5095c856e2319c7e8..7455043d501a466f7b7a517973b31e2958f99b54 100644 (file)
@@ -35,7 +35,7 @@ ceph_mgr_packages:
 # For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
 # Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
 # These options can be passed using the 'ceph_mgr_docker_extra_env' variable.
-ceph_mgr_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+ceph_mgr_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 ceph_mgr_docker_cpu_limit: 1
 
 ceph_mgr_docker_extra_env:
index e924dcac2a56172cb5fdd159541b64573a207eb7..9d2d82ec47d1032c5fbc1d091db011683264cc8e 100644 (file)
@@ -1,7 +1,7 @@
 ---
 - name: create mgr directory
   file:
-    path: /var/lib/ceph/mgr/{{ cluster }}-{{ ansible_hostname }}
+    path: /var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }}
     state: directory
     owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
     group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
@@ -9,7 +9,7 @@
 
 - name: fetch ceph mgr keyring
   ceph_key:
-    name: "mgr.{{ ansible_hostname }}"
+    name: "mgr.{{ ansible_facts['hostname'] }}"
     caps:
       mon: allow profile mgr
       osd: allow *
@@ -19,7 +19,7 @@
     owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
     group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
     mode: "0400"
-    dest: "/var/lib/ceph/mgr/{{ cluster }}-{{ ansible_hostname }}/keyring"
+    dest: "/var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring"
   environment:
     CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
     CEPH_CONTAINER_BINARY: "{{ container_binary }}"
@@ -30,7 +30,7 @@
   block:
     - name: create ceph mgr keyring(s) on a mon node
       ceph_key:
-        name: "mgr.{{ hostvars[item]['ansible_hostname'] }}"
+        name: "mgr.{{ hostvars[item]['ansible_facts']['hostname'] }}"
         caps:
           mon: allow profile mgr
           osd: allow *
@@ -51,7 +51,7 @@
       set_fact:
         _mgr_keys:
           - { 'name': 'client.admin', 'path': "/etc/ceph/{{ cluster }}.client.admin.keyring", 'copy_key': copy_admin_key }
-          - { 'name': "mgr.{{ ansible_hostname }}", 'path': "/var/lib/ceph/mgr/{{ cluster }}-{{ ansible_hostname }}/keyring", 'copy_key': true }
+          - { 'name': "mgr.{{ ansible_facts['hostname'] }}", 'path': "/var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring", 'copy_key': true }
 
     - name: get keys from monitors
       command: "{{ _container_exec_cmd | default('') }} ceph --cluster {{ cluster }} auth get {{ item.name }}"
@@ -77,8 +77,8 @@
 
 - name: set mgr key permissions
   file:
-    path: /var/lib/ceph/mgr/{{ cluster }}-{{ ansible_hostname }}/keyring
-    owner: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
-    group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
+    path: /var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring
+    owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
+    group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
     mode: "{{ ceph_keyring_permissions }}"
   when: cephx | bool
index d5cf2ef1ef13ce1469e9ea3e58dcb9ede7ab99a8..0c60b026a3393a1d19910ba94db857c47d2c5592 100644 (file)
@@ -1,7 +1,7 @@
 ---
 - name: set_fact container_exec_cmd
   set_fact:
-    container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_hostname'] }}"
+    container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}"
   with_items: "{{ groups.get(mon_group_name, []) }}"
   delegate_to: "{{ item }}"
   delegate_facts: true
index 0cfe8d1fb8bfa11c918484fedbf544f32253817f..6c441ea3b3399f5f547e09857331fe3ad8272835 100644 (file)
@@ -1,10 +1,10 @@
 ---
 - name: set_fact ceph_mgr_packages for sso
   set_fact:
-    ceph_mgr_packages: "{{ ceph_mgr_packages | union(['python3-saml' if ansible_distribution_major_version | int == 8 else 'python-saml']) }}"
+    ceph_mgr_packages: "{{ ceph_mgr_packages | union(['python3-saml' if ansible_facts['distribution_major_version'] | int == 8 else 'python-saml']) }}"
   when:
     - dashboard_enabled | bool
-    - ansible_distribution == 'RedHat'
+    - ansible_facts['distribution'] == 'RedHat'
 
 - name: install ceph-mgr packages on RedHat or SUSE
   package:
     state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
   register: result
   until: result is succeeded
-  when: ansible_os_family in ['RedHat', 'Suse']
+  when: ansible_facts['os_family'] in ['RedHat', 'Suse']
 
 - name: install ceph-mgr packages for debian
   apt:
     name: '{{ ceph_mgr_packages }}'
     state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
-    default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else ''}}{{ ansible_distribution_release ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports else '' }}"
+    default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else ''}}{{ ansible_facts['distribution_release'] ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
   register: result
   until: result is succeeded
-  when: ansible_os_family == 'Debian'
+  when: ansible_facts['os_family'] == 'Debian'
 
 - name: install routes python library for dashboard module
   apt:
@@ -29,5 +29,5 @@
   register: result
   until: result is succeeded
   when:
-    - ansible_os_family == 'Debian'
+    - ansible_facts['os_family'] == 'Debian'
     - "'ceph-mgr-dashboard' in ceph_mgr_packages"
index 348a718dde467a0d312381bfe86f206bac56daf9..658ca975803f5b5ec8fbe0e61dcb3b0858c6cba5 100644 (file)
@@ -5,7 +5,7 @@
     path: "/etc/systemd/system/ceph-mgr@.service.d/"
   when:
     - ceph_mgr_systemd_overrides is defined
-    - ansible_service_mgr == 'systemd'
+    - ansible_facts['service_mgr'] == 'systemd'
 
 - name: add ceph-mgr systemd service overrides
   config_template:
@@ -15,7 +15,7 @@
     config_type: "ini"
   when:
     - ceph_mgr_systemd_overrides is defined
-    - ansible_service_mgr == 'systemd'
+    - ansible_facts['service_mgr'] == 'systemd'
 
 - name: include_tasks systemd.yml
   include_tasks: systemd.yml
@@ -23,7 +23,7 @@
 
 - name: systemd start mgr
   systemd:
-    name: ceph-mgr@{{ ansible_hostname }}
+    name: ceph-mgr@{{ ansible_facts['hostname'] }}
     state: started
     enabled: yes
     masked: no
index 5e4874837e9359e892c3570a39c281f96dac57cf..5f7f9d9ce890626b35ec39b9fc63b578bb0d1953 100644 (file)
@@ -11,11 +11,11 @@ After=network.target
 EnvironmentFile=-/etc/environment
 {% if container_binary == 'podman' %}
 ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
-ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-mgr-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-mgr-{{ ansible_facts['hostname'] }}
 {% else %}
-ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mgr-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mgr-{{ ansible_facts['hostname'] }}
 {% endif %}
-ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mgr-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mgr-{{ ansible_facts['hostname'] }}
 ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
 {% if container_binary == 'podman' %}
   -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
@@ -31,12 +31,12 @@ ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
   -e CEPH_DAEMON=MGR \
   -e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
   {{ ceph_mgr_docker_extra_env }} \
-  --name=ceph-mgr-{{ ansible_hostname }} \
+  --name=ceph-mgr-{{ ansible_facts['hostname'] }} \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
 {% if container_binary == 'podman' %}
 ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
 {% else %}
-ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-mgr-{{ ansible_hostname }}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-mgr-{{ ansible_facts['hostname'] }}
 {% endif %}
 KillMode=none
 Restart=always
index ac362b41243f75701d1c729d305ef116b280224c..636bc4506c3e5dad9e4ecca84b16864a4ac17f01 100644 (file)
@@ -37,7 +37,7 @@ client_admin_ceph_authtool_cap:
 # For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
 # Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
 # These options can be passed using the 'ceph_mon_docker_extra_env' variable.
-ceph_mon_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+ceph_mon_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 ceph_mon_docker_cpu_limit: 1
 ceph_mon_container_listen_port: 3300
 
index 7d8ac9f71722eeb651215803c10b496a042429be..e55504ec9ca8a43dbbe8e4c1f306f63ed7a78b47 100644 (file)
@@ -5,7 +5,7 @@
     ceph
     --cluster {{ cluster }}
     -n mon.
-    -k /var/lib/ceph/mon/{{ cluster }}-{{ ansible_hostname }}/keyring
+    -k /var/lib/ceph/mon/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring
     mon_status
     --format json
   register: ceph_health_raw
index 9f7cf893b5a515e23a768d2dcadfe7729551db08..70e29fb71f2384f3c4aa2fca4b75df2391478a2a 100644 (file)
@@ -7,7 +7,7 @@
         name: mon.
         cluster: "{{ cluster }}"
         user: mon.
-        user_key: "/var/lib/ceph/mon/{{ cluster }}-{{ hostvars[running_mon]['ansible_hostname'] }}/keyring"
+        user_key: "/var/lib/ceph/mon/{{ cluster }}-{{ hostvars[running_mon]['ansible_facts']['hostname'] }}/keyring"
         state: info
       environment:
         CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
index 4aa692506d3a0d122ed6d4fa99569431bcbee5ae..848b744f279c96cf4320de5a7da2c883a44042ae 100644 (file)
@@ -1,7 +1,7 @@
 ---
 - name: set_fact container_exec_cmd
   set_fact:
-    container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
+    container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
   when: containerized_deployment | bool
 
 - name: include deploy_monitors.yml
index c6a4e74ea1b7aa6155ae68f050b1e7706a213e33..8c3b14314f5ba721894de2b17d8baa3361ba14c6 100644 (file)
@@ -6,7 +6,7 @@
   when:
     - not containerized_deployment | bool
     - ceph_mon_systemd_overrides is defined
-    - ansible_service_mgr == 'systemd'
+    - ansible_facts['service_mgr'] == 'systemd'
 
 - name: add ceph-mon systemd service overrides
   config_template:
@@ -17,7 +17,7 @@
   when:
     - not containerized_deployment | bool
     - ceph_mon_systemd_overrides is defined
-    - ansible_service_mgr == 'systemd'
+    - ansible_facts['service_mgr'] == 'systemd'
 
 - name: include_tasks systemd.yml
   include_tasks: systemd.yml
@@ -25,7 +25,7 @@
 
 - name: start the monitor service
   systemd:
-    name: ceph-mon@{{ monitor_name if not containerized_deployment else ansible_hostname }}
+    name: ceph-mon@{{ monitor_name if not containerized_deployment | bool else ansible_facts['hostname'] }}
     state: started
     enabled: yes
     masked: no
index ef8c8466857b9856910bd2fcdc33f3d8c8f89047..390c5044519af95c6846b5c9745864c99046d255 100644 (file)
@@ -28,7 +28,7 @@ ExecStart=/usr/bin/{{ container_binary }} run --rm --name ceph-mon-%i \
   -v /var/run/ceph:/var/run/ceph:z \
   -v /etc/localtime:/etc/localtime:ro \
   -v /var/log/ceph:/var/log/ceph:z \
-{% if ansible_distribution == 'RedHat' -%}
+{% if ansible_facts['distribution'] == 'RedHat' -%}
   -v /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:z \
 {% endif -%}
 {% if mon_docker_privileged -%}
index adc9f5b14b5911cb27b600dddba525ee2756b669..5f058525e677031f3f63637669c7b780986fe1a2 100644 (file)
@@ -17,7 +17,7 @@ copy_admin_key: false
 ceph_nfs_enable_service: true
 
 # ceph-nfs systemd service uses ansible's hostname as an instance id,
-# so service name is ceph-nfs@{{ ansible_hostname }}, this is not
+# so service name is ceph-nfs@{{ ansible_facts['hostname'] }}, this is not
 # ideal when ceph-nfs is managed by pacemaker across multiple hosts - in
 # such case it's better to have constant instance id instead which
 # can be set by 'ceph_nfs_service_suffix'
@@ -87,7 +87,7 @@ ceph_nfs_rgw_sectype: "sys,krb5,krb5i,krb5p"
 # they must be configured.
 #ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
 #ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
-rgw_client_name: client.rgw.{{ ansible_hostname }}
+rgw_client_name: client.rgw.{{ ansible_facts['hostname'] }}
 
 ###################
 # CONFIG OVERRIDE #
index 5acacb08edae706024e30ca9dd6fa39882e5afd6..06860798c63c08cd10a782045d07fc1776ea3e70 100644 (file)
@@ -1,7 +1,7 @@
 ---
 - name: set_fact container_exec_cmd_nfs
   set_fact:
-    container_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+    container_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
   when: containerized_deployment | bool
 
 - name: check if "{{ ceph_nfs_rgw_user }}" exists
index f739b32d7b0a1978a259b206657aff53eb4c25fe..eb51e526b2f8f63282e3e0685697dc7583216354 100644 (file)
@@ -15,7 +15,7 @@
         state: present
       register: result
       until: result is succeeded
-      when: ansible_distribution_major_version == '7'
+      when: ansible_facts['distribution_major_version'] == '7'
 
     - name: install nfs-ganesha-selinux and python3-policycoreutils on RHEL 8
       package:
@@ -23,7 +23,7 @@
         state: present
       register: result
       until: result is succeeded
-      when: ansible_distribution_major_version == '8'
+      when: ansible_facts['distribution_major_version'] == '8'
 
     - name: add ganesha_t to permissive domain
       selinux_permissive:
index 4cd3bd3e287046bc93cc58324fd4caad75fcbb84..4ba9f96a08aa77205cf6f129e058f8cea87ce7ef 100644 (file)
@@ -2,7 +2,7 @@
 # global/common requirement
 - name: stop nfs server service
   systemd:
-    name: "{{ 'nfs-server' if ansible_os_family == 'RedHat' else 'nfsserver' if ansible_os_family == 'Suse' else 'nfs-kernel-server' if ansible_os_family == 'Debian' }}"
+    name: "{{ 'nfs-server' if ansible_facts['os_family'] == 'RedHat' else 'nfsserver' if ansible_facts['os_family'] == 'Suse' else 'nfs-kernel-server' if ansible_facts['os_family'] == 'Debian' }}"
     state: stopped
     enabled: no
   failed_when: false
@@ -24,7 +24,7 @@
   import_tasks: ganesha_selinux_fix.yml
   when:
     - not containerized_deployment | bool
-    - ansible_os_family == 'RedHat'
+    - ansible_facts['os_family'] == 'RedHat'
 
 - name: nfs with external ceph cluster task related
   when:
@@ -40,7 +40,7 @@
         mode: "0755"
       with_items:
         - "{{ ceph_nfs_ceph_user }}"
-        - "{{ ansible_hostname }}"
+        - "{{ ansible_facts['hostname'] }}"
 
     - name: set_fact rgw_client_name
       set_fact:
@@ -55,7 +55,7 @@
         group: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
       with_nested:
         - "{{ hostvars[groups['_filtered_clients'][0]]['slurp_client_keys']['results'] | default([]) }}"
-        - ['/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ceph_nfs_ceph_user }}/keyring', '/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/keyring']
+        - ['/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ceph_nfs_ceph_user }}/keyring', "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring"]
       when:
         - not item.0.get('skipped', False)
         - item.0.item.name == 'client.' + ceph_nfs_ceph_user or item.0.item.name == rgw_client_name
index c2529d8c7217af4e7ac50044ed71eae3a2024090..65e5fe5b57244076bb0c87ede634db99fc36f898 100644 (file)
@@ -3,7 +3,7 @@
   block:
     - name: set_fact container_exec_cmd
       set_fact:
-        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_hostname'] }}"
+        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}"
       with_items: "{{ groups.get(mon_group_name, []) }}"
       delegate_to: "{{ item }}"
       delegate_facts: true
index 69efda56ba278c455b6914c2bb90b2712184caf5..0bf796a0570c9bd7164f8dfadf1ea15c6d816fa6 100644 (file)
@@ -1,11 +1,11 @@
 ---
 - name: include red hat based system related tasks
   include_tasks: pre_requisite_non_container_red_hat.yml
-  when: ansible_os_family == 'RedHat'
+  when: ansible_facts['os_family'] == 'RedHat'
 
 - name: include debian based system related tasks
   include_tasks: pre_requisite_non_container_debian.yml
-  when: ansible_os_family == 'Debian'
+  when: ansible_facts['os_family'] == 'Debian'
 
 - name: install nfs rgw/cephfs gateway - suse
   zypper:
@@ -18,7 +18,7 @@
   when:
     - (ceph_origin == 'repository' or ceph_origin == 'distro')
     - ceph_repository != 'rhcs'
-    - ansible_os_family == 'Suse'
+    - ansible_facts['os_family'] == 'Suse'
     - item.install | bool
   register: result
   until: result is succeeded
@@ -35,7 +35,7 @@
   with_items:
     - { name: "/var/lib/ceph/bootstrap-rgw", create: "{{ nfs_obj_gw }}" }
     - { name: "/var/lib/ceph/radosgw", create: "{{ nfs_obj_gw }}" }
-    - { name: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}", create: "{{ nfs_obj_gw }}" }
+    - { name: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}", create: "{{ nfs_obj_gw }}" }
     - { name: "{{ rbd_client_admin_socket_path }}", create: "{{ nfs_obj_gw }}" }
     - { name: "/var/log/ceph", create: true }
     - { name: "/var/run/ceph", create: true }
       when: nfs_obj_gw | bool
       block:
         - name: create rados gateway keyring
-          command: ceph --cluster {{ cluster }} --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring auth get-or-create client.rgw.{{ ansible_hostname }} osd 'allow rwx' mon 'allow rw' -o /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/keyring
+          command: ceph --cluster {{ cluster }} --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring auth get-or-create client.rgw.{{ ansible_facts['hostname'] }} osd 'allow rwx' mon 'allow rw' -o /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring
           args:
-            creates: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/keyring
+            creates: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring
           changed_when: false
 
         - name: set rados gateway key permissions
           file:
-            path: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/keyring
+            path: /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring
             owner: "ceph"
             group: "ceph"
             mode: "0600"
index 6db66b9bf1c01fc0f386513a89d7ce8f45811711..d2c4dd4bd3194888fb32665728a0af6d2a54e6ee 100644 (file)
@@ -9,7 +9,7 @@
       block:
         - name: add nfs-ganesha stable repository
           apt_repository:
-            repo: "deb {{ nfs_ganesha_stable_deb_repo }} {{ ceph_stable_distro_source | default(ansible_distribution_release) }} main"
+            repo: "deb {{ nfs_ganesha_stable_deb_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main"
             state: present
             update_cache: no
           register: add_ganesha_apt_repo
@@ -30,7 +30,7 @@
       block:
         - name: fetch nfs-ganesha development repository
           uri:
-            url: https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_distribution | lower }}/{{ ansible_distribution_release }}/flavors/{{ nfs_ganesha_flavor }}/repo
+            url: https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_release'] }}/flavors/{{ nfs_ganesha_flavor }}/repo
             return_content: yes
           register: nfs_ganesha_dev_apt_repo
 
index d2c1ff20ef59bf1e55e080b8f7cf3345119bb953..527de75f50c85dd38c23e2bbe6a3a979cfe20ebf 100644 (file)
@@ -18,7 +18,7 @@
       block:
         - name: fetch nfs-ganesha red hat development repository
           uri:
-            url: https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_distribution | lower }}/{{ ansible_distribution_major_version }}/flavors/{{ nfs_ganesha_flavor }}/repo
+            url: https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_major_version'] }}/flavors/{{ nfs_ganesha_flavor }}/repo
             return_content: yes
           register: nfs_ganesha_dev_yum_repo
 
index d3e4ac1f8fdd1e79435649b14cb1c0990e1fc4c7..c7b304eb8ba8e0b0cc8a6dab18e6e0ee64da8959 100644 (file)
@@ -8,7 +8,7 @@
 
   - name: set_fact container_exec_cmd_nfs - internal
     set_fact:
-      exec_cmd_nfs: "{{ container_binary + ' exec ceph-mon-' + hostvars[groups[mon_group_name][0]]['ansible_hostname'] if containerized_deployment | bool else '' }} rados"
+      exec_cmd_nfs: "{{ container_binary + ' exec ceph-mon-' + hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] if containerized_deployment | bool else '' }} rados"
       delegate_node: "{{ groups[mon_group_name][0] }}"
     when: groups.get(mon_group_name, []) | length > 0
 
@@ -72,7 +72,7 @@
 
 - name: systemd start nfs container
   systemd:
-    name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_hostname) }}
+    name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}
     state: started
     enabled: yes
     masked: no
index 1f749cd5a1991e246b5a3a0f54c1d873d452a234..e168b3e0e37c37c6ef4e18860ec8c22385d3ee75 100644 (file)
@@ -15,7 +15,7 @@ ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
 ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-nfs-%i
 {% endif %}
 ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-nfs-%i
-ExecStartPre={{ '/bin/mkdir' if ansible_os_family == 'Debian' else '/usr/bin/mkdir' }} -p /etc/ceph /etc/ganesha /var/lib/nfs/ganesha /var/log/ganesha
+ExecStartPre={{ '/bin/mkdir' if ansible_facts['os_family'] == 'Debian' else '/usr/bin/mkdir' }} -p /etc/ceph /etc/ganesha /var/lib/nfs/ganesha /var/log/ganesha
 ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
 {% if container_binary == 'podman' %}
   -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
@@ -36,7 +36,7 @@ ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
   -e CEPH_DAEMON=NFS \
   -e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
   {{ ceph_nfs_docker_extra_env }} \
-  --name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_hostname) }} \
+  --name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }} \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
 {% if container_binary == 'podman' %}
 ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
index 4658401862fd7036cb61ea2762442a31a13b3262..8084140d4a336fa2aefafb58e49aa92542447678 100644 (file)
@@ -161,7 +161,7 @@ ceph_config_keys: [] # DON'T TOUCH ME
 # For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
 # Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
 # These options can be passed using the 'ceph_osd_docker_extra_env' variable.
-ceph_osd_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+ceph_osd_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 ceph_osd_docker_cpu_limit: 4
 
 # The next two variables are undefined, and thus, unused by default.
index bc56ae14375f0a5cdfee0ddba9dc6f00ca4a6cda..185950553112e55e42a88ceb3ce84906fb8623b1 100644 (file)
@@ -5,7 +5,7 @@
 
 - name: set_fact container_exec_cmd
   set_fact:
-    container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_hostname'] }}"
+    container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}"
   with_items: "{{ groups.get(mon_group_name, []) }}"
   delegate_to: "{{ item }}"
   delegate_facts: true
@@ -23,7 +23,7 @@
   until: result is succeeded
   when:
     - not containerized_deployment | bool
-    - ansible_os_family != 'ClearLinux'
+    - ansible_facts['os_family'] != 'ClearLinux'
 
 - name: install numactl when needed
   package:
index 69bce22a54f0793c609d6a4f8acc0b4411fa4532..087b4fac96f769081f21886a718aeb5860108d90 100644 (file)
@@ -38,7 +38,7 @@
     path: "/etc/systemd/system/ceph-osd@.service.d/"
   when:
     - ceph_osd_systemd_overrides is defined
-    - ansible_service_mgr == 'systemd'
+    - ansible_facts['service_mgr'] == 'systemd'
 
 - name: add ceph-osd systemd service overrides
   config_template:
@@ -48,7 +48,7 @@
     config_type: "ini"
   when:
     - ceph_osd_systemd_overrides is defined
-    - ansible_service_mgr == 'systemd'
+    - ansible_facts['service_mgr'] == 'systemd'
 
 - name: ensure "/var/lib/ceph/osd/{{ cluster }}-{{ item }}" is present
   file:
index 86a157a8d071addbc2aecd13f50f153371ee766b..48c7d665e056d4022c0f806f0f3eec7c5d98335d 100644 (file)
@@ -2,7 +2,7 @@
 - name: debian based systems tasks
   when:
     - osd_objectstore == 'filestore'
-    - ansible_os_family == "Debian"
+    - ansible_facts['os_family'] == "Debian"
   block:
     - name: disable osd directory parsing by updatedb
       command: updatedb -e /var/lib/ceph
@@ -45,7 +45,7 @@
 
 - name: set_fact vm_min_free_kbytes
   set_fact:
-    vm_min_free_kbytes: "{{ 4194303 if ansible_memtotal_mb >= 49152 else default_vm_min_free_kbytes.stdout }}"
+    vm_min_free_kbytes: "{{ 4194303 if ansible_facts['memtotal_mb'] >= 49152 else default_vm_min_free_kbytes.stdout }}"
 
 - name: apply operating system tuning
   sysctl:
index 32d78a5668b2c03e1ef942311f26e30a04684cb2..216db6b17c10366637614ab952bd8d2b7c0df3fa 100644 (file)
@@ -7,7 +7,7 @@ Requires=docker.service
 {% else %}
 After=network.target
 {% endif %}
-{% set cpu_limit = ansible_processor_vcpus|int if ceph_osd_docker_cpu_limit|int > ansible_processor_vcpus|int else ceph_osd_docker_cpu_limit|int %}
+{% set cpu_limit = ansible_facts['processor_vcpus']|int if ceph_osd_docker_cpu_limit|int > ansible_facts['processor_vcpus']|int else ceph_osd_docker_cpu_limit|int %}
 
 [Service]
 EnvironmentFile=-/etc/environment
@@ -48,7 +48,7 @@ numactl \
   -v /var/run/ceph:/var/run/ceph:z \
   -v /var/run/udev/:/var/run/udev/ \
   -v /var/log/ceph:/var/log/ceph:z \
-  {% if ansible_distribution == 'Ubuntu' -%}
+  {% if ansible_facts['distribution'] == 'Ubuntu' -%}
   --security-opt apparmor:unconfined \
   {% endif -%}
   {{ container_env_args }} \
index 6aba7709eb875127b55bdbca49657e64f9a2bf7c..28280c3d683f5b378594ed47b7f07fa4bd07bcbf 100644 (file)
@@ -35,7 +35,7 @@ ExecStart=/usr/bin/{{ container_binary }} run --rm --name=alertmanager \
   --cluster.peer={{ peer }}:{{ alertmanager_cluster_port }} \
 {% endfor %}
   --storage.path=/alertmanager \
-  --web.external-url=http://{{ ansible_fqdn }}:{{ alertmanager_port }}/ \
+  --web.external-url=http://{{ ansible_facts['fqdn'] }}:{{ alertmanager_port }}/ \
   --web.listen-address={{ grafana_server_addr }}:{{ alertmanager_port }}
 {% if container_binary == 'podman' %}
 ExecStop=/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
index c2600acec42529542e78d87c756f826379132376..2ca58f417e7440f4ac7e6c79395a1266f64067c6 100644 (file)
@@ -11,5 +11,5 @@ receivers:
 - name: 'ceph-dashboard'
   webhook_configs:
 {% for host in groups['mgrs'] | default(groups['mons']) %}
-  - url: '{{ dashboard_protocol }}://{{ hostvars[host]['ansible_fqdn'] }}:{{ dashboard_port }}/api/prometheus_receiver'
+  - url: '{{ dashboard_protocol }}://{{ hostvars[host]['ansible_facts']['fqdn'] }}:{{ dashboard_port }}/api/prometheus_receiver'
 {% endfor %}
index 3a17da6f01647a83b4dd26050147de8fd65aaa98..601f19a1db9b4f85749c14d2a2f4f332142bd922 100644 (file)
@@ -31,7 +31,7 @@ ExecStart=/usr/bin/{{ container_binary }} run --rm --name=prometheus \
   {{ prometheus_container_image }} \
   --config.file=/etc/prometheus/prometheus.yml \
   --storage.tsdb.path=/prometheus \
-  --web.external-url=http://{{ ansible_fqdn }}:{{ prometheus_port }}/ \
+  --web.external-url=http://{{ ansible_facts['fqdn'] }}:{{ prometheus_port }}/ \
   --web.listen-address={{ grafana_server_addr }}:{{ prometheus_port }}
 {% if container_binary == 'podman' %}
 ExecStop=/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
index 29d0461e124f9e7470e2bfd6c0c8b1a55f07d57a..28882f3db1523659cf267ebf4f21a9ef37c791b4 100644 (file)
@@ -22,14 +22,14 @@ scrape_configs:
 {% for host in (groups['all'] | difference(groups[grafana_server_group_name]|union(groups.get(client_group_name, [])))) %}
       - targets: ['{{ host }}:{{ node_exporter_port }}']
         labels:
-          instance: "{{ hostvars[host]['ansible_nodename'] }}"
+          instance: "{{ hostvars[host]['ansible_facts']['nodename'] }}"
 {% endfor %}
   - job_name: 'grafana'
     static_configs:
 {% for host in groups[grafana_server_group_name] %}
       - targets: ['{{ host }}:{{ node_exporter_port }}']
         labels:
-          instance: "{{ hostvars[host]['ansible_nodename'] }}"
+          instance: "{{ hostvars[host]['ansible_facts']['nodename'] }}"
 {% endfor %}
 {% if iscsi_gw_group_name in groups %}
   - job_name: 'iscsi-gws'
@@ -37,7 +37,7 @@ scrape_configs:
 {% for host in groups[iscsi_gw_group_name] %}
       - targets: ['{{ host }}:9287']
         labels:
-          instance: "{{ hostvars[host]['ansible_nodename'] }}"
+          instance: "{{ hostvars[host]['ansible_facts']['nodename'] }}"
 {% endfor %}
 {% endif %}
 alerting:
index 75af947ff920ee5205c25a7a1db6b8d17bc7dc1e..a99f2ba8a8eaae94190d8d850e2379d84f13c1e8 100644 (file)
@@ -42,7 +42,7 @@ ceph_rbd_mirror_remote_user: ""
 # For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
 # Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
 # These options can be passed using the 'ceph_rbd_mirror_docker_extra_env' variable.
-ceph_rbd_mirror_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+ceph_rbd_mirror_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 ceph_rbd_mirror_docker_cpu_limit: 1
 
 ceph_rbd_mirror_docker_extra_env:
index e350808aef6d2dc695e1f053a5a9883d53bd49bb..5fee414d548a7960a0678299624569e0d3ebbcab 100644 (file)
     ceph --cluster {{ cluster }}
     --name client.bootstrap-rbd-mirror
     --keyring /var/lib/ceph/bootstrap-rbd-mirror/{{ cluster }}.keyring
-    auth get-or-create client.rbd-mirror.{{ ansible_hostname }}
+    auth get-or-create client.rbd-mirror.{{ ansible_facts['hostname'] }}
     mon 'profile rbd-mirror'
     osd 'profile rbd'
-    -o /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring
+    -o /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_facts['hostname'] }}.keyring
   args:
-    creates: /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring
+    creates: /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_facts['hostname'] }}.keyring
   when: not containerized_deployment | bool
 
 - name: set rbd-mirror key permissions
   file:
-    path: /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring
+    path: /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_facts['hostname'] }}.keyring
     owner: "ceph"
     group: "ceph"
     mode: "{{ ceph_keyring_permissions }}"
index 4da988c1bd02c323efc4b77e771d09c182615eb7..07a1384fd1bc930e4c49c2eb44a7129469cec58d 100644 (file)
@@ -1,14 +1,14 @@
 ---
 - name: enable mirroring on the pool
-  command: "{{ container_exec_cmd | default('') }} rbd --cluster {{ cluster }} --keyring /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring --name client.rbd-mirror.{{ ansible_hostname }} mirror pool enable {{ ceph_rbd_mirror_pool }} {{ ceph_rbd_mirror_mode }}"
+  command: "{{ container_exec_cmd | default('') }} rbd --cluster {{ cluster }} --keyring /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_facts['hostname'] }}.keyring --name client.rbd-mirror.{{ ansible_facts['hostname'] }} mirror pool enable {{ ceph_rbd_mirror_pool }} {{ ceph_rbd_mirror_mode }}"
   changed_when: false
 
 - name: list mirroring peer
-  command: "{{ container_exec_cmd | default('') }} rbd --cluster {{ cluster }} --keyring /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring --name client.rbd-mirror.{{ ansible_hostname }} mirror pool info {{ ceph_rbd_mirror_pool }}"
+  command: "{{ container_exec_cmd | default('') }} rbd --cluster {{ cluster }} --keyring /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_facts['hostname'] }}.keyring --name client.rbd-mirror.{{ ansible_facts['hostname'] }} mirror pool info {{ ceph_rbd_mirror_pool }}"
   changed_when: false
   register: mirror_peer
 
 - name: add a mirroring peer
-  command: "{{ container_exec_cmd | default('') }} rbd --cluster {{ cluster }} --keyring /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring --name client.rbd-mirror.{{ ansible_hostname }} mirror pool peer add {{ ceph_rbd_mirror_pool }} {{ ceph_rbd_mirror_remote_user }}@{{ ceph_rbd_mirror_remote_cluster }}"
+  command: "{{ container_exec_cmd | default('') }} rbd --cluster {{ cluster }} --keyring /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_facts['hostname'] }}.keyring --name client.rbd-mirror.{{ ansible_facts['hostname'] }} mirror pool peer add {{ ceph_rbd_mirror_pool }} {{ ceph_rbd_mirror_remote_user }}@{{ ceph_rbd_mirror_remote_cluster }}"
   changed_when: false
   when: ceph_rbd_mirror_remote_user in mirror_peer.stdout
index 1544c17b70eb5a3a624b8e794b9b01b397cf8330..98a6404ae0fd81265e3dddabb9de4074ce4be9b3 100644 (file)
@@ -16,7 +16,7 @@
   block:
     - name: set_fact container_exec_cmd
       set_fact:
-        container_exec_cmd: "{{ container_binary }} exec ceph-rbd-mirror-{{ ansible_hostname }}"
+        container_exec_cmd: "{{ container_binary }} exec ceph-rbd-mirror-{{ ansible_facts['hostname'] }}"
 
     - name: include start_container_rbd_mirror.yml
       include_tasks: start_container_rbd_mirror.yml
index cebeccff422df08ff5556c5709f579d096436731..c1ac8cd37c34fb4ba580d13fcc57026039faaf98 100644 (file)
@@ -5,7 +5,7 @@
 
 - name: systemd start rbd mirror container
   systemd:
-    name: ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}
+    name: ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}
     state: started
     enabled: yes
     masked: no
index 70140c5d138634f44cf19a7d5f9d6e155dd250bd..cbc32748b6e3abe7c8e6fef99fc40ba58d28345b 100644 (file)
@@ -5,7 +5,7 @@
     path: "/etc/systemd/system/ceph-rbd-mirror@.service.d/"
   when:
     - ceph_rbd_mirror_systemd_overrides is defined
-    - ansible_service_mgr == 'systemd'
+    - ansible_facts['service_mgr'] == 'systemd'
 
 - name: add ceph-rbd-mirror systemd service overrides
   config_template:
@@ -15,7 +15,7 @@
     config_type: "ini"
   when:
     - ceph_rbd_mirror_systemd_overrides is defined
-    - ansible_service_mgr == 'systemd'
+    - ansible_facts['service_mgr'] == 'systemd'
 
 - name: stop and remove the generic rbd-mirror service instance
   service:
@@ -34,7 +34,7 @@
 
 - name: start and add the rbd-mirror service instance
   service:
-    name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
+    name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
     state: started
     enabled: yes
     masked: no
index 830d29ae245bfdc73487cbfd98a8f628ab7e50ef..8dd83bd8eafb55a988592cb80fbef14e4fbd31fd 100644 (file)
@@ -11,11 +11,11 @@ After=network.target
 EnvironmentFile=-/etc/environment
 {% if container_binary == 'podman' %}
 ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
-ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-rbd-mirror-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-rbd-mirror-{{ ansible_facts['hostname'] }}
 {% else %}
-ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rbd-mirror-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rbd-mirror-{{ ansible_facts['hostname'] }}
 {% endif %}
-ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rbd-mirror-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rbd-mirror-{{ ansible_facts['hostname'] }}
 ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
 {% if container_binary == 'podman' %}
   -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
@@ -30,13 +30,13 @@ ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
   -e CLUSTER={{ cluster }} \
   -e CEPH_DAEMON=RBD_MIRROR \
   -e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
-  --name=ceph-rbd-mirror-{{ ansible_hostname }} \
+  --name=ceph-rbd-mirror-{{ ansible_facts['hostname'] }} \
   {{ ceph_rbd_mirror_docker_extra_env }} \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
 {% if container_binary == 'podman' %}
 ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
 {% else %}
-ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rbd-mirror-{{ ansible_hostname }}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rbd-mirror-{{ ansible_facts['hostname'] }}
 {% endif %}
 KillMode=none
 Restart=always
index 65bd791869e142a793cb953598e760975534807e..8f448160d550a4378aed41680b36e445e4a22852 100644 (file)
@@ -46,6 +46,6 @@ backend rgw-backend
     option httpchk HEAD /
 {% for host in groups[rgw_group_name] %}
 {% for instance in hostvars[host]['rgw_instances'] %}
-       server {{ 'server-' + hostvars[host]['ansible_hostname'] + '-' + instance['instance_name'] }} {{ instance['radosgw_address'] }}:{{ instance['radosgw_frontend_port'] }} weight 100 check
+       server {{ 'server-' + hostvars[host]['ansible_facts']['hostname'] + '-' + instance['instance_name'] }} {{ instance['radosgw_address'] }}:{{ instance['radosgw_frontend_port'] }} weight 100 check
 {% endfor %}
 {% endfor %}
index 0c5bd6135114b3c4f0dd8cd18840a76b308bb7e6..8793680f8529513c470ca63291efc6a0174f9745 100644 (file)
@@ -15,8 +15,8 @@ vrrp_script check_haproxy {
 
 {% for instance in vrrp_instances %}
 vrrp_instance {{ instance['name'] }} {
-    state {{ 'MASTER' if ansible_hostname == instance['master'] else 'BACKUP' }}
-    priority {{ '100' if ansible_hostname == instance['master'] else '90' }}
+    state {{ 'MASTER' if ansible_facts['hostname'] == instance['master'] else 'BACKUP' }}
+    priority {{ '100' if ansible_facts['hostname'] == instance['master'] else '90' }}
     interface {{ virtual_ip_interface }}
     virtual_router_id {{ 50 + loop.index }}
     advert_int 1
index f4bab87c15ce7c0958b37d29a314066df3b49ddb..5d993b360e6d14c1effa178652c2d39ae93faa77 100644 (file)
@@ -1,6 +1,6 @@
 ---
 - name: restart rgw
   service:
-    name: "ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}"
+    name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
     state: restarted
   with_items: "{{ rgw_instances }}"
index 3af8405da5634a3cc320c56ce764aa3d8e3291ea..acfe50bb4e097be3f233a9da71866c6226be088c 100644 (file)
@@ -5,7 +5,7 @@
     state: present
   register: result
   until: result is succeeded
-  when: ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf'
+  when: ansible_facts['pkg_mgr'] == 'yum' or ansible_facts['pkg_mgr'] == 'dnf'
 
 - name: install libnss3-tools on debian
   package:
@@ -13,7 +13,7 @@
     state: present
   register: result
   until: result is succeeded
-  when: ansible_pkg_mgr == 'apt'
+  when: ansible_facts['pkg_mgr'] == 'apt'
 
 - name: create nss directory for keystone certificates
   file:
index 68a2f429d5b0dcf414f89650e64537aedf6ebe28..cd489e0e4989dcfec39e6f91f7a12c3d34574be4 100644 (file)
@@ -2,11 +2,11 @@
 - name: create rgw keyrings
   ceph_key:
     state: present
-    name: "client.rgw.{{ ansible_hostname }}.{{ item.instance_name }}"
+    name: "client.rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
     cluster: "{{ cluster }}"
     user: "client.bootstrap-rgw"
     user_key: /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
-    dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}/keyring"
+    dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}/keyring"
     caps:
       osd: 'allow rwx'
       mon: 'allow rw'
index d0d96314ee6b3e7567ba9382015752d7efe6b723..f4e3296ad16bdf2ea57e794f552149971ed8fbef 100644 (file)
@@ -4,7 +4,7 @@
 
 - name: systemd start rgw container
   systemd:
-    name: ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}
+    name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
     state: started
     enabled: yes
     masked: no
index 1431d988b4a133f164b117e7d1ed1ff90d790894..564540a86dd61c41af383ba9d6ee5338244f5633 100644 (file)
@@ -15,7 +15,7 @@
 
 - name: start rgw instance
   service:
-    name: ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}
+    name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
     state: started
     enabled: yes
     masked: no
index 54bd5b0c10d1071df1238f9e535477509f51945f..d0f8d5358ed08f907840af610ce9c633138b223b 100644 (file)
@@ -6,17 +6,17 @@ Requires=docker.service
 {% else %}
 After=network.target
 {% endif %}
-{% set cpu_limit = ansible_processor_vcpus|int if ceph_rgw_docker_cpu_limit|int > ansible_processor_vcpus|int else ceph_rgw_docker_cpu_limit|int %}
+{% set cpu_limit = ansible_facts['processor_vcpus']|int if ceph_rgw_docker_cpu_limit|int > ansible_facts['processor_vcpus']|int else ceph_rgw_docker_cpu_limit|int %}
 
 [Service]
 EnvironmentFile=/var/lib/ceph/radosgw/{{ cluster }}-%i/EnvironmentFile
 {% if container_binary == 'podman' %}
 ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
-ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-rgw-{{ ansible_hostname }}-${INST_NAME}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME}
 {% else %}
-ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_hostname }}-${INST_NAME}
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME}
 {% endif %}
-ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rgw-{{ ansible_hostname }}-${INST_NAME}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME}
 ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
 {% if container_binary == 'podman' %}
   -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
@@ -34,7 +34,7 @@ ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
   -v /var/run/ceph:/var/run/ceph:z \
   -v /etc/localtime:/etc/localtime:ro \
   -v /var/log/ceph:/var/log/ceph:z \
-  {% if ansible_distribution == 'RedHat' -%}
+  {% if ansible_facts['distribution'] == 'RedHat' -%}
   -v /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:z \
   {% endif -%}
   {% if radosgw_frontend_ssl_certificate -%}
@@ -42,15 +42,15 @@ ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
   {% endif -%}
   -e CEPH_DAEMON=RGW \
   -e CLUSTER={{ cluster }} \
-  -e RGW_NAME={{ ansible_hostname }}.${INST_NAME} \
+  -e RGW_NAME={{ ansible_facts['hostname'] }}.${INST_NAME} \
   -e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
-  --name=ceph-rgw-{{ ansible_hostname }}-${INST_NAME} \
+  --name=ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME} \
   {{ ceph_rgw_docker_extra_env }} \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
 {% if container_binary == 'podman' %}
 ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
 {% else %}
-ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_hostname }}-${INST_NAME}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME}
 {% endif %}
 KillMode=none
 Restart=always
index 5f805473cf824a7085d70141b848ff4229556a19..17251127eed777199a8507a6e8d5ecf21cebe9ef 100644 (file)
@@ -2,23 +2,23 @@
 - name: "fail if {{ monitor_interface }} does not exist on {{ inventory_hostname }}"
   fail:
     msg: "{{ monitor_interface }} does not exist on {{ inventory_hostname }}"
-  when: monitor_interface not in ansible_interfaces
+  when: monitor_interface not in ansible_facts['interfaces']
 
 - name: "fail if {{ monitor_interface }} is not active on {{ inventory_hostname }}"
   fail:
     msg: "{{ monitor_interface }} is not active on {{ inventory_hostname }}"
-  when: not hostvars[inventory_hostname]['ansible_' + (monitor_interface | replace('-', '_'))]['active']
+  when: not hostvars[inventory_hostname]['ansible_facts'][(monitor_interface | replace('-', '_'))]['active']
 
 - name: "fail if {{ monitor_interface }} does not have any ip v4 address on {{ inventory_hostname }}"
   fail:
     msg: "{{ monitor_interface }} does not have any IPv4 address on {{ inventory_hostname }}"
   when:
     - ip_version == "ipv4"
-    - hostvars[inventory_hostname]['ansible_' + (monitor_interface | replace('-', '_'))]['ipv4'] is not defined
+    - hostvars[inventory_hostname]['ansible_facts'][(monitor_interface | replace('-', '_'))]['ipv4'] is not defined
 
 - name: "fail if {{ monitor_interface }} does not have any ip v6 address on {{ inventory_hostname }}"
   fail:
     msg: "{{ monitor_interface }} does not have any IPv6 address on {{ inventory_hostname }}"
   when:
     - ip_version == "ipv6"
-    - hostvars[inventory_hostname]['ansible_' + (monitor_interface | replace('-', '_'))]['ipv6'] is not defined
+    - hostvars[inventory_hostname]['ansible_facts'][(monitor_interface | replace('-', '_'))]['ipv6'] is not defined
index 73db6078e2a3ee9c54cd3a07853bd2dc3ea4fa26..c2438cf3dd3f40e24e9c2b2bafe2525420574196 100644 (file)
@@ -2,23 +2,23 @@
 - name: "fail if {{ radosgw_interface }} does not exist on {{ inventory_hostname }}"
   fail:
     msg: "{{ radosgw_interface }} does not exist on {{ inventory_hostname }}"
-  when: radosgw_interface not in ansible_interfaces
+  when: radosgw_interface not in ansible_facts['interfaces']
 
 - name: "fail if {{ radosgw_interface }} is not active on {{ inventory_hostname }}"
   fail:
     msg: "{{ radosgw_interface }} is not active on {{ inventory_hostname }}"
-  when: hostvars[inventory_hostname]['ansible_' + (radosgw_interface | replace('-', '_'))]['active'] == "false"
+  when: hostvars[inventory_hostname]['ansible_facts'][(radosgw_interface | replace('-', '_'))]['active'] == "false"
 
 - name: "fail if {{ radosgw_interface }} does not have any ip v4 address on {{ inventory_hostname }}"
   fail:
     msg: "{{ radosgw_interface }} does not have any IPv4 address on {{ inventory_hostname }}"
   when:
     - ip_version == "ipv4"
-    - hostvars[inventory_hostname]['ansible_' + (radosgw_interface | replace('-', '_'))]['ipv4'] is not defined
+    - hostvars[inventory_hostname]['ansible_facts'][(radosgw_interface | replace('-', '_'))]['ipv4'] is not defined
 
 - name: "fail if {{ radosgw_interface }} does not have any ip v6 address on {{ inventory_hostname }}"
   fail:
     msg: "{{ radosgw_interface }} does not have any IPv6 address on {{ inventory_hostname }}"
   when:
     - ip_version == "ipv6"
-    - hostvars[inventory_hostname]['ansible_' + (radosgw_interface | replace('-', '_'))]['ipv6'] is not defined
+    - hostvars[inventory_hostname]['ansible_facts'][(radosgw_interface | replace('-', '_'))]['ipv6'] is not defined
index 3bcd462691ec37977deba4d41410dc6d952687ba..734cd69700b572ee64c0214f0fd6659fb9ee6fa9 100644 (file)
@@ -2,4 +2,4 @@
 - name: "fail if {{ inventory_hostname }} does not have any {{ ip_version }} address on {{ monitor_address_block }}"
   fail:
     msg: "{{ inventory_hostname }} does not have any {{ ip_version }} address on {{ monitor_address_block }}"
-  when: hostvars[inventory_hostname]['ansible_all_' + ip_version + '_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['monitor_address_block'].split(',')) | length == 0
+  when: hostvars[inventory_hostname]['ansible_facts']['all_' + ip_version + '_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['monitor_address_block'].split(',')) | length == 0
index 0b06afe430271e73575dcae3763f385c15ec5a70..85b27456a73222857165e1c028acc763b0a440e3 100644 (file)
     - name: fail on unsupported distribution for iscsi gateways
       fail:
         msg: "iSCSI gateways can only be deployed on Red Hat Enterprise Linux, CentOS or Fedora"
-      when: ansible_distribution not in ['RedHat', 'CentOS', 'Fedora']
+      when: ansible_facts['distribution'] not in ['RedHat', 'CentOS', 'Fedora']
 
     - name: fail on unsupported distribution version for iscsi gateways
-      command: 'grep -q {{ item }}=m {% if is_atomic|bool %}/usr/lib/ostree-boot{% else %}/boot{% endif %}/config-{{ ansible_kernel }}'
+      command: 'grep -q {{ item }}=m {% if is_atomic|bool %}/usr/lib/ostree-boot{% else %}/boot{% endif %}/config-{{ ansible_facts["kernel"] }}'
       register: iscsi_kernel
       changed_when: false
       failed_when: iscsi_kernel.rc != 0
@@ -41,5 +41,5 @@
         - CONFIG_TARGET_CORE
         - CONFIG_TCM_USER2
         - CONFIG_ISCSI_TARGET
-      when: ansible_distribution in ['RedHat', 'CentOS']
+      when: ansible_facts['distribution'] in ['RedHat', 'CentOS']
   when: iscsi_gw_group_name in group_names
index 99895dcbb21f4b4abb1441f473807e8618ac8fc8..6c7b82ecae4ef8a0e0d13905ed6c95519960d108 100644 (file)
@@ -12,4 +12,4 @@
     msg: "ceph-nfs packages are not available from openSUSE Leap 15.x repositories (ceph_origin = 'distro')"
   when:
     - ceph_origin == 'distro'
-    - ansible_distribution == 'openSUSE Leap'
+    - ansible_facts['distribution'] == 'openSUSE Leap'
index 5592b7b006d23f54e77e48a7f22f6612d20d1565..330199f9052496ca1d93f83958dc42260609abeb 100644 (file)
 
 - name: fail on unsupported system
   fail:
-    msg: "System not supported {{ ansible_system }}"
-  when: ansible_system not in ['Linux']
+    msg: "System not supported {{ ansible_facts['system'] }}"
+  when: ansible_facts['system'] not in ['Linux']
 
 - name: fail on unsupported architecture
   fail:
-    msg: "Architecture not supported {{ ansible_architecture }}"
-  when: ansible_architecture not in ['x86_64', 'ppc64le', 'armv7l', 'aarch64']
+    msg: "Architecture not supported {{ ansible_facts['architecture'] }}"
+  when: ansible_facts['architecture'] not in ['x86_64', 'ppc64le', 'armv7l', 'aarch64']
 
 - name: fail on unsupported distribution
   fail:
-    msg: "Distribution not supported {{ ansible_os_family }}"
-  when: ansible_os_family not in ['Debian', 'RedHat', 'ClearLinux', 'Suse']
+    msg: "Distribution not supported {{ ansible_facts['os_family'] }}"
+  when: ansible_facts['os_family'] not in ['Debian', 'RedHat', 'ClearLinux', 'Suse']
 
 - name: red hat based systems tasks
   when:
     - ceph_repository == 'rhcs'
-    - ansible_distribution == 'RedHat'
+    - ansible_facts['distribution'] == 'RedHat'
   block:
     - name: fail on unsupported distribution for red hat ceph storage
       fail:
-        msg: "Distribution not supported {{ ansible_distribution_version }} by Red Hat Ceph Storage, only RHEL 8 (>= 8.1) or RHEL 7 (>= 7.7)"
-      when: (ansible_distribution_major_version | int == 8 and ansible_distribution_version is version('8.1', '<')) or
-            (ansible_distribution_major_version | int == 7 and ansible_distribution_version is version('7.7', '<'))
+        msg: "Distribution not supported {{ ansible_facts['distribution_version'] }} by Red Hat Ceph Storage, only RHEL 8 (>= 8.1) or RHEL 7 (>= 7.7)"
+      when: (ansible_facts['distribution_major_version'] | int == 8 and ansible_facts['distribution_version'] is version('8.1', '<')) or
+            (ansible_facts['distribution_major_version'] | int == 7 and ansible_facts['distribution_version'] is version('7.7', '<'))
 
     - name: subscription manager related tasks
       when: ceph_repository_type == 'cdn'
 
 - name: fail on unsupported distribution for ubuntu cloud archive
   fail:
-    msg: "Distribution not supported by Ubuntu Cloud Archive: {{ ansible_distribution }}"
+    msg: "Distribution not supported by Ubuntu Cloud Archive: {{ ansible_facts['distribution'] }}"
   when:
     - ceph_repository == 'uca'
-    - ansible_distribution != 'Ubuntu'
+    - ansible_facts['distribution'] != 'Ubuntu'
 
 - name: "fail on unsupported openSUSE distribution (only 15.x supported)"
   fail:
-    msg: "Distribution not supported: {{ ansible_distribution }}"
+    msg: "Distribution not supported: {{ ansible_facts['distribution'] }}"
   when:
-    - ansible_distribution == 'openSUSE Leap'
-    - ansible_distribution_major_version != '15'
+    - ansible_facts['distribution'] == 'openSUSE Leap'
+    - ansible_facts['distribution_major_version'] != '15'
 
 - name: fail if systemd is not present
   fail:
     msg: "Systemd must be present"
-  when: ansible_service_mgr != 'systemd'
+  when: ansible_facts['service_mgr'] != 'systemd'
index a2c77049bf166a0d8a4d4f163ad672fde0b20ae9..07c2e95e5e09f4736008e611389fa4e68eeffbf5 100644 (file)
@@ -97,7 +97,7 @@
     - not use_fqdn_yes_i_am_sure | bool
 
 - name: debian based systems tasks
-  when: ansible_os_family == 'Debian'
+  when: ansible_facts['os_family'] == 'Debian'
   block:
     - name: fail if local scenario is enabled on debian
       fail:
     msg: installation can't happen on Atomic and ntpd needs to be installed
   when:
     - is_atomic | default(False) | bool
-    - ansible_os_family == 'RedHat'
+    - ansible_facts['os_family'] == 'RedHat'
     - ntp_daemon_type == 'ntpd'
 
 - name: make sure journal_size configured
index af9c317f21892ff7499f089c2781ef910b2e949c..a3e97d404f3f79c2a45d95b809d60d96a16c6933 100644 (file)
 
     - name: set_fact container_binary
       set_fact:
-        container_binary: "{{ 'podman' if (podman_binary.stat.exists and ansible_distribution == 'Fedora') or (ansible_os_family == 'RedHat' and ansible_distribution_major_version == '8') else 'docker' }}"
+        container_binary: "{{ 'podman' if (podman_binary.stat.exists and ansible_facts['distribution'] == 'Fedora') or (ansible_facts['os_family'] == 'RedHat' and ansible_facts['distribution_major_version'] == '8') else 'docker' }}"
 
     - name: get ceph status from the first monitor
       command: >
-        {{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} -s
+        {{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} -s
       register: ceph_status
       changed_when: false
       delegate_to: "{{ groups[mon_group_name][0] }}"
index 021f20c4f95b123df34ce5c7c13b7dbfa1189e96..4c577d41707bae3ab0f2bdfaec4903258e162b82 100644 (file)
@@ -6,8 +6,8 @@ containerized_deployment: true
 cluster: ceph
 public_network: "192.168.55.0/24"
 cluster_network: "192.168.56.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
 copy_admin_key: true
index 663a4b1d07bdebef39a493265376258c88fc8049..391ce1ac51f731c1579366d89ed4cb5143d3527e 100644 (file)
@@ -4,8 +4,8 @@ ceph_repository: community
 cluster: ceph
 public_network: "192.168.53.0/24"
 cluster_network: "192.168.54.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
 copy_admin_key: true
index 460c5c71e73d29f408a79b810873fc82d524571d..8675fabc69351a580fa91f143846a979cf7a8f7d 100644 (file)
@@ -6,8 +6,8 @@ containerized_deployment: true
 cluster: ceph
 public_network: "192.168.67.0/24"
 cluster_network: "192.168.68.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
 osd_scenario: lvm
index 7bf0324e86dac446c507bd38c3d6393a2b5d0148..2d192f9cdfcc2beb32ed045afb2363b8cdc29aa6 100644 (file)
@@ -4,8 +4,8 @@ ceph_repository: community
 cluster: ceph
 public_network: "192.168.65.0/24"
 cluster_network: "192.168.66.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
 osd_scenario: lvm
index 33f386334f95f295df2835b527730d00b9974041..1f2c0a63687234a873a45100506f8dce4df76aa7 100644 (file)
@@ -6,8 +6,8 @@ containerized_deployment: true
 cluster: ceph
 public_network: "192.168.71.0/24"
 cluster_network: "192.168.72.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
 osd_scenario: lvm
index 819b3bbd04e8f55eac29bc4b415e6ec898c8336e..b3158ff69e059550f04c8f919a29271c232e6c50 100644 (file)
@@ -4,8 +4,8 @@ ceph_repository: community
 cluster: ceph
 public_network: "192.168.69.0/24"
 cluster_network: "192.168.70.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
 osd_scenario: lvm
index 455cc7d6649c4ac94e4437ca8a88d6e3a44c3865..1e50b133dbcf13a4b3bb3dc73b38b4f63ab92378 100644 (file)
@@ -4,8 +4,8 @@
 docker: True
 
 containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 radosgw_num_instances: 2
 ceph_mon_docker_subnet: "{{ public_network }}"
 ceph_docker_on_openstack: False
index 72d14dd5bc3e4753b324c5739b899789a290ccb7..a6d220b487d1722458873b6fadc56146c53dec24 100644 (file)
@@ -2,8 +2,8 @@
 containerized_deployment: False
 ceph_origin: repository
 ceph_repository: community
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 ceph_docker_on_openstack: False
 openstack_config: True
index 95814b0b05a56cd5e744dab6ec7350ac9d9478b1..9b9310614ce60900cd9360869170d46794fb8caa 100644 (file)
@@ -4,8 +4,8 @@
 docker: True
 
 containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 ceph_docker_on_openstack: False
 public_network: "192.168.17.0/24"
index fe64c8e8b35d82c27944298358a3a7fe236e5845..996d7807688ea57b0d30f4820288ffac8d2f1cbd 100644 (file)
@@ -1,6 +1,6 @@
 [mons]
 mon0 monitor_address=192.168.17.10
-mon1 monitor_interface="{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 mon2 monitor_address=192.168.17.12
 
 [mgrs]
index 5e22f90e3b1bf37cb9f9e0ead82897a1cd962975..daf24cfe5e0238fa5988aff70e21580a175ea9a4 100644 (file)
@@ -1,6 +1,6 @@
 [mons]
 mon0 monitor_address=192.168.17.10
-mon1 monitor_interface="{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 mon2 monitor_address=192.168.17.12
 
 [mgrs]
index 95e020424e0b24beda6b2cb5cf054e165c043acd..9f967a21d2d0ed58e367ae23a38afe46100a9265 100644 (file)
@@ -1,6 +1,6 @@
 [mons]
 mon0 monitor_address=192.168.17.10
-mon1 monitor_interface="{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 mon2 monitor_address=192.168.17.12
 
 [mgrs]
index 574adfaf1ad55c5a01c6434e608357a4eec41ea7..9a80e566f8dbc40e1254cbd1508b1c0a4fd9df29 100644 (file)
@@ -3,7 +3,7 @@ ceph_origin: repository
 ceph_repository: community
 public_network: "192.168.1.0/24"
 cluster_network: "192.168.2.0/24"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
index 1c3bc54a0f889ef15977d5b9d8cbc0e5f8127166..85c64c9a34a2e066b1a6d195c84f5d292bef272e 100644 (file)
@@ -1,6 +1,6 @@
 [mons]
 mon0 monitor_address=192.168.1.10
-mon1 monitor_interface="{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 mon2 monitor_address=192.168.1.12
 
 [mgrs]
index 8c3aed16a1cf8f48d4f4871ec75cbf0aa73c220a..2f31ca9cec8cd3e1cbaf2c2aa25509d57f8c87a4 100644 (file)
@@ -3,7 +3,7 @@ docker=True
 
 [mons]
 mon0 monitor_address=192.168.1.10
-mon1 monitor_interface="{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 mon2 monitor_address=192.168.1.12
 
 [mgrs]
index e0e64ba694c4aec5cfe6a66550bcf502477b1b70..7d2bd6dd488016e3356c95633c2dfcd17a1c9d16 100644 (file)
@@ -1,6 +1,6 @@
 [mons]
 mon0 monitor_address=192.168.1.10
-mon1 monitor_interface="{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 mon2 monitor_address=192.168.1.12
 
 [mgrs]
index 6bc61511f22497b7aeca450b0b6d6602443e3944..d8cd6aa73965152a9c429e55df395a96f789ccd0 100644 (file)
@@ -4,8 +4,8 @@
 docker: True
 
 containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 radosgw_num_instances: 2
 ceph_mon_docker_subnet: "{{ public_network }}"
 ceph_docker_on_openstack: False
index 6a34387f259d193bba47635cb5ef04afbe52ff0e..9fdbd55b43ed06bf45f0b456d0188dc732aedf15 100644 (file)
@@ -2,8 +2,8 @@
 containerized_deployment: False
 ceph_origin: repository
 ceph_repository: community
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 ceph_docker_on_openstack: False
 public_network: "192.168.15.0/24"
index 4a3134f0716cab53c519c55e7659d000e9151cf0..23205e24cd7dd2d63bc1f7b906c9a327430c8c89 100644 (file)
@@ -4,8 +4,8 @@
 docker: True
 
 containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 ceph_docker_on_openstack: False
 public_network: "192.168.58.0/24"
index 2bd0e1a35b2235a9f38c20172c0f9501a178f905..b61c80808bcd717d509e1caa2eec2e265288d4b5 100644 (file)
@@ -4,8 +4,8 @@
 docker: True
 
 containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 radosgw_num_instances: 2
 ceph_mon_docker_subnet: "{{ public_network }}"
 ceph_docker_on_openstack: False
index 276b8dfaff9e6783ee6f61bdcd650a862ea36dc3..6a58eddcf6f5bb8935231ebcc3483eb7868a3052 100644 (file)
@@ -2,8 +2,8 @@
 containerized_deployment: False
 ceph_origin: repository
 ceph_repository: community
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 ceph_docker_on_openstack: False
 openstack_config: True
index 8f3fa777f99de1c78428e7f9fead3676de855fcb..fc9067dc5c3e3919f83b03d7eb3e54b2564e2f59 100644 (file)
@@ -8,7 +8,7 @@ ceph_origin: repository
 ceph_repository: community
 public_network: "192.168.43.0/24"
 cluster_network: "192.168.44.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 copy_admin_key: true
 containerized_deployment: true
index 23799f822ae6758d0fff280b6c69cb26d3ee16df..d19c75aca357b54c0a0fd5584956482cca8e8d54 100644 (file)
@@ -4,7 +4,7 @@ ceph_origin: repository
 ceph_repository: community
 public_network: "192.168.41.0/24"
 cluster_network: "192.168.42.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
index ac6994009f8866d78996a6119735342abeb18509..531fd6446789fd70ec44974ca35b6703c0b2d97b 100644 (file)
@@ -10,8 +10,8 @@ ceph_repository: community
 cluster: ceph
 public_network: "192.168.39.0/24"
 cluster_network: "192.168.40.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
 crush_device_class: test
index aa15d6851f010e4a196961cc7bca6b04d3acaadb..0284affcaae2a12a019372dc6ca3e31be8c7b609 100644 (file)
@@ -5,8 +5,8 @@ ceph_repository: community
 cluster: ceph
 public_network: "192.168.39.0/24"
 cluster_network: "192.168.40.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 osd_objectstore: "bluestore"
 crush_device_class: test
 copy_admin_key: true
index 77fa7a97d9377026832fb21d0bd6b6bad4c4a366..513bea3c30af60599dd0b74253498b4e0fc2a017 100644 (file)
@@ -10,8 +10,8 @@ ceph_repository: community
 cluster: ceph
 public_network: "192.168.39.0/24"
 cluster_network: "192.168.40.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
 crush_device_class: test
index ad72b72c5adc0a5b40571b460cba5d45adf97e1f..aed954110c3ef32b907b3edea73c6bc3f58da563 100644 (file)
@@ -5,8 +5,8 @@ ceph_repository: community
 cluster: ceph
 public_network: "192.168.39.0/24"
 cluster_network: "192.168.40.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 osd_objectstore: "bluestore"
 crush_device_class: test
 copy_admin_key: true
index 1d483e1e88442983a2c42d95e32f43b99aa6e92d..bc5941341041952b1c5bb36487ec5fdd1a8110ee 100644 (file)
@@ -8,7 +8,7 @@ ceph_origin: repository
 ceph_repository: community
 public_network: "192.168.39.0/24"
 cluster_network: "192.168.40.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 copy_admin_key: true
 containerized_deployment: true
index 8cae342f0b32d45af19cc41a3510f2f66938befc..2cd64374f3c5669d9982adcc0c44cd8101d7bfca 100644 (file)
@@ -4,7 +4,7 @@ ceph_origin: repository
 ceph_repository: community
 public_network: "192.168.39.0/24"
 cluster_network: "192.168.40.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
index 00c75a47bab0d4f6eff37e9b0401be9f97343005..aaf6f26369bb8d11a100b4d78820c74afd21b208 100644 (file)
@@ -4,8 +4,8 @@
 docker: True
 
 containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 ceph_docker_on_openstack: False
 public_network: "192.168.30.0/24"
index f602319349f9d6b1d7bc9e9073dd92bde22bd82b..1e47a8d4893280fa4962acc6c72b5d632b0d067e 100644 (file)
@@ -6,8 +6,8 @@ ceph_repository: community
 cluster: ceph
 public_network: "192.168.105.0/24"
 cluster_network: "192.168.106.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
 copy_admin_key: true
index 8b118a17840ee241a00507789828e1ba75476e5a..f37fd1c38c6ed78c8a4be80cba656340e2936a01 100644 (file)
@@ -6,8 +6,8 @@ ceph_repository: community
 cluster: ceph
 public_network: "192.168.107.0/24"
 cluster_network: "192.168.108.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
 copy_admin_key: true
index 9ad488a0b03ee9c5b0c7b42089858c410233d6f6..4fe73d43a7d25f71214cd0092b8bc86046fff499 100644 (file)
@@ -4,8 +4,8 @@ ceph_repository: community
 cluster: ceph
 public_network: "192.168.101.0/24"
 cluster_network: "192.168.102.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
 copy_admin_key: true
index 97f8440dc75c7fda2a44c7656b1234671957b394..c247169b22fe18de66cb7c6a35092981ee8f1b88 100644 (file)
@@ -4,8 +4,8 @@ ceph_repository: community
 cluster: ceph
 public_network: "192.168.103.0/24"
 cluster_network: "192.168.104.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
 copy_admin_key: true
index 14804669b440cb79e40573944b73966a3b69baae..7bc97ec14eeb3a482d3c54c6d44ce49dae296768 100644 (file)
@@ -81,7 +81,7 @@
         state: present
       register: result
       until: result is succeeded
-      when: ansible_os_family == 'RedHat'
+      when: ansible_facts['os_family'] == 'RedHat'
 
     - name: allow insecure docker registries
       lineinfile:
index 96e62b233c97ea1946f01bb3443460f55abf2fc9..0413e7337949f65308c704ad9d4a225d415e9e1e 100644 (file)
@@ -50,8 +50,8 @@
             option: metalink
             state: absent
       when:
-        - ansible_distribution == 'CentOS'
-        - ansible_distribution_major_version | int == 7
+        - ansible_facts['distribution'] == 'CentOS'
+        - ansible_facts['distribution_major_version'] | int == 7
         - not is_atomic | bool
 
     - name: resize logical volume for root partition to fill remaining free space
index e621da4501e062b71046acf2db51292edf4c293b..7e85132c7707cb9fb444d8fa8fd1f46f0fde369c 100644 (file)
@@ -4,7 +4,7 @@
 docker: True
 
 containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 ceph_docker_on_openstack: False
 public_network: "192.168.79.0/24"
index c26ebb3d6bdfc5fe1ace25dc339a57c92e1c6b00..44753fcd64b3af82cca385b852a11ab60da1aa2e 100644 (file)
@@ -4,7 +4,7 @@
 docker: True
 
 containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 ceph_docker_on_openstack: False
 public_network: "192.168.83.0/24"
index 9801d081aebe49a42f125961860a2cab64236bf0..f549cebe0e457d46edf5b529cdd9a8e0317cb8ec 100644 (file)
@@ -4,7 +4,7 @@
 docker: True
 
 containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 ceph_docker_on_openstack: False
 public_network: "192.168.17.0/24"
index 541558a7a7364db09bc0bf5d615f586cdf1c2594..c0230214637576608b9e426cd9ad548c630b5dcc 100644 (file)
@@ -1,6 +1,6 @@
 [mons]
 mon0 monitor_address=192.168.1.10
-mon1 monitor_interface="{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 mon2 monitor_address=192.168.1.12
 
 [osds]
index c95d9d2f19977a56da8393ff86cd04077d634b08..b995e9b99339a09c4f2ab73005c894b42dcb8343 100644 (file)
@@ -3,7 +3,7 @@ docker=True
 
 [mons]
 mon0 monitor_address=192.168.1.10
-mon1 monitor_interface="{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 mon2 monitor_address=192.168.1.12
 
 [osds]
index f2cc0b1252c3c8e8a996332f00ecd11588bccbac..e0bf3eb2e8944c703ad7802ce56c70bea299652e 100644 (file)
@@ -1,6 +1,6 @@
 [mons]
 mon0 monitor_address=192.168.1.10
-mon1 monitor_interface="{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 mon2 monitor_address=192.168.1.12
 
 [osds]
index c257f61038936d3f2db2c37d4c03dd0fb13f651a..e99fedfe7f656d9031295c61f468dd7aabcb0341 100644 (file)
@@ -4,7 +4,7 @@
 docker: True
 
 containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 ceph_docker_on_openstack: False
 public_network: "192.168.73.0/24"
index abdd93bd468ba52f5068b5e59facf40449809efc..a95a0cf6c103402ebae55483099794799f31a54e 100644 (file)
@@ -5,7 +5,7 @@ docker: True
 public_network: "192.168.87.0/24"
 cluster_network: "192.168.88.0/24"
 containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 ceph_docker_on_openstack: False
 ceph_conf_overrides:
index e49c96ba8226a2ddfee96077ff97f7dfe5b0d725..291e80ec5ec1c3f973373eeb4044896684c7c2eb 100644 (file)
@@ -6,8 +6,8 @@ docker: True
 containerized_deployment: True
 public_network: "192.168.91.0/24"
 cluster_network: "192.168.92.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 ceph_docker_on_openstack: False
 ceph_conf_overrides:
index 406a0cf1c0d34c5baf311c09a551b50995d30ce3..3a78f052f0e41358bfd52461de955a303f640668 100644 (file)
@@ -3,8 +3,8 @@ ceph_origin: repository
 ceph_repository: community
 public_network: "192.168.89.0/24"
 cluster_network: "192.168.90.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 osd_objectstore: "bluestore"
 osd_scenario: lvm
 copy_admin_key: true