git.apps.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
Use ansible_facts
author Alex Schultz <aschultz@redhat.com>
Wed, 3 Mar 2021 14:43:50 +0000 (07:43 -0700)
committer Guillaume Abrioux <gabrioux@redhat.com>
Mon, 8 Mar 2021 19:54:02 +0000 (20:54 +0100)
It has come to our attention that using ansible_* vars that are
populated when INJECT_FACTS_AS_VARS=True is not very performant.  In
order to support setting it to off, we need to update the references
to use ansible_facts[<thing>] instead of ansible_<thing>.

Related: ansible#73654
Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1935406
Signed-off-by: Alex Schultz <aschultz@redhat.com>
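
A minimal sketch of the pattern this commit applies throughout the files
below; the task names are illustrative, not taken from the repository. The
knob in question is inject_facts_as_vars in ansible.cfg (environment
variable ANSIBLE_INJECT_FACT_VARS), which defaults to True.

    # Old style: works only while INJECT_FACTS_AS_VARS=True injects each
    # gathered fact as a top-level ansible_* variable.
    - name: print the short hostname (injected var)
      debug:
        msg: "{{ ansible_hostname }}"

    # New style: reads the same value from the ansible_facts dictionary,
    # which gather_facts populates regardless of the setting.
    - name: print the short hostname (facts dictionary)
      debug:
        msg: "{{ ansible_facts['hostname'] }}"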
173 files changed:
group_vars/all.yml.sample
group_vars/iscsigws.yml.sample
group_vars/mdss.yml.sample
group_vars/mgrs.yml.sample
group_vars/mons.yml.sample
group_vars/nfss.yml.sample
group_vars/osds.yml.sample
group_vars/rbdmirrors.yml.sample
group_vars/rhcs.yml.sample
infrastructure-playbooks/cephadm-adopt.yml
infrastructure-playbooks/cephadm.yml
infrastructure-playbooks/purge-cluster.yml
infrastructure-playbooks/purge-container-cluster.yml
infrastructure-playbooks/purge-iscsi-gateways.yml
infrastructure-playbooks/rolling_update.yml
infrastructure-playbooks/shrink-mds.yml
infrastructure-playbooks/shrink-mgr.yml
infrastructure-playbooks/shrink-mon.yml
infrastructure-playbooks/shrink-osd.yml
infrastructure-playbooks/shrink-rbdmirror.yml
infrastructure-playbooks/shrink-rgw.yml
infrastructure-playbooks/switch-from-non-containerized-to-containerized-ceph-daemons.yml
infrastructure-playbooks/untested-by-ci/cluster-os-migration.yml
infrastructure-playbooks/untested-by-ci/make-osd-partitions.yml
infrastructure-playbooks/untested-by-ci/replace-osd.yml
profiles/rgw-keystone-v2
profiles/rgw-keystone-v3
profiles/rgw-radosgw-static-website
profiles/rgw-usage-log
roles/ceph-client/tasks/create_users_keys.yml
roles/ceph-common/tasks/configure_cluster_name.yml
roles/ceph-common/tasks/configure_memory_allocator.yml
roles/ceph-common/tasks/installs/configure_redhat_repository_installation.yml
roles/ceph-common/tasks/installs/debian_community_repository.yml
roles/ceph-common/tasks/installs/debian_custom_repository.yml
roles/ceph-common/tasks/installs/debian_dev_repository.yml
roles/ceph-common/tasks/installs/install_debian_packages.yml
roles/ceph-common/tasks/installs/install_redhat_packages.yml
roles/ceph-common/tasks/installs/prerequisite_rhcs_cdn_install.yml
roles/ceph-common/tasks/installs/redhat_community_repository.yml
roles/ceph-common/tasks/installs/redhat_dev_repository.yml
roles/ceph-common/tasks/main.yml
roles/ceph-config/tasks/rgw_systemd_environment_file.yml
roles/ceph-config/templates/ceph.conf.j2
roles/ceph-container-engine/tasks/pre_requisites/debian_prerequisites.yml
roles/ceph-container-engine/tasks/pre_requisites/prerequisites.yml
roles/ceph-crash/tasks/main.yml
roles/ceph-dashboard/tasks/configure_dashboard.yml
roles/ceph-dashboard/tasks/configure_dashboard_backends.yml
roles/ceph-dashboard/tasks/main.yml
roles/ceph-defaults/defaults/main.yml
roles/ceph-facts/tasks/container_binary.yml
roles/ceph-facts/tasks/facts.yml
roles/ceph-facts/tasks/grafana.yml
roles/ceph-facts/tasks/set_monitor_address.yml
roles/ceph-facts/tasks/set_radosgw_address.yml
roles/ceph-grafana/tasks/configure_grafana.yml
roles/ceph-grafana/templates/grafana.ini.j2
roles/ceph-handler/tasks/check_running_containers.yml
roles/ceph-handler/tasks/handler_crash.yml
roles/ceph-handler/templates/restart_mds_daemon.sh.j2
roles/ceph-handler/templates/restart_mgr_daemon.sh.j2
roles/ceph-handler/templates/restart_mon_daemon.sh.j2
roles/ceph-handler/templates/restart_nfs_daemon.sh.j2
roles/ceph-handler/templates/restart_rbd_mirror_daemon.sh.j2
roles/ceph-handler/templates/restart_rgw_daemon.sh.j2
roles/ceph-infra/tasks/configure_firewall.yml
roles/ceph-infra/tasks/main.yml
roles/ceph-infra/tasks/setup_ntp.yml
roles/ceph-iscsi-gw/defaults/main.yml
roles/ceph-iscsi-gw/tasks/common.yml
roles/ceph-iscsi-gw/tasks/deploy_ssl_keys.yml
roles/ceph-iscsi-gw/tasks/non-container/prerequisites.yml
roles/ceph-mds/defaults/main.yml
roles/ceph-mds/tasks/common.yml
roles/ceph-mds/tasks/containerized.yml
roles/ceph-mds/tasks/non_containerized.yml
roles/ceph-mds/templates/ceph-mds.service.j2
roles/ceph-mgr/defaults/main.yml
roles/ceph-mgr/tasks/common.yml
roles/ceph-mgr/tasks/main.yml
roles/ceph-mgr/tasks/pre_requisite.yml
roles/ceph-mgr/tasks/start_mgr.yml
roles/ceph-mgr/templates/ceph-mgr.service.j2
roles/ceph-mon/defaults/main.yml
roles/ceph-mon/tasks/ceph_keys.yml
roles/ceph-mon/tasks/deploy_monitors.yml
roles/ceph-mon/tasks/main.yml
roles/ceph-mon/tasks/start_monitor.yml
roles/ceph-mon/templates/ceph-mon.service.j2
roles/ceph-nfs/defaults/main.yml
roles/ceph-nfs/tasks/create_rgw_nfs_user.yml
roles/ceph-nfs/tasks/ganesha_selinux_fix.yml
roles/ceph-nfs/tasks/main.yml
roles/ceph-nfs/tasks/pre_requisite_container.yml
roles/ceph-nfs/tasks/pre_requisite_non_container.yml
roles/ceph-nfs/tasks/pre_requisite_non_container_debian.yml
roles/ceph-nfs/tasks/pre_requisite_non_container_red_hat.yml
roles/ceph-nfs/tasks/start_nfs.yml
roles/ceph-nfs/templates/ceph-nfs.service.j2
roles/ceph-osd/defaults/main.yml
roles/ceph-osd/tasks/main.yml
roles/ceph-osd/tasks/start_osds.yml
roles/ceph-osd/tasks/system_tuning.yml
roles/ceph-osd/templates/ceph-osd.service.j2
roles/ceph-prometheus/templates/alertmanager.service.j2
roles/ceph-prometheus/templates/alertmanager.yml.j2
roles/ceph-prometheus/templates/prometheus.service.j2
roles/ceph-prometheus/templates/prometheus.yml.j2
roles/ceph-rbd-mirror/defaults/main.yml
roles/ceph-rbd-mirror/tasks/common.yml
roles/ceph-rbd-mirror/tasks/configure_mirroring.yml
roles/ceph-rbd-mirror/tasks/main.yml
roles/ceph-rbd-mirror/tasks/start_container_rbd_mirror.yml
roles/ceph-rbd-mirror/tasks/start_rbd_mirror.yml
roles/ceph-rbd-mirror/templates/ceph-rbd-mirror.service.j2
roles/ceph-rgw-loadbalancer/templates/haproxy.cfg.j2
roles/ceph-rgw/handlers/main.yml
roles/ceph-rgw/tasks/openstack-keystone.yml
roles/ceph-rgw/tasks/pre_requisite.yml
roles/ceph-rgw/tasks/start_docker_rgw.yml
roles/ceph-rgw/tasks/start_radosgw.yml
roles/ceph-rgw/templates/ceph-radosgw.service.j2
roles/ceph-validate/tasks/check_eth_mon.yml
roles/ceph-validate/tasks/check_eth_rgw.yml
roles/ceph-validate/tasks/check_ipaddr_mon.yml
roles/ceph-validate/tasks/check_iscsi.yml
roles/ceph-validate/tasks/check_nfs.yml
roles/ceph-validate/tasks/check_system.yml
roles/ceph-validate/tasks/main.yml
site-container.yml.sample
tests/functional/add-osds/container/group_vars/all
tests/functional/add-osds/group_vars/all
tests/functional/add-rbdmirrors/container/group_vars/all
tests/functional/add-rbdmirrors/group_vars/all
tests/functional/add-rgws/container/group_vars/all
tests/functional/add-rgws/group_vars/all
tests/functional/all-in-one/container/group_vars/all
tests/functional/all-in-one/group_vars/all
tests/functional/all_daemons/container/group_vars/all
tests/functional/all_daemons/container/hosts
tests/functional/all_daemons/group_vars/all
tests/functional/all_daemons/hosts
tests/functional/all_daemons/hosts-switch-to-containers
tests/functional/collocation/container/group_vars/all
tests/functional/collocation/group_vars/all
tests/functional/docker2podman/group_vars/all
tests/functional/external_clients/container/inventory/group_vars/all
tests/functional/external_clients/inventory/group_vars/all
tests/functional/filestore-to-bluestore/container/group_vars/all
tests/functional/filestore-to-bluestore/group_vars/all
tests/functional/lvm-auto-discovery/container/group_vars/all
tests/functional/lvm-auto-discovery/group_vars/all
tests/functional/lvm-batch/container/group_vars/all
tests/functional/lvm-batch/group_vars/all
tests/functional/lvm-osds/container/group_vars/all
tests/functional/lvm-osds/group_vars/all
tests/functional/podman/group_vars/all
tests/functional/rgw-multisite/container/group_vars/all
tests/functional/rgw-multisite/container/secondary/group_vars/all
tests/functional/rgw-multisite/group_vars/all
tests/functional/rgw-multisite/secondary/group_vars/all
tests/functional/rhcs_setup.yml
tests/functional/setup.yml
tests/functional/shrink_mds/container/group_vars/all
tests/functional/shrink_mgr/container/group_vars/all
tests/functional/shrink_mon/container/group_vars/all
tests/functional/shrink_mon/hosts
tests/functional/shrink_mon/hosts-switch-to-containers
tests/functional/shrink_osd/container/group_vars/all
tests/functional/shrink_rbdmirror/container/group_vars/all
tests/functional/shrink_rgw/container/group_vars/all
tests/functional/shrink_rgw/group_vars/all

index 2f9cd5dff6f4fde72717481e9b81a6577b85d071..67d2f1642268bbee4a772ed63d27a45034edda25 100644 (file)
@@ -83,7 +83,7 @@ dummy:
 
 #centos_package_dependencies:
 #  - epel-release
-#  - "{{ 'python3-libselinux' if ansible_distribution_major_version | int >= 8 else 'libselinux-python' }}"
+#  - "{{ (ansible_facts['distribution_major_version'] is version('8', '>=')) | ternary('python3-libselinux', 'libselinux-python') }}"
 
 #redhat_package_dependencies: []
 
@@ -153,7 +153,7 @@ dummy:
 # Use the option below to specify your applicable package tree, e.g. when using non-LTS Ubuntu versions
 # # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
 # for more info read: https://github.com/ceph/ceph-ansible/issues/305
-#ceph_stable_distro_source: "{{ ansible_distribution_release }}"
+#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
 
 
 # REPOSITORY: RHCS VERSION RED HAT STORAGE (from 5.0)
@@ -181,7 +181,7 @@ dummy:
 #
 #ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
 #ceph_stable_openstack_release_uca: queens
-#ceph_stable_release_uca: "{{ ansible_distribution_release }}-updates/{{ ceph_stable_openstack_release_uca }}"
+#ceph_stable_release_uca: "{{ ansible_facts['distribution_release'] }}-updates/{{ ceph_stable_openstack_release_uca }}"
 
 # REPOSITORY: openSUSE OBS
 #
@@ -191,7 +191,7 @@ dummy:
 # usually has newer Ceph releases than the normal distro repository.
 #
 #
-#ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_distribution_version }}/"
+#ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_facts['distribution_version'] }}/"
 
 # REPOSITORY: DEV
 #
@@ -254,7 +254,7 @@ dummy:
 
 #ceph_conf_key_directory: /etc/ceph
 
-#ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_os_family == 'Debian' else '167' }}"
+#ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_facts['os_family'] == 'Debian' else '167' }}"
 
 # Permissions for keyring files in /etc/ceph
 #ceph_keyring_permissions: '0600'
@@ -529,7 +529,7 @@ dummy:
 #   global:
 #     foo: 1234
 #     bar: 5678
-#   "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_hostname'] }}":
+#   "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
 #     rgw_zone: zone1
 #
 #ceph_conf_overrides: {}
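
The first hunk in this file also trades an integer cast for Jinja2's version
test: facts arrive as strings, and version('8', '>=') compares them without a
cast, while ternary picks between the two package names. A standalone sketch
of the same selection, with an illustrative variable name:

    - name: pick the libselinux package for this distribution
      set_fact:
        selinux_pkg: "{{ (ansible_facts['distribution_major_version'] is version('8', '>='))
                         | ternary('python3-libselinux', 'libselinux-python') }}"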
index bd0f7b8d5691ac1e32b967cd9a969a78642332bd..57aecc421193293078ebac7a14815b790fe548d3 100644 (file)
@@ -43,14 +43,14 @@ dummy:
 # These options can be passed using the 'ceph_mds_docker_extra_env' variable.
 
 # TCMU_RUNNER resource limitation
-#ceph_tcmu_runner_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+#ceph_tcmu_runner_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 #ceph_tcmu_runner_docker_cpu_limit: 1
 
 # RBD_TARGET_GW resource limitation
-#ceph_rbd_target_gw_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+#ceph_rbd_target_gw_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 #ceph_rbd_target_gw_docker_cpu_limit: 1
 
 # RBD_TARGET_API resource limitation
-#ceph_rbd_target_api_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+#ceph_rbd_target_api_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 #ceph_rbd_target_api_docker_cpu_limit: 1
 
index 2412ea730941251631d3e4cc4336465f3ad9bb31..14b1bfb8acd5e0a78a075ff98419c17bf149f1a0 100644 (file)
@@ -27,13 +27,13 @@ dummy:
 # For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
 # Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
 # These options can be passed using the 'ceph_mds_docker_extra_env' variable.
-#ceph_mds_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+#ceph_mds_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 #ceph_mds_docker_cpu_limit: 4
 
 # we currently force MDS_NAME to hostname because of a bug in ceph-docker
 # fix here: https://github.com/ceph/ceph-docker/pull/770
 # this will go away soon.
-#ceph_mds_docker_extra_env: -e MDS_NAME={{ ansible_hostname }}
+#ceph_mds_docker_extra_env: -e MDS_NAME={{ ansible_facts['hostname'] }}
 #ceph_config_keys: [] # DON'T TOUCH ME
 
 
index 298525739875b1628d5c409e0d8c646754b3b22b..6fb9bc155fe9317ab7f814f3ac35a466b80d6dc8 100644 (file)
@@ -41,7 +41,7 @@ dummy:
 # For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
 # Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
 # These options can be passed using the 'ceph_mgr_docker_extra_env' variable.
-#ceph_mgr_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+#ceph_mgr_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 #ceph_mgr_docker_cpu_limit: 1
 
 #ceph_mgr_docker_extra_env:
index 002d62b220b55ab692b64eb08b0d6b0ec0128360..ad59172b79a49fb70f044530ccb40b897e23676c 100644 (file)
@@ -45,7 +45,7 @@ dummy:
 # For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
 # Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
 # These options can be passed using the 'ceph_mon_docker_extra_env' variable.
-#ceph_mon_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+#ceph_mon_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 #ceph_mon_docker_cpu_limit: 1
 #ceph_mon_container_listen_port: 3300
 
index 7c35af31afd40d94ed194759b3b1a8932b367b1b..2e87b1fc821296c84a8f77a87c026f4878deb3d7 100644 (file)
@@ -25,7 +25,7 @@ dummy:
 #ceph_nfs_enable_service: true
 
 # ceph-nfs systemd service uses ansible's hostname as an instance id,
-# so service name is ceph-nfs@{{ ansible_hostname }}, this is not
+# so service name is ceph-nfs@{{ ansible_facts['hostname'] }}, this is not
 # ideal when ceph-nfs is managed by pacemaker across multiple hosts - in
 # such case it's better to have constant instance id instead which
 # can be set by 'ceph_nfs_service_suffix'
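
The comment above names the knob that avoids a hostname-derived instance id;
a one-line sketch with an illustrative value:

    # unit becomes ceph-nfs@mynfs on every host instead of ceph-nfs@<hostname>
    ceph_nfs_service_suffix: mynfs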
@@ -82,7 +82,7 @@ dummy:
 # they must be configured.
 #ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
 #ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
-#rgw_client_name: client.rgw.{{ ansible_hostname }}
+#rgw_client_name: client.rgw.{{ ansible_facts['hostname'] }}
 
 ###################
 # CONFIG OVERRIDE #
index e8620a8607c43139f752271389d5b2e3a937ff6c..91abec986ca3bd85e43ba568ee5f010f4aba973f 100644 (file)
@@ -169,7 +169,7 @@ dummy:
 # For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
 # Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
 # These options can be passed using the 'ceph_osd_docker_extra_env' variable.
-#ceph_osd_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+#ceph_osd_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 #ceph_osd_docker_cpu_limit: 4
 
 # The next two variables are undefined, and thus, unused by default.
index 2c565fe01f3f988af0819fd4b3e7becba03f46d5..00984136168c6851a90dab3cca757ee3e6634160 100644 (file)
@@ -50,7 +50,7 @@ dummy:
 # For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
 # Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
 # These options can be passed using the 'ceph_rbd_mirror_docker_extra_env' variable.
-#ceph_rbd_mirror_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+#ceph_rbd_mirror_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 #ceph_rbd_mirror_docker_cpu_limit: 1
 
 #ceph_rbd_mirror_docker_extra_env:
index 99f38a1f40145593b7db53c70fdc159062d59520..0a9d1b9d48cbdc6a38b9c204d8a44f57da022c3d 100644 (file)
@@ -83,7 +83,7 @@ dummy:
 
 #centos_package_dependencies:
 #  - epel-release
-#  - "{{ 'python3-libselinux' if ansible_distribution_major_version | int >= 8 else 'libselinux-python' }}"
+#  - "{{ (ansible_facts['distribution_major_version'] is version('8', '>=')) | ternary('python3-libselinux', 'libselinux-python') }}"
 
 #redhat_package_dependencies: []
 
@@ -153,7 +153,7 @@ ceph_repository: rhcs
 # Use the option below to specify your applicable package tree, e.g. when using non-LTS Ubuntu versions
 # # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
 # for more info read: https://github.com/ceph/ceph-ansible/issues/305
-#ceph_stable_distro_source: "{{ ansible_distribution_release }}"
+#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
 
 
 # REPOSITORY: RHCS VERSION RED HAT STORAGE (from 5.0)
@@ -181,7 +181,7 @@ ceph_rhcs_version: 5
 #
 #ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
 #ceph_stable_openstack_release_uca: queens
-#ceph_stable_release_uca: "{{ ansible_distribution_release }}-updates/{{ ceph_stable_openstack_release_uca }}"
+#ceph_stable_release_uca: "{{ ansible_facts['distribution_release'] }}-updates/{{ ceph_stable_openstack_release_uca }}"
 
 # REPOSITORY: openSUSE OBS
 #
@@ -191,7 +191,7 @@ ceph_rhcs_version: 5
 # usually has newer Ceph releases than the normal distro repository.
 #
 #
-#ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_distribution_version }}/"
+#ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_facts['distribution_version'] }}/"
 
 # REPOSITORY: DEV
 #
@@ -254,7 +254,7 @@ ceph_iscsi_config_dev: false
 
 #ceph_conf_key_directory: /etc/ceph
 
-#ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_os_family == 'Debian' else '167' }}"
+#ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_facts['os_family'] == 'Debian' else '167' }}"
 
 # Permissions for keyring files in /etc/ceph
 #ceph_keyring_permissions: '0600'
@@ -529,7 +529,7 @@ ceph_iscsi_config_dev: false
 #   global:
 #     foo: 1234
 #     bar: 5678
-#   "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_hostname'] }}":
+#   "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
 #     rgw_zone: zone1
 #
 #ceph_conf_overrides: {}
index 9127dd10e70be2e8208d24f0f283b4f01177429d..d49782d66e0827c4a86b426b74a1e9266f822d0b 100644 (file)
           delegate_to: '{{ groups[mon_group_name][0] }}'
 
     - name: manage nodes with cephadm
-      command: "{{ ceph_cmd }} orch host add {{ ansible_hostname }} {{ ansible_default_ipv4.address }} {{ group_names | join(' ') }}"
+      command: "{{ ceph_cmd }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['default_ipv4']['address'] }} {{ group_names | join(' ') }}"
       changed_when: false
       delegate_to: '{{ groups[mon_group_name][0] }}'
 
     - name: add ceph label for core component
-      command: "{{ ceph_cmd }} orch host label add {{ ansible_hostname }} ceph"
+      command: "{{ ceph_cmd }} orch host label add {{ ansible_facts['hostname'] }} ceph"
       changed_when: false
       delegate_to: '{{ groups[mon_group_name][0] }}'
       when: inventory_hostname in groups.get(mon_group_name, []) or
 
     - name: adopt mon daemon
       cephadm_adopt:
-        name: "mon.{{ ansible_hostname }}"
+        name: "mon.{{ ansible_facts['hostname'] }}"
         cluster: "{{ cluster }}"
         image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
         docker: "{{ true if container_binary == 'docker' else false }}"
         firewalld: "{{ true if configure_firewall | bool else false }}"
 
     - name: reset failed ceph-mon systemd unit
-      command: 'systemctl reset-failed ceph-mon@{{ ansible_hostname }}'  # noqa 303
+      command: "systemctl reset-failed ceph-mon@{{ ansible_facts['hostname'] }}"  # noqa 303
       changed_when: false
       failed_when: false
       when: containerized_deployment | bool
       changed_when: false
       register: ceph_health_raw
       until: >
-        ansible_hostname in (ceph_health_raw.stdout | from_json)["quorum_names"]
+        ansible_facts['hostname'] in (ceph_health_raw.stdout | from_json)["quorum_names"]
       retries: "{{ health_mon_check_retries }}"
       delay: "{{ health_mon_check_delay }}"
       environment:
 
     - name: adopt mgr daemon
       cephadm_adopt:
-        name: "mgr.{{ ansible_hostname }}"
+        name: "mgr.{{ ansible_facts['hostname'] }}"
         cluster: "{{ cluster }}"
         image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
         docker: "{{ true if container_binary == 'docker' else false }}"
         firewalld: "{{ true if configure_firewall | bool else false }}"
 
     - name: reset failed ceph-mgr systemd unit
-      command: 'systemctl reset-failed ceph-mgr@{{ ansible_hostname }}'  # noqa 303
+      command: "systemctl reset-failed ceph-mgr@{{ ansible_facts['hostname'] }}"  # noqa 303
       changed_when: false
       failed_when: false
       when: containerized_deployment | bool
 
     - name: stop and disable ceph-mds systemd service
       service:
-        name: 'ceph-mds@{{ ansible_hostname }}'
+        name: "ceph-mds@{{ ansible_facts['hostname'] }}"
         state: stopped
         enabled: false
       failed_when: false
       when: not containerized_deployment | bool
 
     - name: reset failed ceph-mds systemd unit
-      command: 'systemctl reset-failed ceph-mds@{{ ansible_hostname }}'  # noqa 303
+      command: "systemctl reset-failed ceph-mds@{{ ansible_facts['hostname'] }}"  # noqa 303
       changed_when: false
       failed_when: false
       when: containerized_deployment | bool
 
     - name: remove legacy ceph mds data
       file:
-        path: '/var/lib/ceph/mds/{{ cluster }}-{{ ansible_hostname }}'
+        path: "/var/lib/ceph/mds/{{ cluster }}-{{ ansible_facts['hostname'] }}"
         state: absent
 
 - name: rgw realm/zonegroup/zone requirements
 
     - name: stop and disable ceph-radosgw systemd service
       service:
-        name: 'ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}'
+        name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
         state: stopped
         enabled: false
       failed_when: false
       when: not containerized_deployment | bool
 
     - name: reset failed ceph-radosgw systemd unit
-      command: 'systemctl reset-failed ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}'  # noqa 303
+      command: "systemctl reset-failed ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"  # noqa 303
       changed_when: false
       failed_when: false
       loop: '{{ rgw_instances }}'
 
     - name: remove legacy ceph radosgw data
       file:
-        path: '/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}'
+        path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
         state: absent
       loop: '{{ rgw_instances }}'
 
     - name: remove legacy ceph radosgw directory
       file:
-        path: '/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}'
+        path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}"
         state: absent
 
 - name: redeploy rbd-mirror daemons
 
     - name: stop and disable rbd-mirror systemd service
       service:
-        name: 'ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}'
+        name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
         state: stopped
         enabled: false
       failed_when: false
       when: not containerized_deployment | bool
 
     - name: reset failed rbd-mirror systemd unit
-      command: 'systemctl reset-failed ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}'  # noqa 303
+      command: "systemctl reset-failed ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"  # noqa 303
       changed_when: false
       failed_when: false
       when: containerized_deployment | bool
 
         - name: adopt alertmanager daemon
           cephadm_adopt:
-            name: "alertmanager.{{ ansible_hostname }}"
+            name: "alertmanager.{{ ansible_facts['hostname'] }}"
             cluster: "{{ cluster }}"
             image: "{{ alertmanager_container_image }}"
             docker: "{{ true if container_binary == 'docker' else false }}"
 
         - name: adopt prometheus daemon
           cephadm_adopt:
-            name: "prometheus.{{ ansible_hostname }}"
+            name: "prometheus.{{ ansible_facts['hostname'] }}"
             cluster: "{{ cluster }}"
             image: "{{ prometheus_container_image }}"
             docker: "{{ true if container_binary == 'docker' else false }}"
 
         - name: adopt grafana daemon
           cephadm_adopt:
-            name: "grafana.{{ ansible_hostname }}"
+            name: "grafana.{{ ansible_facts['hostname'] }}"
             cluster: "{{ cluster }}"
             image: "{{ grafana_container_image }}"
             docker: "{{ true if container_binary == 'docker' else false }}"
index 0f912a3f1a00be5ed3af9556e03b7d6feeedd3e1..b25d08abc786c9c0a7a56d16491db6bea8040d5c 100644 (file)
         CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
 
     - name: manage nodes with cephadm
-      command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host add {{ ansible_hostname }} {{ ansible_default_ipv4.address }} {{ group_names | join(' ') }}"
+      command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['default_ipv4']['address'] }} {{ group_names | join(' ') }}"
       changed_when: false
       delegate_to: '{{ groups[mon_group_name][0] }}'
       environment:
         CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
 
     - name: add ceph label for core component
-      command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host label add {{ ansible_hostname }} ceph"
+      command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host label add {{ ansible_facts['hostname'] }} ceph"
       changed_when: false
       delegate_to: '{{ groups[mon_group_name][0] }}'
       when: inventory_hostname in groups.get(mon_group_name, []) or
index 023d502ff205237e3c94a3a81ec4b0eec41bee62..7ead4d366219ac0f5eb12721a1808bd5b1f4dda2 100644 (file)
@@ -71,7 +71,7 @@
           run_once: true
 
         - name: get all nfs-ganesha mount points
-          command: grep "{{ hostvars[item]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
+          command: grep "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
           register: nfs_ganesha_mount_points
           failed_when: false
           with_items: "{{ groups[nfs_group_name] }}"
       name: nfs-ganesha
       state: stopped
     failed_when: false
-    when: ansible_service_mgr == 'systemd'
+    when: ansible_facts['service_mgr'] == 'systemd'
 
 - name: purge node-exporter
   hosts:
 
   - name: stop ceph mdss with systemd
     service:
-      name: ceph-mds@{{ ansible_hostname }}
+      name: ceph-mds@{{ ansible_facts['hostname'] }}
       state: stopped
       enabled: no
     failed_when: false
 
   - name: stop ceph mgrs with systemd
     service:
-      name: ceph-mgr@{{ ansible_hostname }}
+      name: ceph-mgr@{{ ansible_facts['hostname'] }}
       state: stopped
       enabled: no
     failed_when: false
-    when: ansible_service_mgr == 'systemd'
+    when: ansible_facts['service_mgr'] == 'systemd'
 
 - name: purge rgwloadbalancer cluster
 
 
     - name: stop ceph rgws with systemd
       service:
-        name: "ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}"
+        name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
         state: stopped
         enabled: no
       failed_when: false
 
   - name: stop ceph rbd mirror with systemd
     service:
-      name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
+      name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
       state: stopped
     failed_when: false
 
     become: false
     wait_for:
       port: 22
-      host: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
+      host: "{{ hostvars[inventory_hostname]['ansible_facts']['default_ipv4']['address'] }}"
       state: started
       delay: 10
       timeout: 500
       state: stopped
       enabled: no
     with_items: "{{ osd_ids.stdout_lines }}"
-    when: ansible_service_mgr == 'systemd'
+    when: ansible_facts['service_mgr'] == 'systemd'
 
   - name: remove ceph udev rules
     file:
 
   - name: stop ceph mons with systemd
     service:
-      name: "ceph-{{ item }}@{{ ansible_hostname }}"
+      name: "ceph-{{ item }}@{{ ansible_facts['hostname'] }}"
       state: stopped
       enabled: no
     failed_when: false
     yum:
       name: "{{ ceph_packages }}"
       state: absent
-    when: ansible_pkg_mgr == 'yum'
+    when: ansible_facts['pkg_mgr'] == 'yum'
 
   - name: purge ceph packages with dnf
     dnf:
       name: "{{ ceph_packages }}"
       state: absent
-    when: ansible_pkg_mgr == 'dnf'
+    when: ansible_facts['pkg_mgr'] == 'dnf'
 
   - name: purge ceph packages with apt
     apt:
       name: "{{ ceph_packages }}"
       state: absent
       purge: true
-    when: ansible_pkg_mgr == 'apt'
+    when: ansible_facts['pkg_mgr'] == 'apt'
 
   - name: purge remaining ceph packages with yum
     yum:
       name: "{{ ceph_remaining_packages }}"
       state: absent
     when:
-      - ansible_pkg_mgr == 'yum'
+      - ansible_facts['pkg_mgr'] == 'yum'
       - purge_all_packages | bool
 
   - name: purge remaining ceph packages with dnf
       name: "{{ ceph_remaining_packages }}"
       state: absent
     when:
-      - ansible_pkg_mgr == 'dnf'
+      - ansible_facts['pkg_mgr'] == 'dnf'
       - purge_all_packages | bool
 
   - name: purge remaining ceph packages with apt
       name: "{{ ceph_remaining_packages }}"
       state: absent
     when:
-      - ansible_pkg_mgr == 'apt'
+      - ansible_facts['pkg_mgr'] == 'apt'
       - purge_all_packages | bool
 
   - name: purge extra packages with yum
       name: "{{ extra_packages }}"
       state: absent
     when:
-      - ansible_pkg_mgr == 'yum'
+      - ansible_facts['pkg_mgr'] == 'yum'
       - purge_all_packages | bool
 
   - name: purge extra packages with dnf
       name: "{{ extra_packages }}"
       state: absent
     when:
-      - ansible_pkg_mgr == 'dnf'
+      - ansible_facts['pkg_mgr'] == 'dnf'
       - purge_all_packages | bool
 
   - name: purge extra packages with apt
       name: "{{ extra_packages }}"
       state: absent
     when:
-      - ansible_pkg_mgr == 'apt'
+      - ansible_facts['pkg_mgr'] == 'apt'
       - purge_all_packages | bool
 
   - name: remove config and any ceph socket left
 
   - name: purge dnf cache
     command: dnf clean all
-    when: ansible_pkg_mgr == 'dnf'
+    when: ansible_facts['pkg_mgr'] == 'dnf'
 
   - name: purge rpm cache in /tmp
     file:
 
   - name: clean apt
     command: apt-get clean  # noqa 303
-    when: ansible_pkg_mgr == 'apt'
+    when: ansible_facts['pkg_mgr'] == 'apt'
 
   - name: purge ceph repo file in /etc/yum.repos.d
     file:
       - ceph-dev
       - ceph_stable
       - rh_storage
-    when: ansible_os_family == 'RedHat'
+    when: ansible_facts['os_family'] == 'RedHat'
 
   - name: check for anything running ceph
     command: "ps -u ceph -U ceph"
       path: "{{ item.path }}"
       state: absent
     with_items: "{{ systemd_files.files }}"
-    when: ansible_service_mgr == 'systemd'
+    when: ansible_facts['service_mgr'] == 'systemd'
 
 
 - name: purge fetch directory
index 2debc0d81def823568a49a4e9b85d4d7924b9347..892bbb12f5cd64af0a770937847ca05c47c1f7ca 100644 (file)
@@ -55,7 +55,7 @@
           run_once: true
 
         - name: get all nfs-ganesha mount points
-          command: grep "{{ hostvars[item]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
+          command: grep "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
           register: nfs_ganesha_mount_points
           failed_when: false
           with_items: "{{ groups[nfs_group_name] }}"
 
   - name: disable ceph nfs service
     service:
-      name: "ceph-nfs@{{ ansible_hostname }}"
+      name: "ceph-nfs@{{ ansible_facts['hostname'] }}"
       state: stopped
       enabled: no
     ignore_errors: true
       path: /etc/systemd/system/ceph-nfs@.service
       state: absent
 
-  - name: remove ceph nfs directories for "{{ ansible_hostname }}"
+  - name: remove ceph nfs directories for "{{ ansible_facts['hostname'] }}"
     file:
       path: "{{ item }}"
       state: absent
 
   - name: disable ceph mds service
     service:
-      name: "ceph-mds@{{ ansible_hostname }}"
+      name: "ceph-mds@{{ ansible_facts['hostname'] }}"
       state: stopped
       enabled: no
     ignore_errors: true
 
   - name: disable ceph mgr service
     service:
-      name: "ceph-mgr@{{ ansible_hostname }}"
+      name: "ceph-mgr@{{ ansible_facts['hostname'] }}"
       state: stopped
       enabled: no
     ignore_errors: true
 
     - name: disable ceph rgw service
       service:
-        name: "ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}"
+        name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
         state: stopped
         enabled: no
       failed_when: false
 
   - name: disable ceph rbd-mirror service
     service:
-      name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
+      name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
       state: stopped
       enabled: no
     ignore_errors: true
       enabled: no
     ignore_errors: true
     with_items:
-      - "ceph-mgr@{{ ansible_hostname }}"
-      - "ceph-mon@{{ ansible_hostname }}"
+      - "ceph-mgr@{{ ansible_facts['hostname'] }}"
+      - "ceph-mon@{{ ansible_facts['hostname'] }}"
 
   - name: remove ceph mon and mgr service
     file:
   tasks:
     - name: stop ceph-crash container
       service:
-        name: "ceph-crash@{{ ansible_hostname }}"
+        name: "ceph-crash@{{ ansible_facts['hostname'] }}"
         state: stopped
         enabled: no
       failed_when: false
         state: absent
         update_cache: yes
         autoremove: yes
-      when: ansible_os_family == 'Debian'
+      when: ansible_facts['os_family'] == 'Debian'
 
     - name: red hat based systems tasks
       block:
               args:
                 warn: no
           when:
-            ansible_pkg_mgr == "yum"
+            ansible_facts['pkg_mgr'] == "yum"
 
         - name: dnf related tasks on red hat
           block:
               args:
                 warn: no
           when:
-            ansible_pkg_mgr == "dnf"
+            ansible_facts['pkg_mgr'] == "dnf"
       when:
-        ansible_os_family == 'RedHat' and
+        ansible_facts['os_family'] == 'RedHat' and
         not is_atomic
 
     - name: find any service-cid file left
   become: true
 
   tasks:
-  - name: purge ceph directories for "{{ ansible_hostname }}" and ceph socket
+  - name: purge ceph directories for "{{ ansible_facts['hostname'] }}" and ceph socket
     file:
       path: "{{ item }}"
       state: absent
index 7c2482bd25d71b924da73fd8c7a6ddb651a4edaf..ec2247d2f4679abb6e6478a92721a02cb2807c23 100644 (file)
@@ -83,7 +83,7 @@
 
         - name: set_fact container_exec_cmd
           set_fact:
-            container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
+            container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
           when: containerized_deployment | bool
 
         - name: get iscsi gateway list
index 76b80f59071be5aba013bfb98cf7953297b1e27b..81434e9a8f3761a257fbe388b46195fc35c1224e 100644 (file)
         enabled: no
         masked: yes
       with_items:
-        - "{{ ansible_hostname }}"
-        - "{{ ansible_fqdn }}"
+        - "{{ ansible_facts['hostname'] }}"
+        - "{{ ansible_facts['fqdn'] }}"
 
     # only mask the service for mgr because it must be upgraded
     # after ALL monitors, even when collocated
     - name: mask the mgr service
       systemd:
-        name: ceph-mgr@{{ ansible_hostname }}
+        name: ceph-mgr@{{ ansible_facts['hostname'] }}
         masked: yes
       when: inventory_hostname in groups[mgr_group_name] | default([])
             or groups[mgr_group_name] | default([]) | length == 0
 
     - name: start ceph mgr
       systemd:
-        name: ceph-mgr@{{ ansible_hostname }}
+        name: ceph-mgr@{{ ansible_facts['hostname'] }}
         state: started
         enabled: yes
         masked: no
       register: ceph_health_raw
       until:
         - ceph_health_raw.rc == 0
-        - (hostvars[inventory_hostname]['ansible_hostname'] in (ceph_health_raw.stdout | default('{}') |  from_json)["quorum_names"] or
-          hostvars[inventory_hostname]['ansible_fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
+        - (hostvars[inventory_hostname]['ansible_facts']['hostname'] in (ceph_health_raw.stdout | default('{}') |  from_json)["quorum_names"] or
+          hostvars[inventory_hostname]['ansible_facts']['fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
       retries: "{{ health_mon_check_retries }}"
       delay: "{{ health_mon_check_delay }}"
       when: not containerized_deployment | bool
 
     - name: container | waiting for the containerized monitor to join the quorum...
       command: >
-        {{ container_binary }} exec ceph-mon-{{ ansible_hostname }} ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" quorum_status --format json
+        {{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }} ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" quorum_status --format json
       register: ceph_health_raw
       until:
         - ceph_health_raw.rc == 0
-        - (hostvars[inventory_hostname]['ansible_hostname'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"] or
-          hostvars[inventory_hostname]['ansible_fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
+        - (hostvars[inventory_hostname]['ansible_facts']['hostname'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"] or
+          hostvars[inventory_hostname]['ansible_facts']['fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
       retries: "{{ health_mon_check_retries }}"
       delay: "{{ health_mon_check_delay }}"
       when: containerized_deployment | bool
       block:
         - name: stop ceph mgr
           systemd:
-            name: ceph-mgr@{{ ansible_hostname }}
+            name: ceph-mgr@{{ ansible_facts['hostname'] }}
             state: stopped
             masked: yes
 
     # or if we run a Ceph cluster before Luminous
     - name: stop ceph mgr
       systemd:
-        name: ceph-mgr@{{ ansible_hostname }}
+        name: ceph-mgr@{{ ansible_facts['hostname'] }}
         state: stopped
         enabled: no
         masked: yes
 
     - name: set_fact container_exec_cmd_osd
       set_fact:
-        container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+        container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
       when: containerized_deployment | bool
 
     - name: stop ceph osd
               set_fact:
                 mds_active_host: "{{ [hostvars[item]['inventory_hostname']] }}"
               with_items: "{{ groups[mds_group_name] }}"
-              when: hostvars[item]['ansible_hostname'] == mds_active_name
+              when: hostvars[item]['ansible_facts']['hostname'] == mds_active_name
 
             - name: create standby_mdss group
               add_host:
 
             - name: stop standby ceph mds
               systemd:
-                name: "ceph-mds@{{ hostvars[item]['ansible_hostname'] }}"
+                name: "ceph-mds@{{ hostvars[item]['ansible_facts']['hostname'] }}"
                 state: stopped
                 enabled: no
               delegate_to: "{{ item }}"
             # somehow, having a single task doesn't work in containerized context
             - name: mask systemd units for standby ceph mds
               systemd:
-                name: "ceph-mds@{{ hostvars[item]['ansible_hostname'] }}"
+                name: "ceph-mds@{{ hostvars[item]['ansible_facts']['hostname'] }}"
                 masked: yes
               delegate_to: "{{ item }}"
               with_items: "{{ groups['standby_mdss'] }}"
 
     - name: prevent restart from the packaging
       systemd:
-        name: ceph-mds@{{ ansible_hostname }}
+        name: ceph-mds@{{ ansible_facts['hostname'] }}
         enabled: no
         masked: yes
       when: not containerized_deployment | bool
 
     - name: restart ceph mds
       systemd:
-        name: ceph-mds@{{ ansible_hostname }}
+        name: ceph-mds@{{ ansible_facts['hostname'] }}
         state: restarted
         enabled: yes
         masked: no
       when: not containerized_deployment | bool
 
     - name: restart active mds
-      command: "{{ container_binary }} stop ceph-mds-{{ ansible_hostname }}"
+      command: "{{ container_binary }} stop ceph-mds-{{ ansible_facts['hostname'] }}"
       changed_when: false
       when: containerized_deployment | bool
 
 
     - name: prevent restarts from the packaging
       systemd:
-        name: ceph-mds@{{ ansible_hostname }}
+        name: ceph-mds@{{ ansible_facts['hostname'] }}
         enabled: no
         masked: yes
       when: not containerized_deployment | bool
 
     - name: stop ceph rgw when upgrading from stable-3.2
       systemd:
-        name: ceph-radosgw@rgw.{{ ansible_hostname }}
+        name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}
         state: stopped
         enabled: no
         masked: yes
 
     - name: stop ceph rgw
       systemd:
-        name: ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}
+        name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
         state: stopped
         enabled: no
         masked: yes
   tasks:
     - name: stop ceph rbd mirror
       systemd:
-        name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
+        name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
         state: stopped
         enabled: no
         masked: yes
 
     - name: systemd stop nfs container
       systemd:
-        name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_hostname) }}
+        name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}
         state: stopped
         enabled: no
         masked: yes
   tasks:
     - name: stop the ceph-crash service
       systemd:
-        name: "{{ 'ceph-crash@' + ansible_hostname if containerized_deployment | bool else 'ceph-crash.service' }}"
+        name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}"
         state: stopped
         enabled: no
         masked: yes
         tasks_from: container_binary.yml
 
     - name: container | disallow pre-quincy OSDs and enable all new quincy-only functionality
-      command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} osd require-osd-release quincy"
+      command: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} osd require-osd-release quincy"
       delegate_to: "{{ groups[mon_group_name][0] }}"
       run_once: True
       when:
 
     - name: set_fact container_exec_cmd_status
       set_fact:
-        container_exec_cmd_status: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+        container_exec_cmd_status: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
       when: containerized_deployment | bool
 
     - name: show ceph status
index 7c27a36c0f7bcc1d2fa5a866972dec80c79ada3f..296ef52f1f90beddd8a4e69b54fa48eb22f76245 100644 (file)
@@ -61,7 +61,7 @@
 
     - name: set_fact container_exec_cmd for mon0
       set_fact:
-        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
+        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
       when: containerized_deployment | bool
 
     - name: exit playbook, if can not connect to the cluster
@@ -74,7 +74,7 @@
 
     - name: set_fact mds_to_kill_hostname
       set_fact:
-        mds_to_kill_hostname: "{{ hostvars[mds_to_kill]['ansible_hostname'] }}"
+        mds_to_kill_hostname: "{{ hostvars[mds_to_kill]['ansible_facts']['hostname'] }}"
 
   tasks:
     # get rid of this as soon as "systemctl stop ceph-msd@$HOSTNAME" also
index 02de185ecdeb2ed8f2e0ef28f217db682dd1d927..f99726890ee40829ca778c90c306923bf3a83282 100644 (file)
@@ -39,7 +39,7 @@
     - name: set_fact container_exec_cmd
       when: containerized_deployment | bool
       set_fact:
-        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
+        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
 
     - name: exit playbook, if can not connect to the cluster
       command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
@@ -92,7 +92,7 @@
 
     - name: set_fact mgr_to_kill_hostname
       set_fact:
-        mgr_to_kill_hostname: "{{ hostvars[mgr_to_kill]['ansible_hostname'] }}"
+        mgr_to_kill_hostname: "{{ hostvars[mgr_to_kill]['ansible_facts']['hostname'] }}"
 
   tasks:
     - name: stop manager services and verify it
index df2ae60bd46cf1d15e70a7aff995fa74d16046c9..05d6c2be543e1483cf028d85ec5a0e2ee864f623 100644 (file)
@@ -76,7 +76,7 @@
 
     - name: "set_fact container_exec_cmd build {{ container_binary }} exec command (containerized)"
       set_fact:
-        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }}"
+        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[mon_host]['ansible_facts']['hostname'] }}"
       when: containerized_deployment | bool
 
     - name: exit playbook, if can not connect to the cluster
@@ -90,7 +90,7 @@
 
     - name: set_fact mon_to_kill_hostname
       set_fact:
-        mon_to_kill_hostname: "{{ hostvars[mon_to_kill]['ansible_hostname'] }}"
+        mon_to_kill_hostname: "{{ hostvars[mon_to_kill]['ansible_facts']['hostname'] }}"
 
     - name: stop monitor service(s)
       service:
index 1e3d874bb388620169c8b0186b62345e5dfa0cd8..970268afade82d1a3efc19507694fa3a4d93522a 100644 (file)
@@ -65,7 +65,7 @@
   post_tasks:
     - name: set_fact container_exec_cmd build docker exec command (containerized)
       set_fact:
-        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
+        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
       when: containerized_deployment | bool
 
     - name: exit playbook, if can not connect to the cluster
@@ -93,7 +93,7 @@
       with_nested:
         - "{{ groups.get(osd_group_name) }}"
         - "{{ osd_hosts }}"
-      when: hostvars[item.0]['ansible_hostname'] == item.1
+      when: hostvars[item.0]['ansible_facts']['hostname'] == item.1
 
     - name: get ceph-volume lvm list data
       ceph_volume:
index 3e65393306e666dcfac41c52fae3ed03df9e8eb7..db7cc3a213c810c32ec875fa038a5507b558039a 100644 (file)
@@ -64,7 +64,7 @@
     - name: set_fact container_exec_cmd for mon0
       when: containerized_deployment | bool
       set_fact:
-        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
+        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
 
     - name: exit playbook, if can not connect to the cluster
       command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} service dump -f json"
@@ -76,7 +76,7 @@
 
     - name: set_fact rbdmirror_to_kill_hostname
       set_fact:
-        rbdmirror_to_kill_hostname: "{{ hostvars[rbdmirror_to_kill]['ansible_hostname'] }}"
+        rbdmirror_to_kill_hostname: "{{ hostvars[rbdmirror_to_kill]['ansible_facts']['hostname'] }}"
 
     - name: set_fact rbdmirror_gids
       set_fact:
index 7739628fb393c11ad353c692ab1c2aa3bc691135..b4162897525c10ce4a0ef07786660875c7463f44 100644 (file)
@@ -66,7 +66,7 @@
 
     - name: set_fact container_exec_cmd for mon0
       set_fact:
-        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
+        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
       when: containerized_deployment | bool
 
     - name: exit playbook, if can not connect to the cluster
@@ -95,7 +95,7 @@
       set_fact:
         rgw_host: '{{ item }}'
       with_items: '{{ groups[rgw_group_name] }}'
-      when: hostvars[item]['ansible_hostname'] == rgw_to_kill.split('.')[0]
+      when: hostvars[item]['ansible_facts']['hostname'] == rgw_to_kill.split('.')[0]
 
     - name: stop rgw service
       service:
index 8cdf08d15ed9b7c24a57ec42ca436d85cbd5af6f..41faba3753a73346048795287bb77024e6a765e5 100644 (file)
@@ -74,7 +74,7 @@
 
     - name: stop non-containerized ceph mon
       service:
-        name: "ceph-mon@{{ ansible_hostname }}"
+        name: "ceph-mon@{{ ansible_facts['hostname'] }}"
         state: stopped
         enabled: no
 
       when: ldb_files.rc == 0
 
     - name: copy mon initial keyring in /etc/ceph to satisfy fetch config task in ceph-container-common
-      command: cp /var/lib/ceph/mon/{{ cluster }}-{{ ansible_hostname }}/keyring /etc/ceph/{{ cluster }}.mon.keyring
+      command: cp /var/lib/ceph/mon/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring /etc/ceph/{{ cluster }}.mon.keyring
       args:
         creates: /etc/ceph/{{ cluster }}.mon.keyring
       changed_when: false
     - name: waiting for the monitor to join the quorum...
       command: "{{ container_binary }} run --rm  -v /etc/ceph:/etc/ceph:z --entrypoint=ceph {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} --cluster {{ cluster }} quorum_status --format json"
       register: ceph_health_raw
-      until: ansible_hostname in (ceph_health_raw.stdout | trim | from_json)["quorum_names"]
+      until: ansible_facts['hostname'] in (ceph_health_raw.stdout | trim | from_json)["quorum_names"]
       changed_when: false
       retries: "{{ health_mon_check_retries }}"
       delay: "{{ health_mon_check_delay }}"
     # will not exist
     - name: stop non-containerized ceph mgr(s)
       service:
-        name: "ceph-mgr@{{ ansible_hostname }}"
+        name: "ceph-mgr@{{ ansible_facts['hostname'] }}"
         state: stopped
         enabled: no
       failed_when: false
   post_tasks:
     - name: container - waiting for clean pgs...
       command: >
-        {{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} pg stat --format json
+        {{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} pg stat --format json
       register: ceph_health_post
       until: >
         (((ceph_health_post.stdout | from_json).pg_summary.num_pg_by_state | length) > 0)
 
     - name: stop non-containerized ceph mds(s)
       service:
-        name: "ceph-mds@{{ ansible_hostname }}"
+        name: "ceph-mds@{{ ansible_facts['hostname'] }}"
         state: stopped
         enabled: no
 
   tasks:
     - name: stop non-containerized ceph rgw(s)
       service:
-        name: "ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}"
+        name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
         state: stopped
         enabled: no
       with_items: "{{ rgw_instances }}"
   pre_tasks:
     - name: stop non-containerized ceph rbd mirror(s)
       service:
-        name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
+        name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
         state: stopped
         enabled: no
 
index ab92b7351bd401ccd638b68b1ad5904d95a4573f..e7f9485b9bbcf634a4cd5647ee4ac1e4314f0362 100644 (file)
 
    - name: Check if the node has been migrated already
       stat: >
-        path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/migration_completed
+        path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/migration_completed
       register: migration_completed
       failed_when: false
 
     - name: Check for failed run
       stat: >
-        path=/var/lib/ceph/{{ ansible_hostname }}.tar
+        path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
       register: mon_archive_leftover
 
     - fail: msg="Looks like an archive is already there, please remove it!"
       when: migration_completed.stat.exists == False and mon_archive_leftover.stat.exists == True
 
     - name: Compress the store as much as possible
-      command: ceph tell mon.{{ ansible_hostname }} compact
+      command: ceph tell mon.{{ ansible_facts['hostname'] }} compact
       when: migration_completed.stat.exists == False
 
     - name: Check if sysvinit
       stat: >
-        path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/sysvinit
+        path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/sysvinit
       register: monsysvinit
       changed_when: False
 
     - name: Check if upstart
       stat: >
-        path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/upstart
+        path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/upstart
       register: monupstart
       changed_when: False
 
@@ -70,7 +70,7 @@
       service: >
         name=ceph-mon
         state=restarted
-        args=id={{ ansible_hostname }}
+        args=id={{ ansible_facts['hostname'] }}
       when: monupstart.stat.exists == True and migration_completed.stat.exists == False
 
     - name: Restart the Monitor after compaction (Sysvinit)
@@ -92,7 +92,7 @@
       service: >
         name=ceph-mon
         state=stopped
-        args=id={{ ansible_hostname }}
+        args=id={{ ansible_facts['hostname'] }}
       when: monupstart.stat.exists == True and migration_completed.stat.exists == False
 
     - name: Stop the monitor (Sysvinit)
     # NOTE (leseb): should we convert upstart to sysvinit here already?
     - name: Archive monitor stores
       shell: >
-        tar -cpvzf - --one-file-system . /etc/ceph/* | cat > {{ ansible_hostname }}.tar
+        tar -cpvzf - --one-file-system . /etc/ceph/* | cat > {{ ansible_facts['hostname'] }}.tar
         chdir=/var/lib/ceph/
-        creates={{ ansible_hostname }}.tar
+        creates={{ ansible_facts['hostname'] }}.tar
       when: migration_completed.stat.exists == False
 
     - name: Scp the Monitor store
       fetch: >
-        src=/var/lib/ceph/{{ ansible_hostname }}.tar
-        dest={{ backup_dir }}/monitors-backups/{{ ansible_hostname }}.tar
+        src=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
+        dest={{ backup_dir }}/monitors-backups/{{ ansible_facts['hostname'] }}.tar
         flat=yes
       when: migration_completed.stat.exists == False
 
 
     - name: Check if sysvinit
       stat: >
-        path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/sysvinit
+        path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/sysvinit
       register: monsysvinit
       changed_when: False
 
     - name: Check if upstart
       stat: >
-        path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/upstart
+        path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/upstart
       register: monupstart
       changed_when: False
 
       service: >
         name=ceph-mon
         state=stopped
-        args=id={{ ansible_hostname }}
+        args=id={{ ansible_facts['hostname'] }}
       when: monupstart.stat.exists == True and migration_completed.stat.exists == False
 
     - name: Make sure the monitor is stopped (Sysvinit)
     # NOTE (leseb): 'creates' was added in Ansible 1.6
     - name: Copy and unarchive the monitor store
       unarchive: >
-        src={{ backup_dir }}/monitors-backups/{{ ansible_hostname }}.tar
+        src={{ backup_dir }}/monitors-backups/{{ ansible_facts['hostname'] }}.tar
         dest=/var/lib/ceph/
         copy=yes
         mode=0600
 
     - name: Waiting for the monitor to join the quorum...
       shell: >
-        ceph -s | grep monmap | sed 's/.*quorum//' | egrep -q {{ ansible_hostname }}
+        ceph -s | grep monmap | sed 's/.*quorum//' | egrep -q {{ ansible_facts['hostname'] }}
       register: result
       until: result.rc == 0
       retries: 5
 
     - name: Done moving to the next monitor
       file: >
-        path=/var/lib/ceph/mon/ceph-{{ ansible_hostname }}/migration_completed
+        path=/var/lib/ceph/mon/ceph-{{ ansible_facts['hostname'] }}/migration_completed
         state=touch
         owner=root
         group=root
 
     - name: Check for failed run
       stat: >
-        path=/var/lib/ceph/{{ ansible_hostname }}.tar
+        path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
       register: osd_archive_leftover
 
     - fail: msg="Looks like an archive is already there, please remove it!"
 
     - name: Archive ceph configs
       shell: >
-        tar -cpvzf - --one-file-system . /etc/ceph/ceph.conf | cat > {{ ansible_hostname }}.tar
+        tar -cpvzf - --one-file-system . /etc/ceph/ceph.conf | cat > {{ ansible_facts['hostname'] }}.tar
         chdir=/var/lib/ceph/
-        creates={{ ansible_hostname }}.tar
+        creates={{ ansible_facts['hostname'] }}.tar
       when: migration_completed.stat.exists == False
 
     - name: Create backup directory
 
     - name: Scp OSDs dirs and configs
       fetch: >
-        src=/var/lib/ceph/{{ ansible_hostname }}.tar
+        src=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
         dest={{ backup_dir }}/osds-backups/
         flat=yes
       when: migration_completed.stat.exists == False
     # NOTE (leseb): 'creates' was added in Ansible 1.6
     - name: Copy and unarchive the OSD configs
       unarchive: >
-        src={{ backup_dir }}/osds-backups/{{ ansible_hostname }}.tar
+        src={{ backup_dir }}/osds-backups/{{ ansible_facts['hostname'] }}.tar
         dest=/var/lib/ceph/
         copy=yes
         mode=0600
 
     - name: Check for failed run
       stat: >
-        path=/var/lib/ceph/{{ ansible_hostname }}.tar
+        path=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
       register: rgw_archive_leftover
 
     - fail: msg="Looks like an archive is already there, please remove it!"
 
     - name: Archive rados gateway configs
       shell: >
-        tar -cpvzf - --one-file-system . /etc/ceph/* | cat > {{ ansible_hostname }}.tar
+        tar -cpvzf - --one-file-system . /etc/ceph/* | cat > {{ ansible_facts['hostname'] }}.tar
         chdir=/var/lib/ceph/
-        creates={{ ansible_hostname }}.tar
+        creates={{ ansible_facts['hostname'] }}.tar
       when: migration_completed.stat.exists == False
 
     - name: Create backup directory
 
     - name: Scp RGWs dirs and configs
       fetch: >
-        src=/var/lib/ceph/{{ ansible_hostname }}.tar
+        src=/var/lib/ceph/{{ ansible_facts['hostname'] }}.tar
         dest={{ backup_dir }}/rgws-backups/
         flat=yes
       when: migration_completed.stat.exists == False
     # NOTE (leseb): 'creates' was added in Ansible 1.6
    - name: Copy and unarchive the RGW configs
       unarchive: >
-        src={{ backup_dir }}/rgws-backups/{{ ansible_hostname }}.tar
+        src={{ backup_dir }}/rgws-backups/{{ ansible_facts['hostname'] }}.tar
         dest=/var/lib/ceph/
         copy=yes
         mode=0600
index c6aadfe18e461c0663c1f6209e53bb4a0c54245a..29f40433c6a8bc5850e47d73002c433deaa445f1 100644 (file)
     include_vars: "{{ item }}"
     with_first_found:
       - files:
-          - "host_vars/{{ ansible_hostname }}.yml"
+          - "host_vars/{{ ansible_facts['hostname'] }}.yml"
           - "host_vars/default.yml"
         skip: true
 
   - name: exit playbook, if devices not defined
     fail:
-      msg: "devices must be define in host_vars/default.yml or host_vars/{{ ansible_hostname }}.yml"
+      msg: "devices must be define in host_vars/default.yml or host_vars/{{ ansible_facts['hostname'] }}.yml"
     when: devices is not defined
 
  - name: install sgdisk (gdisk)
   - set_fact:
       owner: 167
       group: 167
-    when: ansible_os_family == "RedHat"
+    when: ansible_facts['os_family'] == "RedHat"
   
   - set_fact:
       owner: 64045
       group: 64045
-    when: ansible_os_family == "Debian"
+    when: ansible_facts['os_family'] == "Debian"
 
   - name: change partitions ownership
     file:
index 9184598f94c4bc11eaac95076e24296f9ebdfde0..11f38691487283ca03fcdea22525a85337b24caf 100644 (file)
@@ -60,7 +60,7 @@
   post_tasks:
     - name: set_fact container_exec_cmd build docker exec command (containerized)
       set_fact:
-        container_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+        container_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
       when: containerized_deployment | bool
 
    - name: exit playbook, if unable to connect to the cluster
index 6ba9fcf43b2acbc6288f56848a3c46553ba99be4..0d1d0abfa2f27f47ba7955c9be0f5f781dfe3231 100644 (file)
@@ -7,7 +7,7 @@
 
 
 ceph_conf_overrides:
-  "client.rgw.{{ hostvars[inventory_hostname]['ansible_hostname'] }}":
+  "client.rgw.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
     "rgw keystone api version": "2"
     "rgw keystone url": "http://192.168.0.1:35357"
     "rgw keystone admin token": "password"
@@ -16,7 +16,7 @@ ceph_conf_overrides:
     "rgw keystone token cache size": "10000"
     "rgw keystone revocation interval": "900"
     "rgw s3 auth use keystone": "true"
-    "nss db path": "/var/lib/ceph/radosgw/ceph-radosgw.{{ ansible_hostname }}/nss"
+    "nss db path": "/var/lib/ceph/radosgw/ceph-radosgw.{{ ansible_facts['hostname'] }}/nss"
 
 
 # NOTE (leseb): to authenticate with Keystone you have two options:
index 1044e6d446c26f60518b23070156ac0af2c9ecb3..327de816c70db4c613ecc86ad73f63cb01f25221 100644 (file)
@@ -7,7 +7,7 @@
 
 
 ceph_conf_overrides:
-  "client.rgw.{{ hostvars[inventory_hostname]['ansible_hostname'] }}":
+  "client.rgw.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
     "rgw keystone api version": "3"
     "rgw keystone url": "http://192.168.0.1:35357"
     "rgw keystone admin token": "password"
@@ -17,7 +17,7 @@ ceph_conf_overrides:
     "rgw keystone token cache size": "10000"
     "rgw keystone revocation interval": "900"
     "rgw s3 auth use keystone": "true"
-    "nss db path": "/var/lib/ceph/radosgw/ceph-radosgw.{{ ansible_hostname }}/nss"
+    "nss db path": "/var/lib/ceph/radosgw/ceph-radosgw.{{ ansible_facts['hostname'] }}/nss"
 
 
 # NOTE (leseb): to authenticate with Keystone you have two options:
index cf2dd230de0bd3fe6c207fea84823863624bb544..1cd18c260723a0cc59c61c685fdb88fece69c659 100644 (file)
@@ -6,6 +6,6 @@
 # The double quotes are important, do NOT remove them.
 
 ceph_conf_overrides:
-  "client.rgw.{{ hostvars[inventory_hostname]['ansible_hostname'] }}":
+  "client.rgw.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
     rgw enable static website = true
     rgw dns s3website name = objects-website-region.domain.com
index 40645281c68d3dd3984c9b7a9f411d4ee013b207..af704fb7276a94a2ef4bb170378aa22b6afabc61 100644 (file)
@@ -6,7 +6,7 @@
 # The double quotes are important, do NOT remove them.
 
 ceph_conf_overrides:
-  "client.rgw.{{ hostvars[inventory_hostname]['ansible_hostname'] }}":
+  "client.rgw.{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] }}":
     rgw enable usage log = true
     rgw usage log tick interval = 30
     rgw usage log flush threshold = 1024
index b6cdf6c27cd974a13649caa4d413c74bdb785bc1..a754d9803c4af5dee6e7086ca831b47fd223ae20 100644 (file)
@@ -8,7 +8,7 @@
   group_by:
     key: _filtered_clients
     parents: "{{ client_group_name }}"
-  when: (ansible_architecture == 'x86_64') or (not containerized_deployment | bool)
+  when: (ansible_facts['architecture'] == 'x86_64') or (not containerized_deployment | bool)
 
 - name: set_fact delegated_node
   set_fact:
index ab7e518535f048dd90bd969d4ed10ef74bb0727e..7b7cb4293057318c2b7f0021aed3f3105c5369d4 100644 (file)
@@ -6,7 +6,7 @@
     create: yes
     line: "CLUSTER={{ cluster }}"
     regexp: "^CLUSTER="
-  when: ansible_os_family in ["RedHat", "Suse"]
+  when: ansible_facts['os_family'] in ["RedHat", "Suse"]
 
 # NOTE(leseb): we are performing the following check
 # to ensure any Jewel installation will not fail.
@@ -19,7 +19,7 @@
 # - All previous versions from Canonical
 # - Infernalis from ceph.com
 - name: debian based systems - configure cluster name
-  when: ansible_os_family == "Debian"
+  when: ansible_facts['os_family'] == "Debian"
   block:
     - name: check /etc/default/ceph exist
       stat:
index b76145777b722048580f8546778c8d76038a5dae..115ecf59ed10e255c58f70aa835cb1b7bc849ddd 100644 (file)
@@ -7,7 +7,7 @@
     regexp: "^TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES="
     line: "TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }}"
   when:
-    - ansible_os_family == 'Debian'
+    - ansible_facts['os_family'] == 'Debian'
     - etc_default_ceph.stat.exists
   notify: restart ceph osds
 
@@ -18,5 +18,5 @@
     create: yes
     regexp: "^TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES="
     line: "TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }}"
-  when: ansible_os_family == 'RedHat'
+  when: ansible_facts['os_family'] == 'RedHat'
   notify: restart ceph osds
index a18801120bfbf6f0f91045d77c813117ddb6746e..a36355a1bb12cbf0e22cf69b9d6363505182dab4 100644 (file)
@@ -21,4 +21,4 @@
   args:
     warn: no
   changed_when: false
-  when: ansible_pkg_mgr == 'yum'
+  when: ansible_facts['pkg_mgr'] == 'yum'
index 0afa23befc8877dfe3c5c905ace656c9f5b65482..6832a3f5b1785e001e5fc9d2089f51feb86185bc 100644 (file)
@@ -15,6 +15,6 @@
 
 - name: configure debian ceph stable community repository
   apt_repository:
-    repo: "deb {{ ceph_stable_repo }} {{ ceph_stable_distro_source | default(ansible_distribution_release) }} main"
+    repo: "deb {{ ceph_stable_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main"
     state: present
     update_cache: yes
index 10c5ea38f94e726d7014b7fd5067f11a59c40a84..607ce0896c3cb8581c1b7b31dae374befb0518b2 100644 (file)
@@ -9,6 +9,6 @@
 
 - name: configure debian custom repository
   apt_repository:
-    repo: "deb {{ ceph_custom_repo }} {{ ansible_distribution_release }} main"
+    repo: "deb {{ ceph_custom_repo }} {{ ansible_facts['distribution_release'] }} main"
     state: present
     update_cache: yes
index 9bde8bd2b527a1b90da5e78474b1f82e4a4cfff6..c5c8c5c3a859daee1650efa36828d22325a11498 100644 (file)
@@ -1,7 +1,7 @@
 ---
 - name: fetch ceph debian development repository
   uri:
-    url: https://shaman.ceph.com/api/repos/ceph/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/{{ ansible_distribution | lower }}/{{ ansible_distribution_release }}/repo
+    url: https://shaman.ceph.com/api/repos/ceph/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_release'] }}/repo
     return_content: yes
   register: ceph_dev_deb_repo
 
index f72ff82f84fb311a2f34e88d0294bf584ce9f68d..3c6db106f38a0660ce658c60bda9446e51447467 100644 (file)
@@ -4,6 +4,6 @@
     name: "{{ debian_ceph_pkgs | unique }}"
     update_cache: no
     state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
-    default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else '' }}{{ ansible_distribution_release ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
+    default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else '' }}{{ ansible_facts['distribution_release'] ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
   register: result
   until: result is succeeded
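The default_release expression above concatenates two mutually exclusive conditionals, so it yields the UCA release, the distro backports suite, or an empty string. A hedged sketch of the same logic spelled out, assuming the surrounding ceph-ansible variables:

    # illustration only: an equivalent, expanded form of default_release
    - name: show the computed default_release (sketch)
      debug:
        msg: >-
          {{ ceph_stable_release_uca | default('')
             if (ceph_origin == 'repository' and ceph_repository == 'uca')
             else (ansible_facts['distribution_release'] ~ '-backports'
                   if (ceph_origin == 'distro' and ceph_use_distro_backports | bool)
                   else '') }}
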
index 28b56fd9ac491b3e6603bbcc30720a3126c151ce..7e9f6d62fd8f1cc5c8c789cc6544fdc15566b1ba 100644 (file)
@@ -5,7 +5,7 @@
     state: present
   register: result
   until: result is succeeded
-  when: ansible_distribution == 'RedHat'
+  when: ansible_facts['distribution'] == 'RedHat'
 
 - name: install centos dependencies
   yum:
@@ -13,7 +13,7 @@
     state: present
   register: result
   until: result is succeeded
-  when: ansible_distribution == 'CentOS'
+  when: ansible_facts['distribution'] == 'CentOS'
 
 - name: install redhat ceph packages
   package:
index 5fc1da720d1a16e7fa57e102070c3b7c2a662428..88b3228162c50dd4b4bf560d663650efd50ca85f 100644 (file)
@@ -1,15 +1,15 @@
 ---
 - name: enable red hat storage monitor repository
   rhsm_repository:
-    name: "rhceph-{{ ceph_rhcs_version }}-mon-for-rhel-8-{{ ansible_architecture }}-rpms"
+    name: "rhceph-{{ ceph_rhcs_version }}-mon-for-rhel-8-{{ ansible_facts['architecture'] }}-rpms"
   when: (mon_group_name in group_names or mgr_group_name in group_names)
 
 - name: enable red hat storage osd repository
   rhsm_repository:
-    name: "rhceph-{{ ceph_rhcs_version }}-osd-for-rhel-8-{{ ansible_architecture }}-rpms"
+    name: "rhceph-{{ ceph_rhcs_version }}-osd-for-rhel-8-{{ ansible_facts['architecture'] }}-rpms"
   when: osd_group_name in group_names
 
 - name: enable red hat storage tools repository
   rhsm_repository:
-    name: "rhceph-{{ ceph_rhcs_version }}-tools-for-rhel-8-{{ ansible_architecture }}-rpms"
+    name: "rhceph-{{ ceph_rhcs_version }}-tools-for-rhel-8-{{ ansible_facts['architecture'] }}-rpms"
   when: (mgr_group_name in group_names or rgw_group_name in group_names or mds_group_name in group_names or nfs_group_name in group_names or iscsi_gw_group_name in group_names or client_group_name in group_names or monitoring_group_name in group_names)
index 5fd817d75c97b6baa7911befafe20b95408417c3..4db2737ccd073c8d9b8b815db11986453f961267 100644 (file)
@@ -5,7 +5,7 @@
   register: result
   until: result is succeeded
   tags: with_pkg
-  when: ansible_distribution_major_version | int == 7
+  when: ansible_facts['distribution_major_version'] | int == 7
 
 - name: configure red hat ceph community repository stable key
   rpm_key:
@@ -21,7 +21,7 @@
     gpgcheck: yes
     state: present
     gpgkey: "{{ ceph_stable_key }}"
-    baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_distribution_major_version }}/$basearch"
+    baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_facts['distribution_major_version'] }}/$basearch"
     file: ceph_stable
     priority: 2
   register: result
@@ -34,7 +34,7 @@
     gpgcheck: yes
     state: present
     gpgkey: "{{ ceph_stable_key }}"
-    baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_distribution_major_version }}/noarch"
+    baseurl: "{{ ceph_mirror }}/rpm-{{ ceph_stable_release }}/el{{ ansible_facts['distribution_major_version'] }}/noarch"
     file: ceph_stable
     priority: 2
   register: result
index f422fae699c1d3a098d4f71cb79b6cf8893fc76d..37b68edd63ed77bd3b9af2242c5cc3c76b763cd1 100644 (file)
@@ -1,7 +1,7 @@
 ---
 - name: get latest available build
   uri:
-    url: "https://shaman.ceph.com/api/search/?status=ready&project=ceph&flavor=default&distros=centos/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}&ref={{ ceph_dev_branch }}&sha1={{ ceph_dev_sha1 }}"
+    url: "https://shaman.ceph.com/api/search/?status=ready&project=ceph&flavor=default&distros=centos/{{ ansible_facts['distribution_major_version'] }}/{{ ansible_facts['architecture'] }}&ref={{ ceph_dev_branch }}&sha1={{ ceph_dev_sha1 }}"
     return_content: yes
   run_once: true
   register: latest_build
index 2239aad061bb1c5a95601099452a659997e45dd6..31ff66a884bae0e5d098a7f1e6434c15167c8652 100644 (file)
@@ -1,22 +1,22 @@
 ---
 - name: include_tasks installs/install_on_redhat.yml
   include_tasks: installs/install_on_redhat.yml
-  when: ansible_os_family == 'RedHat'
+  when: ansible_facts['os_family'] == 'RedHat'
   tags: package-install
 
 - name: include_tasks installs/install_on_suse.yml
   include_tasks: installs/install_on_suse.yml
-  when: ansible_os_family == 'Suse'
+  when: ansible_facts['os_family'] == 'Suse'
   tags: package-install
 
 - name: include installs/install_on_debian.yml
   include_tasks: installs/install_on_debian.yml
   tags: package-install
-  when: ansible_os_family == 'Debian'
+  when: ansible_facts['os_family'] == 'Debian'
 
 - name: include_tasks installs/install_on_clear.yml
   include_tasks: installs/install_on_clear.yml
-  when: ansible_os_family == 'ClearLinux'
+  when: ansible_facts['os_family'] == 'ClearLinux'
   tags: package-install
 
 - name: get ceph version
index 54addc65b150dc625a14507d7c802097d78e0f8e..5dac3243939a7fd875eebb0c657042ee313507d3 100644 (file)
@@ -1,7 +1,7 @@
 ---
 - name: create rados gateway instance directories
   file:
-    path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}"
+    path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
     state: directory
     owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
     group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
@@ -10,7 +10,7 @@
 
 - name: generate environment file
   copy:
-    dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}/EnvironmentFile"
+    dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}/EnvironmentFile"
     owner: "root"
     group: "root"
     mode: "0644"
index aca07f0088ddc83ff0cc9f4008cd7ce2610bc9e7..8761569f7943b67341089186cb897469ed8d122f 100644 (file)
@@ -24,8 +24,8 @@ osd crush chooseleaf type = 0
 
 {% if nb_mon > 0 and inventory_hostname in groups.get(mon_group_name, []) %}
 mon initial members = {% for host in groups[mon_group_name] %}
-      {% if hostvars[host]['ansible_hostname'] is defined -%}
-        {{ hostvars[host]['ansible_hostname'] }}
+      {% if hostvars[host]['ansible_facts']['hostname'] is defined -%}
+        {{ hostvars[host]['ansible_facts']['hostname'] }}
       {%- endif %}
       {%- if not loop.last %},{% endif %}
     {% endfor %}
@@ -84,13 +84,13 @@ filestore xattr use omap = true
 [osd]
 {% if is_hci | bool and _num_osds > 0 %}
 {# hci_safety_factor is the safety factor for HCI deployments #}
-{% if ansible_memtotal_mb * 1048576 * hci_safety_factor / _num_osds > osd_memory_target %}
-{% set _osd_memory_target = (ansible_memtotal_mb * 1048576 * hci_safety_factor / _num_osds) | int %}
+{% if ansible_facts['memtotal_mb'] * 1048576 * hci_safety_factor / _num_osds > osd_memory_target %}
+{% set _osd_memory_target = (ansible_facts['memtotal_mb'] * 1048576 * hci_safety_factor / _num_osds) | int %}
 {% endif %}
 {% elif _num_osds > 0 %}
 {# non_hci_safety_factor is the safety factor for dedicated nodes #}
-{% if ansible_memtotal_mb * 1048576 * non_hci_safety_factor / _num_osds > osd_memory_target %}
-{% set _osd_memory_target = (ansible_memtotal_mb * 1048576 * non_hci_safety_factor / _num_osds) | int %}
+{% if ansible_facts['memtotal_mb'] * 1048576 * non_hci_safety_factor / _num_osds > osd_memory_target %}
+{% set _osd_memory_target = (ansible_facts['memtotal_mb'] * 1048576 * non_hci_safety_factor / _num_osds) | int %}
 {% endif %}
 {% endif %}
 osd memory target = {{ _osd_memory_target | default(osd_memory_target) }}
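The guard above only raises osd_memory_target when each OSD's share of RAM, after the safety factor, exceeds the configured target. A worked example with assumed sample values:

    {# worked example (all values are assumptions):
       ansible_facts['memtotal_mb'] = 262144      -> a 256 GiB node
       hci_safety_factor            = 0.2
       _num_osds                    = 8
       262144 * 1048576 * 0.2 / 8   = 6871947673.6, | int -> 6871947673 (~6.4 GiB)
       with the common default osd_memory_target of 4294967296 (4 GiB),
       6871947673 > 4294967296, so _osd_memory_target is raised. #}
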
@@ -98,14 +98,14 @@ osd memory target = {{ _osd_memory_target | default(osd_memory_target) }}
 {% endif %}
 
 {% if inventory_hostname in groups.get(rgw_group_name, []) %}
-{% set _rgw_hostname = hostvars[inventory_hostname]['rgw_hostname'] | default(hostvars[inventory_hostname]['ansible_hostname']) %}
+{% set _rgw_hostname = hostvars[inventory_hostname]['rgw_hostname'] | default(hostvars[inventory_hostname]['ansible_facts']['hostname']) %}
 {# {{ hostvars[host]['rgw_hostname'] }} for backward compatibility, fqdn issues. See bz1580408 #}
 {% if hostvars[inventory_hostname]['rgw_instances'] is defined %}
 {% for instance in hostvars[inventory_hostname]['rgw_instances'] %}
 [client.rgw.{{ _rgw_hostname + '.' + instance['instance_name'] }}]
 host = {{ _rgw_hostname }}
 keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ _rgw_hostname + '.' + instance['instance_name'] }}/keyring
-log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[inventory_hostname]['ansible_hostname'] + '.' + instance['instance_name'] }}.log
+log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[inventory_hostname]['ansible_facts']['hostname'] + '.' + instance['instance_name'] }}.log
 {% set _rgw_binding_socket = instance['radosgw_address'] | default(_radosgw_address) | string + ':' + instance['radosgw_frontend_port'] | default(radosgw_frontend_port) | string %}
 {%- macro frontend_line(frontend_type) -%}
 {%- if frontend_type == 'civetweb' -%}
@@ -131,12 +131,12 @@ rgw_zone = {{ instance['rgw_zone'] }}
 
 {% if inventory_hostname in groups.get(nfs_group_name, []) and inventory_hostname not in groups.get(rgw_group_name, []) %}
 {% for host in groups[nfs_group_name] %}
-{% set _rgw_hostname = hostvars[host]['rgw_hostname'] | default(hostvars[host]['ansible_hostname']) %}
+{% set _rgw_hostname = hostvars[host]['rgw_hostname'] | default(hostvars[host]['ansible_facts']['hostname']) %}
 {% if nfs_obj_gw | bool %}
 [client.rgw.{{ _rgw_hostname }}]
 host = {{ _rgw_hostname }}
 keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ _rgw_hostname }}/keyring
-log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[host]['ansible_hostname'] }}.log
+log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[host]['ansible_facts']['hostname'] }}.log
 {% endif %}
 {% endfor %}
 {% endif %}
index 1adf86ca966aa36afac56f5cd68ac21813bdc8d9..7a965da985426adb7ceecac8b94823ac62c7ae07 100644 (file)
 
 - name: add docker's gpg key
   apt_key:
-    url: 'https://download.docker.com/linux/{{ ansible_distribution | lower }}/gpg'
+    url: "https://download.docker.com/linux/{{ ansible_facts['distribution'] | lower }}/gpg"
   register: result
   until: result is succeeded
   when: container_package_name == 'docker-ce'
 
 - name: add docker repository
   apt_repository:
-    repo: "deb https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} stable"
+    repo: "deb https://download.docker.com/linux/{{ ansible_facts['distribution'] | lower }} {{ ansible_facts['distribution_release'] }} stable"
   when: container_package_name == 'docker-ce'
 
 - name: add podman ppa repository
@@ -29,4 +29,4 @@
     repo: "ppa:projectatomic/ppa"
   when:
     - container_package_name == 'podman'
-    - ansible_distribution == 'Ubuntu'
+    - ansible_facts['distribution'] == 'Ubuntu'
index 5d73a1621b5f381c6dab73361923dc57a98a81db..5b9eb11f66e20183a576ab9afeafd1030ed18165 100644 (file)
@@ -2,13 +2,13 @@
 - name: include specific variables
   include_vars: "{{ item }}"
   with_first_found:
-    - "{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yml"
-    - "{{ ansible_os_family }}.yml"
+    - "{{ ansible_facts['distribution'] }}-{{ ansible_facts['distribution_major_version'] }}.yml"
+    - "{{ ansible_facts['os_family'] }}.yml"
 
 - name: debian based systems tasks
   include_tasks: debian_prerequisites.yml
   when:
-    - ansible_os_family == 'Debian'
+    - ansible_facts['os_family'] == 'Debian'
   tags: with_pkg
 
 - name: install container packages
index 65c58a254cfbe8acc0a59d0415db772d13fb1a1d..4d6d247d195dbedcb065b03f2eae4a375f4afdba 100644 (file)
@@ -57,7 +57,7 @@
 
 - name: start the ceph-crash service
   systemd:
-    name: "{{ 'ceph-crash@' + ansible_hostname if containerized_deployment | bool else 'ceph-crash.service' }}"
+    name: "{{ 'ceph-crash@' + ansible_facts['hostname'] if containerized_deployment | bool else 'ceph-crash.service' }}"
     state: started
     enabled: yes
     masked: no
index 78285671b06df9af35ec47d8f21ee10ad02231e1..b3fe0ae0db7833e62c20fe77d68467a24c0e1f50 100644 (file)
@@ -1,7 +1,7 @@
 ---
 - name: set_fact container_exec_cmd
   set_fact:
-    container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+    container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
   when: containerized_deployment | bool
 
 - name: set_fact container_run_cmd
     - name: add iscsi gateways - ipv4
       command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard iscsi-gateway-add -i -"
       args:
-        stdin: "{{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}:{{ hostvars[item]['api_port'] | default(5000) }}"
+        stdin: "{{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}:{{ hostvars[item]['api_port'] | default(5000) }}"
         stdin_add_newline: no
       changed_when: false
       delegate_to: "{{ groups[mon_group_name][0] }}"
     - name: add iscsi gateways - ipv6
       command: "{{ ceph_cmd }} --cluster {{ cluster }} dashboard iscsi-gateway-add -i -"
       args:
-        stdin: "{{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap }}:{{ hostvars[item]['api_port'] | default(5000) }}"
+        stdin: "{{ 'https' if hostvars[item]['api_secure'] | default(false) | bool else 'http' }}://{{ hostvars[item]['api_user'] | default('admin') }}:{{ hostvars[item]['api_password'] | default('admin') }}@{{ hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap }}:{{ hostvars[item]['api_port'] | default(5000) }}"
         stdin_add_newline: no
       changed_when: false
       delegate_to: "{{ groups[mon_group_name][0] }}"
index 9538142a626193ded50f379eab2ea564f6a947c8..e2f7774b47e07d8afece2d023c2cd2882bbdecc9 100644 (file)
@@ -1,16 +1,16 @@
 ---
 - name: get current mgr backend - ipv4
   set_fact:
-    mgr_server_addr: "{{ hostvars[dashboard_backend]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}"
+    mgr_server_addr: "{{ hostvars[dashboard_backend]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}"
   when: ip_version == 'ipv4'
 
 - name: get current mgr backend - ipv6
   set_fact:
-    mgr_server_addr: "{{ hostvars[dashboard_backend]['ansible_all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last }}"
+    mgr_server_addr: "{{ hostvars[dashboard_backend]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last }}"
   when: ip_version == 'ipv6'
 
 - name: config the current dashboard backend
-  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/{{ hostvars[dashboard_backend]['ansible_hostname'] }}/server_addr {{ mgr_server_addr }}"
+  command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} config set mgr mgr/dashboard/{{ hostvars[dashboard_backend]['ansible_facts']['hostname'] }}/server_addr {{ mgr_server_addr }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   changed_when: false
   run_once: true
index ee3cbc5282c07042da8c855ad098c99b8aeacfe4..723b3164969ae79666cfcb9025e57212cb8a2e69 100644 (file)
@@ -4,5 +4,5 @@
 
 - name: print dashboard URL
   debug:
-    msg: "The dashboard has been deployed! You can access your dashboard web UI at {{ dashboard_protocol }}://{{ ansible_fqdn }}:{{ dashboard_port }}/ as an '{{ dashboard_admin_user }}' user with '{{ dashboard_admin_password }}' password."
+    msg: "The dashboard has been deployed! You can access your dashboard web UI at {{ dashboard_protocol }}://{{ ansible_facts['fqdn'] }}:{{ dashboard_port }}/ as an '{{ dashboard_admin_user }}' user with '{{ dashboard_admin_password }}' password."
   run_once: true
index ca7465fe52b36ecfe52928cdb4abc4e7024990a7..decb66fcf67d64c414db468e2173d0e023f0c7e6 100644 (file)
@@ -75,7 +75,7 @@ debian_package_dependencies: []
 
 centos_package_dependencies:
   - epel-release
-  - "{{ 'python3-libselinux' if ansible_distribution_major_version | int >= 8 else 'libselinux-python' }}"
+  - "{{ (ansible_facts['distribution_major_version'] is version('8', '>=')) | ternary('python3-libselinux', 'libselinux-python') }}"
 
 redhat_package_dependencies: []
 
@@ -145,7 +145,7 @@ nfs_ganesha_stable_deb_repo: "{{ ceph_mirror }}/nfs-ganesha/deb-{{ nfs_ganesha_s
 # Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
 # # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
 # for more info read: https://github.com/ceph/ceph-ansible/issues/305
-#ceph_stable_distro_source: "{{ ansible_distribution_release }}"
+#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
 
 
 # REPOSITORY: RHCS VERSION RED HAT STORAGE (from 5.0)
@@ -173,7 +173,7 @@ ceph_rhcs_repository_path: "{{ ceph_stable_rh_storage_repository_path | default(
 #
 ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
 ceph_stable_openstack_release_uca: queens
-ceph_stable_release_uca: "{{ ansible_distribution_release }}-updates/{{ ceph_stable_openstack_release_uca }}"
+ceph_stable_release_uca: "{{ ansible_facts['distribution_release'] }}-updates/{{ ceph_stable_openstack_release_uca }}"
 
 # REPOSITORY: openSUSE OBS
 #
@@ -183,7 +183,7 @@ ceph_stable_release_uca: "{{ ansible_distribution_release }}-updates/{{ ceph_sta
 # usually has newer Ceph releases than the normal distro repository.
 #
 #
-ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_distribution_version }}/"
+ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_facts['distribution_version'] }}/"
 
 # REPOSITORY: DEV
 #
@@ -246,7 +246,7 @@ generate_fsid: true
 
 ceph_conf_key_directory: /etc/ceph
 
-ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_os_family == 'Debian' else '167' }}"
+ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_facts['os_family'] == 'Debian' else '167' }}"
 
 # Permissions for keyring files in /etc/ceph
 ceph_keyring_permissions: '0600'
@@ -521,7 +521,7 @@ rgw_zone: default
 #   global:
 #     foo: 1234
 #     bar: 5678
-#   "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_hostname'] }}":
+#   "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
 #     rgw_zone: zone1
 #
 ceph_conf_overrides: {}
@@ -789,4 +789,4 @@ client_connections: {}
 
 container_exec_cmd:
 docker: false
-ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0)  }}"
\ No newline at end of file
+ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0)  }}"
index 8e866bcbf15151b2983450cd8262ba19a7702bbc..44249419f4e3f4125b64723576fa52d1b3b67f41 100644 (file)
@@ -6,4 +6,4 @@
 
 - name: set_fact container_binary
   set_fact:
-    container_binary: "{{ 'podman' if (podman_binary.stat.exists and ansible_distribution == 'Fedora') or (ansible_os_family == 'RedHat' and ansible_distribution_major_version == '8') else 'docker' }}"
\ No newline at end of file
+    container_binary: "{{ 'podman' if (podman_binary.stat.exists and ansible_facts['distribution'] == 'Fedora') or (ansible_facts['os_family'] == 'RedHat' and ansible_facts['distribution_major_version'] == '8') else 'docker' }}"
\ No newline at end of file
index 7d8d897546655568f322c96d7f172214d1134e13..4d76e557ff0b00dc6606e812aab48c1269b44047 100644 (file)
@@ -26,7 +26,7 @@
 
 - name: set_fact monitor_name ansible_hostname
   set_fact:
-    monitor_name: "{{ hostvars[item]['ansible_hostname'] }}"
+    monitor_name: "{{ hostvars[item]['ansible_facts']['hostname'] }}"
   delegate_to: "{{ item }}"
   delegate_facts: true
   with_items: "{{ groups.get(mon_group_name, []) }}"
   block:
     - name: set_fact container_exec_cmd
       set_fact:
-        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] if not rolling_update | bool else hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_hostname'] }}"
+        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] if not rolling_update | bool else hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_facts']['hostname'] }}"
       when:
         - containerized_deployment | bool
 
     - name: find a running mon container
-      command: "{{ container_binary }} ps -q --filter name=ceph-mon-{{ hostvars[item]['ansible_hostname'] }}"
+      command: "{{ container_binary }} ps -q --filter name=ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}"
       register: find_running_mon_container
       failed_when: false
       run_once: true
@@ -98,7 +98,7 @@
 
     - name: set_fact _container_exec_cmd
       set_fact:
-        _container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0] if running_mon is undefined else running_mon]['ansible_hostname'] }}"
+        _container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0] if running_mon is undefined else running_mon]['ansible_facts']['hostname'] }}"
       when:
         - containerized_deployment | bool
 
   when: rolling_update | bool or groups.get(mon_group_name, []) | length == 0
 
 - name: get current fsid
-  command: "{{ timeout_command }} {{ container_exec_cmd }} ceph --admin-daemon /var/run/ceph/{{ cluster }}-mon.{{ hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_hostname'] }}.asok config get fsid"
+  command: "{{ timeout_command }} {{ container_exec_cmd }} ceph --admin-daemon /var/run/ceph/{{ cluster }}-mon.{{ hostvars[mon_host | default(groups[mon_group_name][0])]['ansible_facts']['hostname'] }}.asok config get fsid"
   register: rolling_update_fsid
   delegate_to: "{{ mon_host | default(groups[mon_group_name][0]) }}"
   until: rolling_update_fsid is succeeded
 - name: set_fact devices generate device list when osd_auto_discovery
   set_fact:
     devices: "{{ (devices | default([]) + [ item.key | regex_replace('^', '/dev/') ]) | unique }}"
-  with_dict: "{{ ansible_devices }}"
+  with_dict: "{{ ansible_facts['devices'] }}"
   when:
     - osd_auto_discovery | default(False) | bool
     - inventory_hostname in groups.get(osd_group_name, [])
-    - ansible_devices is defined
+    - ansible_facts['devices'] is defined
     - item.value.removable == "0"
     - item.value.sectors != "0"
     - item.value.partitions|count == 0
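The auto-discovery loop walks ansible_facts['devices'], a dictionary keyed by device name. A trimmed sketch of that structure with assumed values; here sdb passes all three filters and is appended as /dev/sdb, while sda is skipped because it already carries a partition:

    # sketch of ansible_facts['devices'] (values are assumptions)
    devices:
      sda:
        removable: "0"
        sectors: "976773168"
        partitions:
          sda1: {}
      sdb:
        removable: "0"
        sectors: "976773168"
        partitions: {}
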
 
     - name: set_fact rgw_hostname
       set_fact:
-        rgw_hostname: "{% set _value = ansible_hostname -%}
+        rgw_hostname: "{% set _value = ansible_facts['hostname'] -%}
         {% for key in (ceph_current_status['services']['rgw']['daemons'] | list) -%}
-        {% if key == ansible_fqdn -%}
+        {% if key == ansible_facts['fqdn'] -%}
         {% set _value = key -%}
         {% endif -%}
         {% endfor -%}
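The loop above defaults rgw_hostname to the short hostname and only switches to the FQDN when the running cluster already reports the daemon under that name (the bz1580408 backward-compatibility case). A sketch of the data it walks, with shape and values assumed:

    {# illustration only:
       ceph_current_status['services']['rgw']['daemons'] keys: ['rgw0.example.com', ...]
       ansible_facts['hostname'] = 'rgw0', ansible_facts['fqdn'] = 'rgw0.example.com'
       -> the fqdn matches a daemon key, so rgw_hostname becomes 'rgw0.example.com';
          otherwise it stays the short hostname 'rgw0'. #}
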
index 1b1c61ffdeea51b0b8897f24b68a0da055cdac9d..28a7875f7d3cfbe7b39711785190e7568be8700a 100644 (file)
@@ -1,6 +1,6 @@
 - name: set grafana_server_addr fact - ipv4
   set_fact:
-    grafana_server_addr: "{{ hostvars[inventory_hostname]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}"
+    grafana_server_addr: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}"
   when:
     - groups.get(monitoring_group_name, []) | length > 0
     - ip_version == 'ipv4'
@@ -9,7 +9,7 @@
 
 - name: set grafana_server_addr fact - ipv6
   set_fact:
-    grafana_server_addr: "{{ hostvars[inventory_hostname]['ansible_all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap }}"
+    grafana_server_addr: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap }}"
   when:
     - groups.get(monitoring_group_name, []) | length > 0
     - ip_version == 'ipv6'
@@ -18,7 +18,7 @@
 
 - name: set grafana_server_addrs fact - ipv4
   set_fact:
-    grafana_server_addrs: "{{ (grafana_server_addrs | default([]) + [hostvars[item]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first]) | unique }}"
+    grafana_server_addrs: "{{ (grafana_server_addrs | default([]) + [hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first]) | unique }}"
   with_items: "{{ groups.get(monitoring_group_name, []) }}"
   when:
     - groups.get(monitoring_group_name, []) | length > 0
@@ -27,7 +27,7 @@
 
 - name: set grafana_server_addrs fact - ipv6
   set_fact:
-    grafana_server_addrs: "{{ (grafana_server_addrs | default([]) + [hostvars[item]['ansible_all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap]) | unique }}"
+    grafana_server_addrs: "{{ (grafana_server_addrs | default([]) + [hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(public_network.split(',')) | last | ipwrap]) | unique }}"
   with_items: "{{ groups.get(monitoring_group_name, []) }}"
   when:
     - groups.get(monitoring_group_name, []) | length > 0
index f8452b8c373b91f245b562a3759a462698648622..b62c1c6811d833c4b823058c81362de725d83513 100644 (file)
@@ -1,7 +1,7 @@
 ---
 - name: set_fact _monitor_addresses to monitor_address_block ipv4
   set_fact:
-    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_all_ipv4_addresses'] | ips_in_ranges(hostvars[item]['monitor_address_block'].split(',')) | first }] }}"
+    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(hostvars[item]['monitor_address_block'].split(',')) | first }] }}"
   with_items: "{{ groups.get(mon_group_name, []) }}"
   when:
     - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') |  map(attribute='name') | list"
@@ -11,7 +11,7 @@
 
 - name: set_fact _monitor_addresses to monitor_address_block ipv6
   set_fact:
-    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_all_ipv6_addresses'] | ips_in_ranges(hostvars[item]['monitor_address_block'].split(',')) | last | ipwrap }] }}"
+    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(hostvars[item]['monitor_address_block'].split(',')) | last | ipwrap }] }}"
   with_items: "{{ groups.get(mon_group_name, []) }}"
   when:
     - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') |  map(attribute='name') | list"
@@ -30,7 +30,7 @@
 
 - name: set_fact _monitor_addresses to monitor_interface - ipv4
   set_fact:
-    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_' + (hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version]['address'] | ipwrap }]  }}"
+    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_facts'][(hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version]['address'] | ipwrap }]  }}"
   with_items: "{{ groups.get(mon_group_name, []) }}"
   when:
     - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
@@ -41,7 +41,7 @@
 
 - name: set_fact _monitor_addresses to monitor_interface - ipv6
   set_fact:
-    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_' + (hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version][0]['address'] | ipwrap }] }}"
+    _monitor_addresses: "{{ _monitor_addresses | default([]) + [{ 'name': item, 'addr': hostvars[item]['ansible_facts'][(hostvars[item]['monitor_interface']|replace('-', '_'))][ip_version][0]['address'] | ipwrap }] }}"
   with_items: "{{ groups.get(mon_group_name, []) }}"
   when:
     - "item not in _monitor_addresses | default([]) | selectattr('name', 'defined') | map(attribute='name') | list"
index 6eae737d74ccf065ec096a52da14c7982a3252a9..81a0bac0a417b8e1f77fc2c81ac6a64f0e3c0ca4 100644 (file)
@@ -1,7 +1,7 @@
 ---
 - name: set_fact _radosgw_address to radosgw_address_block ipv4
   set_fact:
-    _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_all_ipv4_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['radosgw_address_block'].split(',')) | first }}"
+    _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['radosgw_address_block'].split(',')) | first }}"
   when:
     - radosgw_address_block is defined
     - radosgw_address_block != 'subnet'
@@ -9,7 +9,7 @@
 
 - name: set_fact _radosgw_address to radosgw_address_block ipv6
   set_fact:
-    _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_all_ipv6_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['radosgw_address_block'].split(',')) | last | ipwrap }}"
+    _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_facts']['all_ipv6_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['radosgw_address_block'].split(',')) | last | ipwrap }}"
   when:
     - radosgw_address_block is defined
     - radosgw_address_block != 'subnet'
   block:
     - name: set_fact _interface
       set_fact:
-        _interface: "{{ 'ansible_' + (radosgw_interface | replace('-', '_')) }}"
+        _interface: "{{ (radosgw_interface | replace('-', '_')) }}"
 
     - name: set_fact _radosgw_address to radosgw_interface - ipv4
       set_fact:
-        _radosgw_address: "{{ hostvars[inventory_hostname][_interface][ip_version]['address'] }}"
+        _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_facts'][_interface][ip_version]['address'] }}"
       when: ip_version == 'ipv4'
 
     - name: set_fact _radosgw_address to radosgw_interface - ipv6
       set_fact:
-        _radosgw_address: "{{ hostvars[inventory_hostname][_interface][ip_version][0]['address'] | ipwrap }}"
+        _radosgw_address: "{{ hostvars[inventory_hostname]['ansible_facts'][_interface][ip_version][0]['address'] | ipwrap }}"
       when: ip_version == 'ipv6'
 
 - name: set_fact rgw_instances without rgw multisite
index 044e4c3afa9e8010a2a0ad3286210da5cd6a739b..525b8e6a9cd3a9c699c1a592c56f747a39b3fa0c 100644 (file)
@@ -7,7 +7,7 @@
   until: result is succeeded
   when:
     - not containerized_deployment | bool
-    - ansible_os_family in ['RedHat', 'Suse']
+    - ansible_facts['os_family'] in ['RedHat', 'Suse']
   tags: package-install
 
 - name: make sure grafana is down
@@ -41,7 +41,7 @@
   with_items: "{{ grafana_dashboard_files }}"
   when:
     - not containerized_deployment | bool
-    - not ansible_os_family in ['RedHat', 'Suse']
+    - not ansible_facts['os_family'] in ['RedHat', 'Suse']
 
 - name: write grafana.ini
   config_template:
index 4c897b2dec126f7f36efedcdf0d253ecd8fc0133..61ef46c09294950a674c43c789d1346b0dbf7bca 100644 (file)
@@ -18,7 +18,7 @@ org_role = Viewer
 [server]
 cert_file = /etc/grafana/ceph-dashboard.crt
 cert_key = /etc/grafana/ceph-dashboard.key
-domain = {{ ansible_fqdn }}
+domain = {{ ansible_facts['fqdn'] }}
 protocol = {{ dashboard_protocol }}
 http_port = {{ grafana_port }}
 http_addr = {{ grafana_server_addr }}
index 32d90f4a12612fb604f473a65c3df45b0c2924c7..bcb98f857b9aebb8e05f7de7b84e525f0a1a972b 100644 (file)
@@ -1,6 +1,6 @@
 ---
 - name: check for a mon container
-  command: "{{ container_binary }} ps -q --filter='name=ceph-mon-{{ ansible_hostname }}'"
+  command: "{{ container_binary }} ps -q --filter='name=ceph-mon-{{ ansible_facts['hostname'] }}'"
   register: ceph_mon_container_stat
   changed_when: false
   failed_when: false
@@ -16,7 +16,7 @@
   when: inventory_hostname in groups.get(osd_group_name, [])
 
 - name: check for a mds container
-  command: "{{ container_binary }} ps -q --filter='name=ceph-mds-{{ ansible_hostname }}'"
+  command: "{{ container_binary }} ps -q --filter='name=ceph-mds-{{ ansible_facts['hostname'] }}'"
   register: ceph_mds_container_stat
   changed_when: false
   failed_when: false
@@ -24,7 +24,7 @@
   when: inventory_hostname in groups.get(mds_group_name, [])
 
 - name: check for a rgw container
-  command: "{{ container_binary }} ps -q --filter='name=ceph-rgw-{{ ansible_hostname }}'"
+  command: "{{ container_binary }} ps -q --filter='name=ceph-rgw-{{ ansible_facts['hostname'] }}'"
   register: ceph_rgw_container_stat
   changed_when: false
   failed_when: false
@@ -32,7 +32,7 @@
   when: inventory_hostname in groups.get(rgw_group_name, [])
 
 - name: check for a mgr container
-  command: "{{ container_binary }} ps -q --filter='name=ceph-mgr-{{ ansible_hostname }}'"
+  command: "{{ container_binary }} ps -q --filter='name=ceph-mgr-{{ ansible_facts['hostname'] }}'"
   register: ceph_mgr_container_stat
   changed_when: false
   failed_when: false
@@ -40,7 +40,7 @@
   when: inventory_hostname in groups.get(mgr_group_name, [])
 
 - name: check for a rbd mirror container
-  command: "{{ container_binary }} ps -q --filter='name=ceph-rbd-mirror-{{ ansible_hostname }}'"
+  command: "{{ container_binary }} ps -q --filter='name=ceph-rbd-mirror-{{ ansible_facts['hostname'] }}'"
   register: ceph_rbd_mirror_container_stat
   changed_when: false
   failed_when: false
@@ -48,7 +48,7 @@
   when: inventory_hostname in groups.get(rbdmirror_group_name, [])
 
 - name: check for a nfs container
-  command: "{{ container_binary }} ps -q --filter='name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_hostname) }}'"
+  command: "{{ container_binary }} ps -q --filter='name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}'"
   register: ceph_nfs_container_stat
   changed_when: false
   failed_when: false
@@ -80,7 +80,7 @@
   when: inventory_hostname in groups.get(iscsi_gw_group_name, [])
 
 - name: check for a ceph-crash container
-  command: "{{ container_binary }} ps -q --filter='name=ceph-crash-{{ ansible_hostname }}'"
+  command: "{{ container_binary }} ps -q --filter='name=ceph-crash-{{ ansible_facts['hostname'] }}'"
   register: ceph_crash_container_stat
   changed_when: false
   failed_when: false
index 6187fca79004ae5494a6c436fc68c4a173e7b01a..b4039b10e658a2d2afa7a13bedfa25700f4f3028 100644 (file)
@@ -5,7 +5,7 @@
 
 - name: restart the ceph-crash service
   systemd:
-    name: ceph-crash@{{ ansible_hostname }}
+    name: ceph-crash@{{ ansible_facts['hostname'] }}
     state: restarted
     enabled: yes
     masked: no
index 8a36032aad00b3787bdc77f015a5ee11e9f9fb94..5c6473d2800eb5e56dd4b2c6a5b3e8f463d2e2dc 100644 (file)
@@ -2,14 +2,14 @@
 
 RETRIES="{{ handler_health_mds_check_retries }}"
 DELAY="{{ handler_health_mds_check_delay }}"
-MDS_NAME="{{ ansible_hostname }}"
+MDS_NAME="{{ ansible_facts['hostname'] }}"
 {% if containerized_deployment | bool %}
-DOCKER_EXEC="{{ container_binary }} exec ceph-mds-{{ ansible_hostname }}"
+DOCKER_EXEC="{{ container_binary }} exec ceph-mds-{{ ansible_facts['hostname'] }}"
 {% endif %}
 
 # Backward compatibility
-$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mds.{{ ansible_fqdn }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mds.{{ ansible_fqdn }}.asok
-$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mds.{{ ansible_hostname }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mds.{{ ansible_hostname }}.asok
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['fqdn'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['fqdn'] }}.asok
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['hostname'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['hostname'] }}.asok
 
 # First, restart the daemon
 systemctl restart ceph-mds@${MDS_NAME}
index d5a7156892c605c1ab8fdb6bc758f82212e7bc13..e20c61901231d05f90486537279187f9e5994307 100644 (file)
@@ -2,14 +2,14 @@
 
 RETRIES="{{ handler_health_mgr_check_retries }}"
 DELAY="{{ handler_health_mgr_check_delay }}"
-MGR_NAME="{{ ansible_hostname }}"
+MGR_NAME="{{ ansible_facts['hostname'] }}"
 {% if containerized_deployment | bool %}
-DOCKER_EXEC="{{ container_binary }} exec ceph-mgr-{{ ansible_hostname }}"
+DOCKER_EXEC="{{ container_binary }} exec ceph-mgr-{{ ansible_facts['hostname'] }}"
 {% endif %}
 
 # Backward compatibility
-$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mgr.{{ ansible_fqdn }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mgr.{{ ansible_fqdn }}.asok
-$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mgr.{{ ansible_hostname }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mgr.{{ ansible_hostname }}.asok
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mgr.{{ ansible_facts['fqdn'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mgr.{{ ansible_facts['fqdn'] }}.asok
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mgr.{{ ansible_facts['hostname'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mgr.{{ ansible_facts['hostname'] }}.asok
 
 systemctl reset-failed ceph-mgr@${MGR_NAME}
 # First, restart the daemon
index 71675d6446a95ab9472541740f0768eae75f5549..2beade15a9fbd972b3d7b5ec1b474ffdf33493bb 100644 (file)
@@ -4,18 +4,18 @@ RETRIES="{{ handler_health_mon_check_retries }}"
 DELAY="{{ handler_health_mon_check_delay }}"
 MONITOR_NAME="{{ monitor_name }}"
 {% if containerized_deployment | bool %}
-DOCKER_EXEC="{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
+DOCKER_EXEC="{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
 {% endif %}
 
 # if daemon is uninstalled, no restarting is needed; so exit with success
-systemctl status ceph-mon@{{ ansible_hostname }} > /dev/null
+systemctl status ceph-mon@{{ ansible_facts['hostname'] }} > /dev/null
 if [[ $? -ne 0 ]]; then
   exit 0
 fi
 
 # Backward compatibility
-$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mon.{{ ansible_fqdn }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mon.{{ ansible_fqdn }}.asok
-$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mon.{{ ansible_hostname }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mon.{{ ansible_hostname }}.asok
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mon.{{ ansible_facts['fqdn'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mon.{{ ansible_facts['fqdn'] }}.asok
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-mon.{{ ansible_facts['hostname'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-mon.{{ ansible_facts['hostname'] }}.asok
 
 check_quorum() {
 while [ $RETRIES -ne 0 ]; do
@@ -34,7 +34,7 @@ exit 1
 }
 
 # First, restart the daemon
-systemctl restart ceph-mon@{{ ansible_hostname }}
+systemctl restart ceph-mon@{{ ansible_facts['hostname'] }}
 
 COUNT=10
 # Wait and ensure the socket exists after restarting the daemon
@@ -45,5 +45,5 @@ while [ $COUNT -ne 0 ]; do
 done
 # If we reach this point, it means the socket is not present.
 echo "Socket file ${SOCKET} could not be found, which means the monitor is not running. Showing ceph-mon unit logs now:"
-journalctl -u ceph-mon@{{ ansible_hostname }}
+journalctl -u ceph-mon@{{ ansible_facts['hostname'] }}
 exit 1
index c76432a3496fb9e3e1f05ae631174a2cb49699cb..b8510bd98ac84dfa3608fedbbf7d1ff2c1aed27c 100644 (file)
@@ -2,10 +2,10 @@
 
 RETRIES="{{ handler_health_nfs_check_retries }}"
 DELAY="{{ handler_health_nfs_check_delay }}"
-NFS_NAME="ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_hostname) }}"
+NFS_NAME="ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}"
 PID=/var/run/ganesha.pid
 {% if containerized_deployment | bool %}
-DOCKER_EXEC="{{ container_binary }} exec ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_hostname) }}"
+DOCKER_EXEC="{{ container_binary }} exec ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}"
 {% endif %}
 
 # First, restart the daemon
index 1bdf1d7cffd9e0a53c7a5735dda12d258f791ff8..3a59841fa19e3269538ca2b4c0f1fd5e6816b3bd 100644 (file)
@@ -2,14 +2,14 @@
 
 RETRIES="{{ handler_health_rbd_mirror_check_retries }}"
 DELAY="{{ handler_health_rbd_mirror_check_delay }}"
-RBD_MIRROR_NAME="{{ ansible_hostname }}"
+RBD_MIRROR_NAME="{{ ansible_facts['hostname'] }}"
 {% if containerized_deployment | bool %}
-DOCKER_EXEC="{{ container_binary }} exec ceph-rbd-mirror-{{ ansible_hostname }}"
+DOCKER_EXEC="{{ container_binary }} exec ceph-rbd-mirror-{{ ansible_facts['hostname'] }}"
 {% endif %}
 
 # Backward compatibility
-$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_fqdn }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_fqdn }}.asok
-$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_hostname }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_hostname }}.asok
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_facts['fqdn'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_facts['fqdn'] }}.asok
+$DOCKER_EXEC test -S /var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_facts['hostname'] }}.asok && SOCKET=/var/run/ceph/{{ cluster }}-client.rbd-mirror.{{ ansible_facts['hostname'] }}.asok
 
 # First, restart the daemon
 systemctl restart ceph-rbd-mirror@rbd-mirror.${RBD_MIRROR_NAME}
index 62a5c31180a760c18181826f501efb8afa6d6882..93eda64b8bb8547f7082116c7427153da88fafbe 100644 (file)
@@ -2,7 +2,7 @@
 
 RETRIES="{{ handler_health_rgw_check_retries }}"
 DELAY="{{ handler_health_rgw_check_delay }}"
-HOST_NAME="{{ ansible_hostname }}"
+HOST_NAME="{{ ansible_facts['hostname'] }}"
 RGW_NUMS={{ rgw_instances | length | int }}
 RGW_FRONTEND_SSL_CERT={{ radosgw_frontend_ssl_certificate }}
 if [ -n "$RGW_FRONTEND_SSL_CERT" ]; then
index 0c610b0256f4886f23da654be4ac759cf9b3e55b..54dbc4ffdb02542febb2edffab8e9f77511044f4 100644 (file)
@@ -15,7 +15,7 @@
   block:
     - name: install firewalld python binding
       package:
-        name: "python{{ ansible_python.version.major }}-firewall"
+        name: "python{{ ansible_facts['python']['version']['major'] }}-firewall"
       tags: with_pkg
       when: not is_atomic | bool
 
index 40d5876a19e95034573629877de5d0678d5b6ebe..e9620327b69c5b2857d06608003c279396b4c4ac 100644 (file)
@@ -2,7 +2,7 @@
 - name: update cache for Debian based OSs
   apt:
     update_cache: yes
-  when: ansible_os_family == "Debian"
+  when: ansible_facts['os_family'] == "Debian"
   register: result
   until: result is succeeded
 
@@ -10,7 +10,7 @@
   include_tasks: configure_firewall.yml
   when:
     - configure_firewall | bool
-    - ansible_os_family in ['RedHat', 'Suse']
+    - ansible_facts['os_family'] in ['RedHat', 'Suse']
   tags: configure_firewall
 
 - name: include_tasks setup_ntp.yml
index b796f12f80d511d7174ed57158df407c89e7b38b..e1fed70a214275eb65aa626fe44582b478579824 100644 (file)
@@ -3,13 +3,13 @@
   set_fact:
     chrony_daemon_name: chrony
     ntp_service_name: ntp
-  when: ansible_os_family == 'Debian'
+  when: ansible_facts['os_family'] == 'Debian'
 
 - name: set ntp service and chrony daemon name for RedHat and Suse family
   set_fact:
     chrony_daemon_name: chronyd
     ntp_service_name: ntpd
-  when: ansible_os_family in ['RedHat', 'Suse']
+  when: ansible_facts['os_family'] in ['RedHat', 'Suse']
 
 # Installation of NTP daemons needs to be a separate task since installations
 # can't happen on Atomic
index 2ae7efbdeabf19822cab227a93d4e578426475c0..ce0fecc5211333db26b80c2f8bd21bf91f6f397b 100644 (file)
@@ -35,13 +35,13 @@ trusted_ip_list: 192.168.122.1
 # These options can be passed using the 'ceph_mds_docker_extra_env' variable.
 
 # TCMU_RUNNER resource limitation
-ceph_tcmu_runner_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+ceph_tcmu_runner_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 ceph_tcmu_runner_docker_cpu_limit: 1
 
 # RBD_TARGET_GW resource limitation
-ceph_rbd_target_gw_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+ceph_rbd_target_gw_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 ceph_rbd_target_gw_docker_cpu_limit: 1
 
 # RBD_TARGET_API resource limitation
-ceph_rbd_target_api_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+ceph_rbd_target_api_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 ceph_rbd_target_api_docker_cpu_limit: 1
index 6ff5deaba98170c171cd38a96c730932d36f7c2c..0170d4929ad185972674cd9e7f251ce1a8dc515f 100644 (file)
@@ -28,7 +28,7 @@
 
 - name: add mgr ip address to trusted list with dashboard - ipv4
   set_fact:
-    trusted_ip_list: '{{ trusted_ip_list }},{{ hostvars[item]["ansible_all_ipv4_addresses"] | ips_in_ranges(public_network.split(",")) | first }}'
+    trusted_ip_list: '{{ trusted_ip_list }},{{ hostvars[item]["ansible_facts"]["all_ipv4_addresses"] | ips_in_ranges(public_network.split(",")) | first }}'
   with_items: '{{ groups[mgr_group_name] | default(groups[mon_group_name]) }}'
   when:
     - dashboard_enabled | bool
@@ -36,7 +36,7 @@
 
 - name: add mgr ip address to trusted list with dashboard - ipv6
   set_fact:
-    trusted_ip_list: '{{ trusted_ip_list }},{{ hostvars[item]["ansible_all_ipv6_addresses"] | ips_in_ranges(public_network.split(",")) | last }}'
+    trusted_ip_list: '{{ trusted_ip_list }},{{ hostvars[item]["ansible_facts"]["all_ipv6_addresses"] | ips_in_ranges(public_network.split(",")) | last }}'
   with_items: '{{ groups[mgr_group_name] | default(groups[mon_group_name]) }}'
   when:
     - dashboard_enabled | bool
@@ -53,7 +53,7 @@
 
 - name: set_fact container_exec_cmd
   set_fact:
-    container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+    container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
   delegate_to: "{{ groups[mon_group_name][0] }}"
   when: containerized_deployment | bool
 
@@ -67,4 +67,4 @@
   delegate_to: "{{ groups[mon_group_name][0] }}"
   environment:
     CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
-    CEPH_CONTAINER_BINARY: "{{ container_binary }}"
\ No newline at end of file
+    CEPH_CONTAINER_BINARY: "{{ container_binary }}"
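The hostvars lookups above follow the same rule across hosts: facts gathered for another node live under hostvars[<host>]['ansible_facts'], so delegated references index into that dict. A hedged sketch (the 'mons' group name is assumed for the example):

- name: resolve the first monitor's short hostname (illustrative)
  set_fact:
    first_mon_hostname: "{{ hostvars[groups['mons'][0]]['ansible_facts']['hostname'] }}"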
index cd98ff13aa86e60528e11e9ae3a12558b141ae70..83b03e7991ee38542fb159f140498cccac875083 100644 (file)
@@ -33,7 +33,7 @@
       command: >
         openssl req -newkey rsa:2048 -nodes -keyout {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.key
          -x509 -days 365 -out {{ iscsi_ssl_tmp_dir.path }}/iscsi-gateway.crt
-         -subj "/C=US/ST=./L=./O=RedHat/OU=Linux/CN={{ ansible_hostname }}"
+         -subj "/C=US/ST=./L=./O=RedHat/OU=Linux/CN={{ ansible_facts['hostname'] }}"
       delegate_to: localhost
       run_once: True
       with_items: "{{ crt_files_exist.results }}"
index 2590803f4a20b7eff3e4ec1324811513151f638b..d1773ca07a19cdf75973b81a673a1b8803b0b886 100644 (file)
@@ -1,6 +1,6 @@
 ---
 - name: red hat based systems tasks
-  when: ansible_os_family == 'RedHat'
+  when: ansible_facts['os_family'] == 'RedHat'
   block:
     - name: set_fact common pkgs and repos
       set_fact:
@@ -31,7 +31,7 @@
       block:
         - name: ceph-iscsi dependency repositories
           get_url:
-            url: 'https://shaman.ceph.com/api/repos/{{ item }}/master/latest/{{ ansible_distribution | lower }}/{{ ansible_distribution_major_version }}/repo'
+            url: "https://shaman.ceph.com/api/repos/{{ item }}/master/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_major_version'] }}/repo"
             dest: '/etc/yum.repos.d/{{ item }}-dev.repo'
             force: true
           register: result
@@ -40,7 +40,7 @@
 
         - name: ceph-iscsi development repository
           get_url:
-            url: 'https://shaman.ceph.com/api/repos/{{ item }}/master/latest/{{ ansible_distribution | lower }}/{{ ansible_distribution_major_version }}/repo'
+            url: "https://shaman.ceph.com/api/repos/{{ item }}/master/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_major_version'] }}/repo"
             dest: '/etc/yum.repos.d/{{ item }}-dev.repo'
             force: true
           register: result
@@ -50,7 +50,7 @@
 
         - name: ceph-iscsi stable repository
           get_url:
-            url: 'https://download.ceph.com/ceph-iscsi/{{ "3" if use_new_ceph_iscsi | bool else "2" }}/rpm/el{{ ansible_distribution_major_version }}/ceph-iscsi.repo'
+            url: "https://download.ceph.com/ceph-iscsi/{{ '3' if use_new_ceph_iscsi | bool else '2' }}/rpm/el{{ ansible_facts['distribution_major_version'] }}/ceph-iscsi.repo"
             dest: /etc/yum.repos.d/ceph-iscsi.repo
             force: true
           register: result
index 0b443ea9a4f703459bde021e3eb118a68b19225a..e249d02eb8bc7d56a8d44b6483413e6f62ba9999 100644 (file)
@@ -19,13 +19,13 @@ copy_admin_key: false
 # For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
 # Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
 # These options can be passed using the 'ceph_mds_docker_extra_env' variable.
-ceph_mds_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+ceph_mds_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 ceph_mds_docker_cpu_limit: 4
 
 # we currently force MDS_NAME to hostname because of a bug in ceph-docker
 # fix here: https://github.com/ceph/ceph-docker/pull/770
 # this will go away soon.
-ceph_mds_docker_extra_env: -e MDS_NAME={{ ansible_hostname }}
+ceph_mds_docker_extra_env: -e MDS_NAME={{ ansible_facts['hostname'] }}
 ceph_config_keys: [] # DON'T TOUCH ME
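The limits above are ordinary defaults derived from gathered memory facts, so a deployment can still pin them per group; a hypothetical override (values assumed, not recommendations):

ceph_mds_docker_memory_limit: "4096m"
ceph_mds_docker_cpu_limit: 2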
 
 
index aecc0cb527157fc2f8cfb6ee4e80c395b6c2e12c..83ba661112ec969378399f7dd79335caeea22bc3 100644 (file)
@@ -8,7 +8,7 @@
     mode: "{{ ceph_directories_mode }}"
   with_items:
     - /var/lib/ceph/bootstrap-mds/
-    - /var/lib/ceph/mds/{{ cluster }}-{{ ansible_hostname }}
+    - /var/lib/ceph/mds/{{ cluster }}-{{ ansible_facts['hostname'] }}
 
 - name: get keys from monitors
   ceph_key:
index 08c4b0e790e3e44cec9abb34969c353a35d67641..8919366ba234e0750f1fc3772a5f2d7c562be328 100644 (file)
@@ -4,14 +4,14 @@
 
 - name: systemd start mds container
   systemd:
-    name: ceph-mds@{{ ansible_hostname }}
+    name: ceph-mds@{{ ansible_facts['hostname'] }}
     state: started
     enabled: yes
     masked: no
     daemon_reload: yes
 
 - name: wait for mds socket to exist
-  command: "{{ container_binary }} exec ceph-mds-{{ ansible_hostname }} sh -c 'stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_hostname }}.asok || stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_fqdn }}.asok'"
+  command: "{{ container_binary }} exec ceph-mds-{{ ansible_facts['hostname'] }} sh -c 'stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['hostname'] }}.asok || stat /var/run/ceph/{{ cluster }}-mds.{{ ansible_facts['fqdn'] }}.asok'"
   changed_when: false
   register: multi_mds_socket
   retries: 5
index 41e63150b47a02e411aeae4044e81d3e665f607b..ad744ed30341eb9c60b417fe63442be77e2dde1c 100644 (file)
@@ -3,10 +3,10 @@
   apt:
     name: ceph-mds
     state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
-    default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else '' }}{{ ansible_distribution_release ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
+    default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else '' }}{{ ansible_facts['distribution_release'] ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
   when:
     - mds_group_name in group_names
-    - ansible_os_family == 'Debian'
+    - ansible_facts['os_family'] == 'Debian'
   register: result
   until: result is succeeded
 
   until: result is succeeded
   when:
     - mds_group_name in group_names
-    - ansible_os_family in ['Suse', 'RedHat']
+    - ansible_facts['os_family'] in ['Suse', 'RedHat']
 
 - name: create mds keyring
   ceph_key:
-    name: "mds.{{ ansible_hostname }}"
+    name: "mds.{{ ansible_facts['hostname'] }}"
     cluster: "{{ cluster }}"
     user: client.bootstrap-mds
     user_key: "/var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring"
@@ -30,7 +30,7 @@
       mon: "allow profile mds"
       mds: "allow"
       osd: "allow rwx"
-    dest: "/var/lib/ceph/mds/{{ cluster }}-{{ ansible_hostname }}/keyring"
+    dest: "/var/lib/ceph/mds/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring"
     import_key: false
     owner: ceph
     group: ceph
@@ -43,7 +43,7 @@
     path: "/etc/systemd/system/ceph-mds@.service.d/"
   when:
     - ceph_mds_systemd_overrides is defined
-    - ansible_service_mgr == 'systemd'
+    - ansible_facts['service_mgr'] == 'systemd'
 
 - name: add ceph-mds systemd service overrides
   config_template:
     config_type: "ini"
   when:
     - ceph_mds_systemd_overrides is defined
-    - ansible_service_mgr == 'systemd'
+    - ansible_facts['service_mgr'] == 'systemd'
 
 - name: start and add the metadata service to the init sequence
   service:
-    name: ceph-mds@{{ ansible_hostname }}
+    name: ceph-mds@{{ ansible_facts['hostname'] }}
     state: started
     enabled: yes
     masked: no
index 6d60fd25af3962a0e63c7ce22462147124ceb2e6..27dc48e875b679d35ebaa69fdf308f60511bc3b9 100644 (file)
@@ -6,17 +6,17 @@ Requires=docker.service
 {% else %}
 After=network.target
 {% endif %}
-{% set cpu_limit = ansible_processor_vcpus|int if ceph_mds_docker_cpu_limit|int > ansible_processor_vcpus|int else ceph_mds_docker_cpu_limit|int %}
+{% set cpu_limit = ansible_facts['processor_vcpus']|int if ceph_mds_docker_cpu_limit|int > ansible_facts['processor_vcpus']|int else ceph_mds_docker_cpu_limit|int %}
 
 [Service]
 EnvironmentFile=-/etc/environment
 {% if container_binary == 'podman' %}
 ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
-ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-mds-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-mds-{{ ansible_facts['hostname'] }}
 {% else %}
-ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mds-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mds-{{ ansible_facts['hostname'] }}
 {% endif %}
-ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mds-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mds-{{ ansible_facts['hostname'] }}
 ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
 {% if container_binary == 'podman' %}
   -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
@@ -32,12 +32,12 @@ ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
   -e CEPH_DAEMON=MDS \
   -e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
   {{ ceph_mds_docker_extra_env }} \
-  --name=ceph-mds-{{ ansible_hostname }} \
+  --name=ceph-mds-{{ ansible_facts['hostname'] }} \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
 {% if container_binary == 'podman' %}
 ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
 {% else %}
-ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-mds-{{ ansible_hostname }}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-mds-{{ ansible_facts['hostname'] }}
 {% endif %}
 KillMode=none
 Restart=always
index 1165be45529b4a64191e16b1f96f0b1c226293b9..2f559dec4fd2ffaf21dacbd3d989a1355c56c1dd 100644 (file)
@@ -33,7 +33,7 @@ ceph_mgr_packages:
 # For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
 # Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
 # These options can be passed using the 'ceph_mgr_docker_extra_env' variable.
-ceph_mgr_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+ceph_mgr_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 ceph_mgr_docker_cpu_limit: 1
 
 ceph_mgr_docker_extra_env:
index 255c8bf5fef9dea7b3532f1da3c1cdae98e67ae0..a0fec866fd057fa05df68e7c2c1d62273b45af83 100644 (file)
@@ -1,7 +1,7 @@
 ---
 - name: create mgr directory
   file:
-    path: /var/lib/ceph/mgr/{{ cluster }}-{{ ansible_hostname }}
+    path: /var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }}
     state: directory
     owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
     group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
@@ -9,7 +9,7 @@
 
 - name: fetch ceph mgr keyring
   ceph_key:
-    name: "mgr.{{ ansible_hostname }}"
+    name: "mgr.{{ ansible_facts['hostname'] }}"
     caps:
       mon: allow profile mgr
       osd: allow *
@@ -19,7 +19,7 @@
     owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
     group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
     mode: "0400"
-    dest: "/var/lib/ceph/mgr/{{ cluster }}-{{ ansible_hostname }}/keyring"
+    dest: "/var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring"
   environment:
     CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
     CEPH_CONTAINER_BINARY: "{{ container_binary }}"
@@ -30,7 +30,7 @@
   block:
     - name: create ceph mgr keyring(s) on a mon node
       ceph_key:
-        name: "mgr.{{ hostvars[item]['ansible_hostname'] }}"
+        name: "mgr.{{ hostvars[item]['ansible_facts']['hostname'] }}"
         caps:
           mon: allow profile mgr
           osd: allow *
@@ -51,7 +51,7 @@
       set_fact:
         _mgr_keys:
           - { 'name': 'client.admin', 'path': "/etc/ceph/{{ cluster }}.client.admin.keyring", 'copy_key': copy_admin_key }
-          - { 'name': "mgr.{{ ansible_hostname }}", 'path': "/var/lib/ceph/mgr/{{ cluster }}-{{ ansible_hostname }}/keyring", 'copy_key': true }
+          - { 'name': "mgr.{{ ansible_facts['hostname'] }}", 'path': "/var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring", 'copy_key': true }
 
     - name: get keys from monitors
       ceph_key:
@@ -84,7 +84,7 @@
 
 - name: set mgr key permissions
   file:
-    path: /var/lib/ceph/mgr/{{ cluster }}-{{ ansible_hostname }}/keyring
+    path: /var/lib/ceph/mgr/{{ cluster }}-{{ ansible_facts['hostname'] }}/keyring
     owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
     group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
     mode: "{{ ceph_keyring_permissions }}"
index 649e34675d298aee7d8e29d7556834f235f3486f..c67c0833f6d0804bf4d39e4e4b1c3f907a50adb6 100644 (file)
@@ -1,7 +1,7 @@
 ---
 - name: set_fact container_exec_cmd
   set_fact:
-    container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_hostname'] }}"
+    container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}"
   with_items: "{{ groups.get(mon_group_name, []) }}"
   delegate_to: "{{ item }}"
   delegate_facts: true
index cd4740ae1468c4c8e10d928e780ef39dddf4504e..d479c9eef3f9fa440fc7f376065ba5018171f7f1 100644 (file)
@@ -1,10 +1,10 @@
 ---
 - name: set_fact ceph_mgr_packages for sso
   set_fact:
-    ceph_mgr_packages: "{{ ceph_mgr_packages | union(['python3-saml' if ansible_distribution_major_version | int == 8 else 'python-saml']) }}"
+    ceph_mgr_packages: "{{ ceph_mgr_packages | union(['python3-saml' if ansible_facts['distribution_major_version'] | int == 8 else 'python-saml']) }}"
   when:
     - dashboard_enabled | bool
-    - ansible_distribution == 'RedHat'
+    - ansible_facts['distribution'] == 'RedHat'
 
 - name: set_fact ceph_mgr_packages for dashboard
   set_fact:
@@ -15,8 +15,8 @@
   set_fact:
     ceph_mgr_packages: "{{ ceph_mgr_packages | union(['ceph-mgr-diskprediction-local']) }}"
   when:
-    - ansible_os_family != 'RedHat'
-    - ansible_distribution_major_version | int != 7
+    - ansible_facts['os_family'] != 'RedHat'
+    - ansible_facts['distribution_major_version'] | int != 7
 
 - name: install ceph-mgr packages on RedHat or SUSE
   package:
     state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
   register: result
   until: result is succeeded
-  when: ansible_os_family in ['RedHat', 'Suse']
+  when: ansible_facts['os_family'] in ['RedHat', 'Suse']
 
 - name: install ceph-mgr packages for debian
   apt:
     name: '{{ ceph_mgr_packages }}'
     state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}"
-    default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else ''}}{{ ansible_distribution_release ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
+    default_release: "{{ ceph_stable_release_uca | default('') if ceph_origin == 'repository' and ceph_repository == 'uca' else ''}}{{ ansible_facts['distribution_release'] ~ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports | bool else '' }}"
   register: result
   until: result is succeeded
-  when: ansible_os_family == 'Debian'
+  when: ansible_facts['os_family'] == 'Debian'
index 348a718dde467a0d312381bfe86f206bac56daf9..658ca975803f5b5ec8fbe0e61dcb3b0858c6cba5 100644 (file)
@@ -5,7 +5,7 @@
     path: "/etc/systemd/system/ceph-mgr@.service.d/"
   when:
     - ceph_mgr_systemd_overrides is defined
-    - ansible_service_mgr == 'systemd'
+    - ansible_facts['service_mgr'] == 'systemd'
 
 - name: add ceph-mgr systemd service overrides
   config_template:
@@ -15,7 +15,7 @@
     config_type: "ini"
   when:
     - ceph_mgr_systemd_overrides is defined
-    - ansible_service_mgr == 'systemd'
+    - ansible_facts['service_mgr'] == 'systemd'
 
 - name: include_tasks systemd.yml
   include_tasks: systemd.yml
@@ -23,7 +23,7 @@
 
 - name: systemd start mgr
   systemd:
-    name: ceph-mgr@{{ ansible_hostname }}
+    name: ceph-mgr@{{ ansible_facts['hostname'] }}
     state: started
     enabled: yes
     masked: no
index 5e4874837e9359e892c3570a39c281f96dac57cf..5f7f9d9ce890626b35ec39b9fc63b578bb0d1953 100644 (file)
@@ -11,11 +11,11 @@ After=network.target
 EnvironmentFile=-/etc/environment
 {% if container_binary == 'podman' %}
 ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
-ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-mgr-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-mgr-{{ ansible_facts['hostname'] }}
 {% else %}
-ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mgr-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-mgr-{{ ansible_facts['hostname'] }}
 {% endif %}
-ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mgr-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-mgr-{{ ansible_facts['hostname'] }}
 ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
 {% if container_binary == 'podman' %}
   -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
@@ -31,12 +31,12 @@ ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
   -e CEPH_DAEMON=MGR \
   -e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
   {{ ceph_mgr_docker_extra_env }} \
-  --name=ceph-mgr-{{ ansible_hostname }} \
+  --name=ceph-mgr-{{ ansible_facts['hostname'] }} \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
 {% if container_binary == 'podman' %}
 ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
 {% else %}
-ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-mgr-{{ ansible_hostname }}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-mgr-{{ ansible_facts['hostname'] }}
 {% endif %}
 KillMode=none
 Restart=always
index ac362b41243f75701d1c729d305ef116b280224c..636bc4506c3e5dad9e4ecca84b16864a4ac17f01 100644 (file)
@@ -37,7 +37,7 @@ client_admin_ceph_authtool_cap:
 # For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
 # Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
 # These options can be passed using the 'ceph_mon_docker_extra_env' variable.
-ceph_mon_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+ceph_mon_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 ceph_mon_docker_cpu_limit: 1
 ceph_mon_container_listen_port: 3300
 
index 7f67b455875de23db564b960ba12bed2c13ef68f..02a694f4e11a0b1144281ede1c206c5d57c326f1 100644 (file)
@@ -4,7 +4,7 @@
     {{ container_exec_cmd }}
     ceph
     --cluster {{ cluster }}
-    daemon mon.{{ ansible_hostname }}
+    daemon mon.{{ ansible_facts['hostname'] }}
     mon_status
     --format json
   register: ceph_health_raw
index 2bc52416465186cc310b392701df9810eac87f40..982bde22682cec74359caf2932d304ea95b5de3e 100644 (file)
@@ -7,7 +7,7 @@
         name: mon.
         cluster: "{{ cluster }}"
         user: mon.
-        user_key: "/var/lib/ceph/mon/{{ cluster }}-{{ hostvars[running_mon]['ansible_hostname'] }}/keyring"
+        user_key: "/var/lib/ceph/mon/{{ cluster }}-{{ hostvars[running_mon]['ansible_facts']['hostname'] }}/keyring"
         output_format: json
         state: info
       environment:
index 4aa692506d3a0d122ed6d4fa99569431bcbee5ae..848b744f279c96cf4320de5a7da2c883a44042ae 100644 (file)
@@ -1,7 +1,7 @@
 ---
 - name: set_fact container_exec_cmd
   set_fact:
-    container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
+    container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
   when: containerized_deployment | bool
 
 - name: include deploy_monitors.yml
index 72fd5538fa310f1e379b03d313a35d875b66d4d2..8c3b14314f5ba721894de2b17d8baa3361ba14c6 100644 (file)
@@ -6,7 +6,7 @@
   when:
     - not containerized_deployment | bool
     - ceph_mon_systemd_overrides is defined
-    - ansible_service_mgr == 'systemd'
+    - ansible_facts['service_mgr'] == 'systemd'
 
 - name: add ceph-mon systemd service overrides
   config_template:
@@ -17,7 +17,7 @@
   when:
     - not containerized_deployment | bool
     - ceph_mon_systemd_overrides is defined
-    - ansible_service_mgr == 'systemd'
+    - ansible_facts['service_mgr'] == 'systemd'
 
 - name: include_tasks systemd.yml
   include_tasks: systemd.yml
@@ -25,7 +25,7 @@
 
 - name: start the monitor service
   systemd:
-    name: ceph-mon@{{ monitor_name if not containerized_deployment | bool else ansible_hostname }}
+    name: ceph-mon@{{ monitor_name if not containerized_deployment | bool else ansible_facts['hostname'] }}
     state: started
     enabled: yes
     masked: no
index e759a76c035e42358f456d7d47d35cf44f5fbe9c..eed365170de75e7a9c396796ab7a929faef51cd0 100644 (file)
@@ -28,7 +28,7 @@ ExecStart=/usr/bin/{{ container_binary }} run --rm --name ceph-mon-%i \
   -v /var/run/ceph:/var/run/ceph:z \
   -v /etc/localtime:/etc/localtime:ro \
   -v /var/log/ceph:/var/log/ceph:z \
-{% if ansible_distribution == 'RedHat' -%}
+{% if ansible_facts['distribution'] == 'RedHat' -%}
   -v /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:z \
 {% endif -%}
 {% if mon_docker_privileged | bool -%}
index 2d07985d1ff2ac41e98c3ff044a7a0266f93351b..7bda075dea7a5d3059b836a9ba0143b306acab9f 100644 (file)
@@ -17,7 +17,7 @@ copy_admin_key: false
 ceph_nfs_enable_service: true
 
 # ceph-nfs systemd service uses ansible's hostname as an instance id,
-# so service name is ceph-nfs@{{ ansible_hostname }}, this is not
+# so service name is ceph-nfs@{{ ansible_facts['hostname'] }}, this is not
 # ideal when ceph-nfs is managed by pacemaker across multiple hosts - in
 # such a case it's better to have a constant instance id instead, which
 # can be set by 'ceph_nfs_service_suffix'
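A hypothetical override matching the comment above (suffix value assumed): setting

ceph_nfs_service_suffix: nfs-ha

in group_vars makes the unit name ceph-nfs@nfs-ha on every node instead of ceph-nfs@<hostname>, which keeps pacemaker's view of the resource stable across hosts.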
@@ -74,7 +74,7 @@ ceph_nfs_rgw_sectype: "sys,krb5,krb5i,krb5p"
 # they must be configured.
 #ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
 #ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
-rgw_client_name: client.rgw.{{ ansible_hostname }}
+rgw_client_name: client.rgw.{{ ansible_facts['hostname'] }}
 
 ###################
 # CONFIG OVERRIDE #
index 09ccf43290df8ed52df40c511af4b97f221caaf0..bab8422fa0748f4a3cb53ee4905170900726d034 100644 (file)
@@ -1,7 +1,7 @@
 ---
 - name: set_fact container_exec_cmd_nfs
   set_fact:
-    container_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
+    container_exec_cmd_nfs: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
   when: containerized_deployment | bool
 
 - name: create rgw nfs user "{{ ceph_nfs_rgw_user }}"
index f739b32d7b0a1978a259b206657aff53eb4c25fe..eb51e526b2f8f63282e3e0685697dc7583216354 100644 (file)
@@ -15,7 +15,7 @@
         state: present
       register: result
       until: result is succeeded
-      when: ansible_distribution_major_version == '7'
+      when: ansible_facts['distribution_major_version'] == '7'
 
     - name: install nfs-ganesha-selinux and python3-policycoreutils on RHEL 8
       package:
@@ -23,7 +23,7 @@
         state: present
       register: result
       until: result is succeeded
-      when: ansible_distribution_major_version == '8'
+      when: ansible_facts['distribution_major_version'] == '8'
 
     - name: add ganesha_t to permissive domain
       selinux_permissive:
index 8f336057e6dc2bb08952cf160617dc855b25bfab..b4e964236b7eef04b9d872a189c70c4e1c688101 100644 (file)
@@ -2,7 +2,7 @@
 # global/common requirement
 - name: stop nfs server service
   systemd:
-    name: "{{ 'nfs-server' if ansible_os_family == 'RedHat' else 'nfsserver' if ansible_os_family == 'Suse' else 'nfs-kernel-server' if ansible_os_family == 'Debian' }}"
+    name: "{{ 'nfs-server' if ansible_facts['os_family'] == 'RedHat' else 'nfsserver' if ansible_facts['os_family'] == 'Suse' else 'nfs-kernel-server' if ansible_facts['os_family'] == 'Debian' }}"
     state: stopped
     enabled: no
   failed_when: false
@@ -24,7 +24,7 @@
   import_tasks: ganesha_selinux_fix.yml
   when:
     - not containerized_deployment | bool
-    - ansible_os_family == 'RedHat'
+    - ansible_facts['os_family'] == 'RedHat'
 
 - name: nfs with external ceph cluster task related
   when:
@@ -40,7 +40,7 @@
         mode: "0755"
       with_items:
         - "{{ ceph_nfs_ceph_user }}"
-        - "{{ ansible_hostname }}"
+        - "{{ ansible_facts['hostname'] }}"
 
     - name: set_fact rgw_client_name
       set_fact:
@@ -55,7 +55,7 @@
         group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
       with_nested:
         - "{{ hostvars[groups['_filtered_clients'][0]]['slurp_client_keys']['results'] | default([]) }}"
-        - ['/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ceph_nfs_ceph_user }}/keyring', '/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/keyring']
+        - ['/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ceph_nfs_ceph_user }}/keyring', "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring"]
       when:
         - not item.0.get('skipped', False)
         - item.0.item.name == 'client.' + ceph_nfs_ceph_user or item.0.item.name == rgw_client_name
index e6730080a1094b3e2a0f818ef456117f7673d625..177909d04f05f04d9182afbcc3fc0759ba65e4b3 100644 (file)
@@ -3,7 +3,7 @@
   block:
     - name: set_fact container_exec_cmd
       set_fact:
-        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_hostname'] }}"
+        container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}"
       with_items: "{{ groups.get(mon_group_name, []) }}"
       delegate_to: "{{ item }}"
       delegate_facts: true
index 727fccdcdae5dd2985a42c7b75d6fabeb5d669bf..862aaac1b4865b001e1d3dae01a9a0cdc3745405 100644 (file)
@@ -1,11 +1,11 @@
 ---
 - name: include red hat based system related tasks
   include_tasks: pre_requisite_non_container_red_hat.yml
-  when: ansible_os_family == 'RedHat'
+  when: ansible_facts['os_family'] == 'RedHat'
 
 - name: include debian based system related tasks
   include_tasks: pre_requisite_non_container_debian.yml
-  when: ansible_os_family == 'Debian'
+  when: ansible_facts['os_family'] == 'Debian'
 
 - name: install nfs rgw/cephfs gateway - SUSE/openSUSE
   zypper:
@@ -18,7 +18,7 @@
   when:
     - (ceph_origin == 'repository' or ceph_origin == 'distro')
     - ceph_repository != 'rhcs'
-    - ansible_os_family == 'Suse'
+    - ansible_facts['os_family'] == 'Suse'
     - item.install | bool
   register: result
   until: result is succeeded
@@ -35,7 +35,7 @@
   with_items:
     - { name: "/var/lib/ceph/bootstrap-rgw", create: "{{ nfs_obj_gw }}" }
     - { name: "/var/lib/ceph/radosgw", create: "{{ nfs_obj_gw }}" }
-    - { name: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}", create: "{{ nfs_obj_gw }}" }
+    - { name: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}", create: "{{ nfs_obj_gw }}" }
     - { name: "{{ rbd_client_admin_socket_path }}", create: "{{ nfs_obj_gw }}" }
     - { name: "/var/log/ceph", create: true }
     - { name: "/var/log/ganesha", create: true, owner: root, group: root }
       block:
         - name: create rados gateway keyring
           ceph_key:
-            name: "client.rgw.{{ ansible_hostname }}"
+            name: "client.rgw.{{ ansible_facts['hostname'] }}"
             cluster: "{{ cluster }}"
             user: client.bootstrap-rgw
             user_key: "/var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring"
             caps:
               mon: "allow rw"
               osd: "allow rwx"
-            dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}/keyring"
+            dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}/keyring"
             import_key: false
             owner: ceph
             group: ceph
index a679b5c673c4f70af649435c843414027df0e8ee..403971aa23d9b67858c0e4ee54ccea8cddc84b28 100644 (file)
@@ -9,7 +9,7 @@
       block:
         - name: add nfs-ganesha stable repository
           apt_repository:
-            repo: "deb {{ nfs_ganesha_stable_deb_repo }} {{ ceph_stable_distro_source | default(ansible_distribution_release) }} main"
+            repo: "deb {{ nfs_ganesha_stable_deb_repo }} {{ ceph_stable_distro_source | default(ansible_facts['distribution_release']) }} main"
             state: present
             update_cache: no
           register: add_ganesha_apt_repo
@@ -30,7 +30,7 @@
       block:
         - name: fetch nfs-ganesha development repository
           uri:
-            url: https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_distribution | lower }}/{{ ansible_distribution_release }}/flavors/{{ nfs_ganesha_flavor }}/repo
+            url: https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_release'] }}/flavors/{{ nfs_ganesha_flavor }}/repo
             return_content: yes
           register: nfs_ganesha_dev_apt_repo
 
index 341f3961f3b3ca6d9fcba979cf210318d0bac477..0571ac7290e9fa7bc21cf179aaab9885e895ee58 100644 (file)
@@ -31,7 +31,7 @@
       block:
         - name: add nfs-ganesha dev repo
           get_url:
-            url: 'https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_distribution | lower }}/{{ ansible_distribution_major_version }}/flavors/{{ nfs_ganesha_flavor }}/repo'
+            url: "https://shaman.ceph.com/api/repos/nfs-ganesha/next/latest/{{ ansible_facts['distribution'] | lower }}/{{ ansible_facts['distribution_major_version'] }}/flavors/{{ nfs_ganesha_flavor }}/repo"
             dest: /etc/yum.repos.d/nfs-ganesha-dev.repo
             force: true
       when:
index d3e4ac1f8fdd1e79435649b14cb1c0990e1fc4c7..c7b304eb8ba8e0b0cc8a6dab18e6e0ee64da8959 100644 (file)
@@ -8,7 +8,7 @@
 
   - name: set_fact container_exec_cmd_nfs - internal
     set_fact:
-      exec_cmd_nfs: "{{ container_binary + ' exec ceph-mon-' + hostvars[groups[mon_group_name][0]]['ansible_hostname'] if containerized_deployment | bool else '' }} rados"
+      exec_cmd_nfs: "{{ container_binary + ' exec ceph-mon-' + hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] if containerized_deployment | bool else '' }} rados"
       delegate_node: "{{ groups[mon_group_name][0] }}"
     when: groups.get(mon_group_name, []) | length > 0
 
@@ -72,7 +72,7 @@
 
 - name: systemd start nfs container
   systemd:
-    name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_hostname) }}
+    name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }}
     state: started
     enabled: yes
     masked: no
index e9a38c98b3de7798bd2b95ae051890bf941d81ea..13d7943e0fcdfbc66e7dc2dc64c76a716518bde0 100644 (file)
@@ -15,7 +15,7 @@ ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
 ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-nfs-%i
 {% endif %}
 ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-nfs-%i
-ExecStartPre={{ '/bin/mkdir' if ansible_os_family == 'Debian' else '/usr/bin/mkdir' }} -p /etc/ceph /etc/ganesha /var/lib/nfs/ganesha /var/log/ganesha
+ExecStartPre={{ '/bin/mkdir' if ansible_facts['os_family'] == 'Debian' else '/usr/bin/mkdir' }} -p /etc/ceph /etc/ganesha /var/lib/nfs/ganesha /var/log/ganesha
 ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
 {% if container_binary == 'podman' %}
   -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
@@ -36,7 +36,7 @@ ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
   -e CEPH_DAEMON=NFS \
   -e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
   {{ ceph_nfs_docker_extra_env }} \
-  --name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_hostname) }} \
+  --name=ceph-nfs-{{ ceph_nfs_service_suffix | default(ansible_facts['hostname']) }} \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
 {% if container_binary == 'podman' %}
 ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
index a5cf3a31240a48210f524e531ef5166b346c66da..db21e623eb473a8e08b77fb04fcc77ad42baebd7 100644 (file)
@@ -161,7 +161,7 @@ ceph_config_keys: [] # DON'T TOUCH ME
 # For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
 # Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
 # These options can be passed using the 'ceph_osd_docker_extra_env' variable.
-ceph_osd_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+ceph_osd_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 ceph_osd_docker_cpu_limit: 4
 
 # The next two variables are undefined, and thus, unused by default.
index 575318b02dd83065cd72ffca827755894c170b14..c224ae6b64cf3028fc00cb924608ba8653c82e7d 100644 (file)
@@ -5,7 +5,7 @@
 
 - name: set_fact container_exec_cmd
   set_fact:
-    container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_hostname'] }}"
+    container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[item]['ansible_facts']['hostname'] }}"
   with_items: "{{ groups.get(mon_group_name, []) }}"
   delegate_to: "{{ item }}"
   delegate_facts: true
@@ -23,7 +23,7 @@
   until: result is succeeded
   when:
     - not containerized_deployment | bool
-    - ansible_os_family != 'ClearLinux'
+    - ansible_facts['os_family'] != 'ClearLinux'
 
 - name: install numactl when needed
   package:
index c1e9100deeb9f60e25c3e1a68895ae12889c8c42..30417976ced2e146fc3dd2a431800510b04e5817 100644 (file)
@@ -27,7 +27,7 @@
     path: "/etc/systemd/system/ceph-osd@.service.d/"
   when:
     - ceph_osd_systemd_overrides is defined
-    - ansible_service_mgr == 'systemd'
+    - ansible_facts['service_mgr'] == 'systemd'
 
 - name: add ceph-osd systemd service overrides
   config_template:
@@ -37,7 +37,7 @@
     config_type: "ini"
   when:
     - ceph_osd_systemd_overrides is defined
-    - ansible_service_mgr == 'systemd'
+    - ansible_facts['service_mgr'] == 'systemd'
 
 - name: ensure "/var/lib/ceph/osd/{{ cluster }}-{{ item }}" is present
   file:
index 78d8d2dbdd366c4c3f63a60fc8afd05494c26808..f92f19a459f0fd3a5e76c608eb641f7975e1088d 100644 (file)
@@ -2,7 +2,7 @@
 - name: debian based systems tasks
   when:
     - osd_objectstore == 'filestore'
-    - ansible_os_family == "Debian"
+    - ansible_facts['os_family'] == "Debian"
   block:
     - name: disable osd directory parsing by updatedb
       command: updatedb -e /var/lib/ceph
@@ -43,7 +43,7 @@
 
 - name: set_fact vm_min_free_kbytes
   set_fact:
-    vm_min_free_kbytes: "{{ 4194303 if ansible_memtotal_mb >= 49152 else default_vm_min_free_kbytes.content | b64decode | trim }}"
+    vm_min_free_kbytes: "{{ 4194303 if ansible_facts['memtotal_mb'] >= 49152 else default_vm_min_free_kbytes.content | b64decode | trim }}"
 
 - name: apply operating system tuning
   sysctl:
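For scale, the cutoff above corresponds to 48 GiB of RAM: a 64 GiB host reports memtotal_mb = 65536, which is >= 49152, so vm_min_free_kbytes is pinned at 4194303 (about 4 GiB); smaller hosts keep the kernel default read back from the node earlier in the play.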
index 32d78a5668b2c03e1ef942311f26e30a04684cb2..216db6b17c10366637614ab952bd8d2b7c0df3fa 100644 (file)
@@ -7,7 +7,7 @@ Requires=docker.service
 {% else %}
 After=network.target
 {% endif %}
-{% set cpu_limit = ansible_processor_vcpus|int if ceph_osd_docker_cpu_limit|int > ansible_processor_vcpus|int else ceph_osd_docker_cpu_limit|int %}
+{% set cpu_limit = ansible_facts['processor_vcpus']|int if ceph_osd_docker_cpu_limit|int > ansible_facts['processor_vcpus']|int else ceph_osd_docker_cpu_limit|int %}
 
 [Service]
 EnvironmentFile=-/etc/environment
@@ -48,7 +48,7 @@ numactl \
   -v /var/run/ceph:/var/run/ceph:z \
   -v /var/run/udev/:/var/run/udev/ \
   -v /var/log/ceph:/var/log/ceph:z \
-  {% if ansible_distribution == 'Ubuntu' -%}
+  {% if ansible_facts['distribution'] == 'Ubuntu' -%}
   --security-opt apparmor:unconfined \
   {% endif -%}
   {{ container_env_args }} \
index 6aba7709eb875127b55bdbca49657e64f9a2bf7c..28280c3d683f5b378594ed47b7f07fa4bd07bcbf 100644 (file)
@@ -35,7 +35,7 @@ ExecStart=/usr/bin/{{ container_binary }} run --rm --name=alertmanager \
   --cluster.peer={{ peer }}:{{ alertmanager_cluster_port }} \
 {% endfor %}
   --storage.path=/alertmanager \
-  --web.external-url=http://{{ ansible_fqdn }}:{{ alertmanager_port }}/ \
+  --web.external-url=http://{{ ansible_facts['fqdn'] }}:{{ alertmanager_port }}/ \
   --web.listen-address={{ grafana_server_addr }}:{{ alertmanager_port }}
 {% if container_binary == 'podman' %}
 ExecStop=/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
index c2600acec42529542e78d87c756f826379132376..2ca58f417e7440f4ac7e6c79395a1266f64067c6 100644 (file)
@@ -11,5 +11,5 @@ receivers:
 - name: 'ceph-dashboard'
   webhook_configs:
 {% for host in groups['mgrs'] | default(groups['mons']) %}
-  - url: '{{ dashboard_protocol }}://{{ hostvars[host]['ansible_fqdn'] }}:{{ dashboard_port }}/api/prometheus_receiver'
+  - url: '{{ dashboard_protocol }}://{{ hostvars[host]['ansible_facts']['fqdn'] }}:{{ dashboard_port }}/api/prometheus_receiver'
 {% endfor %}
index 3a17da6f01647a83b4dd26050147de8fd65aaa98..601f19a1db9b4f85749c14d2a2f4f332142bd922 100644 (file)
@@ -31,7 +31,7 @@ ExecStart=/usr/bin/{{ container_binary }} run --rm --name=prometheus \
   {{ prometheus_container_image }} \
   --config.file=/etc/prometheus/prometheus.yml \
   --storage.tsdb.path=/prometheus \
-  --web.external-url=http://{{ ansible_fqdn }}:{{ prometheus_port }}/ \
+  --web.external-url=http://{{ ansible_facts['fqdn'] }}:{{ prometheus_port }}/ \
   --web.listen-address={{ grafana_server_addr }}:{{ prometheus_port }}
 {% if container_binary == 'podman' %}
 ExecStop=/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
index 0334dd7ef586762291a0004798c17ca033675987..c20664c36f32368428808b2b6928041ee389ff2d 100644 (file)
@@ -22,14 +22,14 @@ scrape_configs:
 {% for host in (groups['all'] | difference(groups[monitoring_group_name] | union(groups.get(client_group_name, [])))) %}
       - targets: ['{{ host }}:{{ node_exporter_port }}']
         labels:
-          instance: "{{ hostvars[host]['ansible_nodename'] }}"
+          instance: "{{ hostvars[host]['ansible_facts']['nodename'] }}"
 {% endfor %}
   - job_name: 'grafana'
     static_configs:
 {% for host in groups[monitoring_group_name] %}
       - targets: ['{{ host }}:{{ node_exporter_port }}']
         labels:
-          instance: "{{ hostvars[host]['ansible_nodename'] }}"
+          instance: "{{ hostvars[host]['ansible_facts']['nodename'] }}"
 {% endfor %}
 {% if iscsi_gw_group_name in groups %}
   - job_name: 'iscsi-gws'
@@ -37,7 +37,7 @@ scrape_configs:
 {% for host in groups[iscsi_gw_group_name] %}
       - targets: ['{{ host }}:9287']
         labels:
-          instance: "{{ hostvars[host]['ansible_nodename'] }}"
+          instance: "{{ hostvars[host]['ansible_facts']['nodename'] }}"
 {% endfor %}
 {% endif %}
 alerting:
index 75af947ff920ee5205c25a7a1db6b8d17bc7dc1e..a99f2ba8a8eaae94190d8d850e2379d84f13c1e8 100644 (file)
@@ -42,7 +42,7 @@ ceph_rbd_mirror_remote_user: ""
 # For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
 # Default values are based on: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
 # These options can be passed using the 'ceph_rbd_mirror_docker_extra_env' variable.
-ceph_rbd_mirror_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
+ceph_rbd_mirror_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
 ceph_rbd_mirror_docker_cpu_limit: 1
 
 ceph_rbd_mirror_docker_extra_env:
index 4eb7724eec4862c2dafd30c969ad16f710c5b7e1..14f5284d2fa1414ad94b117e7363ddc08529214d 100644 (file)
 
 - name: create rbd-mirror keyring
   ceph_key:
-    name: "client.rbd-mirror.{{ ansible_hostname }}"
+    name: "client.rbd-mirror.{{ ansible_facts['hostname'] }}"
     cluster: "{{ cluster }}"
     user: client.bootstrap-rbd-mirror
     user_key: "/var/lib/ceph/bootstrap-rbd-mirror/{{ cluster }}.keyring"
     caps:
       mon: "profile rbd-mirror"
       osd: "profile rbd"
-    dest: "/etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring"
+    dest: "/etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_facts['hostname'] }}.keyring"
     import_key: false
     owner: ceph
     group: ceph
index 4da988c1bd02c323efc4b77e771d09c182615eb7..07a1384fd1bc930e4c49c2eb44a7129469cec58d 100644 (file)
@@ -1,14 +1,14 @@
 ---
 - name: enable mirroring on the pool
-  command: "{{ container_exec_cmd | default('') }} rbd --cluster {{ cluster }} --keyring /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring --name client.rbd-mirror.{{ ansible_hostname }} mirror pool enable {{ ceph_rbd_mirror_pool }} {{ ceph_rbd_mirror_mode }}"
+  command: "{{ container_exec_cmd | default('') }} rbd --cluster {{ cluster }} --keyring /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_facts['hostname'] }}.keyring --name client.rbd-mirror.{{ ansible_facts['hostname'] }} mirror pool enable {{ ceph_rbd_mirror_pool }} {{ ceph_rbd_mirror_mode }}"
   changed_when: false
 
 - name: list mirroring peer
-  command: "{{ container_exec_cmd | default('') }} rbd --cluster {{ cluster }} --keyring /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring --name client.rbd-mirror.{{ ansible_hostname }} mirror pool info {{ ceph_rbd_mirror_pool }}"
+  command: "{{ container_exec_cmd | default('') }} rbd --cluster {{ cluster }} --keyring /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_facts['hostname'] }}.keyring --name client.rbd-mirror.{{ ansible_facts['hostname'] }} mirror pool info {{ ceph_rbd_mirror_pool }}"
   changed_when: false
   register: mirror_peer
 
 - name: add a mirroring peer
-  command: "{{ container_exec_cmd | default('') }} rbd --cluster {{ cluster }} --keyring /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_hostname }}.keyring --name client.rbd-mirror.{{ ansible_hostname }} mirror pool peer add {{ ceph_rbd_mirror_pool }} {{ ceph_rbd_mirror_remote_user }}@{{ ceph_rbd_mirror_remote_cluster }}"
+  command: "{{ container_exec_cmd | default('') }} rbd --cluster {{ cluster }} --keyring /etc/ceph/{{ cluster }}.client.rbd-mirror.{{ ansible_facts['hostname'] }}.keyring --name client.rbd-mirror.{{ ansible_facts['hostname'] }} mirror pool peer add {{ ceph_rbd_mirror_pool }} {{ ceph_rbd_mirror_remote_user }}@{{ ceph_rbd_mirror_remote_cluster }}"
   changed_when: false
   when: ceph_rbd_mirror_remote_user not in mirror_peer.stdout
index 1544c17b70eb5a3a624b8e794b9b01b397cf8330..98a6404ae0fd81265e3dddabb9de4074ce4be9b3 100644 (file)
@@ -16,7 +16,7 @@
   block:
     - name: set_fact container_exec_cmd
       set_fact:
-        container_exec_cmd: "{{ container_binary }} exec ceph-rbd-mirror-{{ ansible_hostname }}"
+        container_exec_cmd: "{{ container_binary }} exec ceph-rbd-mirror-{{ ansible_facts['hostname'] }}"
 
     - name: include start_container_rbd_mirror.yml
       include_tasks: start_container_rbd_mirror.yml
index cebeccff422df08ff5556c5709f579d096436731..c1ac8cd37c34fb4ba580d13fcc57026039faaf98 100644 (file)
@@ -5,7 +5,7 @@
 
 - name: systemd start rbd mirror container
   systemd:
-    name: ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}
+    name: ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}
     state: started
     enabled: yes
     masked: no
index 70140c5d138634f44cf19a7d5f9d6e155dd250bd..cbc32748b6e3abe7c8e6fef99fc40ba58d28345b 100644 (file)
@@ -5,7 +5,7 @@
     path: "/etc/systemd/system/ceph-rbd-mirror@.service.d/"
   when:
     - ceph_rbd_mirror_systemd_overrides is defined
-    - ansible_service_mgr == 'systemd'
+    - ansible_facts['service_mgr'] == 'systemd'
 
 - name: add ceph-rbd-mirror systemd service overrides
   config_template:
@@ -15,7 +15,7 @@
     config_type: "ini"
   when:
     - ceph_rbd_mirror_systemd_overrides is defined
-    - ansible_service_mgr == 'systemd'
+    - ansible_facts['service_mgr'] == 'systemd'
 
 - name: stop and remove the generic rbd-mirror service instance
   service:
@@ -34,7 +34,7 @@
 
 - name: start and add the rbd-mirror service instance
   service:
-    name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
+    name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
     state: started
     enabled: yes
     masked: no
index 830d29ae245bfdc73487cbfd98a8f628ab7e50ef..8dd83bd8eafb55a988592cb80fbef14e4fbd31fd 100644 (file)
@@ -11,11 +11,11 @@ After=network.target
 EnvironmentFile=-/etc/environment
 {% if container_binary == 'podman' %}
 ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
-ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-rbd-mirror-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-rbd-mirror-{{ ansible_facts['hostname'] }}
 {% else %}
-ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rbd-mirror-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rbd-mirror-{{ ansible_facts['hostname'] }}
 {% endif %}
-ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rbd-mirror-{{ ansible_hostname }}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rbd-mirror-{{ ansible_facts['hostname'] }}
 ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
 {% if container_binary == 'podman' %}
   -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
@@ -30,13 +30,13 @@ ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
   -e CLUSTER={{ cluster }} \
   -e CEPH_DAEMON=RBD_MIRROR \
   -e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
-  --name=ceph-rbd-mirror-{{ ansible_hostname }} \
+  --name=ceph-rbd-mirror-{{ ansible_facts['hostname'] }} \
   {{ ceph_rbd_mirror_docker_extra_env }} \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
 {% if container_binary == 'podman' %}
 ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
 {% else %}
-ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rbd-mirror-{{ ansible_hostname }}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rbd-mirror-{{ ansible_facts['hostname'] }}
 {% endif %}
 KillMode=none
 Restart=always
index 65bd791869e142a793cb953598e760975534807e..8f448160d550a4378aed41680b36e445e4a22852 100644 (file)
@@ -46,6 +46,6 @@ backend rgw-backend
     option httpchk HEAD /
 {% for host in groups[rgw_group_name] %}
 {% for instance in hostvars[host]['rgw_instances'] %}
-       server {{ 'server-' + hostvars[host]['ansible_hostname'] + '-' + instance['instance_name'] }} {{ instance['radosgw_address'] }}:{{ instance['radosgw_frontend_port'] }} weight 100 check
+       server {{ 'server-' + hostvars[host]['ansible_facts']['hostname'] + '-' + instance['instance_name'] }} {{ instance['radosgw_address'] }}:{{ instance['radosgw_frontend_port'] }} weight 100 check
 {% endfor %}
 {% endfor %}
index f4bab87c15ce7c0958b37d29a314066df3b49ddb..5d993b360e6d14c1effa178652c2d39ae93faa77 100644 (file)
@@ -1,6 +1,6 @@
 ---
 - name: restart rgw
   service:
-    name: "ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}"
+    name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
     state: restarted
   with_items: "{{ rgw_instances }}"
index 3af8405da5634a3cc320c56ce764aa3d8e3291ea..acfe50bb4e097be3f233a9da71866c6226be088c 100644 (file)
@@ -5,7 +5,7 @@
     state: present
   register: result
   until: result is succeeded
-  when: ansible_pkg_mgr == 'yum' or ansible_pkg_mgr == 'dnf'
+  when: ansible_facts['pkg_mgr'] == 'yum' or ansible_facts['pkg_mgr'] == 'dnf'
 
 - name: install libnss3-tools on debian
   package:
@@ -13,7 +13,7 @@
     state: present
   register: result
   until: result is succeeded
-  when: ansible_pkg_mgr == 'apt'
+  when: ansible_facts['pkg_mgr'] == 'apt'
 
 - name: create nss directory for keystone certificates
   file:
index fee9be8387226d061b7a6a90f72348d6e07292bd..c10fbf5e362625fce5578426f5c94cad48238c56 100644 (file)
@@ -1,11 +1,11 @@
 ---
 - name: create rgw keyrings
   ceph_key:
-    name: "client.rgw.{{ ansible_hostname }}.{{ item.instance_name }}"
+    name: "client.rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
     cluster: "{{ cluster }}"
     user: "client.bootstrap-rgw"
     user_key: /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
-    dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}/keyring"
+    dest: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}/keyring"
     caps:
       osd: 'allow rwx'
       mon: 'allow rw'
index d0d96314ee6b3e7567ba9382015752d7efe6b723..f4e3296ad16bdf2ea57e794f552149971ed8fbef 100644 (file)
@@ -4,7 +4,7 @@
 
 - name: systemd start rgw container
   systemd:
-    name: ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}
+    name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
     state: started
     enabled: yes
     masked: no
index 1431d988b4a133f164b117e7d1ed1ff90d790894..564540a86dd61c41af383ba9d6ee5338244f5633 100644 (file)
@@ -15,7 +15,7 @@
 
 - name: start rgw instance
   service:
-    name: ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}
+    name: ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}
     state: started
     enabled: yes
     masked: no
index 54bd5b0c10d1071df1238f9e535477509f51945f..d0f8d5358ed08f907840af610ce9c633138b223b 100644 (file)
@@ -6,17 +6,17 @@ Requires=docker.service
 {% else %}
 After=network.target
 {% endif %}
-{% set cpu_limit = ansible_processor_vcpus|int if ceph_rgw_docker_cpu_limit|int > ansible_processor_vcpus|int else ceph_rgw_docker_cpu_limit|int %}
+{% set cpu_limit = ansible_facts['processor_vcpus']|int if ceph_rgw_docker_cpu_limit|int > ansible_facts['processor_vcpus']|int else ceph_rgw_docker_cpu_limit|int %}
 
 [Service]
 EnvironmentFile=/var/lib/ceph/radosgw/{{ cluster }}-%i/EnvironmentFile
 {% if container_binary == 'podman' %}
 ExecStartPre=-/usr/bin/rm -f /%t/%n-pid /%t/%n-cid
-ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-rgw-{{ ansible_hostname }}-${INST_NAME}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm --storage ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME}
 {% else %}
-ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_hostname }}-${INST_NAME}
+ExecStartPre=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME}
 {% endif %}
-ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rgw-{{ ansible_hostname }}-${INST_NAME}
+ExecStartPre=-/usr/bin/{{ container_binary }} rm ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME}
 ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
 {% if container_binary == 'podman' %}
   -d --log-driver journald --conmon-pidfile /%t/%n-pid --cidfile /%t/%n-cid \
@@ -34,7 +34,7 @@ ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
   -v /var/run/ceph:/var/run/ceph:z \
   -v /etc/localtime:/etc/localtime:ro \
   -v /var/log/ceph:/var/log/ceph:z \
-  {% if ansible_distribution == 'RedHat' -%}
+  {% if ansible_facts['distribution'] == 'RedHat' -%}
   -v /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:z \
   {% endif -%}
   {% if radosgw_frontend_ssl_certificate -%}
@@ -42,15 +42,15 @@ ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
   {% endif -%}
   -e CEPH_DAEMON=RGW \
   -e CLUSTER={{ cluster }} \
-  -e RGW_NAME={{ ansible_hostname }}.${INST_NAME} \
+  -e RGW_NAME={{ ansible_facts['hostname'] }}.${INST_NAME} \
   -e CONTAINER_IMAGE={{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
-  --name=ceph-rgw-{{ ansible_hostname }}-${INST_NAME} \
+  --name=ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME} \
   {{ ceph_rgw_docker_extra_env }} \
   {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
 {% if container_binary == 'podman' %}
 ExecStop=-/usr/bin/sh -c "/usr/bin/{{ container_binary }} rm -f `cat /%t/%n-cid`"
 {% else %}
-ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_hostname }}-${INST_NAME}
+ExecStopPost=-/usr/bin/{{ container_binary }} stop ceph-rgw-{{ ansible_facts['hostname'] }}-${INST_NAME}
 {% endif %}
 KillMode=none
 Restart=always
index 5f805473cf824a7085d70141b848ff4229556a19..17251127eed777199a8507a6e8d5ecf21cebe9ef 100644 (file)
@@ -2,23 +2,23 @@
 - name: "fail if {{ monitor_interface }} does not exist on {{ inventory_hostname }}"
   fail:
     msg: "{{ monitor_interface }} does not exist on {{ inventory_hostname }}"
-  when: monitor_interface not in ansible_interfaces
+  when: monitor_interface not in ansible_facts['interfaces']
 
 - name: "fail if {{ monitor_interface }} is not active on {{ inventory_hostname }}"
   fail:
     msg: "{{ monitor_interface }} is not active on {{ inventory_hostname }}"
-  when: not hostvars[inventory_hostname]['ansible_' + (monitor_interface | replace('-', '_'))]['active']
+  when: not hostvars[inventory_hostname]['ansible_facts'][(monitor_interface | replace('-', '_'))]['active']
 
 - name: "fail if {{ monitor_interface }} does not have any ip v4 address on {{ inventory_hostname }}"
   fail:
     msg: "{{ monitor_interface }} does not have any IPv4 address on {{ inventory_hostname }}"
   when:
     - ip_version == "ipv4"
-    - hostvars[inventory_hostname]['ansible_' + (monitor_interface | replace('-', '_'))]['ipv4'] is not defined
+    - hostvars[inventory_hostname]['ansible_facts'][(monitor_interface | replace('-', '_'))]['ipv4'] is not defined
 
 - name: "fail if {{ monitor_interface }} does not have any ip v6 address on {{ inventory_hostname }}"
   fail:
     msg: "{{ monitor_interface }} does not have any IPv6 address on {{ inventory_hostname }}"
   when:
     - ip_version == "ipv6"
-    - hostvars[inventory_hostname]['ansible_' + (monitor_interface | replace('-', '_'))]['ipv6'] is not defined
+    - hostvars[inventory_hostname]['ansible_facts'][(monitor_interface | replace('-', '_'))]['ipv6'] is not defined
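
With injection on, each NIC's facts were flat host variables (ansible_eth0, ansible_bond0, and so on); with it off they exist only as keys of ansible_facts, named after the underscore-normalized interface, which is why the lookup string above changes shape. A minimal sketch of the same lookup as a standalone task (interface name hypothetical):

    - name: show whether a NIC is up without relying on injected facts
      debug:
        msg: "{{ ansible_facts[(monitor_interface | replace('-', '_'))]['active'] }}"
      vars:
        monitor_interface: eth1
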
index 73db6078e2a3ee9c54cd3a07853bd2dc3ea4fa26..c2438cf3dd3f40e24e9c2b2bafe2525420574196 100644 (file)
@@ -2,23 +2,23 @@
 - name: "fail if {{ radosgw_interface }} does not exist on {{ inventory_hostname }}"
   fail:
     msg: "{{ radosgw_interface }} does not exist on {{ inventory_hostname }}"
-  when: radosgw_interface not in ansible_interfaces
+  when: radosgw_interface not in ansible_facts['interfaces']
 
 - name: "fail if {{ radosgw_interface }} is not active on {{ inventory_hostname }}"
   fail:
     msg: "{{ radosgw_interface }} is not active on {{ inventory_hostname }}"
-  when: hostvars[inventory_hostname]['ansible_' + (radosgw_interface | replace('-', '_'))]['active'] == "false"
+  when: hostvars[inventory_hostname]['ansible_facts'][(radosgw_interface | replace('-', '_'))]['active'] == "false"
 
 - name: "fail if {{ radosgw_interface }} does not have any ip v4 address on {{ inventory_hostname }}"
   fail:
     msg: "{{ radosgw_interface }} does not have any IPv4 address on {{ inventory_hostname }}"
   when:
     - ip_version == "ipv4"
-    - hostvars[inventory_hostname]['ansible_' + (radosgw_interface | replace('-', '_'))]['ipv4'] is not defined
+    - hostvars[inventory_hostname]['ansible_facts'][(radosgw_interface | replace('-', '_'))]['ipv4'] is not defined
 
 - name: "fail if {{ radosgw_interface }} does not have any ip v6 address on {{ inventory_hostname }}"
   fail:
     msg: "{{ radosgw_interface }} does not have any IPv6 address on {{ inventory_hostname }}"
   when:
     - ip_version == "ipv6"
-    - hostvars[inventory_hostname]['ansible_' + (radosgw_interface | replace('-', '_'))]['ipv6'] is not defined
+    - hostvars[inventory_hostname]['ansible_facts'][(radosgw_interface | replace('-', '_'))]['ipv6'] is not defined
index 3bcd462691ec37977deba4d41410dc6d952687ba..734cd69700b572ee64c0214f0fd6659fb9ee6fa9 100644 (file)
@@ -2,4 +2,4 @@
 - name: "fail if {{ inventory_hostname }} does not have any {{ ip_version }} address on {{ monitor_address_block }}"
   fail:
     msg: "{{ inventory_hostname }} does not have any {{ ip_version }} address on {{ monitor_address_block }}"
-  when: hostvars[inventory_hostname]['ansible_all_' + ip_version + '_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['monitor_address_block'].split(',')) | length == 0
+  when: hostvars[inventory_hostname]['ansible_facts']['all_' + ip_version + '_addresses'] | ips_in_ranges(hostvars[inventory_hostname]['monitor_address_block'].split(',')) | length == 0
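
ips_in_ranges here is ceph-ansible's own filter plugin, not an Ansible built-in; the check passes as soon as one gathered address falls inside the configured block. A sketch of the positive case (address block hypothetical):

    - name: list this host's addresses that sit inside the monitor block
      debug:
        msg: "{{ ansible_facts['all_ipv4_addresses'] | ips_in_ranges(monitor_address_block.split(',')) }}"
      vars:
        monitor_address_block: "192.168.1.0/24"

An empty result is what trips the fail task above.
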
index 0f9a803fff5c3b7462bb58b02f6208951549858c..7bf11b31c081e9ab081652610b4c0ec7a88e7952 100644 (file)
@@ -2,7 +2,7 @@
 - name: fail on unsupported distribution for iscsi gateways
   fail:
     msg: "iSCSI gateways can only be deployed on Red Hat Enterprise Linux, CentOS or Fedora"
-  when: ansible_distribution not in ['RedHat', 'CentOS', 'Fedora']
+  when: ansible_facts['distribution'] not in ['RedHat', 'CentOS', 'Fedora']
 
 - name: make sure gateway_ip_list is configured
   fail:
@@ -31,7 +31,7 @@
     - " '' in client_connections | selectattr('status', 'match', 'present') | map(attribute='chap') | list"
 
 - name: fail on unsupported distribution version for iscsi gateways
-  command: 'grep -q {{ item }}=m {% if is_atomic|bool %}/usr/lib/ostree-boot{% else %}/boot{% endif %}/config-{{ ansible_kernel }}'
+  command: "grep -q {{ item }}=m {% if is_atomic|bool %}/usr/lib/ostree-boot{% else %}/boot{% endif %}/config-{{ ansible_facts['kernel'] }}"
   register: iscsi_kernel
   changed_when: false
   failed_when: iscsi_kernel.rc != 0
@@ -39,4 +39,4 @@
     - CONFIG_TARGET_CORE
     - CONFIG_TCM_USER2
     - CONFIG_ISCSI_TARGET
-  when: ansible_distribution in ['RedHat', 'CentOS']
+  when: ansible_facts['distribution'] in ['RedHat', 'CentOS']
index 9f5b6ffde5e996e4a258d775c3265e68fe988022..91ef3a20077bf8733f6e17daf3615dfc6085c252 100644 (file)
@@ -12,4 +12,4 @@
     msg: "ceph-nfs packages are not available from openSUSE Leap 15.x repositories (ceph_origin = 'distro')"
   when:
     - ceph_origin == 'distro'
-    - ansible_distribution == 'openSUSE Leap'
+    - ansible_facts['distribution'] == 'openSUSE Leap'
index 6794fd28a854f7be1ba1e9649b6e3214e76a3f23..62b36089b78cbf3dd077fe6dacdb84194f87d899 100644 (file)
 
 - name: fail on unsupported system
   fail:
-    msg: "System not supported {{ ansible_system }}"
-  when: ansible_system not in ['Linux']
+    msg: "System not supported {{ ansible_facts['system'] }}"
+  when: ansible_facts['system'] not in ['Linux']
 
 - name: fail on unsupported architecture
   fail:
-    msg: "Architecture not supported {{ ansible_architecture }}"
-  when: ansible_architecture not in ['x86_64', 'ppc64le', 'armv7l', 'aarch64']
+    msg: "Architecture not supported {{ ansible_facts['architecture'] }}"
+  when: ansible_facts['architecture'] not in ['x86_64', 'ppc64le', 'armv7l', 'aarch64']
 
 - name: fail on unsupported distribution
   fail:
-    msg: "Distribution not supported {{ ansible_os_family }}"
-  when: ansible_os_family not in ['Debian', 'RedHat', 'ClearLinux', 'Suse']
+    msg: "Distribution not supported {{ ansible_facts['os_family'] }}"
+  when: ansible_facts['os_family'] not in ['Debian', 'RedHat', 'ClearLinux', 'Suse']
 
 - name: fail on unsupported CentOS release
   fail:
-    msg: "CentOS release {{ ansible_distribution_major_version }} not supported with dashboard"
+    msg: "CentOS release {{ ansible_facts['distribution_major_version'] }} not supported with dashboard"
   when:
-    - ansible_distribution == 'CentOS'
-    - ansible_distribution_major_version | int == 7
+    - ansible_facts['distribution'] == 'CentOS'
+    - ansible_facts['distribution_major_version'] | int == 7
     - not containerized_deployment | bool
     - dashboard_enabled | bool
 
 - name: red hat based systems tasks
   when:
     - ceph_repository == 'rhcs'
-    - ansible_distribution == 'RedHat'
+    - ansible_facts['distribution'] == 'RedHat'
   block:
     - name: fail on unsupported distribution for red hat ceph storage
       fail:
-        msg: "Distribution not supported {{ ansible_distribution_version }} by Red Hat Ceph Storage, only RHEL >= 8.2"
-      when: ansible_distribution_version is version('8.2', '<')
+        msg: "Distribution not supported {{ ansible_facts['distribution_version'] }} by Red Hat Ceph Storage, only RHEL >= 8.2"
+      when: ansible_facts['distribution_version'] is version('8.2', '<')
 
     - name: subscription manager related tasks
       when: ceph_repository_type == 'cdn'
 
 - name: fail on unsupported distribution for ubuntu cloud archive
   fail:
-    msg: "Distribution not supported by Ubuntu Cloud Archive: {{ ansible_distribution }}"
+    msg: "Distribution not supported by Ubuntu Cloud Archive: {{ ansible_facts['distribution'] }}"
   when:
     - ceph_repository == 'uca'
-    - ansible_distribution != 'Ubuntu'
+    - ansible_facts['distribution'] != 'Ubuntu'
 
 - name: "fail on unsupported SUSE/openSUSE distribution (only 15.x supported)"
   fail:
-    msg: "Distribution not supported: {{ ansible_distribution }} {{ ansible_distribution_major_version }}"
+    msg: "Distribution not supported: {{ ansible_facts['distribution'] }} {{ ansible_facts['distribution_major_version'] }}"
   when:
-    - ansible_distribution == 'openSUSE Leap' or ansible_distribution == 'SUSE'
-    - ansible_distribution_major_version != '15'
+    - ansible_facts['distribution'] == 'openSUSE Leap' or ansible_facts['distribution'] == 'SUSE'
+    - ansible_facts['distribution_major_version'] != '15'
 
 - name: fail if systemd is not present
   fail:
     msg: "Systemd must be present"
-  when: ansible_service_mgr != 'systemd'
+  when: ansible_facts['service_mgr'] != 'systemd'
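
Once every reference in these validations goes through ansible_facts, fact injection itself can be switched off. The setting, per the Ansible configuration docs (either form works; neither is ceph-ansible specific, and site.yml is a placeholder playbook name):

    # ansible.cfg
    [defaults]
    inject_facts_as_vars = False

or, per run:

    ANSIBLE_INJECT_FACT_VARS=False ansible-playbook site.yml
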
index b6d33a690ab2808756e0d93c166e24182f1c627c..c846eee1e8541ca8a37f12a79620295d29fb95b0 100644 (file)
@@ -90,7 +90,7 @@
       with_items: '{{ lvm_volumes }}'
 
 - name: debian based systems tasks
-  when: ansible_os_family == 'Debian'
+  when: ansible_facts['os_family'] == 'Debian'
   block:
     - name: fail if local scenario is enabled on debian
       fail:
 # - ceph_origin == 'distro'
 # - ceph_origin == 'repository' and ceph_repository == 'obs'
 - name: SUSE/openSUSE Leap based system tasks
-  when: ansible_os_family == 'Suse'
+  when: ansible_facts['os_family'] == 'Suse'
   block:
     - name: Check ceph_origin definition on SUSE/openSUSE Leap
       fail:
     msg: installation can't happen on Atomic and ntpd needs to be installed
   when:
     - is_atomic | default(False) | bool
-    - ansible_os_family == 'RedHat'
+    - ansible_facts['os_family'] == 'RedHat'
     - ntp_daemon_type == 'ntpd'
 
 - name: make sure journal_size configured
index 0106654b86cd84658146c4e6b91ea6406073d455..f8644ccb9b404d54330050c729ae0bbb41b71735 100644 (file)
 
     - name: set_fact container_binary
       set_fact:
-        container_binary: "{{ 'podman' if (podman_binary.stat.exists and ansible_distribution == 'Fedora') or (ansible_os_family == 'RedHat' and ansible_distribution_major_version == '8') else 'docker' }}"
+        container_binary: "{{ 'podman' if (podman_binary.stat.exists and ansible_facts['distribution'] == 'Fedora') or (ansible_facts['os_family'] == 'RedHat' and ansible_facts['distribution_major_version'] == '8') else 'docker' }}"
 
     - name: get ceph status from the first monitor
       command: >
-        {{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} -s
+        {{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }} ceph --cluster {{ cluster }} -s
       register: ceph_status
       changed_when: false
       delegate_to: "{{ groups[mon_group_name][0] }}"
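
Cross-host lookups follow the same rule: another host's facts are always reachable under hostvars[host]['ansible_facts'], whereas hostvars[host]['ansible_hostname'] exists only while injection is on. A minimal sketch (group name 'mons' hypothetical):

    - name: read the first monitor's short hostname from any play host
      debug:
        msg: "{{ hostvars[groups['mons'][0]]['ansible_facts']['hostname'] }}"
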
index 59f1b13a6fd5a82f3b7bf0d677431bd073881f90..c88ef61217934e935378e2262873cbdd0492484c 100644 (file)
@@ -6,8 +6,8 @@ containerized_deployment: true
 cluster: ceph
 public_network: "192.168.55.0/24"
 cluster_network: "192.168.56.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
 copy_admin_key: true
index e77d2a7735c98246ff127a21896139bc356882b1..ef109c9b8b6b20c1fcf9a5b72dd89fbf30191de7 100644 (file)
@@ -4,8 +4,8 @@ ceph_repository: community
 cluster: ceph
 public_network: "192.168.53.0/24"
 cluster_network: "192.168.54.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
 copy_admin_key: true
index a4fd033112b0de2d4ea44f298a560e0e64adc21d..e721a4cfc1a6523727cc7f621a57addbe38eee51 100644 (file)
@@ -6,8 +6,8 @@ containerized_deployment: true
 cluster: ceph
 public_network: "192.168.67.0/24"
 cluster_network: "192.168.68.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
 copy_admin_key: true
index 770ca2ab0a2e93046db18ad257a02bbb0c0b9eaa..119d13167eb3a37c5137a072ed5f99346e779f0f 100644 (file)
@@ -4,8 +4,8 @@ ceph_repository: community
 cluster: ceph
 public_network: "192.168.65.0/24"
 cluster_network: "192.168.66.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
 copy_admin_key: true
index f04ca8775a59475bcd598d9b27a867a01cd342a6..fc557473e9aa43163c0a25aa8d5e0267029b96cc 100644 (file)
@@ -6,8 +6,8 @@ containerized_deployment: true
 cluster: ceph
 public_network: "192.168.71.0/24"
 cluster_network: "192.168.72.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
 copy_admin_key: true
index 829ae401519fb3397edf4de9bc1a992d15077a88..e7326ec8ec0077bfcf826198237adc6aad509423 100644 (file)
@@ -4,8 +4,8 @@ ceph_repository: community
 cluster: ceph
 public_network: "192.168.69.0/24"
 cluster_network: "192.168.70.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
 copy_admin_key: true
index eb58e03278cfaa732b5e2a97b2922d7fee536ff0..aa354409c4a5fc2e8275a55dcb150be952f4ed7a 100644 (file)
@@ -4,8 +4,8 @@
 docker: True
 
 containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 radosgw_num_instances: 2
 ceph_mon_docker_subnet: "{{ public_network }}"
 public_network: "192.168.19.0/24"
index 0eef250985ef69e33053b57c4f935f672d5d3bde..f50a1bba030e30b278d10b8abcd6bc5e214b69e1 100644 (file)
@@ -2,8 +2,8 @@
 containerized_deployment: False
 ceph_origin: repository
 ceph_repository: community
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 openstack_config: True
 dashboard_enabled: False
index d3fb25ed9177ea867812a17e9a1e28dfca2eb5d0..1904765fa0819d9848fcecd18e203107b1f47408 100644 (file)
@@ -4,8 +4,8 @@
 docker: True
 
 containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 public_network: "192.168.17.0/24"
 cluster_network: "192.168.18.0/24"
index cee1376ddb8540d8fbc956f3708a1903346ddd55..7fcf472584e53753e65f705750945b353d36f78e 100644 (file)
@@ -1,6 +1,6 @@
 [mons]
 mon0 monitor_address=192.168.17.10
-mon1 monitor_interface="{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 mon2 monitor_address=192.168.17.12
 
 [mgrs]
index ae3b46b05df67c1b4908856f4266b290f9b6d1f9..16fde8399a8de469c7a4ee1b2c7f2a90f9c1f39d 100644 (file)
@@ -3,7 +3,7 @@ ceph_origin: repository
 ceph_repository: community
 public_network: "192.168.1.0/24"
 cluster_network: "192.168.2.0/24"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_conf_overrides:
   global:
     mon_allow_pool_size_one: true
index ace86a0e9c3733628ef0b32f323a3672138840c0..4fe0c97cea471842d55f9339812154cc2e05787c 100644 (file)
@@ -1,6 +1,6 @@
 [mons]
 mon0 monitor_address=192.168.1.10
-mon1 monitor_interface="{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 mon2 monitor_address=192.168.1.12
 
 [mgrs]
index 047d0fb8272b52525ce04479cc301694eb68bdfb..fabb659f0bc76283b6507fbbfa46ca280a2f2436 100644 (file)
@@ -3,7 +3,7 @@ docker=True
 
 [mons]
 mon0 monitor_address=192.168.1.10
-mon1 monitor_interface="{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 mon2 monitor_address=192.168.1.12
 
 [mgrs]
index 9b5c3365d8fb57d17dc58fa56fe41c52cfd8c37d..935f57955b9e7abd5373050b5f4bd6ab1dee5d37 100644 (file)
@@ -4,8 +4,8 @@
 docker: True
 
 containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 radosgw_num_instances: 2
 ceph_mon_docker_subnet: "{{ public_network }}"
 public_network: "192.168.15.0/24"
index cee908e23fe2f141ffd5c259f351c59ce469425e..06b8ffd506638fae4c5e4b8076cb552cf2998113 100644 (file)
@@ -2,8 +2,8 @@
 containerized_deployment: False
 ceph_origin: repository
 ceph_repository: community
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 public_network: "192.168.15.0/24"
 cluster_network: "192.168.16.0/24"
index 08f2a466b783d0eeebdaeb46f59d8fba39378b4f..a6a5caaa570c8c748911add54493b2b3812425a4 100644 (file)
@@ -5,8 +5,8 @@ docker: True
 container_binary: docker
 
 containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 public_network: "192.168.58.0/24"
 cluster_network: "192.168.59.0/24"
index 8ff02aa512d4d74d49c4e50a973a03d43b0121fe..8eef0472855cfb6390d6cd816ac11267a6fca110 100644 (file)
@@ -4,8 +4,8 @@
 docker: True
 
 containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 radosgw_num_instances: 2
 ceph_mon_docker_subnet: "{{ public_network }}"
 public_network: "192.168.31.0/24"
index 81f37e7a60532a20ea846833e31099fc5ef98150..f941ecc7bcb3020ef90940154876d966e9aa43a1 100644 (file)
@@ -2,8 +2,8 @@
 containerized_deployment: False
 ceph_origin: repository
 ceph_repository: community
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 openstack_config: True
 dashboard_enabled: False
index 99baaf341511546923f01e2385eaca19972c02be..279909914167d4eb306d8c8af8effefff5c46892 100644 (file)
@@ -8,7 +8,7 @@ ceph_origin: repository
 ceph_repository: community
 public_network: "192.168.43.0/24"
 cluster_network: "192.168.44.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 2048
 copy_admin_key: true
 containerized_deployment: true
index e707a399bea5f7d7766db7d077394bddf7aabd22..43331a43a5533afdb34410c14d2a3922ea7adbb5 100644 (file)
@@ -4,7 +4,7 @@ ceph_origin: repository
 ceph_repository: community
 public_network: "192.168.41.0/24"
 cluster_network: "192.168.42.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 2048
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
index 1c396f4b876a8b5938e8a1ae4017048ef19ef4d0..37f6b06714ef8c58e02a350f5249ed4c17018197 100644 (file)
@@ -10,8 +10,8 @@ ceph_repository: dev
 cluster: ceph
 public_network: "192.168.39.0/24"
 cluster_network: "192.168.40.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
 crush_device_class: test
index fefa126dc1c4569ac3a7f3deca8d96d4215f6a47..149cd228fd864ca95df6a589f7dc0d72fa60db27 100644 (file)
@@ -5,8 +5,8 @@ ceph_repository: community
 cluster: ceph
 public_network: "192.168.39.0/24"
 cluster_network: "192.168.40.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 osd_objectstore: "bluestore"
 crush_device_class: test
 copy_admin_key: true
index d34ab613ef5ccca75d9ea3d7f15f8ab97a9de7bb..3786a7a33361ae701f625dbf807e0a340a9e89e6 100644 (file)
@@ -10,8 +10,8 @@ ceph_repository: community
 cluster: ceph
 public_network: "192.168.39.0/24"
 cluster_network: "192.168.40.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 2048
 osd_objectstore: "bluestore"
 crush_device_class: test
index 2a4b8fc879d3bf71b032cea82830a166290208e5..4b8aa59eec8d45401efa52532ecac5edf30a1b5c 100644 (file)
@@ -5,8 +5,8 @@ ceph_repository: community
 cluster: ceph
 public_network: "192.168.39.0/24"
 cluster_network: "192.168.40.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 osd_objectstore: "bluestore"
 crush_device_class: test
 copy_admin_key: true
index 7d7400f2fca1e9966efe93e36a15ce37b2b054c2..9fd809ba4a0cc7bf54abb58756f09c4d31805d35 100644 (file)
@@ -8,7 +8,7 @@ ceph_origin: repository
 ceph_repository: community
 public_network: "192.168.33.0/24"
 cluster_network: "192.168.34.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 copy_admin_key: true
 containerized_deployment: true
index 0f0883bcf4e7ad42fdc439098b97ef9cadc2449a..dd0b490330d47aa5719e3c2d1add94d30888c8eb 100644 (file)
@@ -4,7 +4,7 @@ ceph_origin: repository
 ceph_repository: community
 public_network: "192.168.39.0/24"
 cluster_network: "192.168.40.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 copy_admin_key: true
 # test-volume is created by tests/functional/lvm_setup.yml from /dev/sdb
index ac5083b88932cd4b97d63e1a880074645c525691..ea003750c4aa22cf5638a656088a3de3b67fbee1 100644 (file)
@@ -4,8 +4,8 @@
 docker: True
 
 containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 public_network: "192.168.30.0/24"
 cluster_network: "192.168.31.0/24"
index 4476d664b7e05bdb2e994f1b8a05a41f2916af40..35f25be6f0839182fbdbcb398d28abc8a02b6be5 100644 (file)
@@ -6,8 +6,8 @@ ceph_repository: community
 cluster: ceph
 public_network: "192.168.105.0/24"
 cluster_network: "192.168.106.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
 copy_admin_key: true
index 2704484f73ab6fa8f1fd61f177408b37706b7a17..1a3a58021b476d4f39e7955230d817a2c2ee3099 100644 (file)
@@ -6,8 +6,8 @@ ceph_repository: community
 cluster: ceph
 public_network: "192.168.107.0/24"
 cluster_network: "192.168.108.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
 copy_admin_key: true
index bc9d2789059e5b5ff7a55c5edc4720fe3a8448c9..5b14521a9082b399e77284b2c6543cdfd408ac3c 100644 (file)
@@ -4,8 +4,8 @@ ceph_repository: community
 cluster: ceph
 public_network: "192.168.101.0/24"
 cluster_network: "192.168.102.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
 copy_admin_key: true
index e1c5a8e5f3e6f59a67ccbca3c140e673ff1afebe..20b4537de3e6ff2e3dd7d88a78eb1257ab5aa89a 100644 (file)
@@ -4,8 +4,8 @@ ceph_repository: community
 cluster: ceph
 public_network: "192.168.103.0/24"
 cluster_network: "192.168.104.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 journal_size: 100
 osd_objectstore: "bluestore"
 copy_admin_key: true
index 14804669b440cb79e40573944b73966a3b69baae..7bc97ec14eeb3a482d3c54c6d44ce49dae296768 100644 (file)
@@ -81,7 +81,7 @@
         state: present
       register: result
       until: result is succeeded
-      when: ansible_os_family == 'RedHat'
+      when: ansible_facts['os_family'] == 'RedHat'
 
     - name: allow insecure docker registries
       lineinfile:
index 67bc40a779f7392e3dc33045fb6321fa30711d84..2173a5fa1f18fc846ec0a613a5fa9299141357e1 100644 (file)
 
     - name: get root mount information
       set_fact:
-        rootmount: "{{ ansible_mounts|json_query('[?mount==`/`]|[0]') }}"
+        rootmount: "{{ ansible_facts['mounts']|json_query('[?mount==`/`]|[0]') }}"
 
     # mount -o remount doesn't work on RHEL 8 for now
     - name: add mount options to /
       mount:
         path: '{{ rootmount.mount }}'
         src: '{{ rootmount.device }}'
-        opts: 'noatime,nodiratime{% if ansible_os_family == "RedHat" and ansible_distribution_major_version | int < 8 %},nobarrier{% endif %}'
+        opts: "noatime,nodiratime{% if ansible_facts['os_family'] == 'RedHat' and ansible_facts['distribution_major_version'] | int < 8 %},nobarrier{% endif %}"
         fstype: '{{ rootmount.fstype }}'
         state: mounted
 
@@ -63,8 +63,8 @@
             option: metalink
             state: absent
       when:
-        - ansible_distribution == 'CentOS'
-        - ansible_distribution_major_version | int == 7
+        - ansible_facts['distribution'] == 'CentOS'
+        - ansible_facts['distribution_major_version'] | int == 7
         - not is_atomic | bool
 
     - name: resize logical volume for root partition to fill remaining free space
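
The rootmount fact above is built with the json_query filter, which requires the jmespath Python library on the controller; the JMESPath expression selects the one mounts entry whose mount point is /. Isolated, the lookup is:

    - name: select the root filesystem entry from the gathered mounts
      set_fact:
        rootmount: "{{ ansible_facts['mounts'] | json_query('[?mount==`/`]|[0]') }}"

rootmount.mount, rootmount.device, and rootmount.fstype then feed the mount module, as the play above does.
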
index 377b1da6f7fd88e2dbc621ad54a446a365bbf637..7c74f2d68d1766447664c3f8df609c02e3470244 100644 (file)
@@ -4,7 +4,7 @@
 docker: True
 
 containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 public_network: "192.168.79.0/24"
 cluster_network: "192.168.80.0/24"
index 40f5fc67c102df81f61c3b66aaf5d37dba5e002a..b232a34f745bcc11555ef54f98c138976f856566 100644 (file)
@@ -4,7 +4,7 @@
 docker: True
 
 containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 public_network: "192.168.83.0/24"
 cluster_network: "192.168.84.0/24"
index 2ed4f0339960f6be64d840b087337933cd962c9c..98a9c9e86a3718e2b072c2fdb13a9016f57f4496 100644 (file)
@@ -4,7 +4,7 @@
 docker: True
 
 containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 public_network: "192.168.17.0/24"
 cluster_network: "192.168.18.0/24"
index 541558a7a7364db09bc0bf5d615f586cdf1c2594..c0230214637576608b9e426cd9ad548c630b5dcc 100644 (file)
@@ -1,6 +1,6 @@
 [mons]
 mon0 monitor_address=192.168.1.10
-mon1 monitor_interface="{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 mon2 monitor_address=192.168.1.12
 
 [osds]
index c95d9d2f19977a56da8393ff86cd04077d634b08..b995e9b99339a09c4f2ab73005c894b42dcb8343 100644 (file)
@@ -3,7 +3,7 @@ docker=True
 
 [mons]
 mon0 monitor_address=192.168.1.10
-mon1 monitor_interface="{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+mon1 monitor_interface="{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 mon2 monitor_address=192.168.1.12
 
 [osds]
index f85a7cfc9479aa960a91be976bc10a82fb0f28cc..7bf97528375ca0f904a66b539d8527391aecb763 100644 (file)
@@ -4,7 +4,7 @@
 docker: True
 
 containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 public_network: "192.168.73.0/24"
 cluster_network: "192.168.74.0/24"
index d18228d30940b4a5db2a6039625eeb45dd9c045b..f62124dca011a7e1e0eaf5a8beaf00a8f5ae5209 100644 (file)
@@ -5,7 +5,7 @@ docker: True
 public_network: "192.168.87.0/24"
 cluster_network: "192.168.88.0/24"
 containerized_deployment: True
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 ceph_conf_overrides:
   global:
index 3f5e185d3e7affcb1e3770565d4fbd9358ef9874..6119863cfddf3c3e861e33ba87cf92b590158581 100644 (file)
@@ -6,8 +6,8 @@ docker: True
 containerized_deployment: True
 public_network: "192.168.91.0/24"
 cluster_network: "192.168.92.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 ceph_mon_docker_subnet: "{{ public_network }}"
 ceph_conf_overrides:
   global:
index 025edfbdb4410a5e61e487dd0daa13768349cd10..f67e9215405d507afc103cd5b254600df4aebd96 100644 (file)
@@ -3,8 +3,8 @@ ceph_origin: repository
 ceph_repository: dev
 public_network: "192.168.89.0/24"
 cluster_network: "192.168.90.0/24"
-monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
-radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
+monitor_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
+radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
 osd_objectstore: "bluestore"
 copy_admin_key: true
 ceph_conf_overrides: