+++ /dev/null
----
-# Variables here are applicable to all host groups, NOT to roles
-
-# This sample file generated by generate_group_vars_sample.sh
-
-# Dummy variable to avoid an error, because Ansible does not recognize the
-# file as a valid configuration file when it contains no variables.
-dummy:
-
-# You can override vars by using host or group vars
-
-###########
-# GENERAL #
-###########
-
-######################################
-# Releases name to number dictionary #
-######################################
-#ceph_release_num:
-# dumpling: 0.67
-# emperor: 0.72
-# firefly: 0.80
-# giant: 0.87
-# hammer: 0.94
-# infernalis: 9
-# jewel: 10
-# kraken: 11
-# luminous: 12
-# mimic: 13
-# nautilus: 14
-# octopus: 15
-# pacific: 16
-# quincy: 17
-# reef: 18
-# dev: 99
-
-
-# The 'cluster' variable determines the name of the cluster.
-# Changing the default value to something else means that you will
-# need to change all the command line calls as well, for example if
-# your cluster name is 'foo':
-# "ceph health" will become "ceph --cluster foo health"
-#
-# An easier way to handle this is to use the CEPH_ARGS environment variable.
-# Run: export CEPH_ARGS="--cluster foo"
-# With that you will be able to run "ceph health" normally
-#cluster: ceph
-
-# Inventory host group variables
-#mon_group_name: mons
-#osd_group_name: osds
-#rgw_group_name: rgws
-#mds_group_name: mdss
-#nfs_group_name: nfss
-#rbdmirror_group_name: rbdmirrors
-#client_group_name: clients
-#mgr_group_name: mgrs
-#rgwloadbalancer_group_name: rgwloadbalancers
-#monitoring_group_name: monitoring
-#adopt_label_group_names:
-# - "{{ mon_group_name }}"
-# - "{{ osd_group_name }}"
-# - "{{ rgw_group_name }}"
-# - "{{ mds_group_name }}"
-# - "{{ nfs_group_name }}"
-# - "{{ rbdmirror_group_name }}"
-# - "{{ client_group_name }}"
-# - "{{ mgr_group_name }}"
-# - "{{ rgwloadbalancer_group_name }}"
-# - "{{ monitoring_group_name }}"
-
-# If configure_firewall is true, then ansible will try to configure the
-# appropriate firewalling rules so that Ceph daemons can communicate
-# with each other.
-#configure_firewall: true
-
-# Open ports on corresponding nodes if firewall is installed on it
-#ceph_mon_firewall_zone: public
-#ceph_mgr_firewall_zone: public
-#ceph_osd_firewall_zone: public
-#ceph_rgw_firewall_zone: public
-#ceph_mds_firewall_zone: public
-#ceph_nfs_firewall_zone: public
-#ceph_rbdmirror_firewall_zone: public
-#ceph_dashboard_firewall_zone: public
-#ceph_rgwloadbalancer_firewall_zone: public
-
-# cephadm account for remote connections
-#cephadm_ssh_user: root
-#cephadm_ssh_priv_key_path: "/home/{{ cephadm_ssh_user }}/.ssh/id_rsa"
-#cephadm_ssh_pub_key_path: "{{ cephadm_ssh_priv_key_path }}.pub"
-#cephadm_mgmt_network: "{{ public_network }}"
-
-############
-# PACKAGES #
-############
-#debian_package_dependencies: []
-
-#centos_package_dependencies:
-# - epel-release
-# - "{{ (ansible_facts['distribution_major_version'] is version('8', '>=')) | ternary('python3-libselinux', 'libselinux-python') }}"
-
-#redhat_package_dependencies: []
-
-#suse_package_dependencies: []
-
-# Whether or not to install the ceph-test package.
-#ceph_test: false
-
-# Enable the ntp service by default to avoid clock skew on ceph nodes.
-# Disable it if an appropriate NTP client is already installed and configured.
-#ntp_service_enabled: true
-
-# Set type of NTP client daemon to use, valid entries are chronyd, ntpd or timesyncd
-#ntp_daemon_type: chronyd
-
-# This variable determines if ceph packages can be updated. If false, the
-# package resources will use "state=present". If true, they will use
-# "state=latest".
-#upgrade_ceph_packages: false
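-#
-# For illustration, a minimal sketch of how a package task typically consumes
-# this toggle (the task below is hypothetical, not part of this playbook):
-# - name: install ceph packages
-#   ansible.builtin.package:
-#     name: ceph
-#     state: "{{ 'latest' if upgrade_ceph_packages | bool else 'present' }}"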
-
-#ceph_use_distro_backports: false # DEBIAN ONLY
-#ceph_directories_mode: "0755"
-
-###########
-# INSTALL #
-###########
-# ORIGIN SOURCE
-#
-# Choose between:
-# - 'repository' means that you will get ceph installed through a new repository. Later below choose between 'community', 'rhcs', 'dev', 'uca', 'custom' or 'obs'
-# - 'distro' means that no separate repo file will be added;
-#   you will get whatever version of Ceph is included in your Linux distro.
-# - 'local' means that the ceph binaries will be copied over from the local machine
-ceph_origin: repository
-#valid_ceph_origins:
-# - repository
-# - distro
-# - local
-
-
-ceph_repository: rhcs
-#valid_ceph_repository:
-# - community
-# - rhcs
-# - dev
-# - uca
-# - custom
-# - obs
-
-
-# REPOSITORY: COMMUNITY VERSION
-#
-# Enabled when ceph_repository == 'community'
-#
-#ceph_mirror: https://download.ceph.com
-#ceph_stable_key: https://download.ceph.com/keys/release.asc
-#ceph_stable_release: reef
-#ceph_stable_repo: "{{ ceph_mirror }}/debian-{{ ceph_stable_release }}"
-
-#nfs_ganesha_stable: true # use stable repos for nfs-ganesha
-#centos_release_nfs: centos-release-nfs-ganesha4
-#nfs_ganesha_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/nfs-ganesha-4/ubuntu
-#nfs_ganesha_apt_keyserver: keyserver.ubuntu.com
-#nfs_ganesha_apt_key_id: EA914D611053D07BD332E18010353E8834DC57CA
-#libntirpc_stable_deb_repo: http://ppa.launchpad.net/nfs-ganesha/libntirpc-4/ubuntu
-
-# Use the option below to specify your applicable package tree, e.g. when using non-LTS Ubuntu versions
-# for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
-# for more info read: https://github.com/ceph/ceph-ansible/issues/305
-# ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
-
-
-# REPOSITORY: RHCS (RED HAT CEPH STORAGE) VERSION (from 5.0)
-#
-# Enabled when ceph_repository == 'rhcs'
-#
-# This version is supported on RHEL 8
-#
-ceph_rhcs_version: 5
-
-
-# REPOSITORY: UBUNTU CLOUD ARCHIVE
-#
-# Enabled when ceph_repository == 'uca'
-#
-# This allows the install of Ceph from the Ubuntu Cloud Archive. The Ubuntu Cloud Archive
-# usually has newer Ceph releases than the normal distro repository.
-#
-#
-#ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
-#ceph_stable_openstack_release_uca: queens
-#ceph_stable_release_uca: "{{ ansible_facts['distribution_release'] }}-updates/{{ ceph_stable_openstack_release_uca }}"
-
-# REPOSITORY: openSUSE OBS
-#
-# Enabled when ceph_repository == 'obs'
-#
-# This allows the install of Ceph from the openSUSE OBS repository. The OBS repository
-# usually has newer Ceph releases than the normal distro repository.
-#
-#
-#ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_facts['distribution_version'] }}/"
-
-# REPOSITORY: DEV
-#
-# Enabled when ceph_repository == 'dev'
-#
-#ceph_dev_branch: main # development branch you would like to use, e.g. main, wip-hack
-#ceph_dev_sha1: latest # distinct sha1 to use, defaults to 'latest' (as in latest built)
-
-#nfs_ganesha_dev: false # use development repos for nfs-ganesha
-
-# Set this to choose the version of ceph dev libraries used in the nfs-ganesha packages from shaman
-# flavors so far include: ceph_main, ceph_jewel, ceph_kraken, ceph_luminous
-#nfs_ganesha_flavor: "ceph_main"
-
-
-# REPOSITORY: CUSTOM
-#
-# Enabled when ceph_repository == 'custom'
-#
-# Use a custom repository to install ceph. For RPM, ceph_custom_repo should be
-# a URL to the .repo file to be installed on the targets. For deb,
-# ceph_custom_repo should be the URL to the repo base.
-#
-# ceph_custom_key: https://server.domain.com/ceph-custom-repo/key.asc
-#ceph_custom_repo: https://server.domain.com/ceph-custom-repo
-
-
-# ORIGIN: LOCAL CEPH INSTALLATION
-#
-# Enabled when ceph_repository == 'local'
-#
-# Path to DESTDIR of the ceph install
-# ceph_installation_dir: "/path/to/ceph_installation/"
-# Whether or not to use the installer script rundep_installer.sh.
-# This script takes in rundep and installs the packages line by line onto the machine.
-# If this is set to false, then it is assumed that the machine onto which ceph is being
-# copied will already have all runtime dependencies installed.
-# use_installer: false
-# Root directory for ceph-ansible
-# ansible_dir: "/path/to/ceph-ansible"
-
-
-######################
-# CEPH CONFIGURATION #
-######################
-
-## Ceph options
-#
-# Each cluster requires a unique, consistent filesystem ID. By
-# default, the playbook generates one for you.
-# If you want to control the fsid yourself, you may find it useful to
-# disable fsid generation to avoid cluttering up your ansible repo.
-# If you set `generate_fsid` to false, you *must* provide `fsid` in
-# another way.
-# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT
-#fsid: "{{ cluster_uuid.stdout }}"
-#generate_fsid: true
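-#
-# For example, to pin a pre-generated fsid instead of generating one
-# (the UUID below is a placeholder, e.g. produced with `uuidgen`):
-# generate_fsid: false
-# fsid: "a7f64266-0894-4f1e-a635-d0aeaca0e993"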
-
-#ceph_conf_key_directory: /etc/ceph
-
-#ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_facts['os_family'] == 'Debian' else '167' }}"
-
-# Permissions for keyring files in /etc/ceph
-#ceph_keyring_permissions: '0600'
-
-#cephx: true
-
-## Client options
-#
-#rbd_cache: "true"
-#rbd_cache_writethrough_until_flush: "true"
-#rbd_concurrent_management_ops: 20
-
-#rbd_client_directories: true # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions
-
-# Permissions for the rbd_client_log_path and
-# rbd_client_admin_socket_path. Depending on your use case for Ceph
-# you may want to change these values. The default, which is used if
-# any of the variables are unset or set to a false value (like `null`
-# or `false`) is to automatically determine what is appropriate for
-# the Ceph version with non-OpenStack workloads -- ceph:ceph and 0770
-# for infernalis and later releases, and root:root and 1777 for
-# pre-infernalis releases.
-#
-# For other use cases, including running Ceph with OpenStack, you'll
-# want to set these differently:
-#
-# For OpenStack on RHEL, you'll want:
-# rbd_client_directory_owner: "qemu"
-# rbd_client_directory_group: "libvirtd" (or "libvirt", depending on your version of libvirt)
-# rbd_client_directory_mode: "0755"
-#
-# For OpenStack on Ubuntu or Debian, set:
-# rbd_client_directory_owner: "libvirt-qemu"
-# rbd_client_directory_group: "kvm"
-# rbd_client_directory_mode: "0755"
-#
-# If you set rbd_client_directory_mode, you must use a string (e.g.,
-# 'rbd_client_directory_mode: "0755"', *not*
-# 'rbd_client_directory_mode: 0755'), or Ansible will complain that the
-# mode must be in octal or symbolic form.
-#rbd_client_directory_owner: ceph
-#rbd_client_directory_group: ceph
-#rbd_client_directory_mode: "0770"
-
-#rbd_client_log_path: /var/log/ceph
-#rbd_client_log_file: "{{ rbd_client_log_path }}/qemu-guest-$pid.log" # must be writable by QEMU and allowed by SELinux or AppArmor
-#rbd_client_admin_socket_path: /var/run/ceph # must be writable by QEMU and allowed by SELinux or AppArmor
-
-## Monitor options
-#
-# You must define either monitor_interface, monitor_address or monitor_address_block.
-# These variables must be defined at least in all.yml and overridden if needed (inventory host file or group_vars/*.yml).
-# E.g. if you want to specify for each monitor which address the monitor will bind to, you can set it in your **inventory host file** by using the 'monitor_address' variable (see the sketch below).
-# Preference goes to monitor_address if both monitor_address and monitor_interface are defined.
-#monitor_interface: interface
-#monitor_address: x.x.x.x
-#monitor_address_block: subnet
-# set to either ipv4 or ipv6, whichever your network is using
-#ip_version: ipv4
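-#
-# For illustration, per-monitor bind addresses in an INI inventory host file
-# (hostnames and addresses are placeholders):
-# [mons]
-# ceph-mon1 monitor_address=192.168.1.10
-# ceph-mon2 monitor_address=192.168.1.11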
-
-#mon_host_v1:
-# enabled: true
-# suffix: ':6789'
-#mon_host_v2:
-# suffix: ':3300'
-
-#enable_ceph_volume_debug: false
-
-##########
-# CEPHFS #
-##########
-# When pg_autoscale_mode is set to true, you must add the target_size_ratio key with a correct value;
-# the `pg_num` and `pgp_num` keys will be ignored, even if specified.
-# eg:
-# cephfs_data_pool:
-# name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
-# target_size_ratio: 0.2
-#cephfs: cephfs # name of the ceph filesystem
-#cephfs_data_pool:
-# name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
-#cephfs_metadata_pool:
-# name: "{{ cephfs_metadata if cephfs_metadata is defined else 'cephfs_metadata' }}"
-#cephfs_pools:
-# - "{{ cephfs_data_pool }}"
-# - "{{ cephfs_metadata_pool }}"
-
-## OSD options
-#
-#lvmetad_disabled: false
-#is_hci: false
-#hci_safety_factor: 0.2
-#non_hci_safety_factor: 0.7
-#safety_factor: "{{ hci_safety_factor if is_hci | bool else non_hci_safety_factor }}"
-#osd_memory_target: 4294967296
-#journal_size: 5120 # OSD journal size in MB
-#block_db_size: -1 # block db size in bytes for the ceph-volume lvm batch. -1 means use the default of 'as big as possible'.
-#public_network: 0.0.0.0/0
-#cluster_network: "{{ public_network | regex_replace(' ', '') }}"
-#osd_mkfs_type: xfs
-#osd_mkfs_options_xfs: -f -i size=2048
-#osd_mount_options_xfs: noatime,largeio,inode64,swalloc
-#osd_objectstore: bluestore
-
-# Any device containing one of these patterns in its path will be excluded.
-#osd_auto_discovery_exclude: "dm-*|loop*|md*|rbd*"
-
-## MDS options
-#
-#mds_max_mds: 1
-
-## Rados Gateway options
-#
-#radosgw_frontend_type: beast # For additional frontends see: https://docs.ceph.com/en/latest/radosgw/frontends/
-
-#radosgw_frontend_port: 8080
-# The server private key, public certificate and any other CA or intermediate certificates should be in one file, in PEM format.
-#radosgw_frontend_ssl_certificate: ""
-#radosgw_frontend_ssl_certificate_data: "" # certificate contents to be written to path defined by radosgw_frontend_ssl_certificate
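-#
-# For example, the combined PEM can be assembled like this (paths are
-# placeholders):
-# cat server.key server.crt ca-chain.crt > /etc/ceph/radosgw.pem
-# radosgw_frontend_ssl_certificate: /etc/ceph/radosgw.pem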
-#radosgw_frontend_options: ""
-#radosgw_thread_pool_size: 512
-
-
-# You must define either radosgw_interface or radosgw_address.
-# These variables must be defined at least in all.yml and overridden if needed (inventory host file or group_vars/*.yml).
-# E.g. if you want to specify for each radosgw node which address the radosgw will bind to, you can set it in your **inventory host file** by using the 'radosgw_address' variable (see the sketch below).
-# Preference goes to radosgw_address if both radosgw_address and radosgw_interface are defined.
-#radosgw_interface: interface
-#radosgw_address: x.x.x.x
-#radosgw_address_block: subnet
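-#
-# For illustration, per-node bind addresses in an INI inventory host file
-# (hostnames and addresses are placeholders):
-# [rgws]
-# ceph-rgw1 radosgw_address=192.168.1.20
-# ceph-rgw2 radosgw_address=192.168.1.21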
-#radosgw_keystone_ssl: false # activate this when using keystone PKI keys
-#radosgw_num_instances: 1
-#email_address: foo@bar.com
-
-
-## Testing mode
-# enable this mode _only_ when you have a single node
-# if you don't want it, keep the option commented
-# common_single_host_mode: true
-
-## Handlers - restarting daemons after a config change
-# If for whatever reason the content of your ceph configuration changes,
-# ceph daemons will be restarted as well. At the moment, we cannot detect
-# which config option changed, so all the daemons will be restarted. Although
-# this restart will be serialized for each node, in between a health check
-# will be performed so we make sure we don't move to the next node until
-# ceph is healthy again.
-# Obviously between the checks (for monitors to be in quorum and for osds' pgs
-# to be clean) we have to wait. These retries and delays are configurable
-# for both monitors and osds.
-#
-# Monitor handler checks
-#handler_health_mon_check_retries: 10
-#handler_health_mon_check_delay: 20
-#
-# OSD handler checks
-#handler_health_osd_check_retries: 40
-#handler_health_osd_check_delay: 30
-#handler_health_osd_check: true
-#
-# MDS handler checks
-#handler_health_mds_check_retries: 5
-#handler_health_mds_check_delay: 10
-#
-# RGW handler checks
-#handler_health_rgw_check_retries: 5
-#handler_health_rgw_check_delay: 10
-
-# NFS handler checks
-#handler_health_nfs_check_retries: 5
-#handler_health_nfs_check_delay: 10
-
-# RBD MIRROR handler checks
-#handler_health_rbd_mirror_check_retries: 5
-#handler_health_rbd_mirror_check_delay: 10
-
-# MGR handler checks
-#handler_health_mgr_check_retries: 5
-#handler_health_mgr_check_delay: 10
-
-## health mon/osds check retries/delay:
-
-#health_mon_check_retries: 20
-#health_mon_check_delay: 10
-#health_osd_check_retries: 20
-#health_osd_check_delay: 10
-
-##############
-# RBD-MIRROR #
-##############
-
-#ceph_rbd_mirror_pool: "rbd"
-
-###############
-# NFS-GANESHA #
-###############
-#
-# Access type options
-#
-# Enable NFS File access
-# If set to true, then ganesha is set up to export the root of the
-# Ceph filesystem, and ganesha's attribute and directory caching is disabled
-# as much as possible, since libcephfs clients also cache the same
-# information.
-#
-# Set this to true to enable File access via NFS. Requires an MDS role.
-#nfs_file_gw: false
-# Set this to true to enable Object access via NFS. Requires an RGW role.
-#nfs_obj_gw: "{{ False if groups.get(mon_group_name, []) | length == 0 else True }}"
-
-
-###################
-# CONFIG OVERRIDE #
-###################
-
-# Ceph configuration file override.
-# This allows you to specify more configuration options
-# using an INI style format.
-#
-# When configuring RGWs, make sure you use the form [client.rgw.*]
-# instead of [client.radosgw.*].
-# For more examples check the profiles directory of https://github.com/ceph/ceph-ansible.
-#
-# The following sections are supported: [global], [mon], [osd], [mds], [client]
-#
-# Example:
-# ceph_conf_overrides:
-# global:
-# foo: 1234
-# bar: 5678
-# "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
-# rgw_zone: zone1
-#
-#ceph_conf_overrides: {}
-
-
-#############
-# OS TUNING #
-#############
-
-#disable_transparent_hugepage: "{{ false if osd_objectstore == 'bluestore' else true }}"
-#os_tuning_params:
-# - { name: fs.file-max, value: 26234859 }
-# - { name: vm.zone_reclaim_mode, value: 0 }
-# - { name: vm.swappiness, value: 10 }
-# - { name: vm.min_free_kbytes, value: "{{ vm_min_free_kbytes }}" }
-
-# For Debian & Red Hat/CentOS installs set TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES
-# Set this to a byte value (e.g. 134217728)
-# A value of 0 will leave the package default.
-#ceph_tcmalloc_max_total_thread_cache: 134217728
-
-
-##########
-# DOCKER #
-##########
-ceph_docker_image: "rhceph/rhceph-5-rhel8"
-ceph_docker_image_tag: "latest"
-ceph_docker_registry: "registry.redhat.io"
-ceph_docker_registry_auth: true
-# ceph_docker_registry_username:
-# ceph_docker_registry_password:
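-#
-# With ceph_docker_registry_auth enabled, fill these in, for example (values
-# are placeholders; consider storing the password with ansible-vault):
-# ceph_docker_registry_username: registry-user
-# ceph_docker_registry_password: registry-password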
-# ceph_docker_http_proxy:
-# ceph_docker_https_proxy:
-#ceph_docker_no_proxy: "localhost,127.0.0.1"
-## Client only docker image - defaults to {{ ceph_docker_image }}
-#ceph_client_docker_image: "{{ ceph_docker_image }}"
-#ceph_client_docker_image_tag: "{{ ceph_docker_image_tag }}"
-#ceph_client_docker_registry: "{{ ceph_docker_registry }}"
-containerized_deployment: true
-#container_binary:
-#timeout_command: "{{ 'timeout --foreground -s KILL ' ~ docker_pull_timeout if (docker_pull_timeout != '0') and (ceph_docker_dev_image is undefined or not ceph_docker_dev_image) else '' }}"
-
-
-# This is only here for usage with the rolling_update.yml playbook;
-# do not ever change it here.
-#rolling_update: false
-
-#####################
-# Docker pull retry #
-#####################
-#docker_pull_retry: 3
-#docker_pull_timeout: "300s"
-
-
-#############
-# OPENSTACK #
-#############
-#openstack_config: false
-# When pg_autoscale_mode is set to true, you must add the target_size_ratio key with a correct value;
-# the `pg_num` and `pgp_num` keys will be ignored, even if specified.
-# eg:
-# openstack_glance_pool:
-# name: "images"
-# rule_name: "my_replicated_rule"
-# application: "rbd"
-# pg_autoscale_mode: false
-# pg_num: 16
-# pgp_num: 16
-# target_size_ratio: 0.2
-#openstack_glance_pool:
-# name: "images"
-# application: "rbd"
-#openstack_cinder_pool:
-# name: "volumes"
-# application: "rbd"
-#openstack_nova_pool:
-# name: "vms"
-# application: "rbd"
-#openstack_cinder_backup_pool:
-# name: "backups"
-# application: "rbd"
-#openstack_gnocchi_pool:
-# name: "metrics"
-# application: "rbd"
-#openstack_cephfs_data_pool:
-# name: "manila_data"
-# application: "cephfs"
-#openstack_cephfs_metadata_pool:
-# name: "manila_metadata"
-# application: "cephfs"
-#openstack_pools:
-# - "{{ openstack_glance_pool }}"
-# - "{{ openstack_cinder_pool }}"
-# - "{{ openstack_nova_pool }}"
-# - "{{ openstack_cinder_backup_pool }}"
-# - "{{ openstack_gnocchi_pool }}"
-# - "{{ openstack_cephfs_data_pool }}"
-# - "{{ openstack_cephfs_metadata_pool }}"
-
-
-# The value for 'key' can be a pre-generated key,
-# e.g. key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
-# By default, keys will be auto-generated.
-#
-#openstack_keys:
-# - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
-# - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
-# - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
-# - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
-# - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
-
-
-#############
-# DASHBOARD #
-#############
-#dashboard_enabled: true
-# Choose http or https
-# For https, you should set dashboard.crt/key and grafana.crt/key
-# If you define the dashboard_crt and dashboard_key variables, but leave them as '',
-# then we will autogenerate a cert and keyfile
-#dashboard_protocol: https
-#dashboard_port: 8443
-# Set this variable to the network you want the dashboard to listen on (defaults to public_network).
-#dashboard_network: "{{ public_network }}"
-#dashboard_admin_user: admin
-#dashboard_admin_user_ro: false
-# This variable must be set with a strong custom password when dashboard_enabled is True
-# dashboard_admin_password: p@ssw0rd
-# We only need this for SSL (https) connections
-#dashboard_crt: ''
-#dashboard_key: ''
-#dashboard_certificate_cn: ceph-dashboard
-#dashboard_tls_external: false
-#dashboard_grafana_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not grafana_crt and not grafana_key else false }}"
-#dashboard_rgw_api_user_id: ceph-dashboard
-#dashboard_rgw_api_admin_resource: ''
-#dashboard_rgw_api_no_ssl_verify: false
-#dashboard_frontend_vip: ''
-#dashboard_disabled_features: []
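-#
-# For example, to hide selected dashboard components (feature names shown are
-# illustrative; see the Ceph dashboard documentation for the valid set):
-# dashboard_disabled_features:
-#   - rbd
-#   - iscsi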
-#prometheus_frontend_vip: ''
-#alertmanager_frontend_vip: ''
-node_exporter_container_image: registry.redhat.io/openshift4/ose-prometheus-node-exporter:v4.6
-#node_exporter_port: 9100
-#grafana_admin_user: admin
-# This variable must be set with a strong custom password when dashboard_enabled is True
-# grafana_admin_password: admin
-# We only need this for SSL (https) connections
-#grafana_crt: ''
-#grafana_key: ''
-# When using https, fill in a hostname for which grafana_crt is valid.
-#grafana_server_fqdn: ''
-grafana_container_image: registry.redhat.io/rhceph/rhceph-5-dashboard-rhel8:5
-#grafana_container_cpu_period: 100000
-#grafana_container_cpu_cores: 2
-# container_memory is in GB
-#grafana_container_memory: 4
-#grafana_uid: 472
-#grafana_datasource: Dashboard
-#grafana_dashboards_path: "/etc/grafana/dashboards/ceph-dashboard"
-#grafana_dashboard_version: main
-#grafana_dashboard_files:
-# - ceph-cluster.json
-# - cephfs-overview.json
-# - host-details.json
-# - hosts-overview.json
-# - osd-device-details.json
-# - osds-overview.json
-# - pool-detail.json
-# - pool-overview.json
-# - radosgw-detail.json
-# - radosgw-overview.json
-# - radosgw-sync-overview.json
-# - rbd-details.json
-# - rbd-overview.json
-#grafana_plugins:
-# - vonage-status-panel
-# - grafana-piechart-panel
-#grafana_allow_embedding: true
-#grafana_port: 3000
-#grafana_network: "{{ public_network }}"
-#grafana_conf_overrides: {}
-prometheus_container_image: registry.redhat.io/openshift4/ose-prometheus:v4.6
-#prometheus_container_cpu_period: 100000
-#prometheus_container_cpu_cores: 2
-# container_memory is in GB
-#prometheus_container_memory: 4
-#prometheus_data_dir: /var/lib/prometheus
-#prometheus_conf_dir: /etc/prometheus
-#prometheus_user_id: '65534' # This is the UID used by the prom/prometheus container image
-#prometheus_port: 9092
-#prometheus_conf_overrides: {}
-# Uncomment this variable if you need to customize the retention period for prometheus storage.
-# Set it to '30d' if you want to retain 30 days of data.
-# prometheus_storage_tsdb_retention_time: 15d
-alertmanager_container_image: registry.redhat.io/openshift4/ose-prometheus-alertmanager:v4.6
-#alertmanager_container_cpu_period: 100000
-#alertmanager_container_cpu_cores: 2
-# container_memory is in GB
-#alertmanager_container_memory: 4
-#alertmanager_data_dir: /var/lib/alertmanager
-#alertmanager_conf_dir: /etc/alertmanager
-#alertmanager_port: 9093
-#alertmanager_cluster_port: 9094
-#alertmanager_conf_overrides: {}
-#alertmanager_dashboard_api_no_ssl_verify: "{{ true if dashboard_protocol == 'https' and not dashboard_crt and not dashboard_key else false }}"
-
-#no_log_on_ceph_key_tasks: true
-
-###############
-# DEPRECATION #
-###############
-
-
-######################################################
-# VARIABLES BELOW SHOULD NOT BE MODIFIED BY THE USER #
-# *DO NOT* MODIFY THEM #
-######################################################
-
-#container_exec_cmd:
-#docker: false
-#ceph_volume_debug: "{{ enable_ceph_volume_debug | ternary(1, 0) }}"
-