## Special notes
If you are deploying a Ceph version older than Jewel,
-It is highly recommended that you apply the following settings to your `group_vars/all` file on the `ceph_conf_overrides` variable:
+it is highly recommended that you apply the following settings to your `group_vars/all.yml` file in the `ceph_conf_overrides` variable:
```
ceph_conf_overrides:
```
The Vagrantfile specifies an fsid for the cluster and a secret key for the
monitor. If using these playbooks in production, you must generate your own `fsid`
-in `group_vars/all` and `monitor_secret` in `group_vars/mons`. Those files contain
+in `group_vars/all.yml` and `monitor_secret` in `group_vars/mons.yml`. Those files contain
information about how to generate appropriate values for these variables.
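As a rough sketch, an `fsid` can be generated with `uuidgen` and a `monitor_secret` with `ceph-authtool` (shipped with the ceph-common package); see the comments in those files for the exact procedure they expect:

```bash
# Generate a unique cluster fsid
uuidgen
# Generate a monitor secret key
ceph-authtool --gen-print-key
```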
## Specifying package origin
### For Atomic systems
If you want to run a containerized deployment on Atomic systems (RHEL/CentOS Atomic), please copy
-[vagrant_variables.yml.atomic](vagrant_variables.yml.atomic) to vagrant_variables.yml, and copy [group_vars/all.docker](group_vars/all.docker) to `group_vars/all`.
+[vagrant_variables.yml.atomic](vagrant_variables.yml.atomic) to `vagrant_variables.yml`, and copy [group_vars/all.docker.yml.sample](group_vars/all.docker.yml.sample) to `group_vars/all.yml`.
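For reference, those copy steps boil down to something like:

```bash
cp vagrant_variables.yml.atomic vagrant_variables.yml
cp group_vars/all.docker.yml.sample group_vars/all.yml
```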
Since the `centos/atomic-host` VirtualBox box doesn't have a spare storage controller for attaching more disks, the first `vagrant up --provider=virtualbox` run will likely fail to attach a storage controller. In that case, run the following command:
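One way to add the missing SATA controller by hand is with `VBoxManage` (the VM name below is only a placeholder; list the actual name with `VBoxManage list vms`), then re-run vagrant:

```bash
# Placeholder VM name -- replace it with the name reported by `VBoxManage list vms`
VBoxManage storagectl "atomic-client0" --name "SATA" --add sata
vagrant up --provider=virtualbox
```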
```bash
$ cp site.yml.sample site.yml
-$ cp group_vars/all.docker.sample group_vars/all
+$ cp group_vars/all.docker.yml.sample group_vars/all.yml
$ cp vagrant_variables.yml.openstack vagrant_variables.yml
```
* Edit `vagrant_variables.yml`:
}
function cp_var {
- cp group_vars/all.sample group_vars/all
- cp group_vars/osds.sample group_vars/osds
+ cp group_vars/all.yml.sample group_vars/all.yml
+ cp group_vars/osds.yml.sample group_vars/osds.yml
cp site.yml.sample site.yml
}
function populate_vars {
- sed -i "s/#osd_auto_discovery: false/osd_auto_discovery: true/" group_vars/osds
- sed -i "s/#journal_collocation: false/journal_collocation: true/" group_vars/osds
- sed -i "s/#pool_default_size: 3/pool_default_size: 2/" group_vars/all
- sed -i "s/#monitor_address: 0.0.0.0/monitor_address: ${IP}/" group_vars/all
- sed -i "s/#journal_size: 0/journal_size: 100/" group_vars/all
- sed -i "s|#public_network: 0.0.0.0\/0|public_network: ${SUBNET}|" group_vars/all
- sed -i "s/#common_single_host_mode: true/common_single_host_mode: true/" group_vars/all
+ sed -i "s/#osd_auto_discovery: false/osd_auto_discovery: true/" group_vars/osds.yml
+ sed -i "s/#journal_collocation: false/journal_collocation: true/" group_vars/osds.yml
+ sed -i "s/#pool_default_size: 3/pool_default_size: 2/" group_vars/all.yml
+ sed -i "s/#monitor_address: 0.0.0.0/monitor_address: ${IP}/" group_vars/all.yml
+ sed -i "s/#journal_size: 0/journal_size: 100/" group_vars/all.yml
+ sed -i "s|#public_network: 0.0.0.0\/0|public_network: ${SUBNET}|" group_vars/all.yml
+ sed -i "s/#common_single_host_mode: true/common_single_host_mode: true/" group_vars/all.yml
if [[ ${SOURCE} == 'stable' ]]; then
- sed -i "s/#ceph_stable: false/ceph_stable: true/" group_vars/all
+ sed -i "s/#ceph_stable: false/ceph_stable: true/" group_vars/all.yml
else
- sed -i "s/#ceph_dev: false/ceph_dev: true/" group_vars/all
- sed -i "s|#ceph_dev_branch: master|ceph_dev_branch: ${BRANCH}|" group_vars/all
+ sed -i "s/#ceph_dev: false/ceph_dev: true/" group_vars/all.yml
+ sed -i "s|#ceph_dev_branch: master|ceph_dev_branch: ${BRANCH}|" group_vars/all.yml
fi
}
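For illustration, after `populate_vars` runs with `SOURCE=stable`, the uncommented values in `group_vars/all.yml` would look roughly like this (the address and subnet below are placeholders for `${IP}` and `${SUBNET}`):

```bash
grep -E '^(pool_default_size|monitor_address|journal_size|public_network|common_single_host_mode|ceph_stable):' group_vars/all.yml
# pool_default_size: 2
# monitor_address: 192.168.42.10
# journal_size: 100
# public_network: 192.168.42.0/24
# common_single_host_mode: true
# ceph_stable: true
```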
for role in "$basedir"/roles/ceph-*; do
rolename=$(basename "$role")
if [[ $rolename == "ceph-common" ]]; then
- output="all.sample"
+ output="all.yml.sample"
elif [[ $rolename == "ceph-agent" ]]; then
- output="agent.sample"
+ output="agent.yml.sample"
elif [[ $rolename == "ceph-fetch-keys" ]]; then
- output="ceph-fetch-keys.sample"
+ output="ceph-fetch-keys.yml.sample"
else
- output="${rolename:5}s.sample"
+ output="${rolename:5}s.yml.sample"
fi
cat <<EOF > "$basedir"/group_vars/"$output"
+++ /dev/null
----
-# Variables here are applicable to all host groups NOT roles
-
-# This sample file generated by generate_group_vars_sample.sh
-
-# Dummy variable to avoid error because ansible does not recognize the
-# file as a good configuration file when no variable in it.
-dummy:
-
-# You can override vars by using host or group vars
-
-###########
-# GENERAL #
-###########
-
-# The agent needs an agent_master_host variable defined so that it can connect
-# and push information back to it
-#agent_master_host: "localhost"
-
--- /dev/null
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+# You can override vars by using host or group vars
+
+###########
+# GENERAL #
+###########
+
+# The agent needs an agent_master_host variable defined so that it can connect
+# and push information back to it
+#agent_master_host: "localhost"
+
+++ /dev/null
----
-dummy:
-
-##########
-# GLOBAL #
-##########
-#docker: true
-#ceph_docker_dev_image: false
-
-#######
-# MON #
-#######
-#mon_containerized_deployment: true
-#mon_containerized_deployment_with_kv: false
-#mon_containerized_default_ceph_conf_with_kv: true
-#kv_type: etcd
-#kv_endpoint: 127.0.0.1
-#kv_port: 4001
-#mon_docker_privileged: true
-#ceph_mon_docker_username: ceph
-#ceph_mon_docker_imagename: daemon
-#ceph_mon_docker_image_tag: latest
-#ceph_mon_docker_interface: "{{ monitor_interface }}"
-#ceph_mon_docker_subnet: "{{ public_network }}" # subnet of the ceph_mon_docker_interface
-#ceph_mon_extra_envs: "MON_NAME={{ ansible_hostname }}" # comma separated variables
-
-#######
-# OSD #
-#######
-#osd_containerized_deployment: true
-#osd_containerized_deployment_with_kv: false
-#kv_type: etcd
-#kv_endpoint: 127.0.0.1
-#ceph_osd_docker_username: ceph
-#ceph_osd_docker_imagename: daemon
-#ceph_osd_docker_image_tag: latest
-#ceph_osd_docker_extra_env: "CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE" # comma separated variables
-#ceph_osd_docker_prepare_env: "OSD_FORCE_ZAP=1" # comma separated variables
-#ceph_osd_docker_devices:
-# - /dev/sdb
-# - /dev/sdc
-#journal_size: 5120 # OSD journal size in MB
-#public_network: 0.0.0.0/0
-#cluster_network: "{{ public_network }}"
-
-#######
-# MDS #
-#######
-#mds_containerized_deployment: true
-#mds_containerized_deployment_with_kv: false
-#kv_type: etcd
-#kv_endpoint: 127.0.0.1
-#ceph_mds_docker_username: ceph
-#ceph_mds_docker_imagename: daemon
-#ceph_mds_docker_image_tag: latest
-#ceph_mds_docker_extra_env: "MDS_NAME={{ ansible_hostname }}" # comma separated variables
-
-#######
-# RGW #
-#######
-#rgw_containerized_deployment: true
-#rgw_containerized_deployment_with_kv: false
-#kv_type: etcd
-#kv_endpoint: 127.0.0.1
-#ceph_rgw_docker_username: ceph
-#ceph_rgw_docker_imagename: daemon
-#ceph_rgw_docker_image_tag: latest
-#ceph_rgw_civetweb_port: 80
-#ceph_rgw_docker_extra_env: "RGW_CIVETWEB_PORT={{ ceph_rgw_civetweb_port }}" # comma separated variables
-
-###########
-# RESTAPI #
-###########
-#restapi_containerized_deployment: true
-#ceph_restapi_docker_interface: eth0
-#ceph_restapi_port: 5000
-#ceph_restapi_docker_username: ceph
-#ceph_restapi_docker_imagename: daemon
-#ceph_restapi_docker_image_tag: latest
-#ceph_restapi_docker_extra_env: "RESTAPI_IP=0.0.0.0" # comma separated variables
-
-
-##############
-# RBD MIRROR #
-##############
-#rbd_mirror_containerized_deployment: true
-#rbd_mirror_containerized_deployment_with_kv: false
-#kv_type: etcd
-#kv_endpoint: 127.0.0.1
-#ceph_rbd_mirror_docker_username: ceph
-#ceph_rbd_mirror_docker_imagename: daemon
-#ceph_rbd_mirror_docker_image_tag: latest
-#ceph_docker_on_openstack: false
-
-#######
-# NFS #
-#######
-#nfs_containerized_deployment: true
-#nfs_containerized_deployment_with_kv: false
-#ceph_nfs_docker_username: ceph
-#ceph_nfs_docker_imagename: daemon
-#ceph_nfs_docker_image_tag: latest
-#ceph_nfs_docker_extra_env: "GANESHA_EPOCH={{ ganesha_epoch }}" # comma separated variables
-#nfs_file_gw: false
-#nfs_obj_gw: false
-#ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
-#ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
--- /dev/null
+---
+dummy:
+
+##########
+# GLOBAL #
+##########
+#docker: true
+#ceph_docker_dev_image: false
+
+#######
+# MON #
+#######
+#mon_containerized_deployment: true
+#mon_containerized_deployment_with_kv: false
+#mon_containerized_default_ceph_conf_with_kv: true
+#kv_type: etcd
+#kv_endpoint: 127.0.0.1
+#kv_port: 4001
+#mon_docker_privileged: true
+#ceph_mon_docker_username: ceph
+#ceph_mon_docker_imagename: daemon
+#ceph_mon_docker_image_tag: latest
+#ceph_mon_docker_interface: "{{ monitor_interface }}"
+#ceph_mon_docker_subnet: "{{ public_network }}" # subnet of the ceph_mon_docker_interface
+#ceph_mon_extra_envs: "MON_NAME={{ ansible_hostname }}" # comma separated variables
+
+#######
+# OSD #
+#######
+#osd_containerized_deployment: true
+#osd_containerized_deployment_with_kv: false
+#kv_type: etcd
+#kv_endpoint: 127.0.0.1
+#ceph_osd_docker_username: ceph
+#ceph_osd_docker_imagename: daemon
+#ceph_osd_docker_image_tag: latest
+#ceph_osd_docker_extra_env: "CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE" # comma separated variables
+#ceph_osd_docker_prepare_env: "OSD_FORCE_ZAP=1" # comma separated variables
+#ceph_osd_docker_devices:
+# - /dev/sdb
+# - /dev/sdc
+#journal_size: 5120 # OSD journal size in MB
+#public_network: 0.0.0.0/0
+#cluster_network: "{{ public_network }}"
+
+#######
+# MDS #
+#######
+#mds_containerized_deployment: true
+#mds_containerized_deployment_with_kv: false
+#kv_type: etcd
+#kv_endpoint: 127.0.0.1
+#ceph_mds_docker_username: ceph
+#ceph_mds_docker_imagename: daemon
+#ceph_mds_docker_image_tag: latest
+#ceph_mds_docker_extra_env: "MDS_NAME={{ ansible_hostname }}" # comma separated variables
+
+#######
+# RGW #
+#######
+#rgw_containerized_deployment: true
+#rgw_containerized_deployment_with_kv: false
+#kv_type: etcd
+#kv_endpoint: 127.0.0.1
+#ceph_rgw_docker_username: ceph
+#ceph_rgw_docker_imagename: daemon
+#ceph_rgw_docker_image_tag: latest
+#ceph_rgw_civetweb_port: 80
+#ceph_rgw_docker_extra_env: "RGW_CIVETWEB_PORT={{ ceph_rgw_civetweb_port }}" # comma separated variables
+
+###########
+# RESTAPI #
+###########
+#restapi_containerized_deployment: true
+#ceph_restapi_docker_interface: eth0
+#ceph_restapi_port: 5000
+#ceph_restapi_docker_username: ceph
+#ceph_restapi_docker_imagename: daemon
+#ceph_restapi_docker_image_tag: latest
+#ceph_restapi_docker_extra_env: "RESTAPI_IP=0.0.0.0" # comma separated variables
+
+
+##############
+# RBD MIRROR #
+##############
+#rbd_mirror_containerized_deployment: true
+#rbd_mirror_containerized_deployment_with_kv: false
+#kv_type: etcd
+#kv_endpoint: 127.0.0.1
+#ceph_rbd_mirror_docker_username: ceph
+#ceph_rbd_mirror_docker_imagename: daemon
+#ceph_rbd_mirror_docker_image_tag: latest
+#ceph_docker_on_openstack: false
+
+#######
+# NFS #
+#######
+#nfs_containerized_deployment: true
+#nfs_containerized_deployment_with_kv: false
+#ceph_nfs_docker_username: ceph
+#ceph_nfs_docker_imagename: daemon
+#ceph_nfs_docker_image_tag: latest
+#ceph_nfs_docker_extra_env: "GANESHA_EPOCH={{ ganesha_epoch }}" # comma separated variables
+#nfs_file_gw: false
+#nfs_obj_gw: false
+#ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
+#ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
+++ /dev/null
----
-# Variables here are applicable to all host groups NOT roles
-
-# This sample file generated by generate_group_vars_sample.sh
-
-# Dummy variable to avoid error because ansible does not recognize the
-# file as a good configuration file when no variable in it.
-dummy:
-
-# You can override vars by using host or group vars
-
-###########
-# GENERAL #
-###########
-
-#fetch_directory: fetch/
-#cluster: ceph # cluster name
-
-###########
-# INSTALL #
-###########
-
-#mon_group_name: mons
-#osd_group_name: osds
-#rgw_group_name: rgws
-#mds_group_name: mdss
-#nfs_group_name: nfss
-#restapi_group_name: restapis
-#rbdmirror_group_name: rbdmirrors
-#client_group_name: clients
-#iscsi_group_name: iscsigws
-
-# If check_firewall is true, then ansible will try to determine if the
-# Ceph ports are blocked by a firewall. If the machine running ansible
-# cannot reach the Ceph ports for some other reason, you may need or
-# want to set this to False to skip those checks.
-#check_firewall: False
-
-# This variable determines if ceph packages can be updated. If False, the
-# package resources will use "state=present". If True, they will use
-# "state=latest".
-#upgrade_ceph_packages: False
-
-# /!\ EITHER ACTIVE ceph_stable OR ceph_stable_uca OR ceph_dev OR ceph_custom /!\
-
-#debian_package_dependencies:
-# - python-pycurl
-# - hdparm
-# - ntp
-
-#centos_package_dependencies:
-# - python-pycurl
-# - hdparm
-# - epel-release
-# - ntp
-# - python-setuptools
-# - libselinux-python
-
-#redhat_package_dependencies:
-# - python-pycurl
-# - hdparm
-# - ntp
-# - python-setuptools
-
-# Enable the ntp service by default to avoid clock skew on
-# ceph nodes
-#ntp_service_enabled: true
-
-# The list of ceph packages needed for debian.
-# This variable should only be changed if packages are not available from a given
-# install source or architecture.
-#debian_ceph_packages:
-# - ceph
-# - ceph-common #|
-# - ceph-fs-common #|--> yes, they are already all dependencies from 'ceph'
-# - ceph-fuse #|--> however while proceding to rolling upgrades and the 'ceph' package upgrade
-# - libcephfs1 #|--> they don't get update so we need to force them
-
-# Whether or not to install the ceph-test package.
-#ceph_test: False
-
-## Configure package origin
-#
-#ceph_origin: 'upstream' # or 'distro' or 'local'
-# 'distro' means that no separate repo file will be added
-# you will get whatever version of Ceph is included in your Linux distro.
-# 'local' means that the ceph binaries will be copied over from the local machine
-
-# LOCAL CEPH INSTALLATION (ceph_origin==local)
-#
-# Path to DESTDIR of the ceph install
-#ceph_installation_dir: "/path/to/ceph_installation/"
-# Whether or not to use installer script rundep_installer.sh
-# This script takes in rundep and installs the packages line by line onto the machine
-# If this is set to false then it is assumed that the machine ceph is being copied onto will already have
-# all runtime dependencies installed
-#use_installer: false
-# Root directory for ceph-ansible
-#ansible_dir: "/path/to/ceph-ansible"
-
-#ceph_use_distro_backports: false # DEBIAN ONLY
-
-# STABLE
-########
-
-# COMMUNITY VERSION
-#ceph_stable: false # use ceph stable branch
-#ceph_stable_key: https://download.ceph.com/keys/release.asc
-#ceph_stable_release: jewel # ceph stable release
-#ceph_stable_repo: "http://download.ceph.com/debian-{{ ceph_stable_release }}"
-
-######################################
-# Releases name to number dictionary #
-######################################
-#ceph_release_num:
-# dumpling: 0.67
-# emperor: 0.72
-# firefly: 0.80
-# giant: 0.87
-# hammer: 0.94
-# infernalis: 9
-# jewel: 10
-# kraken: 11
-
-# Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
-# # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
-# for more info read: https://github.com/ceph/ceph-ansible/issues/305
-#ceph_stable_distro_source:
-
-# This option is needed for _both_ stable and dev version, so please always fill the right version
-# # for supported distros, see http://download.ceph.com/rpm-{{ ceph_stable_release }}/
-#ceph_stable_redhat_distro: el7
-
-# ENTERPRISE VERSION RED HAT STORAGE (from 1.3)
-# This version is only supported on RHEL >= 7.1
-# As of RHEL 7.1, libceph.ko and rbd.ko are now included in Red Hat's kernel
-# packages natively. The RHEL 7.1 kernel packages are more stable and secure than
-# using these 3rd-party kmods with RHEL 7.0. Please update your systems to RHEL
-# 7.1 or later if you want to use the kernel RBD client.
-#
-# The CephFS kernel client is undergoing rapid development upstream, and we do
-# not recommend running the CephFS kernel module on RHEL 7's 3.10 kernel at this
-# time. Please use ELRepo's latest upstream 4.x kernels if you want to run CephFS
-# on RHEL 7.
-#
-#
-# Backward compatibility of variable names
-# Commit 492518a2 changed variable names of rhcs installations
-# to not break backward compatiblity we re-declare these variables
-# with the content of the new variable
-#ceph_rhcs: "{{ ceph_stable_rh_storage | default(false) }}"
-# This will affect how/what repositories are enabled depending on the desired
-# version. The previous version was 1.3. The current version is 2.
-#ceph_rhcs_version: "{{ ceph_stable_rh_storage_version | default(2) }}"
-#ceph_rhcs_cdn_install: "{{ ceph_stable_rh_storage_cdn_install | default(false) }}" # assumes all the nodes can connect to cdn.redhat.com
-#ceph_rhcs_iso_install: "{{ ceph_stable_rh_storage_iso_install | default(false) }}" # usually used when nodes don't have access to cdn.redhat.com
-#ceph_rhcs_iso_path: "{{ ceph_stable_rh_storage_iso_path | default('') }}"
-#ceph_rhcs_mount_path: "{{ ceph_stable_rh_storage_mount_path | default(/tmp/rh-storage-mount) }}"
-#ceph_rhcs_repository_path: "{{ ceph_stable_rh_storage_repository_path | default(/tmp/rh-storage-repo) }}" # where to copy iso's content
-
-
-# UBUNTU CLOUD ARCHIVE
-# This allows the install of Ceph from the Ubuntu Cloud Archive. The Ubuntu Cloud Archive
-# usually has newer Ceph releases than the normal distro repository.
-#
-#ceph_stable_uca: false
-#ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
-#ceph_stable_openstack_release_uca: liberty
-#ceph_stable_release_uca: "{{ansible_lsb.codename}}-updates/{{ceph_stable_openstack_release_uca}}"
-
-# DEV
-# ###
-
-#ceph_dev: false # use ceph development branch
-#ceph_dev_key: https://download.ceph.com/keys/autobuild.asc
-#ceph_dev_branch: master # development branch you would like to use e.g: master, wip-hack
-
-# supported distros are centos6, centos7, fc17, fc18, fc19, fc20, fedora17, fedora18,
-# fedora19, fedora20, opensuse12, sles0. (see http://gitbuilder.ceph.com/).
-# For rhel, please pay attention to the versions: 'rhel6 3' or 'rhel 4', the fullname is _very_ important.
-#ceph_dev_redhat_distro: centos7
-
-# CUSTOM
-# ###
-
-# Use a custom repository to install ceph. For RPM, ceph_custom_repo should be
-# a URL to the .repo file to be installed on the targets. For deb,
-# ceph_custom_repo should be the URL to the repo base.
-#ceph_custom: false # use custom ceph repository
-#ceph_custom_repo: https://server.domain.com/ceph-custom-repo
-
-
-######################
-# CEPH CONFIGURATION #
-######################
-
-## Ceph options
-#
-# Each cluster requires a unique, consistent filesystem ID. By
-# default, the playbook generates one for you and stores it in a file
-# in `fetch_directory`. If you want to customize how the fsid is
-# generated, you may find it useful to disable fsid generation to
-# avoid cluttering up your ansible repo. If you set `generate_fsid` to
-# false, you *must* generate `fsid` in another way.
-#fsid: "{{ cluster_uuid.stdout }}"
-#generate_fsid: true
-
-#cephx: true
-#max_open_files: 131072
-
-## Client options
-#
-#rbd_cache: "true"
-#rbd_cache_writethrough_until_flush: "true"
-#rbd_concurrent_management_ops: 20
-
-#rbd_client_directories: true # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions
-
-# Permissions for the rbd_client_log_path and
-# rbd_client_admin_socket_path. Depending on your use case for Ceph
-# you may want to change these values. The default, which is used if
-# any of the variables are unset or set to a false value (like `null`
-# or `false`) is to automatically determine what is appropriate for
-# the Ceph version with non-OpenStack workloads -- ceph:ceph and 0770
-# for infernalis releases, and root:root and 1777 for pre-infernalis
-# releases.
-#
-# For other use cases, including running Ceph with OpenStack, you'll
-# want to set these differently:
-#
-# For OpenStack on RHEL, you'll want:
-# rbd_client_directory_owner: "qemu"
-# rbd_client_directory_group: "libvirtd" (or "libvirt", depending on your version of libvirt)
-# rbd_client_directory_mode: "0755"
-#
-# For OpenStack on Ubuntu or Debian, set:
-# rbd_client_directory_owner: "libvirt-qemu"
-# rbd_client_directory_group: "kvm"
-# rbd_client_directory_mode: "0755"
-#
-# If you set rbd_client_directory_mode, you must use a string (e.g.,
-# 'rbd_client_directory_mode: "0755"', *not*
-# 'rbd_client_directory_mode: 0755', or Ansible will complain: mode
-# must be in octal or symbolic form
-#rbd_client_directory_owner: null
-#rbd_client_directory_group: null
-#rbd_client_directory_mode: null
-
-#rbd_client_log_path: /var/log/ceph
-#rbd_client_log_file: "{{ rbd_client_log_path }}/qemu-guest-$pid.log" # must be writable by QEMU and allowed by SELinux or AppArmor
-#rbd_client_admin_socket_path: /var/run/ceph # must be writable by QEMU and allowed by SELinux or AppArmor
-
-## Monitor options
-#
-# You must define either monitor_interface or monitor_address. Preference
-# will go to monitor_interface if both are defined.
-#monitor_interface: interface
-#monitor_address: 0.0.0.0
-#mon_use_fqdn: false # if set to true, the MON name used will be the fqdn in the ceph.conf
-
-## OSD options
-#
-#journal_size: 5120 # OSD journal size in MB
-#public_network: 0.0.0.0/0
-#cluster_network: "{{ public_network }}"
-#osd_mkfs_type: xfs
-#osd_mkfs_options_xfs: -f -i size=2048
-#osd_mount_options_xfs: noatime,largeio,inode64,swalloc
-#osd_objectstore: filestore
-
-# xattrs. by default, 'filestore xattr use omap' is set to 'true' if
-# 'osd_mkfs_type' is set to 'ext4'; otherwise it isn't set. This can
-# be set to 'true' or 'false' to explicitly override those
-# defaults. Leave it 'null' to use the default for your chosen mkfs
-# type.
-#filestore_xattr_use_omap: null
-
-## MDS options
-#
-#mds_use_fqdn: false # if set to true, the MDS name used will be the fqdn in the ceph.conf
-#mds_allow_multimds: false
-#mds_max_mds: 3
-
-## Rados Gateway options
-#
-#radosgw_dns_name: your.subdomain.tld # subdomains used by radosgw. See http://ceph.com/docs/master/radosgw/config/#enabling-subdomain-s3-calls
-#radosgw_civetweb_port: 8080 # on Infernalis we get: "set_ports_option: cannot bind to 80: 13 (Permission denied)"
-#radosgw_civetweb_bind_ip: "{{ ansible_default_ipv4.address }}"
-#radosgw_civetweb_num_threads: 50
-#radosgw_keystone: false # activate OpenStack Keystone options full detail here: http://ceph.com/docs/master/radosgw/keystone/
-#radosgw_keystone_url: # url:admin_port ie: http://192.168.0.1:35357
-#radosgw_keystone_admin_token: password
-#radosgw_keystone_accepted_roles: Member, _member_, admin
-#radosgw_keystone_token_cache_size: 10000
-#radosgw_keystone_revocation_internal: 900
-#radosgw_s3_auth_use_keystone: "true"
-#radosgw_nss_db_path: /var/lib/ceph/radosgw/ceph-radosgw.{{ ansible_hostname }}/nss
-# Rados Gateway options
-#redhat_distro_ceph_extra: centos6.4 # supported distros are centos6.3, centos6.4, centos6, fedora18, fedora19, opensuse12.2, rhel6.3, rhel6.4, rhel6.5, rhel6, sles11sp2
-#email_address: foo@bar.com
-
-## REST API options
-#
-#restapi_interface: "{{ monitor_interface }}"
-#restapi_address: "{{ monitor_address }}"
-#restapi_port: 5000
-
-## Testing mode
-# enable this mode _only_ when you have a single node
-# if you don't want it keep the option commented
-#common_single_host_mode: true
-
-
-###################
-# CONFIG OVERRIDE #
-###################
-
-# Ceph configuration file override.
-# This allows you to specify more configuration options
-# using an INI style format.
-# The following sections are supported: [global], [mon], [osd], [mds], [rgw]
-#
-# Example:
-# ceph_conf_overrides:
-# global:
-# foo: 1234
-# bar: 5678
-#
-#ceph_conf_overrides: {}
-
-
-#############
-# OS TUNING #
-#############
-
-#disable_transparent_hugepage: true
-#os_tuning_params:
-# - { name: kernel.pid_max, value: 4194303 }
-# - { name: fs.file-max, value: 26234859 }
-# - { name: vm.zone_reclaim_mode, value: 0 }
-# - { name: vm.vfs_cache_pressure, value: 50 }
-# - { name: vm.swappiness, value: 10 }
-# - { name: vm.min_free_kbytes, value: "{{ vm_min_free_kbytes }}" }
-
-
-##########
-# DOCKER #
-##########
-
-#docker: false
-
-# Do not comment the following variables mon_containerized_deployment_* here. These variables are being used
-# by ceph.conf.j2 template. so it should always be defined
-#mon_containerized_deployment_with_kv: false
-#mon_containerized_deployment: false
-#mon_containerized_default_ceph_conf_with_kv: false
-
-
-##################
-# Temporary Vars #
-##################
-# NOTE(SamYaple): These vars are set here to they are defined before use. They
-# should be removed after a refactor has properly seperated all the checks into
-# the appropriate roles.
-
-#journal_collocation: False
-#raw_multi_journal: False
-#osd_directory: False
-#bluestore: False
-#dmcrypt_journal_collocation: False
-#dmcrypt_dedicated_journal: False
-#raw_journal_devices: []
-#devices: []
-
-#osd_auto_discovery: False
-
-# Confiure the type of NFS gatway access. At least one must be enabled for an
-# NFS role to be useful
-#
-# Set this to true to enable File access via NFS. Requires an MDS role.
-#nfs_file_gw: true
-# Set this to true to enable Object access via NFS. Requires an RGW role.
-#nfs_obj_gw: false
-
--- /dev/null
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+# You can override vars by using host or group vars
+
+###########
+# GENERAL #
+###########
+
+#fetch_directory: fetch/
+#cluster: ceph # cluster name
+
+###########
+# INSTALL #
+###########
+
+#mon_group_name: mons
+#osd_group_name: osds
+#rgw_group_name: rgws
+#mds_group_name: mdss
+#nfs_group_name: nfss
+#restapi_group_name: restapis
+#rbdmirror_group_name: rbdmirrors
+#client_group_name: clients
+#iscsi_group_name: iscsigws
+
+# If check_firewall is true, then ansible will try to determine if the
+# Ceph ports are blocked by a firewall. If the machine running ansible
+# cannot reach the Ceph ports for some other reason, you may need or
+# want to set this to False to skip those checks.
+#check_firewall: False
+
+# This variable determines if ceph packages can be updated. If False, the
+# package resources will use "state=present". If True, they will use
+# "state=latest".
+#upgrade_ceph_packages: False
+
+# /!\ ACTIVATE EITHER ceph_stable OR ceph_stable_uca OR ceph_dev OR ceph_custom /!\
+
+#debian_package_dependencies:
+# - python-pycurl
+# - hdparm
+# - ntp
+
+#centos_package_dependencies:
+# - python-pycurl
+# - hdparm
+# - epel-release
+# - ntp
+# - python-setuptools
+# - libselinux-python
+
+#redhat_package_dependencies:
+# - python-pycurl
+# - hdparm
+# - ntp
+# - python-setuptools
+
+# Enable the ntp service by default to avoid clock skew on
+# ceph nodes
+#ntp_service_enabled: true
+
+# The list of ceph packages needed for debian.
+# This variable should only be changed if packages are not available from a given
+# install source or architecture.
+#debian_ceph_packages:
+# - ceph
+# - ceph-common #|
+# - ceph-fs-common #|--> yes, they are already all dependencies from 'ceph'
+# - ceph-fuse #|--> however when proceeding with rolling upgrades and the 'ceph' package upgrade
+# - libcephfs1 #|--> they don't get updated so we need to force them
+
+# Whether or not to install the ceph-test package.
+#ceph_test: False
+
+## Configure package origin
+#
+#ceph_origin: 'upstream' # or 'distro' or 'local'
+# 'distro' means that no separate repo file will be added
+# you will get whatever version of Ceph is included in your Linux distro.
+# 'local' means that the ceph binaries will be copied over from the local machine
+
+# LOCAL CEPH INSTALLATION (ceph_origin==local)
+#
+# Path to DESTDIR of the ceph install
+#ceph_installation_dir: "/path/to/ceph_installation/"
+# Whether or not to use installer script rundep_installer.sh
+# This script takes in rundep and installs the packages line by line onto the machine
+# If this is set to false then it is assumed that the machine ceph is being copied onto will already have
+# all runtime dependencies installed
+#use_installer: false
+# Root directory for ceph-ansible
+#ansible_dir: "/path/to/ceph-ansible"
+
+#ceph_use_distro_backports: false # DEBIAN ONLY
+
+# STABLE
+########
+
+# COMMUNITY VERSION
+#ceph_stable: false # use ceph stable branch
+#ceph_stable_key: https://download.ceph.com/keys/release.asc
+#ceph_stable_release: jewel # ceph stable release
+#ceph_stable_repo: "http://download.ceph.com/debian-{{ ceph_stable_release }}"
+
+######################################
+# Releases name to number dictionary #
+######################################
+#ceph_release_num:
+# dumpling: 0.67
+# emperor: 0.72
+# firefly: 0.80
+# giant: 0.87
+# hammer: 0.94
+# infernalis: 9
+# jewel: 10
+# kraken: 11
+
+# Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
+# # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
+# for more info read: https://github.com/ceph/ceph-ansible/issues/305
+#ceph_stable_distro_source:
+
+# This option is needed for _both_ stable and dev version, so please always fill the right version
+# # for supported distros, see http://download.ceph.com/rpm-{{ ceph_stable_release }}/
+#ceph_stable_redhat_distro: el7
+
+# ENTERPRISE VERSION RED HAT STORAGE (from 1.3)
+# This version is only supported on RHEL >= 7.1
+# As of RHEL 7.1, libceph.ko and rbd.ko are now included in Red Hat's kernel
+# packages natively. The RHEL 7.1 kernel packages are more stable and secure than
+# using these 3rd-party kmods with RHEL 7.0. Please update your systems to RHEL
+# 7.1 or later if you want to use the kernel RBD client.
+#
+# The CephFS kernel client is undergoing rapid development upstream, and we do
+# not recommend running the CephFS kernel module on RHEL 7's 3.10 kernel at this
+# time. Please use ELRepo's latest upstream 4.x kernels if you want to run CephFS
+# on RHEL 7.
+#
+#
+# Backward compatibility of variable names
+# Commit 492518a2 changed variable names of rhcs installations
+# to not break backward compatibility we re-declare these variables
+# with the content of the new variable
+#ceph_rhcs: "{{ ceph_stable_rh_storage | default(false) }}"
+# This will affect how/what repositories are enabled depending on the desired
+# version. The previous version was 1.3. The current version is 2.
+#ceph_rhcs_version: "{{ ceph_stable_rh_storage_version | default(2) }}"
+#ceph_rhcs_cdn_install: "{{ ceph_stable_rh_storage_cdn_install | default(false) }}" # assumes all the nodes can connect to cdn.redhat.com
+#ceph_rhcs_iso_install: "{{ ceph_stable_rh_storage_iso_install | default(false) }}" # usually used when nodes don't have access to cdn.redhat.com
+#ceph_rhcs_iso_path: "{{ ceph_stable_rh_storage_iso_path | default('') }}"
+#ceph_rhcs_mount_path: "{{ ceph_stable_rh_storage_mount_path | default(/tmp/rh-storage-mount) }}"
+#ceph_rhcs_repository_path: "{{ ceph_stable_rh_storage_repository_path | default(/tmp/rh-storage-repo) }}" # where to copy iso's content
+
+
+# UBUNTU CLOUD ARCHIVE
+# This allows the install of Ceph from the Ubuntu Cloud Archive. The Ubuntu Cloud Archive
+# usually has newer Ceph releases than the normal distro repository.
+#
+#ceph_stable_uca: false
+#ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
+#ceph_stable_openstack_release_uca: liberty
+#ceph_stable_release_uca: "{{ansible_lsb.codename}}-updates/{{ceph_stable_openstack_release_uca}}"
+
+# DEV
+# ###
+
+#ceph_dev: false # use ceph development branch
+#ceph_dev_key: https://download.ceph.com/keys/autobuild.asc
+#ceph_dev_branch: master # development branch you would like to use e.g: master, wip-hack
+
+# supported distros are centos6, centos7, fc17, fc18, fc19, fc20, fedora17, fedora18,
+# fedora19, fedora20, opensuse12, sles0. (see http://gitbuilder.ceph.com/).
+# For rhel, please pay attention to the versions: 'rhel6 3' or 'rhel 4', the fullname is _very_ important.
+#ceph_dev_redhat_distro: centos7
+
+# CUSTOM
+# ###
+
+# Use a custom repository to install ceph. For RPM, ceph_custom_repo should be
+# a URL to the .repo file to be installed on the targets. For deb,
+# ceph_custom_repo should be the URL to the repo base.
+#ceph_custom: false # use custom ceph repository
+#ceph_custom_repo: https://server.domain.com/ceph-custom-repo
+
+
+######################
+# CEPH CONFIGURATION #
+######################
+
+## Ceph options
+#
+# Each cluster requires a unique, consistent filesystem ID. By
+# default, the playbook generates one for you and stores it in a file
+# in `fetch_directory`. If you want to customize how the fsid is
+# generated, you may find it useful to disable fsid generation to
+# avoid cluttering up your ansible repo. If you set `generate_fsid` to
+# false, you *must* generate `fsid` in another way.
+#fsid: "{{ cluster_uuid.stdout }}"
+#generate_fsid: true
+
+#cephx: true
+#max_open_files: 131072
+
+## Client options
+#
+#rbd_cache: "true"
+#rbd_cache_writethrough_until_flush: "true"
+#rbd_concurrent_management_ops: 20
+
+#rbd_client_directories: true # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions
+
+# Permissions for the rbd_client_log_path and
+# rbd_client_admin_socket_path. Depending on your use case for Ceph
+# you may want to change these values. The default, which is used if
+# any of the variables are unset or set to a false value (like `null`
+# or `false`) is to automatically determine what is appropriate for
+# the Ceph version with non-OpenStack workloads -- ceph:ceph and 0770
+# for infernalis releases, and root:root and 1777 for pre-infernalis
+# releases.
+#
+# For other use cases, including running Ceph with OpenStack, you'll
+# want to set these differently:
+#
+# For OpenStack on RHEL, you'll want:
+# rbd_client_directory_owner: "qemu"
+# rbd_client_directory_group: "libvirtd" (or "libvirt", depending on your version of libvirt)
+# rbd_client_directory_mode: "0755"
+#
+# For OpenStack on Ubuntu or Debian, set:
+# rbd_client_directory_owner: "libvirt-qemu"
+# rbd_client_directory_group: "kvm"
+# rbd_client_directory_mode: "0755"
+#
+# If you set rbd_client_directory_mode, you must use a string (e.g.,
+# 'rbd_client_directory_mode: "0755"', *not*
+# 'rbd_client_directory_mode: 0755', or Ansible will complain: mode
+# must be in octal or symbolic form
+#rbd_client_directory_owner: null
+#rbd_client_directory_group: null
+#rbd_client_directory_mode: null
+
+#rbd_client_log_path: /var/log/ceph
+#rbd_client_log_file: "{{ rbd_client_log_path }}/qemu-guest-$pid.log" # must be writable by QEMU and allowed by SELinux or AppArmor
+#rbd_client_admin_socket_path: /var/run/ceph # must be writable by QEMU and allowed by SELinux or AppArmor
+
+## Monitor options
+#
+# You must define either monitor_interface or monitor_address. Preference
+# will go to monitor_interface if both are defined.
+#monitor_interface: interface
+#monitor_address: 0.0.0.0
+#mon_use_fqdn: false # if set to true, the MON name used will be the fqdn in the ceph.conf
+
+## OSD options
+#
+#journal_size: 5120 # OSD journal size in MB
+#public_network: 0.0.0.0/0
+#cluster_network: "{{ public_network }}"
+#osd_mkfs_type: xfs
+#osd_mkfs_options_xfs: -f -i size=2048
+#osd_mount_options_xfs: noatime,largeio,inode64,swalloc
+#osd_objectstore: filestore
+
+# xattrs. by default, 'filestore xattr use omap' is set to 'true' if
+# 'osd_mkfs_type' is set to 'ext4'; otherwise it isn't set. This can
+# be set to 'true' or 'false' to explicitly override those
+# defaults. Leave it 'null' to use the default for your chosen mkfs
+# type.
+#filestore_xattr_use_omap: null
+
+## MDS options
+#
+#mds_use_fqdn: false # if set to true, the MDS name used will be the fqdn in the ceph.conf
+#mds_allow_multimds: false
+#mds_max_mds: 3
+
+## Rados Gateway options
+#
+#radosgw_dns_name: your.subdomain.tld # subdomains used by radosgw. See http://ceph.com/docs/master/radosgw/config/#enabling-subdomain-s3-calls
+#radosgw_civetweb_port: 8080 # on Infernalis we get: "set_ports_option: cannot bind to 80: 13 (Permission denied)"
+#radosgw_civetweb_bind_ip: "{{ ansible_default_ipv4.address }}"
+#radosgw_civetweb_num_threads: 50
+#radosgw_keystone: false # activate OpenStack Keystone options full detail here: http://ceph.com/docs/master/radosgw/keystone/
+#radosgw_keystone_url: # url:admin_port ie: http://192.168.0.1:35357
+#radosgw_keystone_admin_token: password
+#radosgw_keystone_accepted_roles: Member, _member_, admin
+#radosgw_keystone_token_cache_size: 10000
+#radosgw_keystone_revocation_internal: 900
+#radosgw_s3_auth_use_keystone: "true"
+#radosgw_nss_db_path: /var/lib/ceph/radosgw/ceph-radosgw.{{ ansible_hostname }}/nss
+# Rados Gateway options
+#redhat_distro_ceph_extra: centos6.4 # supported distros are centos6.3, centos6.4, centos6, fedora18, fedora19, opensuse12.2, rhel6.3, rhel6.4, rhel6.5, rhel6, sles11sp2
+#email_address: foo@bar.com
+
+## REST API options
+#
+#restapi_interface: "{{ monitor_interface }}"
+#restapi_address: "{{ monitor_address }}"
+#restapi_port: 5000
+
+## Testing mode
+# enable this mode _only_ when you have a single node
+# if you don't want it keep the option commented
+#common_single_host_mode: true
+
+
+###################
+# CONFIG OVERRIDE #
+###################
+
+# Ceph configuration file override.
+# This allows you to specify more configuration options
+# using an INI style format.
+# The following sections are supported: [global], [mon], [osd], [mds], [rgw]
+#
+# Example:
+# ceph_conf_overrides:
+# global:
+# foo: 1234
+# bar: 5678
+#
+#ceph_conf_overrides: {}
+
+
+#############
+# OS TUNING #
+#############
+
+#disable_transparent_hugepage: true
+#os_tuning_params:
+# - { name: kernel.pid_max, value: 4194303 }
+# - { name: fs.file-max, value: 26234859 }
+# - { name: vm.zone_reclaim_mode, value: 0 }
+# - { name: vm.vfs_cache_pressure, value: 50 }
+# - { name: vm.swappiness, value: 10 }
+# - { name: vm.min_free_kbytes, value: "{{ vm_min_free_kbytes }}" }
+
+
+##########
+# DOCKER #
+##########
+
+#docker: false
+
+# Do not comment out the following mon_containerized_deployment_* variables here. They are used
+# by the ceph.conf.j2 template, so they should always be defined.
+#mon_containerized_deployment_with_kv: false
+#mon_containerized_deployment: false
+#mon_containerized_default_ceph_conf_with_kv: false
+
+
+##################
+# Temporary Vars #
+##################
+# NOTE(SamYaple): These vars are set here so they are defined before use. They
+# should be removed after a refactor has properly separated all the checks into
+# the appropriate roles.
+
+#journal_collocation: False
+#raw_multi_journal: False
+#osd_directory: False
+#bluestore: False
+#dmcrypt_journal_collocation: False
+#dmcrypt_dedicated_journal: False
+#raw_journal_devices: []
+#devices: []
+
+#osd_auto_discovery: False
+
+# Configure the type of NFS gateway access. At least one must be enabled for an
+# NFS role to be useful
+#
+# Set this to true to enable File access via NFS. Requires an MDS role.
+#nfs_file_gw: true
+# Set this to true to enable Object access via NFS. Requires an RGW role.
+#nfs_obj_gw: false
+
+++ /dev/null
----
-# Variables here are applicable to all host groups NOT roles
-
-# This sample file generated by generate_group_vars_sample.sh
-
-# Dummy variable to avoid error because ansible does not recognize the
-# file as a good configuration file when no variable in it.
-dummy:
-
-
-#fetch_directory: fetch/
-#cluster: ceph
-
--- /dev/null
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+
+#fetch_directory: fetch/
+#cluster: ceph
+
+++ /dev/null
----
-# Variables here are applicable to all host groups NOT roles
-
-# This sample file generated by generate_group_vars_sample.sh
-
-# Dummy variable to avoid error because ansible does not recognize the
-# file as a good configuration file when no variable in it.
-dummy:
-
-###########
-# GENERAL #
-###########
-
-#fetch_directory: fetch/
-
-#user_config: false
-#pools:
-# - { name: test, pgs: "{{ pool_default_pg_num }}" }
-# - { name: test2, pgs: "{{ pool_default_pg_num }}" }
-
-#keys:
-# - { name: client.test, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=test'" }
-# - { name: client.test2, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=test2'" }
-
--- /dev/null
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+###########
+# GENERAL #
+###########
+
+#fetch_directory: fetch/
+
+#user_config: false
+#pools:
+# - { name: test, pgs: "{{ pool_default_pg_num }}" }
+# - { name: test2, pgs: "{{ pool_default_pg_num }}" }
+
+#keys:
+# - { name: client.test, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=test'" }
+# - { name: client.test2, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=test2'" }
+
+++ /dev/null
----
-# Variables here are applicable to all host groups NOT roles
-
-# This sample file generated by generate_group_vars_sample.sh
-
-# Dummy variable to avoid error because ansible does not recognize the
-# file as a good configuration file when no variable in it.
-dummy:
-
-#coreos_pypy_version: 4.0.1
-#coreos_pypy_arch: linux64
-#coreos_pypy_url: https://bitbucket.org/pypy/pypy/downloads/pypy-{{coreos_pypy_version}}-{{coreos_pypy_arch}}.tar.bz2
-#pypy_directory: /opt/pypy
-#pypy_binary_directory: /opt/bin
-#pip_url: https://bootstrap.pypa.io/get-pip.py
-#local_temp_directory: /tmp
-
--- /dev/null
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+#coreos_pypy_version: 4.0.1
+#coreos_pypy_arch: linux64
+#coreos_pypy_url: https://bitbucket.org/pypy/pypy/downloads/pypy-{{coreos_pypy_version}}-{{coreos_pypy_arch}}.tar.bz2
+#pypy_directory: /opt/pypy
+#pypy_binary_directory: /opt/bin
+#pip_url: https://bootstrap.pypa.io/get-pip.py
+#local_temp_directory: /tmp
+
+++ /dev/null
----
-# Variables here are applicable to all host groups NOT roles
-
-# This sample file generated by generate_group_vars_sample.sh
-
-# Dummy variable to avoid error because ansible does not recognize the
-# file as a good configuration file when no variable in it.
-dummy:
-
-
--- /dev/null
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+
+++ /dev/null
----
-# Variables here are applicable to all host groups NOT roles
-
-# This sample file generated by generate_group_vars_sample.sh
-
-# Dummy variable to avoid error because ansible does not recognize the
-# file as a good configuration file when no variable in it.
-dummy:
-
-# You can override vars by using host or group vars
-
-###########
-# GENERAL #
-###########
-
-#fetch_directory: fetch/
-
-# Even though MDS nodes should not have the admin key
-# at their disposal, some people might want to have it
-# distributed on MDS nodes. Setting 'copy_admin_key' to 'true'
-# will copy the admin key to the /etc/ceph/ directory
-#copy_admin_key: false
-
-#cephx: true
-
-
-##########
-# DOCKER #
-##########
-
-#mds_containerized_deployment: false
-#mds_containerized_deployment_with_kv: false
-#kv_type: etcd
-#kv_endpoint: 127.0.0.1
-#ceph_mds_docker_username: ceph
-#ceph_mds_docker_imagename: daemon
-#ceph_mds_docker_image_tag: latest
-#ceph_mds_docker_extra_env: "MDS_NAME={{ ansible_hostname }}" # comma separated variables
-#ceph_docker_on_openstack: false
-#ceph_config_keys: [] # DON'T TOUCH ME
-
--- /dev/null
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+# You can override vars by using host or group vars
+
+###########
+# GENERAL #
+###########
+
+#fetch_directory: fetch/
+
+# Even though MDS nodes should not have the admin key
+# at their disposal, some people might want to have it
+# distributed on MDS nodes. Setting 'copy_admin_key' to 'true'
+# will copy the admin key to the /etc/ceph/ directory
+#copy_admin_key: false
+
+#cephx: true
+
+
+##########
+# DOCKER #
+##########
+
+#mds_containerized_deployment: false
+#mds_containerized_deployment_with_kv: false
+#kv_type: etcd
+#kv_endpoint: 127.0.0.1
+#ceph_mds_docker_username: ceph
+#ceph_mds_docker_imagename: daemon
+#ceph_mds_docker_image_tag: latest
+#ceph_mds_docker_extra_env: "MDS_NAME={{ ansible_hostname }}" # comma separated variables
+#ceph_docker_on_openstack: false
+#ceph_config_keys: [] # DON'T TOUCH ME
+
+++ /dev/null
----
-# Variables here are applicable to all host groups NOT roles
-
-# This sample file generated by generate_group_vars_sample.sh
-
-# Dummy variable to avoid error because ansible does not recognize the
-# file as a good configuration file when no variable in it.
-dummy:
-
-# You can override vars by using host or group vars
-
-###########
-# GENERAL #
-###########
-
-#fetch_directory: fetch/
-
-#mon_group_name: mons
-
-# ACTIVATE BOTH FSID AND MONITOR_SECRET VARIABLES FOR NON-VAGRANT DEPLOYMENT
-#fsid: "{{ cluster_uuid.stdout }}"
-#monitor_secret: "{{ monitor_keyring.stdout }}"
-#cephx: true
-
-# CephFS
-#pool_default_pg_num: 128
-#cephfs_data: cephfs_data
-#cephfs_metadata: cephfs_metadata
-#cephfs: cephfs
-
-# Secure your cluster
-# This will set the following flags on all the pools:
-# * nosizechange
-# * nopgchange
-# * nodelete
-
-#secure_cluster: false
-#secure_cluster_flags:
-# - nopgchange
-# - nodelete
-# - nosizechange
-
-# Enable the Calamari-backed REST API on a Monitor
-#calamari: false
-
-#############
-# OPENSTACK #
-#############
-
-#openstack_config: false
-#openstack_glance_pool:
-# name: images
-# pg_num: "{{ pool_default_pg_num }}"
-#openstack_cinder_pool:
-# name: volumes
-# pg_num: "{{ pool_default_pg_num }}"
-#openstack_nova_pool:
-# name: vms
-# pg_num: "{{ pool_default_pg_num }}"
-#openstack_cinder_backup_pool:
-# name: backups
-# pg_num: "{{ pool_default_pg_num }}"
-
-#openstack_pools:
-# - "{{ openstack_glance_pool }}"
-# - "{{ openstack_cinder_pool }}"
-# - "{{ openstack_nova_pool }}"
-# - "{{ openstack_cinder_backup_pool }}"
-
-#openstack_keys:
-# - { name: client.glance, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_glance_pool.name }}'" }
-# - { name: client.cinder, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_pool.name }}, allow rwx pool={{ openstack_nova_pool.name }}, allow rx pool={{ openstack_glance_pool.name }}'" }
-# - { name: client.cinder-backup, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_backup_pool.name }}'" }
-
-##########
-# DOCKER #
-##########
-
-#mon_containerized_deployment: false
-#mon_containerized_deployment_with_kv: false
-# This is currently in ceph-common defaults because it is shared with ceph-nfs
-#mon_containerized_default_ceph_conf_with_kv: false
-#ceph_mon_docker_interface: eth0
-#ceph_mon_docker_subnet: # subnet of the ceph_mon_docker_interface
-#ceph_mon_docker_username: ceph
-#ceph_mon_docker_imagename: daemon
-#ceph_mon_docker_image_tag: latest
-#ceph_mon_extra_envs: "MON_NAME={{ ansible_hostname }}" # comma separated variables
-#ceph_docker_on_openstack: false
-#mon_docker_privileged: false
-#mon_docker_net_host: true
-#ceph_config_keys: [] # DON'T TOUCH ME
-
--- /dev/null
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+# You can override vars by using host or group vars
+
+###########
+# GENERAL #
+###########
+
+#fetch_directory: fetch/
+
+#mon_group_name: mons
+
+# ACTIVATE BOTH FSID AND MONITOR_SECRET VARIABLES FOR NON-VAGRANT DEPLOYMENT
+#fsid: "{{ cluster_uuid.stdout }}"
+#monitor_secret: "{{ monitor_keyring.stdout }}"
+#cephx: true
+
+# CephFS
+#pool_default_pg_num: 128
+#cephfs_data: cephfs_data
+#cephfs_metadata: cephfs_metadata
+#cephfs: cephfs
+
+# Secure your cluster
+# This will set the following flags on all the pools:
+# * nosizechange
+# * nopgchange
+# * nodelete
+
+#secure_cluster: false
+#secure_cluster_flags:
+# - nopgchange
+# - nodelete
+# - nosizechange
+
+# Enable the Calamari-backed REST API on a Monitor
+#calamari: false
+
+#############
+# OPENSTACK #
+#############
+
+#openstack_config: false
+#openstack_glance_pool:
+# name: images
+# pg_num: "{{ pool_default_pg_num }}"
+#openstack_cinder_pool:
+# name: volumes
+# pg_num: "{{ pool_default_pg_num }}"
+#openstack_nova_pool:
+# name: vms
+# pg_num: "{{ pool_default_pg_num }}"
+#openstack_cinder_backup_pool:
+# name: backups
+# pg_num: "{{ pool_default_pg_num }}"
+
+#openstack_pools:
+# - "{{ openstack_glance_pool }}"
+# - "{{ openstack_cinder_pool }}"
+# - "{{ openstack_nova_pool }}"
+# - "{{ openstack_cinder_backup_pool }}"
+
+#openstack_keys:
+# - { name: client.glance, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_glance_pool.name }}'" }
+# - { name: client.cinder, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_pool.name }}, allow rwx pool={{ openstack_nova_pool.name }}, allow rx pool={{ openstack_glance_pool.name }}'" }
+# - { name: client.cinder-backup, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_backup_pool.name }}'" }
+
+##########
+# DOCKER #
+##########
+
+#mon_containerized_deployment: false
+#mon_containerized_deployment_with_kv: false
+# This is currently in ceph-common defaults because it is shared with ceph-nfs
+#mon_containerized_default_ceph_conf_with_kv: false
+#ceph_mon_docker_interface: eth0
+#ceph_mon_docker_subnet: # subnet of the ceph_mon_docker_interface
+#ceph_mon_docker_username: ceph
+#ceph_mon_docker_imagename: daemon
+#ceph_mon_docker_image_tag: latest
+#ceph_mon_extra_envs: "MON_NAME={{ ansible_hostname }}" # comma separated variables
+#ceph_docker_on_openstack: false
+#mon_docker_privileged: false
+#mon_docker_net_host: true
+#ceph_config_keys: [] # DON'T TOUCH ME
+
+++ /dev/null
----
-# Variables here are applicable to all host groups NOT roles
-
-# This sample file generated by generate_group_vars_sample.sh
-
-# Dummy variable to avoid error because ansible does not recognize the
-# file as a good configuration file when no variable in it.
-dummy:
-
-# You can override vars by using host or group vars
-
-###########
-# GENERAL #
-###########
-
-#fetch_directory: fetch/
-
-## Ceph options
-#
-#cephx: true
-
-
-#######################
-# Access type options #
-#######################
-# These are currently in ceph-common defaults because nfs_obj_gw shared with ceph-rgw
-# Enable NFS File access
-#nfs_file_gw: true
-# Enable NFS Object access
-#nfs_obj_gw: false
-
-######################
-# NFS Ganesha Config #
-######################
-#ceph_nfs_export_id: 20134
-#ceph_nfs_pseudo_path: "/cephfile"
-#ceph_nfs_protocols: "3,4"
-#ceph_nfs_access_type: "RW"
-#ceph_nfs_log_file: "/var/log/ganesha.log"
-
-####################
-# FSAL Ceph Config #
-####################
-#ceph_nfs_ceph_export_id: 20134
-#ceph_nfs_ceph_pseudo_path: "/cephobject"
-#ceph_nfs_ceph_protocols: "3,4"
-#ceph_nfs_ceph_access_type: "RW"
-
-###################
-# FSAL RGW Config #
-###################
-#ceph_nfs_rgw_export_id: 20134
-#ceph_nfs_rgw_pseudo_path: "/ceph"
-#ceph_nfs_rgw_protocols: "3,4"
-#ceph_nfs_rgw_access_type: "RW"
-#ceph_nfs_rgw_user: "cephnfs"
-# Note: keys are optional and can be generated, but not on containerized, where
-# they must be configered.
-#ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
-#ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
-
-
-###################
-# CONFIG OVERRIDE #
-###################
-
-# Ganesha configuration file override.
-# This allows you to specify more configuration options
-# using an INI style format.
-# The following sections are supported: [global], [mon], [osd], [mds], [rgw]
-#
-# Example:
-# ceph_conf_overrides:
-# global:
-# foo: 1234
-# bar: 5678
-#
-#ganesha_conf_overrides: {}
-
-##########
-# DOCKER #
-##########
-
-#nfs_containerized_deployment: false
-#nfs_containerized_deployment_with_kv: false
-#kv_type: etcd
-#kv_endpoint: 127.0.0.1
-#ceph_nfs_docker_username: ceph
-#ceph_nfs_docker_imagename: ganesha
-#ceph_nfs_docker_image_tag: latest
-#ceph_nfs_docker_extra_env: "GANESHA_EPOCH={{ ganesha_epoch }}" # comma separated variables
-#ceph_docker_on_openstack: false
-#ceph_config_keys: [] # DON'T TOUCH ME
-
--- /dev/null
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid error because ansible does not recognize the
+# file as a good configuration file when no variable in it.
+dummy:
+
+# You can override vars by using host or group vars
+
+###########
+# GENERAL #
+###########
+
+#fetch_directory: fetch/
+
+## Ceph options
+#
+#cephx: true
+
+
+#######################
+# Access type options #
+#######################
+# These are currently in ceph-common defaults because nfs_obj_gw is shared with ceph-rgw
+# Enable NFS File access
+#nfs_file_gw: true
+# Enable NFS Object access
+#nfs_obj_gw: false
+
+######################
+# NFS Ganesha Config #
+######################
+#ceph_nfs_export_id: 20134
+#ceph_nfs_pseudo_path: "/cephfile"
+#ceph_nfs_protocols: "3,4"
+#ceph_nfs_access_type: "RW"
+#ceph_nfs_log_file: "/var/log/ganesha.log"
+
+####################
+# FSAL Ceph Config #
+####################
+#ceph_nfs_ceph_export_id: 20134
+#ceph_nfs_ceph_pseudo_path: "/cephobject"
+#ceph_nfs_ceph_protocols: "3,4"
+#ceph_nfs_ceph_access_type: "RW"
+
+###################
+# FSAL RGW Config #
+###################
+#ceph_nfs_rgw_export_id: 20134
+#ceph_nfs_rgw_pseudo_path: "/ceph"
+#ceph_nfs_rgw_protocols: "3,4"
+#ceph_nfs_rgw_access_type: "RW"
+#ceph_nfs_rgw_user: "cephnfs"
+# Note: keys are optional and can be generated, but not on containerized deployments, where
+# they must be configured.
+#ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
+#ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
+
+
+###################
+# CONFIG OVERRIDE #
+###################
+
+# Ganesha configuration file override.
+# This allows you to specify more configuration options
+# using an INI style format.
+# The following sections are supported: [global], [mon], [osd], [mds], [rgw]
+#
+# Example:
+# ceph_conf_overrides:
+# global:
+# foo: 1234
+# bar: 5678
+#
+#ganesha_conf_overrides: {}
+
+##########
+# DOCKER #
+##########
+
+#nfs_containerized_deployment: false
+#nfs_containerized_deployment_with_kv: false
+#kv_type: etcd
+#kv_endpoint: 127.0.0.1
+#ceph_nfs_docker_username: ceph
+#ceph_nfs_docker_imagename: ganesha
+#ceph_nfs_docker_image_tag: latest
+#ceph_nfs_docker_extra_env: "GANESHA_EPOCH={{ ganesha_epoch }}" # comma separated variables
+#ceph_docker_on_openstack: false
+#ceph_config_keys: [] # DON'T TOUCH ME
+
+++ /dev/null
----
-# Variables here are applicable to all host groups NOT roles
-
-# This sample file generated by generate_group_vars_sample.sh
-
-# Dummy variable to avoid error because ansible does not recognize the
-# file as a good configuration file when no variable in it.
-dummy:
-
-# Variables here are applicable to all host groups NOT roles
-
-# This sample file generated by generate_group_vars_sample.sh
-
-# Dummy variable to avoid error because ansible does not recognize the
-# file as a good configuration file when no variable in it.
-#dummy:
-
-# You can override default vars defined in defaults/main.yml here,
-# but I would advice to use host or group vars instead
-
-
-###########
-# GENERAL #
-###########
-
-#fetch_directory: fetch/
-
-# Even though OSD nodes should not have the admin key
-# at their disposal, some people might want to have it
-# distributed on OSD nodes. Setting 'copy_admin_key' to 'true'
-# will copy the admin key to the /etc/ceph/ directory
-#copy_admin_key: false
-
-
-####################
-# OSD CRUSH LOCATION
-####################
-
-# /!\
-#
-# BE EXTREMELY CAREFUL WITH THIS OPTION
-# DO NOT USE IT UNLESS YOU KNOW WHAT YOU ARE DOING
-#
-# /!\
-#
-# It is probably best to keep this option to 'false' as the default
-# suggests it. This option should only be used while doing some complex
-# CRUSH map. It allows you to force a specific location for a set of OSDs.
-#
-# The following options will build a ceph.conf with OSD sections
-# Example:
-# [osd.X]
-# osd crush location = "root=location"
-#
-# This works with your inventory file
-# To match the following 'osd_crush_location' option the inventory must look like:
-#
-# [osds]
-# osd0 ceph_crush_root=foo ceph_crush_rack=bar
-
-#crush_location: false
-#osd_crush_location: "'root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}'"
-
-
-##############
-# CEPH OPTIONS
-##############
-
-# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT
-#fsid: "{{ cluster_uuid.stdout }}"
-#cephx: true
-
-# Devices to be used as OSDs
-# You can pre-provision disks that are not present yet.
-# Ansible will just skip them. Newly added disk will be
-# automatically configured during the next run.
-#
-
-
-# Declare devices to be used as OSDs
-# All scenario(except 3rd) inherit from the following device declaration
-
-#devices:
-# - /dev/sdb
-# - /dev/sdc
-# - /dev/sdd
-# - /dev/sde
-
-#devices: []
-
-
-#'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above.
-# You can use this option with First and Forth and Fifth OSDS scenario.
-# Device discovery is based on the Ansible fact 'ansible_devices'
-# which reports all the devices on a system. If chosen all the disks
-# found will be passed to ceph-disk. You should not be worried on using
-# this option since ceph-disk has a built-in check which looks for empty devices.
-# Thus devices with existing partition tables will not be used.
-#
-#osd_auto_discovery: false
-
-
-# !! WARNING !!
-# #
-# # /!\ ENABLE ONLY ONE SCENARIO AT A TIME /!\
-# #
-# # !! WARNING !!
-#
-
-# I. First scenario: journal and osd_data on the same device
-# Use 'true' to enable this scenario
-# This will collocate both journal and data on the same disk
-# creating a partition at the beginning of the device
-# List devices under 'devices' variable above or choose 'osd_auto_discovery'
-
-
-#journal_collocation: false
-
-
-# II. Second scenario: N journal devices for N OSDs
-# Use 'true' for 'raw_multi_journal' to enable this scenario
-# List devices under 'devices' variable above and
-# write journal devices for those under 'raw_journal_devices'
-# In the following example:
-# * sdb and sdc will get sdf as a journal
-# * sdd and sde will get sdg as a journal
-
-# While starting you have 2 options:
-# 1. Pre-allocate all the devices
-# 2. Progressively add new devices
-#raw_multi_journal: false
-#raw_journal_devices:
-# - /dev/sdf
-# - /dev/sdf
-# - /dev/sdg
-# - /dev/sdg
-#raw_journal_devices: []
-
-
-# III. Use directory instead of disk for OSDs
-# Use 'true' to enable this scenario
-
-#osd_directory: false
-#osd_directories:
-# - /var/lib/ceph/osd/mydir1
-# - /var/lib/ceph/osd/mydir2
-
-
-# IV. This will partition disks for BlueStore
-# Use 'true' to enable this scenario
-#bluestore: false
-
-
-# V. Encrypt osd data and/or journal devices with dm-crypt.
-# Keys are stored into the monitors k/v store
-# Use 'true' to enable this scenario
-# Both journal and data are stored on the same dm-crypt encrypted device
-#dmcrypt_journal_collocation: false
-
-
-# VI. Encrypt osd data and/or journal devices with dm-crypt.
-# Keys are stored into the monitors k/v store
-# Use 'true' to enable this scenario
-# Journal and osd data are separated, each with their own dm-crypt device
-# You must use raw_journal_devices and set your journal devices
-#dmcrypt_dedicated_journal: false
-
-
-##########
-# DOCKER #
-##########
-
-#osd_containerized_deployment: false
-#osd_containerized_deployment_with_kv: false
-#kv_type: etcd
-#kv_endpoint: 127.0.0.1
-#kv_port: 4001
-#ceph_osd_docker_prepare_env: "OSD_FORCE_ZAP=1"
-#ceph_osd_docker_username: ceph
-#ceph_osd_docker_imagename: daemon
-#ceph_osd_docker_image_tag: latest
-#ceph_osd_docker_extra_env: "CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE" # comma separated variables
-#ceph_osd_docker_devices:
-# - /dev/sdb
-#ceph_docker_on_openstack: false
-#ceph_config_keys: [] # DON'T TOUCH ME
-
--- /dev/null
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file was generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid an error, because Ansible does not recognize the
+# file as a valid configuration file when there is no variable in it.
+dummy:
+
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file was generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid an error, because Ansible does not recognize the
+# file as a valid configuration file when there is no variable in it.
+#dummy:
+
+# You can override default vars defined in defaults/main.yml here,
+# but I would advise using host or group vars instead
+
+
+###########
+# GENERAL #
+###########
+
+#fetch_directory: fetch/
+
+# Even though OSD nodes should not have the admin key
+# at their disposal, some people might want to have it
+# distributed on OSD nodes. Setting 'copy_admin_key' to 'true'
+# will copy the admin key to the /etc/ceph/ directory
+#copy_admin_key: false
+
+
+####################
+# OSD CRUSH LOCATION
+####################
+
+# /!\
+#
+# BE EXTREMELY CAREFUL WITH THIS OPTION
+# DO NOT USE IT UNLESS YOU KNOW WHAT YOU ARE DOING
+#
+# /!\
+#
+# It is probably best to keep this option set to 'false', as the default
+# suggests. This option should only be used while building a complex
+# CRUSH map. It allows you to force a specific location for a set of OSDs.
+#
+# The following options will build a ceph.conf with OSD sections
+# Example:
+# [osd.X]
+# osd crush location = "root=location"
+#
+# This works with your inventory file
+# To match the following 'osd_crush_location' option the inventory must look like:
+#
+# [osds]
+# osd0 ceph_crush_root=foo ceph_crush_rack=bar
+
+#crush_location: false
+#osd_crush_location: "'root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}'"
+
+
+##############
+# CEPH OPTIONS
+##############
+
+# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT
+#fsid: "{{ cluster_uuid.stdout }}"
+#cephx: true
+
+# Devices to be used as OSDs
+# You can pre-provision disks that are not present yet.
+# Ansible will just skip them. Newly added disks will be
+# automatically configured during the next run.
+#
+
+
+# Declare devices to be used as OSDs
+# All scenarios (except the 3rd) inherit from the following device declaration
+
+#devices:
+# - /dev/sdb
+# - /dev/sdc
+# - /dev/sdd
+# - /dev/sde
+
+#devices: []
+
+
+# 'osd_auto_discovery' mode is used instead of filling out the 'devices' variable above.
+# You can use this option with the First, Fourth and Fifth OSD scenarios.
+# Device discovery is based on the Ansible fact 'ansible_devices',
+# which reports all the devices on a system. If chosen, all the disks
+# found will be passed to ceph-disk. You should not worry about using
+# this option, since ceph-disk has a built-in check that looks for empty devices.
+# Thus devices with existing partition tables will not be used.
+#
+#osd_auto_discovery: false
+
+
+# !! WARNING !!
+#
+# /!\ ENABLE ONLY ONE SCENARIO AT A TIME /!\
+#
+# !! WARNING !!
+#
+
+# I. First scenario: journal and osd_data on the same device
+# Use 'true' to enable this scenario
+# This will collocate both journal and data on the same disk
+# creating a partition at the beginning of the device
+# List devices under 'devices' variable above or choose 'osd_auto_discovery'
+
+
+#journal_collocation: false
+
+
+# II. Second scenario: N journal devices for N OSDs
+# Use 'true' for 'raw_multi_journal' to enable this scenario
+# List devices under 'devices' variable above and
+# write journal devices for those under 'raw_journal_devices'
+# In the following example:
+# * sdb and sdc will get sdf as a journal
+# * sdd and sde will get sdg as a journal
+
+# When starting out, you have 2 options:
+# 1. Pre-allocate all the devices
+# 2. Progressively add new devices
+#raw_multi_journal: false
+#raw_journal_devices:
+# - /dev/sdf
+# - /dev/sdf
+# - /dev/sdg
+# - /dev/sdg
+#raw_journal_devices: []
+
+
+# III. Use directory instead of disk for OSDs
+# Use 'true' to enable this scenario
+
+#osd_directory: false
+#osd_directories:
+# - /var/lib/ceph/osd/mydir1
+# - /var/lib/ceph/osd/mydir2
+
+
+# IV. This will partition disks for BlueStore
+# Use 'true' to enable this scenario
+#bluestore: false
+
+
+# V. Encrypt osd data and/or journal devices with dm-crypt.
+# Keys are stored in the monitors' k/v store
+# Use 'true' to enable this scenario
+# Both journal and data are stored on the same dm-crypt encrypted device
+#dmcrypt_journal_collocation: false
+
+
+# VI. Encrypt osd data and/or journal devices with dm-crypt.
+# Keys are stored in the monitors' k/v store
+# Use 'true' to enable this scenario
+# Journal and osd data are separated, each with their own dm-crypt device
+# You must use raw_journal_devices and set your journal devices
+#dmcrypt_dedicated_journal: false
+
+
+##########
+# DOCKER #
+##########
+
+#osd_containerized_deployment: false
+#osd_containerized_deployment_with_kv: false
+#kv_type: etcd
+#kv_endpoint: 127.0.0.1
+#kv_port: 4001
+#ceph_osd_docker_prepare_env: "OSD_FORCE_ZAP=1"
+#ceph_osd_docker_username: ceph
+#ceph_osd_docker_imagename: daemon
+#ceph_osd_docker_image_tag: latest
+#ceph_osd_docker_extra_env: "CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE" # comma separated variables
+#ceph_osd_docker_devices:
+# - /dev/sdb
+#ceph_docker_on_openstack: false
+#ceph_config_keys: [] # DON'T TOUCH ME
+
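As a concrete illustration of the second OSD scenario described above, a `group_vars/osds.yml` might look like the following; the device names are assumptions about the target hosts and mirror the example already given in the sample comments:

```yaml
---
# group_vars/osds.yml -- illustrative second-scenario layout.
# Data devices used as OSDs.
devices:
  - /dev/sdb
  - /dev/sdc
  - /dev/sdd
  - /dev/sde

# One journal entry per data device, in order:
# sdb and sdc journal on sdf, sdd and sde journal on sdg.
raw_multi_journal: true
raw_journal_devices:
  - /dev/sdf
  - /dev/sdf
  - /dev/sdg
  - /dev/sdg
```

Only this one scenario flag is enabled, in line with the warning above about enabling a single scenario at a time.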
+++ /dev/null
----
-# Variables here are applicable to all host groups NOT roles
-
-# This sample file generated by generate_group_vars_sample.sh
-
-# Dummy variable to avoid error because ansible does not recognize the
-# file as a good configuration file when no variable in it.
-dummy:
-
-#########
-# SETUP #
-#########
-
-
-# NOTE (leseb): the rbd-mirror daemon needs a user to start
-# because it has to authenticate with the local cluster.
-# By default, using the admin user is fine, so you should not
-# need to change 'ceph_rbd_mirror_local_user' unless you have
-# a dedicated key available in /etc/ceph/.
-# Generally Ansible will use the admin key and put it
-# under /etc/ceph/. The same goes for 'ceph_rbd_mirror_remote_user'
-# there should not be any reason to change it.
-#ceph_rbd_mirror_local_user: admin
-#ceph_rbd_mirror_remote_user: admin
-
-# NOTE (leseb): the following variable needs the name of the remote cluster.
-# The name of this cluster must be different than your local cluster simply
-# because we need to have both keys and ceph.conf inside /etc/ceph.
-# Thus if cluster names are identical we can not have them under /etc/ceph
-#ceph_rbd_mirror_remote_cluster: ""
-
-
-#################
-# CONFIGURATION #
-#################
-
-#ceph_rbd_mirror_configure: false
-#ceph_rbd_mirror_pool: ""
-
-
-##########
-# DOCKER #
-##########
-
-#rbd_mirror_containerized_deployment: false
-#rbd_mirror_containerized_deployment_with_kv: false
-#kv_type: etcd
-#kv_endpoint: 127.0.0.1
-#ceph_rbd_mirror_docker_username: ceph
-#ceph_rbd_mirror_docker_imagename: daemon
-#ceph_rbd_mirror_docker_image_tag: latest
-#ceph_docker_on_openstack: false
-#ceph_config_keys: [] # DON'T TOUCH ME
-
--- /dev/null
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file was generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid an error, because Ansible does not recognize the
+# file as a valid configuration file when there is no variable in it.
+dummy:
+
+#########
+# SETUP #
+#########
+
+
+# NOTE (leseb): the rbd-mirror daemon needs a user to start
+# because it has to authenticate with the local cluster.
+# By default, using the admin user is fine, so you should not
+# need to change 'ceph_rbd_mirror_local_user' unless you have
+# a dedicated key available in /etc/ceph/.
+# Generally Ansible will use the admin key and put it
+# under /etc/ceph/. The same goes for 'ceph_rbd_mirror_remote_user':
+# there should not be any reason to change it.
+#ceph_rbd_mirror_local_user: admin
+#ceph_rbd_mirror_remote_user: admin
+
+# NOTE (leseb): the following variable needs the name of the remote cluster.
+# That name must be different from your local cluster's name, simply
+# because we need to keep both clusters' keys and ceph.conf files inside /etc/ceph.
+# If the cluster names were identical, the files could not coexist under /etc/ceph.
+#ceph_rbd_mirror_remote_cluster: ""
+
+
+#################
+# CONFIGURATION #
+#################
+
+#ceph_rbd_mirror_configure: false
+#ceph_rbd_mirror_pool: ""
+
+
+##########
+# DOCKER #
+##########
+
+#rbd_mirror_containerized_deployment: false
+#rbd_mirror_containerized_deployment_with_kv: false
+#kv_type: etcd
+#kv_endpoint: 127.0.0.1
+#ceph_rbd_mirror_docker_username: ceph
+#ceph_rbd_mirror_docker_imagename: daemon
+#ceph_rbd_mirror_docker_image_tag: latest
+#ceph_docker_on_openstack: false
+#ceph_config_keys: [] # DON'T TOUCH ME
+
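A hedged sketch of a matching `group_vars/rbd-mirrors.yml` follows; the remote cluster name and pool are placeholders, not values taken from the playbooks:

```yaml
---
# group_vars/rbd-mirrors.yml -- illustrative only.
# Both sides authenticate with the admin key that Ansible places under /etc/ceph/.
ceph_rbd_mirror_local_user: admin
ceph_rbd_mirror_remote_user: admin

# Name of the peer cluster; it must differ from the local cluster name so
# both keys and ceph.conf files can coexist under /etc/ceph.
ceph_rbd_mirror_remote_cluster: "backup"

# Let the role configure mirroring on a specific pool.
ceph_rbd_mirror_configure: true
ceph_rbd_mirror_pool: "rbd"
```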
+++ /dev/null
----
-# Variables here are applicable to all host groups NOT roles
-
-# This sample file generated by generate_group_vars_sample.sh
-
-# Dummy variable to avoid error because ansible does not recognize the
-# file as a good configuration file when no variable in it.
-dummy:
-
-
-###########
-# GENERAL #
-###########
-
-#fetch_directory: fetch/
-
-##########
-# DOCKER #
-##########
-
-#restapi_containerized_deployment: false
-#ceph_restapi_docker_interface: eth0
-#ceph_restapi_port: 5000
-#ceph_restapi_docker_username: ceph
-#ceph_restapi_docker_imagename: daemon
-#ceph_restapi_docker_image_tag: latest
-#ceph_restapi_docker_extra_env: "RESTAPI_IP=0.0.0.0" # comma separated variables
-#ceph_docker_on_openstack: false
-#ceph_config_keys: [] # DON'T TOUCH ME
-
--- /dev/null
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file was generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid an error, because Ansible does not recognize the
+# file as a valid configuration file when there is no variable in it.
+dummy:
+
+
+###########
+# GENERAL #
+###########
+
+#fetch_directory: fetch/
+
+##########
+# DOCKER #
+##########
+
+#restapi_containerized_deployment: false
+#ceph_restapi_docker_interface: eth0
+#ceph_restapi_port: 5000
+#ceph_restapi_docker_username: ceph
+#ceph_restapi_docker_imagename: daemon
+#ceph_restapi_docker_image_tag: latest
+#ceph_restapi_docker_extra_env: "RESTAPI_IP=0.0.0.0" # comma separated variables
+#ceph_docker_on_openstack: false
+#ceph_config_keys: [] # DON'T TOUCH ME
+
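If needed, a containerized REST API host group could be sketched like this; the interface name is an assumption and the image settings just restate the defaults above:

```yaml
---
# group_vars/restapis.yml -- illustrative only.
# Run the REST API in a container, bound to the assumed interface and port.
restapi_containerized_deployment: true
ceph_restapi_docker_interface: eth0
ceph_restapi_port: 5000
ceph_restapi_docker_username: ceph
ceph_restapi_docker_imagename: daemon
ceph_restapi_docker_image_tag: latest
```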
+++ /dev/null
----
-# Variables here are applicable to all host groups NOT roles
-
-# This sample file generated by generate_group_vars_sample.sh
-
-# Dummy variable to avoid error because ansible does not recognize the
-# file as a good configuration file when no variable in it.
-dummy:
-
-# You can override vars by using host or group vars
-
-###########
-# GENERAL #
-###########
-
-#fetch_directory: fetch/
-
-# Even though RGW nodes should not have the admin key
-# at their disposal, some people might want to have it
-# distributed on RGW nodes. Setting 'copy_admin_key' to 'true'
-# will copy the admin key to the /etc/ceph/ directory
-#copy_admin_key: false
-
-## Ceph options
-#
-#cephx: true
-
-# Used for the sudo exception while starting the radosgw process
-# a new entry /etc/sudoers.d/ceph will be created
-# allowing root to not require tty
-#radosgw_user: root
-
-##########
-# DOCKER #
-##########
-
-#rgw_containerized_deployment: false
-#rgw_containerized_deployment_with_kv: false
-#kv_type: etcd
-#kv_endpoint: 127.0.0.1
-#ceph_rgw_civetweb_port: 80
-#ceph_rgw_docker_username: ceph
-#ceph_rgw_docker_imagename: daemon
-#ceph_rgw_docker_image_tag: latest
-#ceph_rgw_docker_extra_env: "RGW_CIVETWEB_PORT={{ ceph_rgw_civetweb_port }}" # comma separated variables
-#ceph_docker_on_openstack: false
-#ceph_config_keys: [] # DON'T TOUCH ME
-
--- /dev/null
+---
+# Variables here are applicable to all host groups NOT roles
+
+# This sample file was generated by generate_group_vars_sample.sh
+
+# Dummy variable to avoid an error, because Ansible does not recognize the
+# file as a valid configuration file when there is no variable in it.
+dummy:
+
+# You can override vars by using host or group vars
+
+###########
+# GENERAL #
+###########
+
+#fetch_directory: fetch/
+
+# Even though RGW nodes should not have the admin key
+# at their disposal, some people might want to have it
+# distributed on RGW nodes. Setting 'copy_admin_key' to 'true'
+# will copy the admin key to the /etc/ceph/ directory
+#copy_admin_key: false
+
+## Ceph options
+#
+#cephx: true
+
+# Used for the sudo exception while starting the radosgw process:
+# a new entry, /etc/sudoers.d/ceph, will be created,
+# allowing root to run without requiring a tty
+#radosgw_user: root
+
+##########
+# DOCKER #
+##########
+
+#rgw_containerized_deployment: false
+#rgw_containerized_deployment_with_kv: false
+#kv_type: etcd
+#kv_endpoint: 127.0.0.1
+#ceph_rgw_civetweb_port: 80
+#ceph_rgw_docker_username: ceph
+#ceph_rgw_docker_imagename: daemon
+#ceph_rgw_docker_image_tag: latest
+#ceph_rgw_docker_extra_env: "RGW_CIVETWEB_PORT={{ ceph_rgw_civetweb_port }}" # comma separated variables
+#ceph_docker_on_openstack: false
+#ceph_config_keys: [] # DON'T TOUCH ME
+
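For completeness, a minimal sketch of a non-containerized `group_vars/rgws.yml` built from the defaults above; the choices shown are illustrative, not recommendations:

```yaml
---
# group_vars/rgws.yml -- illustrative only; copy rgws.yml.sample and adjust.
# Ship the admin key to the gateway nodes (optional, see the note above).
copy_admin_key: true

# User that starts the radosgw process; a /etc/sudoers.d/ceph entry removes
# the tty requirement for root.
radosgw_user: root

# Keep the non-containerized deployment (the default).
rgw_containerized_deployment: false
```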
tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- include_vars: roles/ceph-mds/defaults/main.yml
- - include_vars: group_vars/all
+ - include_vars: group_vars/all.yml
failed_when: false
- - include_vars: group_vars/{{ mds_group_name }}
+ - include_vars: group_vars/{{ mds_group_name }}.yml
failed_when: false
- name: stop ceph.target with systemd
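Every hunk in this part of the diff applies the same change: the plays load role defaults first, then the optional group_vars files, now with the `.yml` extension, and `failed_when: false` keeps a missing group_vars file from aborting the play. A minimal sketch of that pattern for the MDS case (the `hosts` pattern and `become` flag are assumptions made for the sketch):

```yaml
- hosts: mdss
  become: true
  tasks:
    # Role defaults first, so the group_vars files can override them.
    - include_vars: roles/ceph-common/defaults/main.yml
    - include_vars: roles/ceph-mds/defaults/main.yml
    # Optional group variables; a missing file is not fatal.
    - include_vars: group_vars/all.yml
      failed_when: false
    - include_vars: group_vars/{{ mds_group_name }}.yml
      failed_when: false
```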
tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- include_vars: roles/ceph-rgw/defaults/main.yml
- - include_vars: group_vars/all
+ - include_vars: group_vars/all.yml
failed_when: false
- - include_vars: group_vars/{{ rgw_group_name }}
+ - include_vars: group_vars/{{ rgw_group_name }}.yml
failed_when: false
- name: stop ceph.target with systemd
tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- include_vars: roles/ceph-rbd-mirror/defaults/main.yml
- - include_vars: group_vars/all
+ - include_vars: group_vars/all.yml
failed_when: false
- - include_vars: group_vars/{{ rbdmirror_group_name }}
+ - include_vars: group_vars/{{ rbdmirror_group_name }}.yml
failed_when: false
- name: stop ceph.target with systemd
tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- include_vars: roles/ceph-nfs/defaults/main.yml
- - include_vars: group_vars/all
+ - include_vars: group_vars/all.yml
failed_when: false
- - include_vars: group_vars/{{ nfs_group_name }}
+ - include_vars: group_vars/{{ nfs_group_name }}.yml
failed_when: false
- name: stop ceph.target with systemd
tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- include_vars: roles/ceph-osd/defaults/main.yml
- - include_vars: group_vars/all
+ - include_vars: group_vars/all.yml
failed_when: false
- - include_vars: group_vars/{{ osd_group_name }}
+ - include_vars: group_vars/{{ osd_group_name }}.yml
failed_when: false
- name: check for a device list
- include_vars: roles/ceph-common/defaults/main.yml
- include_vars: roles/ceph-mon/defaults/main.yml
- include_vars: roles/ceph-restapi/defaults/main.yml
- - include_vars: group_vars/all
+ - include_vars: group_vars/all.yml
failed_when: false
- - include_vars: group_vars/{{ mon_group_name }}
+ - include_vars: group_vars/{{ mon_group_name }}.yml
failed_when: false
- - include_vars: group_vars/{{ restapi_group_name }}
+ - include_vars: group_vars/{{ restapi_group_name }}.yml
failed_when: false
- name: stop ceph.target with systemd
tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- - include_vars: group_vars/all
+ - include_vars: group_vars/all.yml
failed_when: false
- - include_vars: group_vars/{{ mds_group_name }}
+ - include_vars: group_vars/{{ mds_group_name }}.yml
failed_when: false
- - include_vars: group_vars/{{ rgw_group_name }}
+ - include_vars: group_vars/{{ rgw_group_name }}.yml
failed_when: false
- - include_vars: group_vars/{{ rbdmirror_group_name }}
+ - include_vars: group_vars/{{ rbdmirror_group_name }}.yml
failed_when: false
- - include_vars: group_vars/{{ nfs_group_name }}
+ - include_vars: group_vars/{{ nfs_group_name }}.yml
failed_when: false
- - include_vars: group_vars/{{ osd_group_name }}
+ - include_vars: group_vars/{{ osd_group_name }}.yml
failed_when: false
- - include_vars: group_vars/{{ mon_group_name }}
+ - include_vars: group_vars/{{ mon_group_name }}.yml
failed_when: false
- - include_vars: group_vars/{{ restapi_group_name }}
+ - include_vars: group_vars/{{ restapi_group_name }}.yml
failed_when: false
- name: purge fetch directory for localhost
tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- include_vars: roles/ceph-mds/defaults/main.yml
- - include_vars: group_vars/all
+ - include_vars: group_vars/all.yml
failed_when: false
- - include_vars: group_vars/{{ mds_group_name }}
+ - include_vars: group_vars/{{ mds_group_name }}.yml
failed_when: false
- name: disable ceph mds service
tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- include_vars: roles/ceph-rgw/defaults/main.yml
- - include_vars: group_vars/all
+ - include_vars: group_vars/all.yml
failed_when: false
- - include_vars: group_vars/{{ rgw_group_name }}
+ - include_vars: group_vars/{{ rgw_group_name }}.yml
failed_when: false
- name: disable ceph rgw service
tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- include_vars: roles/ceph-rbd-mirror/defaults/main.yml
- - include_vars: group_vars/all
+ - include_vars: group_vars/all.yml
failed_when: false
- - include_vars: group_vars/{{ rbdmirror_group_name }}
+ - include_vars: group_vars/{{ rbdmirror_group_name }}.yml
failed_when: false
- name: disable ceph rbd-mirror service
tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- include_vars: roles/ceph-nfs/defaults/main.yml
- - include_vars: group_vars/all
+ - include_vars: group_vars/all.yml
failed_when: false
- - include_vars: group_vars/{{ nfs_group_name }}
+ - include_vars: group_vars/{{ nfs_group_name }}.yml
failed_when: false
- name: disable ceph nfs service
tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- include_vars: roles/ceph-osd/defaults/main.yml
- - include_vars: group_vars/all
+ - include_vars: group_vars/all.yml
failed_when: false
- - include_vars: group_vars/{{ osd_group_name }}
+ - include_vars: group_vars/{{ osd_group_name }}.yml
failed_when: false
- name: disable ceph osd service
- include_vars: roles/ceph-common/defaults/main.yml
- include_vars: roles/ceph-mon/defaults/main.yml
- include_vars: roles/ceph-restapi/defaults/main.yml
- - include_vars: group_vars/all
+ - include_vars: group_vars/all.yml
failed_when: false
- - include_vars: group_vars/{{ mon_group_name }}
+ - include_vars: group_vars/{{ mon_group_name }}.yml
failed_when: false
- - include_vars: group_vars/{{ restapi_group_name }}
+ - include_vars: group_vars/{{ restapi_group_name }}.yml
failed_when: false
- name: disable ceph mon service
tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- - include_vars: group_vars/all
+ - include_vars: group_vars/all.yml
failed_when: false
- - include_vars: group_vars/{{ mds_group_name }}
+ - include_vars: group_vars/{{ mds_group_name }}.yml
failed_when: false
- - include_vars: group_vars/{{ rgw_group_name }}
+ - include_vars: group_vars/{{ rgw_group_name }}.yml
failed_when: false
- - include_vars: group_vars/{{ rbdmirror_group_name }}
+ - include_vars: group_vars/{{ rbdmirror_group_name }}.yml
failed_when: false
- - include_vars: group_vars/{{ nfs_group_name }}
+ - include_vars: group_vars/{{ nfs_group_name }}.yml
failed_when: false
- - include_vars: group_vars/{{ osd_group_name }}
+ - include_vars: group_vars/{{ osd_group_name }}.yml
failed_when: false
- - include_vars: group_vars/{{ mon_group_name }}
+ - include_vars: group_vars/{{ mon_group_name }}.yml
failed_when: false
- - include_vars: group_vars/{{ restapi_group_name }}
+ - include_vars: group_vars/{{ restapi_group_name }}.yml
failed_when: false
- name: purge fetch directory for localhost
tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- - include_vars: group_vars/all
+ - include_vars: group_vars/all.yml
- name: exit playbook, if user did not mean to shrink cluster
fail:
tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- - include_vars: group_vars/all
+ - include_vars: group_vars/all.yml
- name: exit playbook, if user did not mean to shrink cluster
fail:
- include_vars: roles/ceph-common/defaults/main.yml
- include_vars: roles/ceph-mon/defaults/main.yml
- include_vars: roles/ceph-restapi/defaults/main.yml
- - include_vars: group_vars/all
+ - include_vars: group_vars/all.yml
failed_when: false
- - include_vars: group_vars/{{ mon_group_name }}
+ - include_vars: group_vars/{{ mon_group_name }}.yml
failed_when: false
- - include_vars: group_vars/{{ restapi_group_name }}
+ - include_vars: group_vars/{{ restapi_group_name }}.yml
failed_when: false
- name: select a running monitor
tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- include_vars: roles/ceph-osd/defaults/main.yml
- - include_vars: group_vars/all
+ - include_vars: group_vars/all.yml
failed_when: false
- - include_vars: group_vars/{{ osd_group_name }}
+ - include_vars: group_vars/{{ osd_group_name }}.yml
failed_when: false
- name: collect osd ids
tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- include_vars: roles/ceph-mds/defaults/main.yml
- - include_vars: group_vars/all
+ - include_vars: group_vars/all.yml
failed_when: false
- - include_vars: group_vars/{{ mds_group_name }}
+ - include_vars: group_vars/{{ mds_group_name }}.yml
failed_when: false
- name: stop ceph mds service
tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- include_vars: roles/ceph-rgw/defaults/main.yml
- - include_vars: group_vars/all
+ - include_vars: group_vars/all.yml
failed_when: false
- - include_vars: group_vars/{{ rgw_group_name }}
+ - include_vars: group_vars/{{ rgw_group_name }}.yml
failed_when: false
- name: stop ceph rgw service
tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- include_vars: roles/ceph-rbd-mirror/defaults/main.yml
- - include_vars: group_vars/all
+ - include_vars: group_vars/all.yml
failed_when: false
- - include_vars: group_vars/{{ rbdmirror_group_name }}
+ - include_vars: group_vars/{{ rbdmirror_group_name }}.yml
failed_when: false
- name: stop ceph rbd mirror service
tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- include_vars: roles/ceph-nfs/defaults/main.yml
- - include_vars: group_vars/all
+ - include_vars: group_vars/all.yml
failed_when: false
- - include_vars: group_vars/{{ nfs_group_name }}
+ - include_vars: group_vars/{{ nfs_group_name }}.yml
failed_when: false
- name: stop ceph nfs service
become: True
vars_files:
- roles/ceph-common/defaults/main.yml
- - group_vars/all
+ - group_vars/all.yml
roles:
- ceph-fetch-keys
tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- - include_vars: group_vars/all
+ - include_vars: group_vars/all.yml
- name: get the name of the existing ceph cluster
shell: "ls /etc/ceph/*.conf"