# file as a good configuration file when no variable in it.
dummy:
# Variables here are applicable to all host groups NOT roles

# This sample file generated by generate_group_vars_sample.sh

# Dummy variable to avoid error because ansible does not recognize the
# file as a good configuration file when no variable in it.
#dummy:

# You can override default vars defined in defaults/main.yml here,
# but I would advise using host or group vars instead

###########
# GENERAL #
###########
# will copy the admin key to the /etc/ceph/ directory
#copy_admin_key: false
####################
# OSD CRUSH LOCATION
####################
# /!\
#
# BE EXTREMELY CAREFUL WITH THIS OPTION
# DO NOT USE IT UNLESS YOU KNOW WHAT YOU ARE DOING
#
# /!\
#
# It is probably best to keep this option to 'false' as the default
# suggests it. This option should only be used while doing some complex
# CRUSH map. It allows you to force a specific location for a set of OSDs.
#
# The following options will build a ceph.conf with OSD sections
# Example:
# [osd.X]
#crush_location: false
#osd_crush_location: "'root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}'"
##############
# CEPH OPTIONS
##############
#
# Declare devices to be used as OSDs
# All scenarios (except the 3rd) inherit from the following device declaration
#
#devices:
# - /dev/sdb
# - /dev/sdc
# - /dev/sdd
# - /dev/sde

# 'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above.
# You can use this option with the first, fourth, and fifth OSD scenarios.
# Device discovery is based on the Ansible fact 'ansible_devices'
# which reports all the devices on a system. If chosen all the disks
# found will be passed to ceph-disk. You should not be worried on using
# this option since ceph-disk has a built-in check which looks for empty devices.
# Thus devices with existing partition tables will not be used.
#
#osd_auto_discovery: false

# !! WARNING !!
#
# /!\ ENABLE ONLY ONE SCENARIO AT A TIME /!\
#
# !! WARNING !!
#

# I. First scenario: journal and osd_data on the same device
# Use 'true' to enable this scenario
# This will collocate both journal and data on the same disk
# creating a partition at the beginning of the device
# List devices under 'devices' variable above or choose 'osd_auto_discovery'

#journal_collocation: false
# II. Second scenario: N journal devices for N OSDs
# Use 'true' for 'raw_multi_journal' to enable this scenario
# List devices under 'devices' variable above and
# write journal devices for those under 'raw_journal_devices'
# In the following example:
# * sdb and sdc will get sdf as a journal
# * sdd and sde will get sdg as a journal

# While starting you have 2 options:
# 1. Pre-allocate all the devices
# 2. Progressively add new devices
#raw_multi_journal: false
#raw_journal_devices:
# - /dev/sdf
# - /dev/sdf
# - /dev/sdg
# - /dev/sdg
# III. Use directory instead of disk for OSDs
---
# Variables here are applicable to all host groups NOT roles

# This sample file generated by generate_group_vars_sample.sh

# Dummy variable to avoid error because ansible does not recognize the
# file as a good configuration file when no variable in it.
dummy:

# You can override default vars defined in defaults/main.yml here,
# but I would advise using host or group vars instead

###########
# GENERAL #
###########
# will copy the admin key to the /etc/ceph/ directory
copy_admin_key: false
####################
# OSD CRUSH LOCATION
####################
# /!\
#
# BE EXTREMELY CAREFUL WITH THIS OPTION
# DO NOT USE IT UNLESS YOU KNOW WHAT YOU ARE DOING
#
# /!\
#
# It is probably best to keep this option to 'false' as the default
# suggests it. This option should only be used while doing some complex
# CRUSH map. It allows you to force a specific location for a set of OSDs.
#
# The following options will build a ceph.conf with OSD sections
# Example:
# [osd.X]
crush_location: false
osd_crush_location: "'root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}'"
##############
# CEPH OPTIONS
##############
#
# Declare devices to be used as OSDs
# All scenarios (except the 3rd) inherit from the following device declaration
#
#devices:
# - /dev/sdb
# - /dev/sdc
# - /dev/sdd
# - /dev/sde

# 'osd_auto_discovery' mode prevents you from filling out the 'devices' variable above.
# You can use this option with the first, fourth, and fifth OSD scenarios.
# Device discovery is based on the Ansible fact 'ansible_devices'
# which reports all the devices on a system. If chosen all the disks
# found will be passed to ceph-disk. You should not be worried on using
# this option since ceph-disk has a built-in check which looks for empty devices.
# Thus devices with existing partition tables will not be used.
#
osd_auto_discovery: false

# !! WARNING !!
#
# /!\ ENABLE ONLY ONE SCENARIO AT A TIME /!\
#
# !! WARNING !!
#

# I. First scenario: journal and osd_data on the same device
# Use 'true' to enable this scenario
# This will collocate both journal and data on the same disk
# creating a partition at the beginning of the device
# List devices under 'devices' variable above or choose 'osd_auto_discovery'

journal_collocation: false
# II. Second scenario: N journal devices for N OSDs
# Use 'true' for 'raw_multi_journal' to enable this scenario
# List devices under 'devices' variable above and
# write journal devices for those under 'raw_journal_devices'
# In the following example:
# * sdb and sdc will get sdf as a journal
# * sdd and sde will get sdg as a journal

# While starting you have 2 options:
# 1. Pre-allocate all the devices
# 2. Progressively add new devices
raw_multi_journal: false
#raw_journal_devices:
# - /dev/sdf
# - /dev/sdf
# - /dev/sdg
# - /dev/sdg
# III. Use directory instead of disk for OSDs