###########
# GENERAL #
###########
-
#mon_group_name: mons
# ACTIVATE BOTH FSID AND MONITOR_SECRET VARIABLES FOR NON-VAGRANT DEPLOYMENT
#monitor_secret: "{{ monitor_keyring.stdout }}"
#admin_secret: 'admin_secret'
-# CephFS
-#cephfs_data: cephfs_data
-#cephfs_metadata: cephfs_metadata
-#cephfs: cephfs
-
# Secure your cluster
# This will set the following flags on all the pools:
# * nosizechange
#calamari_debug: false
-#############
-# crush rules
-#############
+##########
+# CEPHFS #
+##########
+#cephfs: cephfs # name of the ceph filesystem
+#cephfs_data: cephfs_data # name of the data pool for a given filesystem
+#cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
+
+#cephfs_pools:
+# - { name: "{{ cephfs_data }}", pgs: "" }
+# - { name: "{{ cephfs_metadata }}", pgs: "" }
+
+
+###############
+# CRUSH RULES #
+###############
#crush_rule_config: false
#crush_rule_hdd:
#crush_rules:
# - "{{ crush_rule_hdd }}"
# - "{{ crush_rule_ssd }}"
+
+
#############
# OPENSTACK #
#############
-
#openstack_config: false
#openstack_glance_pool:
# name: images
# - { name: client.gnocchi, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_gnocchi_pool.name }}", mode: "0600", acls: [] }
# - { name: client.openstack, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool=images, allow rwx pool=vms, allow rwx pool=volumes, allow rwx pool=backups", mode: "0600", acls: [] }
+
##########
# DOCKER #
##########
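The cephfs_pools entries above ship with empty pg counts on purpose: the sample only documents the structure, and the pg number has to be chosen per deployment. A deployer would override the variable in group_vars/all.yml before running the playbook, for example (the pg counts below are illustrative values, not defaults introduced by this change):

cephfs_pools:
  - { name: "{{ cephfs_data }}", pgs: "64" }
  - { name: "{{ cephfs_metadata }}", pgs: "16" }
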
###########
# GENERAL #
###########
-
mon_group_name: mons
# ACTIVATE BOTH FSID AND MONITOR_SECRET VARIABLES FOR NON-VAGRANT DEPLOYMENT
monitor_secret: "{{ monitor_keyring.stdout }}"
admin_secret: 'admin_secret'
-# CephFS
-cephfs_data: cephfs_data
-cephfs_metadata: cephfs_metadata
-cephfs: cephfs
-
# Secure your cluster
# This will set the following flags on all the pools:
# * nosizechange
calamari_debug: false
-#############
-# crush rules
-#############
+##########
+# CEPHFS #
+##########
+cephfs: cephfs # name of the ceph filesystem
+cephfs_data: cephfs_data # name of the data pool for a given filesystem
+cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
+
+cephfs_pools:
+ - { name: "{{ cephfs_data }}", pgs: "" }
+ - { name: "{{ cephfs_metadata }}", pgs: "" }
+
+
+###############
+# CRUSH RULES #
+###############
crush_rule_config: false
crush_rule_hdd:
crush_rules:
- "{{ crush_rule_hdd }}"
- "{{ crush_rule_ssd }}"
+
+
#############
# OPENSTACK #
#############
-
openstack_config: false
openstack_glance_pool:
name: images
- { name: client.gnocchi, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_gnocchi_pool.name }}", mode: "0600", acls: [] }
- { name: client.openstack, key: "$(ceph-authtool --gen-print-key)", mon_cap: "allow r", osd_cap: "allow class-read object_prefix rbd_children, allow rwx pool=images, allow rwx pool=vms, allow rwx pool=volumes, allow rwx pool=backups", mode: "0600", acls: [] }
+
##########
# DOCKER #
##########
- monitor_interface == 'interface'
- monitor_address == '0.0.0.0'
- monitor_address_block | length == 0
+
+- name: make sure pg num is set for cephfs pools
+ fail:
+ msg: "You must set pg num for your cephfs pools, see the cephfs_pools variable."
+ with_items: "{{ cephfs_pools }}"
+ when:
+ - inventory_hostname in groups.get(mds_group_name, [])
+ - item.pgs == ''
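
This new guard is evaluated per pool through with_items and only on hosts in the mds group, so a run aborts with the message above as soon as any entry still carries the empty default. For instance, in the sketch below the first entry would trip the check while the second would pass (the value 16 is only an example):

cephfs_pools:
  - { name: "{{ cephfs_data }}", pgs: "" }       # still empty, fails the check
  - { name: "{{ cephfs_metadata }}", pgs: "16" } # explicitly set, passes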
---
-# NOTE (leseb): in the present playbook the conditional is done on the task
-# We don't do this in main.yml because of the 'docker' variable, when set to true
-# the role 'ceph-common' doesn't get inherited so the condition can not be evaluate
-# since those check are performed by the ceph-common role
- name: create filesystem pools
- command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd pool create {{ item }} {{ osd_pool_default_pg_num }}"
- with_items:
- - "{{ cephfs_data }}"
- - "{{ cephfs_metadata }}"
+ command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pgs }}"
changed_when: false
+ with_items:
+ - "{{ cephfs_pools }}"
- name: check if ceph filesystem already exists
command: "{{docker_exec_cmd }} ceph --cluster {{ cluster }} fs get {{ cephfs }}"
- name: create ceph filesystem
command: "{{ docker_exec_cmd}} ceph --cluster {{ cluster }} fs new {{ cephfs }} {{ cephfs_metadata }} {{ cephfs_data }}"
changed_when: false
-  when: check_existing_cephfs.rc != 0
+  when:
+    - check_existing_cephfs.rc != 0
- name: allow multimds
command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} fs set {{ cephfs }} allow_multimds true --yes-i-really-mean-it"
--- /dev/null
+---
+cephfs_pools:
+ - { name: "{{ cephfs_data }}", pgs: "8" }
+ - { name: "{{ cephfs_metadata }}", pgs: "8" }
--- /dev/null
+---
+cephfs_pools:
+ - { name: "{{ cephfs_data }}", pgs: "8" }
+ - { name: "{{ cephfs_metadata }}", pgs: "8" }
--- /dev/null
+---
+cephfs_pools:
+ - { name: "{{ cephfs_data }}", pgs: "8" }
+ - { name: "{{ cephfs_metadata }}", pgs: "8" }