# Dummy variable to avoid an error: Ansible does not treat the file as a valid configuration file when it contains no variables.
dummy:
-## Ceph options
+####################
+# OSD CRUSH LOCATION
+####################
+
+# The following options will build a ceph.conf with OSD sections
+# Example:
+# [osd.X]
+# osd crush location = "root=location"
#
+# This works with your inventory file
+# To match the following 'osd_crush_location' option, the inventory must look like:
+#
+# [osds]
+# osd0 ceph_crush_root=foo ceph_crush_rack=bar
+
+crush_location: false
+osd_crush_location: "'root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}'"
+
+##############
+# CEPH OPTIONS
+##############
#cephx: true
# You can override default vars defined in defaults/main.yml here,\r
# but I would advise using host or group vars instead\r
\r
-## Ceph options\r
+####################\r
+# OSD CRUSH LOCATION\r
+####################\r
+\r
+# The following options will build a ceph.conf with OSD sections\r
+# Example:\r
+# [osd.X]\r
+# osd crush location = "root=location"\r
#\r
+# This works with your inventory file\r
+# To match the following 'osd_crush_location' option, the inventory must look like:\r
+#\r
+# [osds]\r
+# osd0 ceph_crush_root=foo ceph_crush_rack=bar\r
+\r
+crush_location: false\r
+osd_crush_location: "'root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}'"\r
+\r
+##############\r
+# CEPH OPTIONS\r
+##############\r
\r
# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT\r
fsid: "{{ cluster_uuid.stdout }}"\r
ignore_errors: True
changed_when: False
+- include: osd_fragment.yml
+ when: crush_location
+
- name: Start and add that the OSD service to the init sequence
service: >
name=ceph
--- /dev/null
+---
+- name: Get OSD path
+ shell: "df | grep {{ item }} | awk '{print $6}'"
+ with_items: devices
+ register: osd_path
+ ignore_errors: true
+
+- name: Get OSD id
+ command: cat {{ item.stdout }}/whoami
+ register: osd_id
+ with_items: osd_path.results
+ ignore_errors: true
+
+- name: Create a Ceph fragment and assemble directory
+ file: >
+ path={{ item }}
+ state=directory
+ owner=root
+ group=root
+ mode=0644
+ with_items:
+ - /etc/ceph/ceph.d/
+ - /etc/ceph/ceph.d/osd_fragments
+
+- name: Create the OSD fragment
+ template: >
+ src=osd.conf.j2
+ dest=/etc/ceph/ceph.d/osd_fragments/osd.{{ item.stdout }}.conf
+ with_items: osd_id.results
+
+- name: Copy ceph.conf for assembling
+ command: cp /etc/ceph/ceph.conf /etc/ceph/ceph.d/
+
+- name: Assemble OSD sections
+ assemble: >
+ src=/etc/ceph/ceph.d/osd_fragments/
+ dest=/etc/ceph/ceph.d/osd.conf
+ owner=root
+ group=root
+ mode=0644
+
+- name: Assemble Ceph conf and OSD fragments
+ assemble: >
+ src=/etc/ceph/ceph.d/
+ dest=/etc/ceph/ceph.conf
+ owner=root
+ group=root
+ mode=0644
--- /dev/null
+[osd.{{ item.stdout }}]
+osd crush location = {{ osd_crush_location }}