git.apps.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
ceph-osd: add support for bluestore 649/head
author Sébastien Han <seb@redhat.com>
Fri, 25 Mar 2016 13:15:29 +0000 (14:15 +0100)
committer Sébastien Han <seb@redhat.com>
Fri, 25 Mar 2016 15:02:02 +0000 (16:02 +0100)
With Jewel comes a new way to store Ceph objects: BlueStore. Adding an
extra scenario might seem like useless duplication, however the
ultimate goal is to remove the other scenarios later. Thus it is easier
to add a new scenario file than to modify the existing ones. Once we
drop support for releases older than Jewel we will simply remove all
the previous scenario files.

Signed-off-by: Sébastien Han <seb@redhat.com>
group_vars/osds.sample
roles/ceph-osd/defaults/main.yml
roles/ceph-osd/tasks/main.yml
roles/ceph-osd/tasks/scenarios/bluestore.yml [new file with mode: 0644]
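
For reference, with the files below in place, the new scenario would be enabled
from the copied group_vars/osds.yml roughly as in this sketch. It is not part of
the commit; the device names are placeholders and the exact set of required
variables is an assumption:

    # group_vars/osds.yml (illustrative only)
    bluestore: true            # flag added by this commit, defaults to false
    osd_auto_discovery: false  # prepare only the disks listed below
    devices:                   # hypothetical disks, adjust to the hardware
      - /dev/sdb
      - /dev/sdc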

index 11f0caee1ccd0ccd89fdffca516d8f316bc2e822..3c34a60ac79582fb1ef7b4599bfd24207ca3fab9 100644 (file)
@@ -118,6 +118,11 @@ dummy:
 #  - /var/lib/ceph/osd/mydir2
 
 
+# V. Fifth scenario: this will partition disks for BlueStore
+# Use 'true' to enable this scenario
+#bluestore: false
+
+
 ##########
 # DOCKER #
 ##########
index 0b86c1fe3f1c1cac672c068d2784639abc62b450..30ffc5f3f9a350b7e2250541d4cee1f1072d075b 100644 (file)
@@ -110,6 +110,11 @@ osd_directory: false
 #  - /var/lib/ceph/osd/mydir2
 
 
+# V. Fifth scenario: this will partition disks for BlueStore
+# Use 'true' to enable this scenario
+bluestore: false
+
+
 ##########
 # DOCKER #
 ##########
index fcda24dde39ec24a5eab5c9ad1d344bc91820af6..cfad81e96ec358e486fd53dff8644f7d1bb4bc10 100644 (file)
 - include: ./scenarios/osd_directory.yml
   when: osd_directory and not osd_containerized_deployment
 
+- include: ./scenarios/bluestore.yml
+  when:
+    osd_objectstore == 'bluestore' and
+    not osd_containerized_deployment
+
 - include: ./docker/main.yml
   when: osd_containerized_deployment
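
Note that the include above is gated on osd_objectstore while the tasks inside
bluestore.yml (below) are gated on the new bluestore flag, so a deployment would
presumably have to set both. A minimal sketch, assuming the two variables are
meant to be set together:

    # illustrative group or host vars, not part of this commit
    osd_objectstore: bluestore   # satisfies the include condition above
    bluestore: true              # satisfies the when: clauses in bluestore.yml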
diff --git a/roles/ceph-osd/tasks/scenarios/bluestore.yml b/roles/ceph-osd/tasks/scenarios/bluestore.yml
new file mode 100644 (file)
index 0000000..0ef9c95
--- /dev/null
@@ -0,0 +1,40 @@
+
+---
+## SCENARIO 5: BLUESTORE
+
+- include: ../check_devices.yml
+
+# NOTE (leseb): the prepare process must be parallelized somehow...
+# if you have 64 disks with 4TB each, this will take a while
+# since Ansible will process the loop sequentially
+
+# NOTE (alahouze): if the device is a partition, the parted command below will
+# have failed, which is why we also check whether the device is a partition.
+- name: automatic prepare osd disk(s) without partitions
+  command: ceph-disk prepare --bluestore "/dev/{{ item.key }}"
+  ignore_errors: true
+  register: prepared_osds
+  with_dict: ansible_devices
+  when:
+    ansible_devices is defined and
+    item.value.removable == "0" and
+    item.value.partitions|count == 0 and
+    bluestore and
+    osd_auto_discovery
+
+- name: manually prepare osd disk(s)
+  command: ceph-disk prepare --bluestore "{{ item.2 }}"
+  ignore_errors: true
+  with_together:
+    - combined_parted_results.results
+    - combined_ispartition_results.results
+    - devices
+  when:
+    not item.0.get("skipped") and
+    not item.1.get("skipped") and
+    item.0.get("rc", 0) != 0 and
+    item.1.get("rc", 0) != 0 and
+    bluestore and not
+    osd_auto_discovery
+
+- include: ../activate_osds.yml
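
Regarding the NOTE from leseb about the loop being processed sequentially: one
possible way to parallelize the prepare step is Ansible's async/poll
fire-and-forget pattern followed by async_status polling. A rough sketch only,
not part of this commit, with the original when: conditions elided:

    - name: prepare osd disk(s) in the background
      command: ceph-disk prepare --bluestore "/dev/{{ item.key }}"
      async: 3600                  # allow up to an hour per disk
      poll: 0                      # do not block, move on to the next disk
      register: prepare_jobs
      with_dict: ansible_devices

    - name: wait for all prepare jobs to finish
      async_status:
        jid: "{{ item.ansible_job_id }}"
      register: job_result
      until: job_result.finished
      retries: 360                 # poll for up to an hour (360 * 10s)
      delay: 10
      with_items: prepare_jobs.results
      when: item.ansible_job_id is defined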