From: Sébastien Han
Date: Fri, 25 Mar 2016 13:15:29 +0000 (+0100)
Subject: ceph-osd: add support for bluestore
X-Git-Tag: v1.0.3~8^2
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=225e066db26089d64ea5910f1498062f1b4d7381;p=ceph-ansible.git

ceph-osd: add support for bluestore

With Jewel comes a new store for Ceph objects: BlueStore.

Adding an extra scenario might seem like useless duplication, however the
ultimate goal is to remove the other roles later. It is therefore easier to
add a new role than to modify the existing ones. Once we drop support for
releases older than Jewel we will simply remove all the previous scenario
files.

Signed-off-by: Sébastien Han
---

diff --git a/group_vars/osds.sample b/group_vars/osds.sample
index 11f0caee1..3c34a60ac 100644
--- a/group_vars/osds.sample
+++ b/group_vars/osds.sample
@@ -118,6 +118,11 @@ dummy:
 #  - /var/lib/ceph/osd/mydir2
 
 
+# V. Fifth scenario: this will partition disks for BlueStore
+# Use 'true' to enable this scenario
+#bluestore: false
+
+
 ##########
 # DOCKER #
 ##########
diff --git a/roles/ceph-osd/defaults/main.yml b/roles/ceph-osd/defaults/main.yml
index 0b86c1fe3..30ffc5f3f 100644
--- a/roles/ceph-osd/defaults/main.yml
+++ b/roles/ceph-osd/defaults/main.yml
@@ -110,6 +110,11 @@ osd_directory: false
 #  - /var/lib/ceph/osd/mydir2
 
 
+# V. Fifth scenario: this will partition disks for BlueStore
+# Use 'true' to enable this scenario
+bluestore: false
+
+
 ##########
 # DOCKER #
 ##########
diff --git a/roles/ceph-osd/tasks/main.yml b/roles/ceph-osd/tasks/main.yml
index fcda24dde..cfad81e96 100644
--- a/roles/ceph-osd/tasks/main.yml
+++ b/roles/ceph-osd/tasks/main.yml
@@ -11,5 +11,10 @@
 - include: ./scenarios/osd_directory.yml
   when: osd_directory and not osd_containerized_deployment
 
+- include: ./scenarios/bluestore.yml
+  when:
+    osd_objectstore == 'bluestore' and
+    not osd_containerized_deployment
+
 - include: ./docker/main.yml
   when: osd_containerized_deployment
diff --git a/roles/ceph-osd/tasks/scenarios/bluestore.yml b/roles/ceph-osd/tasks/scenarios/bluestore.yml
new file mode 100644
index 000000000..0ef9c95f6
--- /dev/null
+++ b/roles/ceph-osd/tasks/scenarios/bluestore.yml
@@ -0,0 +1,40 @@
+
+---
+## SCENARIO 4: BLUESTORE
+
+- include: ../check_devices.yml
+
+# NOTE (leseb): the prepare process must be parallelized somehow...
+# if you have 64 disks with 4TB each, this will take a while
+# since Ansible will sequentially process the loop
+
+# NOTE (alahouze): if the device is a partition, the parted command below would
+# fail, this is why we check if the device is a partition too.
+- name: automatic prepare osd disk(s) without partitions
+  command: ceph-disk prepare --bluestore "/dev/{{ item.key }}"
+  ignore_errors: true
+  register: prepared_osds
+  with_dict: ansible_devices
+  when:
+    ansible_devices is defined and
+    item.value.removable == "0" and
+    item.value.partitions|count == 0 and
+    bluestore and
+    osd_auto_discovery
+
+- name: manually prepare osd disk(s)
+  command: ceph-disk prepare --bluestore "{{ item.2 }}"
+  ignore_errors: true
+  with_together:
+    - combined_parted_results.results
+    - combined_ispartition_results.results
+    - devices
+  when:
+    not item.0.get("skipped") and
+    not item.1.get("skipped") and
+    item.0.get("rc", 0) != 0 and
+    item.1.get("rc", 0) != 0 and
+    bluestore and not
+    osd_auto_discovery
+
+- include: ../activate_osds.yml
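
A minimal usage sketch, not part of the patch itself: assuming both variables
are honored as the conditions in tasks/main.yml and scenarios/bluestore.yml
suggest, an inventory's group_vars/osds.yml could enable the new scenario
against an explicit device list along these lines (the device names are
placeholders):

    osd_objectstore: bluestore   # matches the include condition in tasks/main.yml
    bluestore: true              # enables the tasks in scenarios/bluestore.yml
    osd_auto_discovery: false    # use the explicit device list below
    devices:
      - /dev/sdb                 # placeholder devices, adjust to the hardware
      - /dev/sdc

With this, the "manually prepare osd disk(s)" task runs
ceph-disk prepare --bluestore against each listed device that the
check_devices.yml checks reported as blank and not a partition.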
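
For the auto-discovery path a similar sketch, again only an illustration:

    osd_objectstore: bluestore   # matches the include condition in tasks/main.yml
    bluestore: true              # enables the scenario
    osd_auto_discovery: true     # no devices list; discover disks from the facts

Here the "automatic prepare osd disk(s) without partitions" task iterates over
ansible_devices and runs ceph-disk prepare --bluestore /dev/<disk> for every
non-removable disk that reports zero partitions, so any blank disk on the host
will be consumed.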