- include: ./scenarios/journal_collocation.yml
when:
- - journal_collocation
+ - (journal_collocation or dmcrypt_journal_collocation)
- not containerized_deployment
  # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
  static: False
-- include: ./scenarios/dmcrypt-journal-collocation.yml
- when:
- - dmcrypt_journal_collocation
- - not containerized_deployment
- # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
- static: False
-
- include: ./scenarios/dmcrypt-dedicated-journal.yml
when:
- dmcrypt_dedicated_journal
- include: ../check_devices.yml
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore --dmcrypt"
+  when:
+    - osd_objectstore == 'bluestore'
+    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
+
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }} --filestore --dmcrypt"
+  when:
+    - osd_objectstore == 'filestore'
+    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
+
+- set_fact:
+    ceph_disk_cli_options: "--cluster {{ cluster }} --dmcrypt"
+  when:
+    - osd_objectstore == 'filestore'
+    - ceph_release_num[ceph_release] < ceph_release_num.luminous
+
# NOTE (leseb): the prepare process must be parallelized somehow...
# if you have 64 disks with 4TB each, this will take a while
# since Ansible will sequential process the loop
# NOTE (alahouze): if the device is a partition, the parted command below has
# failed, this is why we check if the device is a partition too.
-- name: prepare dmcrypt osd disk(s) with a dedicated journal device (filestore)
- command: "ceph-disk prepare --dmcrypt --cluster {{ cluster }} {{ item.2 }} {{ item.3 }}"
- with_together:
- - "{{ parted_results.results }}"
- - "{{ ispartition_results.results }}"
- - "{{ devices }}"
- - "{{ raw_journal_devices }}"
- changed_when: false
- when:
- - not item.0.get("skipped")
- - not item.1.get("skipped")
- - item.0.get("rc", 0) != 0
- - item.1.get("rc", 0) != 0
- - not osd_auto_discovery
- - osd_objectstore == 'filestore'
- - dmcrypt_dedicated_journal
-
-- name: prepare dmcrypt osd disk(s) with a dedicated journal device (bluestore)
- command: "ceph-disk prepare --bluestore --dmcrypt --cluster {{ cluster }} {{ item.2 }} {{ item.3 }}"
+- name: prepare dmcrypt osd disk(s) with a dedicated journal device on "{{ osd_objectstore }}"
+ command: "ceph-disk prepare {{ ceph_disk_cli_options }} {{ item.2 }} {{ item.3 }}"
with_together:
- "{{ parted_results.results }}"
- "{{ ispartition_results.results }}"
- item.0.get("rc", 0) != 0
- item.1.get("rc", 0) != 0
- not osd_auto_discovery
- - osd_objectstore == 'bluestore'
- - dmcrypt_dedicated_journal
- include: ../activate_osds.yml
+++ /dev/null
----
-## SCENARIO 5: DMCRYPT
-
-- include: ../check_devices.yml
-
-# NOTE (leseb): the prepare process must be parallelized somehow...
-# if you have 64 disks with 4TB each, this will take a while
-# since Ansible will sequential process the loop
-
-# NOTE (alahouze): if the device is a partition, the parted command below has
-# failed, this is why we check if the device is a partition too.
-- name: automatic prepare dmcrypt osd disk(s) without partitions with collocated osd data and journal (filestore)
- command: ceph-disk prepare --dmcrypt --cluster "{{ cluster }}" "/dev/{{ item.key }}"
- with_dict: "{{ ansible_devices }}"
- when:
- - ansible_devices is defined
- - item.value.removable == "0"
- - item.value.partitions|count == 0
- - item.value.holders|count == 0
- - dmcrypt_journal_collocation
- - osd_objectstore == 'filestore'
- - osd_auto_discovery
-
-- name: automatic prepare dmcrypt osd disk(s) without partitions with collocated osd data and journal (bluestore)
- command: ceph-disk prepare --bluestore --dmcrypt --cluster "{{ cluster }}" "/dev/{{ item.key }}"
- with_dict: "{{ ansible_devices }}"
- when:
- - ansible_devices is defined
- - item.value.removable == "0"
- - item.value.partitions|count == 0
- - item.value.holders|count == 0
- - dmcrypt_journal_collocation
- - osd_objectstore == 'bluestore'
- - osd_auto_discovery
-
-- name: manually prepare dmcrypt osd disk(s) with collocated osd data and journal (filestore)
- command: ceph-disk prepare --dmcrypt --cluster "{{ cluster }}" "{{ item.2 }}"
- with_together:
- - "{{ parted_results.results }}"
- - "{{ ispartition_results.results }}"
- - "{{ devices }}"
- when:
- - not item.0.get("skipped")
- - not item.1.get("skipped")
- - item.0.get("rc", 0) != 0
- - item.1.get("rc", 0) != 0
- - dmcrypt_journal_collocation
- - osd_objectstore == 'filestore'
- - not osd_auto_discovery
-
-- name: manually prepare dmcrypt osd disk(s) with collocated osd data and journal (bluestore)
- command: ceph-disk prepare --bluestore --dmcrypt --cluster "{{ cluster }}" "{{ item.2 }}"
- with_together:
- - "{{ parted_results.results }}"
- - "{{ ispartition_results.results }}"
- - "{{ devices }}"
- when:
- - not item.0.get("skipped")
- - not item.1.get("skipped")
- - item.0.get("rc", 0) != 0
- - item.1.get("rc", 0) != 0
- - dmcrypt_journal_collocation
- - osd_objectstore == 'bluestore'
- - not osd_auto_discovery
-
-- include: ../activate_osds.yml
# if you have 64 disks with 4TB each, this will take a while
# since Ansible will sequential process the loop
-# NOTE (alahouze): if the device is a partition, the parted command below has
-# failed, this is why we check if the device is a partition too.
-- name: automatic prepare filestore osd disk(s) without partitions with collocated osd data and journal
- command: ceph-disk prepare --cluster "{{ cluster }}" "/dev/{{ item.key }}"
- register: prepared_osds
- with_dict: "{{ ansible_devices }}"
+- set_fact:
+ ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore"
when:
- - ansible_devices is defined
- - item.value.removable == "0"
- - item.value.partitions|count == 0
- - item.value.holders|count == 0
+ - osd_objectstore == 'bluestore'
- journal_collocation
+    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
+
+- set_fact:
+ ceph_disk_cli_options: "--cluster {{ cluster }} --filestore"
+ when:
- osd_objectstore == 'filestore'
- - osd_auto_discovery
+ - journal_collocation
+    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
-- name: manually prepare filestore osd disk(s) with collocated osd data and journal
- command: "ceph-disk prepare --cluster {{ cluster }} {{ item.2 }}"
- with_together:
- - "{{ parted_results.results }}"
- - "{{ ispartition_results.results }}"
- - "{{ devices }}"
+- set_fact:
+ ceph_disk_cli_options: "--cluster {{ cluster }}"
when:
- - not item.0.get("skipped")
- - not item.1.get("skipped")
- - item.0.get("rc", 0) != 0
- - item.1.get("rc", 0) != 0
+ - osd_objectstore == 'filestore'
- journal_collocation
+    - ceph_release_num[ceph_release] < ceph_release_num.luminous
+
+- set_fact:
+ ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore --dmcrypt"
+ when:
+ - osd_objectstore == 'bluestore'
+ - dmcrypt_journal_collocation
+    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
+
+- set_fact:
+ ceph_disk_cli_options: "--cluster {{ cluster }} --filestore --dmcrypt"
+ when:
- osd_objectstore == 'filestore'
- - not osd_auto_discovery
+ - dmcrypt_journal_collocation
+    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
-- name: automatic prepare bluestore osd disk(s) without partitions
- command: ceph-disk prepare --bluestore --cluster "{{ cluster }}" "/dev/{{ item.key }}"
+- set_fact:
+ ceph_disk_cli_options: "--cluster {{ cluster }} --dmcrypt"
+ when:
+ - osd_objectstore == 'filestore'
+ - dmcrypt_journal_collocation
+    - ceph_release_num[ceph_release] < ceph_release_num.luminous
+
+# NOTE (alahouze): if the device is a partition, the parted command below has
+# failed, this is why we check if the device is a partition too.
+- name: automatic prepare "{{ osd_objectstore }}" osd disk(s) without partitions with collocated osd data and journal
+ command: "ceph-disk prepare {{ ceph_disk_cli_options }} /dev/{{ item.key }}"
register: prepared_osds
with_dict: "{{ ansible_devices }}"
when:
- item.value.removable == "0"
- item.value.partitions|count == 0
- item.value.holders|count == 0
- - journal_collocation
- - osd_objectstore == 'bluestore'
- osd_auto_discovery
-- name: manually prepare bluestore osd disk(s)
- command: ceph-disk prepare --bluestore --cluster "{{ cluster }}" "{{ item.2 }}"
+- name: manually prepare "{{ osd_objectstore }}" osd disk(s) with collocated osd data and journal
+ command: "ceph-disk prepare {{ ceph_disk_cli_options }} {{ item.2 }}"
with_together:
- "{{ parted_results.results }}"
- "{{ ispartition_results.results }}"
- not item.1.get("skipped")
- item.0.get("rc", 0) != 0
- item.1.get("rc", 0) != 0
- - journal_collocation
- - osd_objectstore == 'bluestore'
- not osd_auto_discovery
- include: ../activate_osds.yml
# since Ansible will sequential process the loop
- set_fact:
- osd_type: "--filestore"
- when:
+ ceph_disk_cli_options: "--cluster {{ cluster }} --bluestore"
+ when:
+ - osd_objectstore == 'bluestore'
+    - ceph_release_num[ceph_release] >= ceph_release_num.luminous
+
+- set_fact:
+ ceph_disk_cli_options: "--cluster {{ cluster }} --filestore"
+ when:
+ - osd_objectstore == 'filestore'
- ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
+
+- set_fact:
+ ceph_disk_cli_options: "--cluster {{ cluster }}"
+ when:
- osd_objectstore == 'filestore'
+    - ceph_release_num[ceph_release] < ceph_release_num.luminous
- name: prepare filestore osd disk(s) with a dedicated journal device
- command: "ceph-disk prepare {{ osd_type | default('') }} --cluster {{ cluster }} {{ item.1 }} {{ item.2 }}"
+ command: "ceph-disk prepare {{ ceph_disk_cli_options }} {{ item.1 }} {{ item.2 }}"
with_together:
- "{{ parted_results.results }}"
- "{{ devices }}"
changed_when: false
when:
- item.0.get("skipped") or item.0.get("rc", 0) != 0
- - raw_multi_journal
- osd_objectstore == 'filestore'
- not osd_auto_discovery
- name: manually prepare bluestore osd disk(s) with a dedicated device for db and wal
- command: "ceph-disk prepare --bluestore --cluster {{ cluster }} --block.db {{ item.1 }} --block.wal {{ item.2 }} {{ item.3 }}"
+ command: "ceph-disk prepare {{ ceph_disk_cli_options }} --block.db {{ item.1 }} --block.wal {{ item.2 }} {{ item.3 }}"
with_together:
- "{{ parted_results.results }}"
- "{{ raw_journal_devices }}"
when:
- not item.0.get("skipped")
- item.0.get("rc", 0) != 0
- - raw_multi_journal
- osd_objectstore == 'bluestore'
- not osd_auto_discovery