From: Sébastien Han
Date: Thu, 29 Jun 2017 15:34:54 +0000 (+0200)
Subject: osd: ability to set db and wal to bluestore
X-Git-Tag: v2.3.0rc2~11^2~1
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=7d657ac643bd1ef18bb4d06eeea0e02d172b9665;p=ceph-ansible.git

osd: ability to set db and wal to bluestore

This commit refactors how we deploy bluestore. We have existing scenarios that
we don't want to change too much. This commit eases the user experience by not
changing the way you use scenarios. Bluestore is just a different interface to
store objects, but the scenarios remain more or less the same.

If you set osd_objectstore == 'bluestore' along with journal_collocation: true,
you will get an OSD running bluestore with DB and WAL partitions on the same
device.

If you set osd_objectstore == 'bluestore' along with raw_multi_journal: true,
you will get an OSD running bluestore with a dedicated drive for the rocksdb DB,
and the remaining drives (listed under 'devices') will have WAL and DATA
collocated.

If you set osd_objectstore == 'bluestore' along with raw_multi_journal: true and
declare bluestore_wal_devices, you will get an OSD running bluestore with a
dedicated drive for the rocksdb DB, a dedicated drive for the rocksdb WAL and a
dedicated drive for DATA.

Signed-off-by: Sébastien Han
---

diff --git a/group_vars/osds.yml.sample b/group_vars/osds.yml.sample
index 7b2dcbfb0..40f3c4252 100644
--- a/group_vars/osds.yml.sample
+++ b/group_vars/osds.yml.sample
@@ -103,8 +103,18 @@ dummy:
# This will collocate both journal and data on the same disk
# creating a partition at the beginning of the device
# List devices under 'devices' variable above or choose 'osd_auto_discovery'
-
-
+#
+#
+# If osd_objectstore: bluestore is enabled, both rocksdb DB and WAL will be stored
+# on the device. So the device will get 2 partitions:
+# - One for 'data', a small partition holding Ceph metadata
+# - One for 'block', which stores your data together with the rocksdb DB and WAL
+#
+# Example of what you will get:
+# [root@ceph-osd0 ~]# blkid /dev/sda*
+# /dev/sda: PTTYPE="gpt"
+# /dev/sda1: UUID="9c43e346-dd6e-431f-92d8-cbed4ccb25f6" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="749c71c9-ed8f-4930-82a7-a48a3bcdb1c7"
+# /dev/sda2: PARTLABEL="ceph block" PARTUUID="e6ca3e1d-4702-4569-abfa-e285de328e9d"
#journal_collocation: false

@@ -130,13 +140,48 @@ dummy:
# On a containerized scenario we only support A SINGLE journal
# for all the OSDs on a given machine. If you don't, bad things will happen
# This is a limitation we plan to fix at some point.
+#
+#
+# If osd_objectstore: bluestore is enabled, both rocksdb DB and WAL will be stored
+# on a dedicated device. So the following will happen:
+# - The devices listed in 'devices' will get 2 partitions, one for 'block' and one for 'data'.
+#   'data' is only 100MB and does not store any of your data; it only holds some Ceph metadata.
+#   'block' will store all your data.
+# - The devices in 'raw_journal_devices' will get 1 partition for RocksDB DB, called 'block.db',
+#   and one for RocksDB WAL, called 'block.wal'
+#
+# By default, raw_journal_devices will be used for block.db
+#
+# Example of what you will get:
+# [root@ceph-osd0 ~]# blkid /dev/sd*
+# /dev/sda: PTTYPE="gpt"
+# /dev/sda1: UUID="c6821801-2f21-4980-add0-b7fc8bd424d5" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="f2cc6fa8-5b41-4428-8d3f-6187453464d0"
+# /dev/sda2: PARTLABEL="ceph block" PARTUUID="ea454807-983a-4cf2-899e-b2680643bc1c"
+# /dev/sdb: PTTYPE="gpt"
+# /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="af5b2d74-4c08-42cf-be57-7248c739e217"
+# /dev/sdb2: PARTLABEL="ceph block.wal" PARTUUID="af3f8327-9aa9-4c2b-a497-cf0fe96d126a"
#raw_journal_devices: []

# IV. This will partition disks for BlueStore
-# Use 'true' to enable this scenario
# To enable bluestore just set:
# osd_objectstore: bluestore
+#
+# If osd_objectstore: bluestore is enabled, the following applies:
+# By default, if bluestore_wal_devices is empty, it will get the content of raw_journal_devices.
+# If set, then you will have a dedicated partition on a specific device (bluestore_wal_devices)
+# for block.wal
+#
+# Example of what you will get:
+# [root@ceph-osd0 ~]# blkid /dev/sd*
+# /dev/sda: PTTYPE="gpt"
+# /dev/sda1: UUID="39241ae9-d119-4335-96b3-0898da8f45ce" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="961e7313-bdb7-49e7-9ae7-077d65c4c669"
+# /dev/sda2: PARTLABEL="ceph block" PARTUUID="bff8e54e-b780-4ece-aa16-3b2f2b8eb699"
+# /dev/sdb: PTTYPE="gpt"
+# /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="0734f6b6-cc94-49e9-93de-ba7e1d5b79e3"
+# /dev/sdc: PTTYPE="gpt"
+# /dev/sdc1: PARTLABEL="ceph block.wal" PARTUUID="824b84ba-6777-4272-bbbd-bfe2a25cecf3"
+#bluestore_wal_devices: "{{ raw_journal_devices }}"

# V. Encrypt osd data and/or journal devices with dm-crypt.
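To make the layouts documented above concrete, here is a minimal group_vars/osds.yml sketch for the two non-containerized bluestore scenarios; the device paths are illustrative placeholders and are not part of this commit:

# Bluestore with DB and WAL collocated on the data disks
osd_objectstore: bluestore
journal_collocation: true
devices:
  - /dev/sda
  - /dev/sdb

# Bluestore with data on 'devices' and the rocksdb DB (and, by default, WAL) on a dedicated device
osd_objectstore: bluestore
raw_multi_journal: true
devices:
  - /dev/sda
raw_journal_devices:
  - /dev/sdb    # will hold 'block.db' and, unless bluestore_wal_devices is set, 'block.wal'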
@@ -167,18 +212,26 @@ dummy:
# WARNING /!\ DMCRYPT scenario ONLY works with Docker version 1.12.5 and above
#
# Examples:
-# Journal collocated: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
-# Dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }}
-# Encrypted OSD: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1
-# Encrypted OSD with dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }}
+# Journal collocated on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_FILESTORE=1
+# Dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_FILESTORE=1
+# Encrypted OSD on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
+# Encrypted OSD with dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_FILESTORE=1
+#
+# Bluestore OSD collocated: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1
+# Bluestore OSD with dedicated db: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }}
+# Bluestore OSD with dedicated db and wal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_BLUESTORE_BLOCK_WAL={{ bluestore_wal_devices[0] }}
+# Encrypted OSD: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
+# Encrypted OSD with dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_FILESTORE=1
+#
#
#ceph_osd_docker_devices: "{{ devices }}"
-#ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
+#ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_BLUESTORE=1

# ACTIVATE DEVICE
# Examples:
-# Journal collocated or Dedicated journal: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }}
-# Encrypted OSD or Encrypted OSD with dedicated journal: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1
+# Journal collocated or Dedicated journal on Filestore: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FILESTORE=1
+# Encrypted OSD or Encrypted OSD with dedicated journal: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
+# Bluestore OSD: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_BLUESTORE=1
#
#ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }}
#ceph_osd_docker_run_script_path: "/usr/share" # script called by systemd to run the docker command

diff --git a/roles/ceph-osd/defaults/main.yml b/roles/ceph-osd/defaults/main.yml
index 4779c3db2..db2f07c10 100644
--- a/roles/ceph-osd/defaults/main.yml
+++ b/roles/ceph-osd/defaults/main.yml
@@ -95,8 +95,18 @@ osd_auto_discovery: false
# This will collocate both journal and data on the same disk
# creating a partition at the beginning of the device
# List devices under 'devices' variable above or choose 'osd_auto_discovery'
-
-
+#
+#
+# If osd_objectstore: bluestore is enabled, both rocksdb DB and WAL will be stored
+# on the device. So the device will get 2 partitions:
+# - One for 'data', a small partition holding Ceph metadata
+# - One for 'block', which stores your data together with the rocksdb DB and WAL
+#
+# Example of what you will get:
+# [root@ceph-osd0 ~]# blkid /dev/sda*
+# /dev/sda: PTTYPE="gpt"
+# /dev/sda1: UUID="9c43e346-dd6e-431f-92d8-cbed4ccb25f6" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="749c71c9-ed8f-4930-82a7-a48a3bcdb1c7"
+# /dev/sda2: PARTLABEL="ceph block" PARTUUID="e6ca3e1d-4702-4569-abfa-e285de328e9d"
journal_collocation: false

@@ -122,13 +132,48 @@ raw_multi_journal: false
# On a containerized scenario we only support A SINGLE journal
# for all the OSDs on a given machine. If you don't, bad things will happen
# This is a limitation we plan to fix at some point.
+#
+#
+# If osd_objectstore: bluestore is enabled, both rocksdb DB and WAL will be stored
+# on a dedicated device. So the following will happen:
+# - The devices listed in 'devices' will get 2 partitions, one for 'block' and one for 'data'.
+#   'data' is only 100MB and does not store any of your data; it only holds some Ceph metadata.
+#   'block' will store all your data.
+# - The devices in 'raw_journal_devices' will get 1 partition for RocksDB DB, called 'block.db',
+#   and one for RocksDB WAL, called 'block.wal'
+#
+# By default, raw_journal_devices will be used for block.db
+#
+# Example of what you will get:
+# [root@ceph-osd0 ~]# blkid /dev/sd*
+# /dev/sda: PTTYPE="gpt"
+# /dev/sda1: UUID="c6821801-2f21-4980-add0-b7fc8bd424d5" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="f2cc6fa8-5b41-4428-8d3f-6187453464d0"
+# /dev/sda2: PARTLABEL="ceph block" PARTUUID="ea454807-983a-4cf2-899e-b2680643bc1c"
+# /dev/sdb: PTTYPE="gpt"
+# /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="af5b2d74-4c08-42cf-be57-7248c739e217"
+# /dev/sdb2: PARTLABEL="ceph block.wal" PARTUUID="af3f8327-9aa9-4c2b-a497-cf0fe96d126a"
raw_journal_devices: []

# IV. This will partition disks for BlueStore
-# Use 'true' to enable this scenario
# To enable bluestore just set:
# osd_objectstore: bluestore
+#
+# If osd_objectstore: bluestore is enabled, the following applies:
+# By default, if bluestore_wal_devices is empty, it will get the content of raw_journal_devices.
+# If set, then you will have a dedicated partition on a specific device (bluestore_wal_devices)
+# for block.wal
+#
+# Example of what you will get:
+# [root@ceph-osd0 ~]# blkid /dev/sd*
+# /dev/sda: PTTYPE="gpt"
+# /dev/sda1: UUID="39241ae9-d119-4335-96b3-0898da8f45ce" TYPE="xfs" PARTLABEL="ceph data" PARTUUID="961e7313-bdb7-49e7-9ae7-077d65c4c669"
+# /dev/sda2: PARTLABEL="ceph block" PARTUUID="bff8e54e-b780-4ece-aa16-3b2f2b8eb699"
+# /dev/sdb: PTTYPE="gpt"
+# /dev/sdb1: PARTLABEL="ceph block.db" PARTUUID="0734f6b6-cc94-49e9-93de-ba7e1d5b79e3"
+# /dev/sdc: PTTYPE="gpt"
+# /dev/sdc1: PARTLABEL="ceph block.wal" PARTUUID="824b84ba-6777-4272-bbbd-bfe2a25cecf3"
+bluestore_wal_devices: "{{ raw_journal_devices }}"

# V. Encrypt osd data and/or journal devices with dm-crypt.

@@ -159,18 +204,26 @@ ceph_config_keys: [] # DON'T TOUCH ME
# WARNING /!\ DMCRYPT scenario ONLY works with Docker version 1.12.5 and above
#
# Examples:
-# Journal collocated: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
-# Dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }}
-# Encrypted OSD: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1
-# Encrypted OSD with dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }}
+# Journal collocated on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_FILESTORE=1
+# Dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_FILESTORE=1
+# Encrypted OSD on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
+# Encrypted OSD with dedicated journal on Filestore: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_FILESTORE=1
+#
+# Bluestore OSD collocated: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1
+# Bluestore OSD with dedicated db: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }}
+# Bluestore OSD with dedicated db and wal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_BLUESTORE=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_BLUESTORE_BLOCK_WAL={{ bluestore_wal_devices[0] }}
+# Encrypted OSD: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
+# Encrypted OSD with dedicated journal: ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_DMCRYPT=1 -e OSD_JOURNAL={{ raw_journal_devices[0] }} -e OSD_FILESTORE=1
+#
#
ceph_osd_docker_devices: "{{ devices }}"
-ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1
+ceph_osd_docker_prepare_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FORCE_ZAP=1 -e OSD_BLUESTORE=1

# ACTIVATE DEVICE
# Examples:
-# Journal collocated or Dedicated journal: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }}
-# Encrypted OSD or Encrypted OSD with dedicated journal: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1
+# Journal collocated or Dedicated journal on Filestore: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_FILESTORE=1
+# Encrypted OSD or Encrypted OSD with dedicated journal: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_DMCRYPT=1 -e OSD_FILESTORE=1
+# Bluestore OSD: ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE -e OSD_JOURNAL_SIZE={{ journal_size }} -e OSD_BLUESTORE=1
#
ceph_osd_docker_extra_env: -e CLUSTER={{ cluster }} -e OSD_JOURNAL_SIZE={{ journal_size }}
ceph_osd_docker_run_script_path: "/usr/share" # script called by systemd to run the docker command

diff --git a/roles/ceph-osd/tasks/check_mandatory_vars.yml b/roles/ceph-osd/tasks/check_mandatory_vars.yml
index c09105923..9def8abf4 100644
--- a/roles/ceph-osd/tasks/check_mandatory_vars.yml
+++ b/roles/ceph-osd/tasks/check_mandatory_vars.yml
@@ -16,7 +16,7 @@
    msg: "WARNING: journal_size is configured to less than 5GB. This is not recommended and can lead to severe issues."
  when:
    - journal_size|int < 5120
-    - osd_objectstore != 'bluestore'
+    - osd_objectstore == 'filestore'
    - osd_group_name in group_names

- name: make sure an osd scenario was chosen
@@ -28,7 +28,6 @@
    - not containerized_deployment
    - not journal_collocation
    - not raw_multi_journal
-    - osd_objectstore != 'bluestore'
    - not dmcrypt_journal_collocation
    - not dmcrypt_dedicated_journal

@@ -40,14 +39,10 @@
    - osd_group_name in group_names
    - not containerized_deployment
    - (journal_collocation and raw_multi_journal)
-      or (journal_collocation and osd_objectstore == 'bluestore')
-      or (raw_multi_journal and osd_objectstore == 'bluestore')
      or (dmcrypt_journal_collocation and journal_collocation)
      or (dmcrypt_journal_collocation and raw_multi_journal)
-      or (dmcrypt_journal_collocation and osd_objectstore == 'bluestore')
      or (dmcrypt_dedicated_journal and journal_collocation)
      or (dmcrypt_dedicated_journal and raw_multi_journal)
-      or (dmcrypt_dedicated_journal and osd_objectstore == 'bluestore')
      or (dmcrypt_dedicated_journal and dmcrypt_journal_collocation)

- name: verify devices have been provided
@@ -71,10 +66,12 @@
    - raw_journal_devices|length == 0
      or devices|length == 0

-- name: check if bluestore is supported by the selection ceph version
+- name: check if bluestore is supported by the selected ceph version
  fail:
-    msg: "Bluestore is not supported with the selected Ceph version, use Luminous and above."
+    msg: "bluestore is not supported by the selected Ceph version, use Luminous or above."
  when:
-    - osd_objectstore != 'bluestore'
+    - osd_group_name is defined
    - osd_group_name in group_names
-    - ( ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous ) }} )
+    - not containerized_deployment
+    - osd_objectstore == 'bluestore'
+    - ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous

diff --git a/roles/ceph-osd/tasks/main.yml b/roles/ceph-osd/tasks/main.yml
index de2e53ef3..3fbd2266f 100644
--- a/roles/ceph-osd/tasks/main.yml
+++ b/roles/ceph-osd/tasks/main.yml
@@ -20,13 +20,6 @@
  # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
  static: False

-- include: ./scenarios/bluestore.yml
-  when:
-    - osd_objectstore == 'bluestore'
-    - not containerized_deployment
-  # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
-  static: False
-
- include: ./scenarios/dmcrypt-journal-collocation.yml
  when:
    - dmcrypt_journal_collocation

diff --git a/roles/ceph-osd/tasks/scenarios/bluestore.yml b/roles/ceph-osd/tasks/scenarios/bluestore.yml
deleted file mode 100644
index 4cb1b3f5a..000000000
--- a/roles/ceph-osd/tasks/scenarios/bluestore.yml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-## SCENARIO 4: BLUESTORE
-
-- include: ../check_devices.yml
-
-# NOTE (leseb): the prepare process must be parallelized somehow...
-# if you have 64 disks with 4TB each, this will take a while
-# since Ansible will sequential process the loop
-
-# NOTE (alahouze): if the device is a partition, the parted command below has
-# failed, this is why we check if the device is a partition too.
-- name: automatic prepare bluestore osd disk(s) without partitions
-  command: ceph-disk prepare --bluestore --cluster "{{ cluster }}" "/dev/{{ item.key }}"
-  register: prepared_osds
-  with_dict: "{{ ansible_devices }}"
-  when:
-    - ansible_devices is defined
-    - item.value.removable == "0"
-    - item.value.partitions|count == 0
-    - item.value.holders|count == 0
-    - osd_objectstore == 'bluestore'
-    - osd_auto_discovery
-
-- name: manually prepare bluestore osd disk(s)
-  command: ceph-disk prepare --bluestore --cluster "{{ cluster }}" "{{ item.2 }}"
-  with_together:
-    - "{{ parted_results.results }}"
-    - "{{ ispartition_results.results }}"
-    - "{{ devices }}"
-  when:
-    - not item.0.get("skipped")
-    - not item.1.get("skipped")
-    - item.0.get("rc", 0) != 0
-    - item.1.get("rc", 0) != 0
-    - osd_objectstore == 'bluestore'
-    - not osd_auto_discovery
-
-- include: ../activate_osds.yml

diff --git a/roles/ceph-osd/tasks/scenarios/journal_collocation.yml b/roles/ceph-osd/tasks/scenarios/journal_collocation.yml
index 0e1d08a12..da7d7be33 100644
--- a/roles/ceph-osd/tasks/scenarios/journal_collocation.yml
+++ b/roles/ceph-osd/tasks/scenarios/journal_collocation.yml
@@ -19,6 +19,7 @@
    - item.value.partitions|count == 0
    - item.value.holders|count == 0
    - journal_collocation
+    - osd_objectstore == 'filestore'
    - osd_auto_discovery

- name: manually prepare filestore osd disk(s) with collocated osd data and journal
@@ -33,6 +34,35 @@
    - item.0.get("rc", 0) != 0
    - item.1.get("rc", 0) != 0
    - journal_collocation
+    - osd_objectstore == 'filestore'
+    - not osd_auto_discovery
+
+- name: automatic prepare bluestore osd disk(s) without partitions
+  command: ceph-disk prepare --bluestore --cluster "{{ cluster }}" "/dev/{{ item.key }}"
+  register: prepared_osds
+  with_dict: "{{ ansible_devices }}"
+  when:
+    - ansible_devices is defined
+    - item.value.removable == "0"
+    - item.value.partitions|count == 0
+    - item.value.holders|count == 0
+    - journal_collocation
+    - osd_objectstore == 'bluestore'
+    - osd_auto_discovery
+
+- name: manually prepare bluestore osd disk(s)
+  command: ceph-disk prepare --bluestore --cluster "{{ cluster }}" "{{ item.2 }}"
+  with_together:
+    - "{{ parted_results.results }}"
+    - "{{ ispartition_results.results }}"
+    - "{{ devices }}"
+  when:
+    - not item.0.get("skipped")
+    - not item.1.get("skipped")
+    - item.0.get("rc", 0) != 0
+    - item.1.get("rc", 0) != 0
+    - journal_collocation
+    - osd_objectstore == 'bluestore'
    - not osd_auto_discovery

- include: ../activate_osds.yml

diff --git a/roles/ceph-osd/tasks/scenarios/raw_multi_journal.yml b/roles/ceph-osd/tasks/scenarios/raw_multi_journal.yml
index 79bc20642..6f29ed58e 100644
--- a/roles/ceph-osd/tasks/scenarios/raw_multi_journal.yml
+++ b/roles/ceph-osd/tasks/scenarios/raw_multi_journal.yml
@@ -17,6 +17,21 @@
  when:
    - item.0.get("skipped") or item.0.get("rc", 0) != 0
    - raw_multi_journal
+    - osd_objectstore == 'filestore'
+    - not osd_auto_discovery
+
+- name: manually prepare bluestore osd disk(s) with a dedicated device for db and wal
+  command: "ceph-disk prepare --bluestore --cluster {{ cluster }} --block.db {{ item.1 }} --block.wal {{ item.2 }} {{ item.3 }}"
+  with_together:
+    - "{{ parted_results.results }}"
+    - "{{ raw_journal_devices }}"
+    - "{{ bluestore_wal_devices }}"
+    - "{{ devices }}"
+  when:
+    - not item.0.get("skipped")
+    - item.0.get("rc", 0) != 0
+    - raw_multi_journal
+    - osd_objectstore == 'bluestore'
    - not osd_auto_discovery

- include: ../activate_osds.yml
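As a usage sketch (an illustration, not part of the commit), the dedicated DB and WAL case handled by the task above could be driven by group_vars such as the following; the device paths are placeholders:

osd_objectstore: bluestore
raw_multi_journal: true
devices:
  - /dev/sda                  # gets the 'ceph data' and 'ceph block' partitions
raw_journal_devices:
  - /dev/sdb                  # gets the 'ceph block.db' partition
bluestore_wal_devices:
  - /dev/sdc                  # gets the 'ceph block.wal' partition

With a default cluster name, the with_together loop above would then run roughly:
ceph-disk prepare --bluestore --cluster ceph --block.db /dev/sdb --block.wal /dev/sdc /dev/sda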