We now use the name of the scenario in the prepare task.
Signed-off-by: Sébastien Han <seb@redhat.com>
# NOTE (alahouze): if the device is a partition, the parted command below has
# failed; this is why we also check whether the device is a partition.
-- name: automatic prepare osd disk(s) without partitions
+- name: automatic prepare bluestore osd disk(s) without partitions
command: ceph-disk prepare --bluestore --cluster "{{ cluster }}" "/dev/{{ item.key }}"
register: prepared_osds
with_dict: "{{ ansible_devices }}"
- bluestore
- osd_auto_discovery
-- name: manually prepare osd disk(s)
+- name: manually prepare bluestore osd disk(s)
command: ceph-disk prepare --bluestore --cluster "{{ cluster }}" "{{ item.2 }}"
with_together:
- "{{ parted_results.results }}"
# NOTE (alahouze): if the device is a partition, the parted command below has
# failed; this is why we also check whether the device is a partition.
-- name: prepare osd disk(s)
+- name: prepare dmcrypt osd disk(s) with a dedicated journal device
command: "ceph-disk prepare --dmcrypt --cluster {{ cluster }} {{ item.2 }} {{ item.3 }}"
with_together:
- "{{ parted_results.results }}"
# NOTE (alahouze): if the device is a partition, the parted command below has
# failed; this is why we also check whether the device is a partition.
-- name: automatic prepare osd disk(s) without partitions (dmcrypt)
+- name: automatic prepare dmcrypt osd disk(s) without partitions with collocated osd data and journal
command: ceph-disk prepare --dmcrypt --cluster "{{ cluster }}" "/dev/{{ item.key }}"
with_dict: "{{ ansible_devices }}"
when:
- dmcrypt_journal_collocation
- osd_auto_discovery
-- name: manually prepare osd disk(s) (dmcrypt)
+- name: manually prepare dmcrypt osd disk(s) with collocated osd data and journal
command: ceph-disk prepare --dmcrypt --cluster "{{ cluster }}" "{{ item.2 }}"
with_together:
- "{{ parted_results.results }}"
# NOTE (alahouze): if the device is a partition, the parted command below has
# failed; this is why we also check whether the device is a partition.
-- name: automatic prepare osd disk(s) without partitions
+- name: automatic prepare filestore osd disk(s) without partitions with collocated osd data and journal
command: ceph-disk prepare --cluster "{{ cluster }}" "/dev/{{ item.key }}"
register: prepared_osds
with_dict: "{{ ansible_devices }}"
- journal_collocation
- osd_auto_discovery
-- name: manually prepare osd disk(s)
+- name: manually prepare filestore osd disk(s) with collocated osd data and journal
command: "ceph-disk prepare --cluster {{ cluster }} {{ item.2 }}"
with_together:
- "{{ parted_results.results }}"
# NOTE (leseb): the prepare process must be parallelized somehow...
# if you have 64 disks with 4TB each, this will take a while
# since Ansible will sequentially process the loop
-- name: prepare OSD disk(s)
+- name: prepare osd directory disk(s)
command: "ceph-disk prepare --cluster {{ cluster }} {{ item }}"
with_items: "{{ osd_directories }}"
changed_when: false
when: osd_directory
-- name: activate OSD(s)
+- name: activate osd(s)
command: "ceph-disk activate {{ item }}"
with_items: "{{ osd_directories }}"
changed_when: false
-- name: start and add the OSD target to the systemd sequence
+- name: start and add osd target(s) to the systemd sequence
service:
name: ceph.target
state: started
# if you have 64 disks with 4TB each, this will take a while
# since Ansible will sequentially process the loop
-- name: prepare osd disk(s)
+- name: prepare filestore osd disk(s) with a dedicated journal device
command: "ceph-disk prepare --cluster {{ cluster }} {{ item.1 }} {{ item.2 }}"
with_together:
- "{{ parted_results.results }}"