+++ /dev/null
----
-## SCENARIO 2: SINGLE JOURNAL DEVICE FOR N OSDS
-#
-
-- name: Install dependencies
-  apt: pkg=parted state=present
-  when: ansible_os_family == 'Debian'
-
-- name: Install dependencies
-  yum: name=parted state=present
-  when: ansible_os_family == 'RedHat'
-
-- name: Copy OSD bootstrap key
-  copy: src=fetch/{{ fsid }}/var/lib/ceph/bootstrap-osd/ceph.keyring dest=/var/lib/ceph/bootstrap-osd/ceph.keyring owner=root group=root mode=600
-  when: cephx
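-# NOTE: the bootstrap key above is only needed when cephx authentication
-# is enabled, hence the 'when: cephx' condition.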
-
-# NOTE (leseb): the current behavior of ceph-disk is to fail when the device is mounted "stderr: ceph-disk: Error: Device is mounted: /dev/sdb1"
-# the return code is 1, which makes sense, however ideally if ceph-disk detects a ceph partition
-# it should exit with rc=0 and do nothing unless we pass something like --force
-# As a final word, I prefer to keep the partition check instead of running ceph-disk prepare with "ignore_errors: True"
-# I believe it's safer
-#
-
-- name: Check if the device is a partition or a disk
-  shell: echo '{{ item }}' | egrep '/dev/[a-z]{3}[0-9]$'
-  ignore_errors: true
-  with_items: devices
-  register: ispartition
-  changed_when: False
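-# The egrep above only matches device names ending in a digit, so for
-# example (illustrative) '/dev/sdb1' matches (rc=0, a partition) while
-# '/dev/sdb' does not (rc=1, a whole disk).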
-
-- name: Check if a partition named 'ceph' exists
-  shell: parted --script {{ item }} print | egrep -sq '^ 1.*ceph'
-  ignore_errors: True
-  with_items: devices
-  register: parted
-  changed_when: False
-
-# Prepare means:
-# - create GPT partition for a disk, or a loop label for a partition
-# - mark the partition with the ceph type uuid
-# - create a file system
-# - mark the fs as ready for ceph consumption
-# - entire data disk is used (one big partition)
-# - a new partition is added to the journal disk (so it can be easily shared)
-#
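-# For example (illustrative), with devices=['/dev/sdb'] and
-# raw_journal_device='/dev/sdc', the task below runs:
-#   ceph-disk prepare /dev/sdb /dev/sdc
-# creating one big data partition /dev/sdb1 and appending a journal
-# partition on /dev/sdc.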
-
-# NOTE (leseb): the prepare process must be parallelized somehow...
-# if you have 64 disks of 4TB each, this will take a while
-# since Ansible processes the loop sequentially
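-# One possible approach (an untested sketch, not part of this playbook)
-# is Ansible's fire-and-forget mode, adding something like:
-#   async: 3600
-#   poll: 0
-# to the prepare task and polling the results later with async_status.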
-
-# NOTE (alahouze): if the device is a partition, the parted command above
-# will have failed; this is why the condition below also checks whether
-# the device is a partition.
-
-- name: Prepare OSD disk(s)
-  command: ceph-disk prepare {{ item.2 }} {{ raw_journal_device }}
-  when: (item.0.rc != 0 or item.1.rc != 0) and raw_journal
-  ignore_errors: True
-  with_together:
-    - parted.results
-    - ispartition.results
-    - devices
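-# In the with_together loop above, item.0 is the parted result, item.1
-# the ispartition result and item.2 the device path itself.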
-
-# Activate means:
-# - mount the volume in a temp location
-# - allocate an osd id (if needed)
-# - remount in the correct location /var/lib/ceph/osd/$cluster-$id
-# - start ceph-osd
-#
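-# For example (illustrative), activating /dev/sdb1 mounts it in a temp
-# location, allocates an id such as 0 if needed, and remounts it at
-# /var/lib/ceph/osd/ceph-0 before starting ceph-osd.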
-
-# This task is for disk devices only because of the explicit use of the first
-# partition.
-
-- name: Activate OSD(s) when device is a disk
-  command: ceph-disk activate {{ item.2 }}1
-  with_together:
-    - parted.results
-    - ispartition.results
-    - devices
-  when: item.0.rc == 0 and item.1.rc != 0
-  ignore_errors: True
-  changed_when: False
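-# The literal '1' appended to {{ item.2 }} in the command above targets
-# the first partition created by prepare, e.g. (illustrative) /dev/sdb
-# becomes /dev/sdb1.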
-
-# This task is for devices that are already partitions, so no partition
-# number is appended.
-
-- name: Activate OSD(s) when device is a partition
-  command: ceph-disk activate {{ item.1 }}
-  with_together:
-    - ispartition.results
-    - devices
-  when: item.0.rc == 0
-  ignore_errors: True
-  changed_when: False
-
-- name: Start the OSD service and add it to the init sequence
-  service: name=ceph state=started enabled=yes args=osd
--- /dev/null
+---
+# NOTE (leseb): some devices might be missing a partition label, which will
+# result in ceph-disk failing to prepare the OSD. Thus zapping them prior to
+# preparing the OSD ensures that the device will be successfully prepared.
+- name: Erasing partitions and labels from OSD disk(s)
+  command: ceph-disk zap {{ item }}
+  when: zap_devices and (journal_collocation or raw_multi_journal)
+  with_items: devices
+
+- name: Erasing partitions and labels from the journal device(s)
+  command: ceph-disk zap {{ item }}
+  when: zap_devices and raw_multi_journal
+  with_items: raw_journal_devices
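+# For example (illustrative), with zap_devices=true, raw_multi_journal=true,
+# devices=['/dev/sdb', '/dev/sdc'] and raw_journal_devices=['/dev/sdd'], the
+# two tasks above run "ceph-disk zap" on all three devices before prepare.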