name: ceph-osd@2
state: stopped
# Rebuild osd.2 on /dev/sdd1 (the daemon is stopped by the preceding task):
# remove the OSD record from the cluster, wipe the data and journal
# partitions, then recreate the filestore OSD reusing the same OSD id.
- name: destroy osd.2
  command: "ceph osd destroy osd.2 --yes-i-really-mean-it"

- name: zap /dev/sdd1
  command: "ceph-volume lvm zap /dev/sdd1 --destroy"
  environment:
    # quoted so the value stays a string rather than a YAML integer
    CEPH_VOLUME_DEBUG: "1"

- name: zap /dev/sdd2
  command: "ceph-volume lvm zap /dev/sdd2 --destroy"
  environment:
    CEPH_VOLUME_DEBUG: "1"

- name: redeploy osd.2 using /dev/sdd1
  command: "ceph-volume lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"
  environment:
    CEPH_VOLUME_DEBUG: "1"
# Rebuild osd.0 from its LV-backed data device: stop the daemon, remove
# the OSD record, zap both the data LV and the journal partition, then
# prepare the OSD again (without activating) and finally activate all
# previously prepared OSDs so osd.0 comes back up.
- name: stop ceph-osd@0 daemon
  service:
    name: ceph-osd@0
    state: stopped

- name: destroy osd.0
  command: "ceph osd destroy osd.0 --yes-i-really-mean-it"

# Logical volume: no --destroy here, the LV itself must survive the zap.
- name: zap test_group/data-lv1
  command: "ceph-volume lvm zap test_group/data-lv1"
  environment:
    # quoted so the value stays a string rather than a YAML integer
    CEPH_VOLUME_DEBUG: "1"

- name: zap /dev/sdc1
  command: "ceph-volume lvm zap /dev/sdc1 --destroy"
  environment:
    CEPH_VOLUME_DEBUG: "1"

- name: prepare osd.0 again using test_group/data-lv1
  command: "ceph-volume lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0"
  environment:
    CEPH_VOLUME_DEBUG: "1"

- name: activate all to start the previously prepared osd.0
  command: "ceph-volume lvm activate --filestore --all"
  environment:
    CEPH_VOLUME_DEBUG: "1"