--- /dev/null
+../../../playbooks/test_bluestore.yml
\ No newline at end of file
--- /dev/null
+../../../playbooks/test_filestore.yml
\ No newline at end of file
--- /dev/null
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    - name: stop ceph-osd@2 daemon
+      service:
+        name: ceph-osd@2
+        state: stopped
+
+    - name: destroy osd.2
+      command: "ceph osd destroy osd.2 --yes-i-really-mean-it"
+
+    - name: zap /dev/sdd1
+      command: "ceph-volume lvm zap /dev/sdd1 --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: redeploy osd.2 using /dev/sdd1
+      command: "ceph-volume lvm create --bluestore --data /dev/sdd1 --osd-id 2"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
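
The playbook above relies on `--osd-id 2` so that `ceph-volume lvm create` reuses the ID freed by `ceph osd destroy`; the testinfra run driven from tox is what ultimately confirms the cluster recovered. As a rough sketch only, an explicit in-playbook check could poll `ceph osd tree` until osd.2 reports up again; the task name, register variable, and retry counts below are illustrative and not part of this change:

- hosts: osds
  become: yes
  tasks:

    # poll the cluster until the re-created OSD shows up in the tree as "up"
    - name: wait for osd.2 to report up again
      shell: "ceph osd tree | grep osd.2 | grep -w up"
      register: osd2_state
      retries: 12
      delay: 10
      until: osd2_state.rc == 0

Polling is needed because the re-created daemon takes a few seconds to boot and register with the monitors after `ceph-volume lvm create` returns.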
--- /dev/null
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    - name: stop ceph-osd@2 daemon
+      service:
+        name: ceph-osd@2
+        state: stopped
+
+    - name: destroy osd.2
+      command: "ceph osd destroy osd.2 --yes-i-really-mean-it"
+
+    - name: zap /dev/sdd1
+      command: "ceph-volume lvm zap /dev/sdd1 --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: zap /dev/sdd2
+      command: "ceph-volume lvm zap /dev/sdd2 --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: redeploy osd.2 using /dev/sdd1
+      command: "ceph-volume lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
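
The filestore variant has the same shape as the bluestore one; it only zaps the journal partition in addition to the data partition and passes `--filestore`/`--journal` to the create call. If these playbooks ever need to cover other devices or IDs, one option (not part of this change, using made-up variable names such as `data_device` and `journal_device`) would be to lift the hardcoded paths into vars:

- hosts: osds
  become: yes
  vars:
    # illustrative values matching the ones hardcoded above
    osd_id: 2
    data_device: /dev/sdd1
    journal_device: /dev/sdd2
  tasks:

    - name: redeploy osd.{{ osd_id }} using {{ data_device }}
      command: "ceph-volume lvm create --filestore --data {{ data_device }} --journal {{ journal_device }} --osd-id {{ osd_id }}"
      environment:
        CEPH_VOLUME_DEBUG: 1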
# retest to ensure cluster came back up correctly after rebooting
testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+ # destroy an OSD, zap its device and recreate it using its ID
+ create: ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
+
+ # retest to ensure cluster came back up correctly
+ create: testinfra -n 4 --sudo -v --connection=ansible --ansible-inventory={changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/tests
+
vagrant destroy --force
--- /dev/null
+../../../playbooks/test_bluestore.yml
\ No newline at end of file
--- /dev/null
+../../../playbooks/test_filestore.yml
\ No newline at end of file