   tasks:

     - name: destroy osd.2
-      command: "ceph osd destroy osd.2 --yes-i-really-mean-it"
+      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"

     - name: destroy osd.0
-      command: "ceph osd destroy osd.0 --yes-i-really-mean-it"
+      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"

 - hosts: osds

     # osd.2 device
     - name: zap /dev/sdd1
-      command: "ceph-volume lvm zap /dev/sdd1 --destroy"
+      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
       environment:
         CEPH_VOLUME_DEBUG: 1

     - name: redeploy osd.2 using /dev/sdd1
-      command: "ceph-volume lvm create --bluestore --data /dev/sdd1 --osd-id 2"
+      command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/sdd1 --osd-id 2"
       environment:
         CEPH_VOLUME_DEBUG: 1

     # osd.0 device
     - name: zap test_group/data-lv1
-      command: "ceph-volume lvm zap --destroy test_group/data-lv1"
+      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_group/data-lv1"
       environment:
         CEPH_VOLUME_DEBUG: 1

     - name: prepare osd.0 again using test_group/data-lv1
-      command: "ceph-volume lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
+      command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
       environment:
         CEPH_VOLUME_DEBUG: 1
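The tasks above assume a cluster variable is in scope for the play; the diff itself does not show where it is defined. A minimal sketch of one way to supply it (the group_vars location and the value "test" are illustrative assumptions, not part of the patch):

    # group_vars/all (hypothetical location for the variable)
    cluster: test

    # With that value, the first task above renders to:
    #   command: "ceph --cluster test osd destroy osd.2 --yes-i-really-mean-it"

The filestore variant of the same test receives the analogous change; it additionally zaps and reuses the journal devices: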
   tasks:

     - name: destroy osd.2
-      command: "ceph osd destroy osd.2 --yes-i-really-mean-it"
+      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"

     - name: destroy osd.0
-      command: "ceph osd destroy osd.0 --yes-i-really-mean-it"
+      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"

 - hosts: osds

     # osd.2 device
     - name: zap /dev/sdd1
-      command: "ceph-volume lvm zap /dev/sdd1 --destroy"
+      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
       environment:
         CEPH_VOLUME_DEBUG: 1

     # osd.2 journal
     - name: zap /dev/sdd2
-      command: "ceph-volume lvm zap /dev/sdd2 --destroy"
+      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd2 --destroy"
       environment:
         CEPH_VOLUME_DEBUG: 1

     - name: redeploy osd.2 using /dev/sdd1
-      command: "ceph-volume lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"
+      command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"
       environment:
         CEPH_VOLUME_DEBUG: 1

     # note: we don't use --destroy here to test this works without that flag.
     # --destroy is used in the bluestore tests
     - name: zap test_group/data-lv1
-      command: "ceph-volume lvm zap test_group/data-lv1"
+      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
       environment:
         CEPH_VOLUME_DEBUG: 1

     # osd.0 journal device
     - name: zap /dev/sdc1
-      command: "ceph-volume lvm zap /dev/sdc1 --destroy"
+      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdc1 --destroy"
       environment:
         CEPH_VOLUME_DEBUG: 1

     - name: prepare osd.0 again using test_group/data-lv1
-      command: "ceph-volume lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0"
+      command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0"
       environment:
         CEPH_VOLUME_DEBUG: 1
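ceph osd destroy marks an OSD as destroyed and removes its cephx key, but keeps its id and CRUSH entry, which is what lets the later create and prepare calls reuse ids 2 and 0 via --osd-id. A hypothetical follow-up task (not part of the patch) that would confirm the redeployed OSDs rejoined the cluster:

    # Hypothetical check task: osd.0 and osd.2 should reappear in the
    # CRUSH tree after the redeploy steps above.
    - name: confirm redeployed OSDs are present
      command: "ceph --cluster {{ cluster }} osd tree"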