From ba10b6820fd7f352ed6ba78246a6976c54783cbd Mon Sep 17 00:00:00 2001
From: Alfredo Deza
Date: Mon, 24 Sep 2018 12:51:47 -0400
Subject: [PATCH] ceph-volume tests/functional update playbooks to use
 --cluster

Signed-off-by: Alfredo Deza
---
 .../functional/lvm/playbooks/test_bluestore.yml  | 12 ++++++------
 .../functional/lvm/playbooks/test_filestore.yml  | 16 ++++++++--------
 2 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml
index ac4c1f9211fb8..7f4e109f3d4f1 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml
@@ -19,10 +19,10 @@
     tasks:

     - name: destroy osd.2
-      command: "ceph osd destroy osd.2 --yes-i-really-mean-it"
+      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"

     - name: destroy osd.0
-      command: "ceph osd destroy osd.0 --yes-i-really-mean-it"
+      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"


 - hosts: osds
@@ -31,23 +31,23 @@

     # osd.2 device
     - name: zap /dev/sdd1
-      command: "ceph-volume lvm zap /dev/sdd1 --destroy"
+      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
       environment:
         CEPH_VOLUME_DEBUG: 1

     - name: redeploy osd.2 using /dev/sdd1
-      command: "ceph-volume lvm create --bluestore --data /dev/sdd1 --osd-id 2"
+      command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/sdd1 --osd-id 2"
       environment:
         CEPH_VOLUME_DEBUG: 1

     # osd.0 device
     - name: zap test_group/data-lv1
-      command: "ceph-volume lvm zap --destroy test_group/data-lv1"
+      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_group/data-lv1"
       environment:
         CEPH_VOLUME_DEBUG: 1

     - name: prepare osd.0 again using test_group/data-lv1
-      command: "ceph-volume lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
+      command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
       environment:
         CEPH_VOLUME_DEBUG: 1

diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml
index a8a9e22b743e0..6cb3ac9ef19d8 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml
@@ -19,10 +19,10 @@
     tasks:

     - name: destroy osd.2
-      command: "ceph osd destroy osd.2 --yes-i-really-mean-it"
+      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"

     - name: destroy osd.0
-      command: "ceph osd destroy osd.0 --yes-i-really-mean-it"
+      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"


 - hosts: osds
@@ -31,18 +31,18 @@

     # osd.2 device
     - name: zap /dev/sdd1
-      command: "ceph-volume lvm zap /dev/sdd1 --destroy"
+      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
       environment:
         CEPH_VOLUME_DEBUG: 1

     # osd.2 journal
     - name: zap /dev/sdd2
-      command: "ceph-volume lvm zap /dev/sdd2 --destroy"
+      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd2 --destroy"
       environment:
         CEPH_VOLUME_DEBUG: 1

     - name: redeploy osd.2 using /dev/sdd1
-      command: "ceph-volume lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"
+      command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"
       environment:
         CEPH_VOLUME_DEBUG: 1

@@ -50,18 +50,18 @@
     # note: we don't use --destroy here to test this works without that flag.
     # --destroy is used in the bluestore tests
     - name: zap test_group/data-lv1
-      command: "ceph-volume lvm zap test_group/data-lv1"
+      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
       environment:
         CEPH_VOLUME_DEBUG: 1

     # osd.0 journal device
     - name: zap /dev/sdc1
-      command: "ceph-volume lvm zap /dev/sdc1 --destroy"
+      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdc1 --destroy"
       environment:
         CEPH_VOLUME_DEBUG: 1

     - name: prepare osd.0 again using test_group/data-lv1
-      command: "ceph-volume lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0"
+      command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0"
       environment:
         CEPH_VOLUME_DEBUG: 1
--
2.39.5