git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
ceph-volume tests.functional add back partitions with parted to redeploy after zapping
author Alfredo Deza <adeza@redhat.com>
Thu, 29 Nov 2018 23:07:09 +0000 (18:07 -0500)
committer Alfredo Deza <adeza@redhat.com>
Fri, 30 Nov 2018 19:06:38 +0000 (14:06 -0500)
Signed-off-by: Alfredo Deza <adeza@redhat.com>
(cherry picked from commit 945e8f56ce21411f664a58147afb343f0671b591)

src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/test.yml
src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/test.yml
src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/test.yml
src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/test.yml

index bebe6dc36ba513c8904bb7d0e2927e3121fe388c..8caa1ce38bef9d28f55cdf791aca0ce9bf547780 100644 (file)
       environment:
         CEPH_VOLUME_DEBUG: 1
 
+    # partitions have been completely removed, so re-create them again
+    - name: re-create partition /dev/sdd for lvm data usage
+      parted:
+        device: /dev/sdd
+        number: 1
+        part_start: 0%
+        part_end: 50%
+        unit: '%'
+        label: gpt
+        state: present
+
     - name: redeploy osd.2 using /dev/sdd1
       command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/sdd1 --osd-id 2"
       environment:
index c48e4becece7d8ace417acde3699b373c0e76d98..a65af0702404ba25e63d63589088c936a92e3577 100644 (file)
       environment:
         CEPH_VOLUME_DEBUG: 1
 
+    # partitions have been completely removed, so re-create them again
+    - name: re-create partition /dev/sdd for lvm data usage
+      parted:
+        device: /dev/sdd
+        number: 1
+        part_start: 0%
+        part_end: 50%
+        unit: '%'
+        label: gpt
+        state: present
+
+    - name: re-create partition /dev/sdd lvm journals
+      parted:
+        device: /dev/sdd
+        number: 2
+        part_start: 50%
+        part_end: 100%
+        unit: '%'
+        state: present
+        label: gpt
+
     - name: redeploy osd.2 using /dev/sdd1
       command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"
       environment:
index 19209b1d2113617102991608166657004b9750c9..3e032e20243baa4355509e55966d643dda51bc0b 100644 (file)
       environment:
         CEPH_VOLUME_DEBUG: 1
 
+    # partitions have been completely removed, so re-create them again
+    - name: re-create partition /dev/sdd for lvm data usage
+      parted:
+        device: /dev/sdd
+        number: 1
+        part_start: 0%
+        part_end: 50%
+        unit: '%'
+        label: gpt
+        state: present
+
     - name: redeploy osd.2 using /dev/sdd1
       command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/sdd1 --osd-id 2"
       environment:
index c48e4becece7d8ace417acde3699b373c0e76d98..a65af0702404ba25e63d63589088c936a92e3577 100644 (file)
       environment:
         CEPH_VOLUME_DEBUG: 1
 
+    # partitions have been completely removed, so re-create them again
+    - name: re-create partition /dev/sdd for lvm data usage
+      parted:
+        device: /dev/sdd
+        number: 1
+        part_start: 0%
+        part_end: 50%
+        unit: '%'
+        label: gpt
+        state: present
+
+    - name: re-create partition /dev/sdd lvm journals
+      parted:
+        device: /dev/sdd
+        number: 2
+        part_start: 50%
+        part_end: 100%
+        unit: '%'
+        state: present
+        label: gpt
+
     - name: redeploy osd.2 using /dev/sdd1
       command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"
       environment: