tests: verify ceph-disk lockbox activation scenarios
author Loic Dachary <ldachary@redhat.com>
Mon, 29 Feb 2016 11:20:14 +0000 (18:20 +0700)
committer Loic Dachary <ldachary@redhat.com>
Fri, 4 Mar 2016 02:13:35 +0000 (09:13 +0700)
Simulate the cases where the activation sequences (via udev running
ceph-disk trigger) are:

* journal then lockbox
* data then lockbox
* lockbox

All of them must end with the OSD verified to be up.

Signed-off-by: Loic Dachary <loic@dachary.org>
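
The three scenarios share one shape: deactivate the OSD, trigger zero or more
data/journal partitions (which must fail while the lockbox is still unmounted),
then trigger the lockbox and wait for the OSD to come back up. A minimal sketch
of that common shape, reusing the CephDisk helper names from the diff below
(get_osd_partition, get_lockbox, deactivate_osd, wait_for_osd_down,
wait_for_osd_up, sh); the trigger_in_order name itself is hypothetical:

    import subprocess

    import pytest


    def trigger_in_order(c, osd_uuid, before_lockbox=()):
        # Deactivate the OSD, trigger the given data/journal partition keys
        # (each expected to fail while the lockbox is not mounted), then
        # trigger the lockbox and wait for the OSD to come back up.
        data_partition = c.get_osd_partition(osd_uuid)
        lockbox_partition = c.get_lockbox()
        c.deactivate_osd(osd_uuid)
        c.wait_for_osd_down(osd_uuid)
        for key in before_lockbox:  # e.g. 'journal_dev' or 'path'
            with pytest.raises(subprocess.CalledProcessError):
                # fails because the lockbox holding the dmcrypt keys
                # is not mounted yet
                c.sh("ceph-disk --verbose trigger --sync " + data_partition[key])
        c.sh("ceph-disk --verbose trigger --sync " + lockbox_partition['path'])
        c.wait_for_osd_up(osd_uuid)
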
qa/workunits/ceph-disk/ceph-disk-test.py

index 682bdd973f37e1e63a972282bd2a6039827e8cc2..5fedde4d3638b99bc60d664781fab6f8c73f31a6 100644 (file)
@@ -167,6 +167,13 @@ class CephDisk:
         ceph-disk --verbose destroy --destroy-by-id {id} --zap
         """.format(id=id))
 
+    def deactivate_osd(self, uuid):
+        id = self.sh("ceph osd create " + uuid).strip()
+        self.sh("""
+        set -xe
+        ceph-disk --verbose deactivate --once --deactivate-by-id {id}
+        """.format(id=id))
+
     @staticmethod
     def osd_up_predicate(osds, uuid):
         for osd in osds:
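
For reference, the "ceph osd create <uuid>" call in deactivate_osd above is
effectively an idempotent uuid-to-id lookup: it returns the id already
assigned to that uuid rather than allocating a new OSD. The wait_for_osd_down
and wait_for_osd_up calls used by the new tests amount to polling the osd map
against a predicate such as osd_up_predicate; a rough sketch, assuming
"ceph osd dump -f json" output (wait_for_predicate is an illustrative name,
not the helper actually used in the file):

    import json
    import time


    def wait_for_predicate(c, uuid, predicate, timeout=300, delay=5):
        # Poll the osd map until the predicate holds for the osd with the
        # given uuid, or give up after `timeout` seconds.
        deadline = time.time() + timeout
        while time.time() < deadline:
            osds = json.loads(c.sh("ceph osd dump -f json"))['osds']
            if predicate(osds, uuid):
                return
            time.sleep(delay)
        raise RuntimeError("osd " + uuid + " did not reach the expected state")

    # e.g. wait_for_predicate(c, osd_uuid, CephDisk.osd_up_predicate)
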
@@ -364,6 +371,45 @@ class TestCephDisk(object):
         c.check_osd_status(osd_uuid, 'journal')
         return osd_uuid
 
+    def test_trigger_dmcrypt_journal_lockbox(self):
+        c = CephDisk()
+        osd_uuid = self.activate_dmcrypt('ceph-disk')
+        data_partition = c.get_osd_partition(osd_uuid)
+        lockbox_partition = c.get_lockbox()
+        c.deactivate_osd(osd_uuid)
+        c.wait_for_osd_down(osd_uuid)
+        with pytest.raises(subprocess.CalledProcessError):
+            # fails because the lockbox is not mounted yet
+            c.sh("ceph-disk --verbose trigger --sync " + data_partition['journal_dev'])
+        c.sh("ceph-disk --verbose trigger --sync " + lockbox_partition['path'])
+        c.wait_for_osd_up(osd_uuid)
+        c.destroy_osd(osd_uuid)
+
+    def test_trigger_dmcrypt_data_lockbox(self):
+        c = CephDisk()
+        osd_uuid = self.activate_dmcrypt('ceph-disk')
+        data_partition = c.get_osd_partition(osd_uuid)
+        lockbox_partition = c.get_lockbox()
+        c.deactivate_osd(osd_uuid)
+        c.wait_for_osd_down(osd_uuid)
+        with pytest.raises(subprocess.CalledProcessError):
+            # fails because the lockbox is not mounted yet
+            c.sh("ceph-disk --verbose trigger --sync " + data_partition['path'])
+        c.sh("ceph-disk --verbose trigger --sync " + lockbox_partition['path'])
+        c.wait_for_osd_up(osd_uuid)
+        c.destroy_osd(osd_uuid)
+
+    def test_trigger_dmcrypt_lockbox(self):
+        c = CephDisk()
+        osd_uuid = self.activate_dmcrypt('ceph-disk')
+        data_partition = c.get_osd_partition(osd_uuid)
+        lockbox_partition = c.get_lockbox()
+        c.deactivate_osd(osd_uuid)
+        c.wait_for_osd_down(osd_uuid)
+        c.sh("ceph-disk --verbose trigger --sync " + lockbox_partition['path'])
+        c.wait_for_osd_up(osd_uuid)
+        c.destroy_osd(osd_uuid)
+
     def test_activate_no_journal(self):
         c = CephDisk()
         disk = c.unused_disks()[0]
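
The three new trigger scenarios can be selected by name with pytest's keyword
filter; a minimal sketch, assuming the tests are run directly against
ceph-disk-test.py rather than through the usual workunit wrapper:

    # Illustrative only: run just the new trigger scenarios.
    import sys

    import pytest

    if __name__ == '__main__':
        sys.exit(pytest.main([
            '-v', '-s',
            '-k', 'test_trigger_dmcrypt',
            'ceph-disk-test.py',
        ]))
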