From: Loic Dachary
Date: Mon, 29 Feb 2016 11:20:14 +0000 (+0700)
Subject: tests: verify ceph-disk lockbox activation scenarii
X-Git-Tag: v10.1.0~200^2~2
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=94c5df45675092e0d119112861eb96166cf266f4;p=ceph.git

tests: verify ceph-disk lockbox activation scenarii

Simulate the cases where the activation (via udev running trigger)
sequences are:

* journal then lockbox
* data then lockbox
* lockbox

All of them must end with the OSD verified to be up.

Signed-off-by: Loic Dachary
---

diff --git a/qa/workunits/ceph-disk/ceph-disk-test.py b/qa/workunits/ceph-disk/ceph-disk-test.py
index 682bdd973f37e..5fedde4d3638b 100644
--- a/qa/workunits/ceph-disk/ceph-disk-test.py
+++ b/qa/workunits/ceph-disk/ceph-disk-test.py
@@ -167,6 +167,13 @@ class CephDisk:
         ceph-disk --verbose destroy --destroy-by-id {id} --zap
         """.format(id=id))
 
+    def deactivate_osd(self, uuid):
+        id = self.sh("ceph osd create " + uuid).strip()
+        self.sh("""
+        set -xe
+        ceph-disk --verbose deactivate --once --deactivate-by-id {id}
+        """.format(id=id))
+
     @staticmethod
     def osd_up_predicate(osds, uuid):
         for osd in osds:
@@ -364,6 +371,45 @@ class TestCephDisk(object):
         c.check_osd_status(osd_uuid, 'journal')
         return osd_uuid
 
+    def test_trigger_dmcrypt_journal_lockbox(self):
+        c = CephDisk()
+        osd_uuid = self.activate_dmcrypt('ceph-disk')
+        data_partition = c.get_osd_partition(osd_uuid)
+        lockbox_partition = c.get_lockbox()
+        c.deactivate_osd(osd_uuid)
+        c.wait_for_osd_down(osd_uuid)
+        with pytest.raises(subprocess.CalledProcessError):
+            # fails because the lockbox is not mounted yet
+            c.sh("ceph-disk --verbose trigger --sync " + data_partition['journal_dev'])
+        c.sh("ceph-disk --verbose trigger --sync " + lockbox_partition['path'])
+        c.wait_for_osd_up(osd_uuid)
+        c.destroy_osd(osd_uuid)
+
+    def test_trigger_dmcrypt_data_lockbox(self):
+        c = CephDisk()
+        osd_uuid = self.activate_dmcrypt('ceph-disk')
+        data_partition = c.get_osd_partition(osd_uuid)
+        lockbox_partition = c.get_lockbox()
+        c.deactivate_osd(osd_uuid)
+        c.wait_for_osd_down(osd_uuid)
+        with pytest.raises(subprocess.CalledProcessError):
+            # fails because the lockbox is not mounted yet
+            c.sh("ceph-disk --verbose trigger --sync " + data_partition['path'])
+        c.sh("ceph-disk --verbose trigger --sync " + lockbox_partition['path'])
+        c.wait_for_osd_up(osd_uuid)
+        c.destroy_osd(osd_uuid)
+
+    def test_trigger_dmcrypt_lockbox(self):
+        c = CephDisk()
+        osd_uuid = self.activate_dmcrypt('ceph-disk')
+        data_partition = c.get_osd_partition(osd_uuid)
+        lockbox_partition = c.get_lockbox()
+        c.deactivate_osd(osd_uuid)
+        c.wait_for_osd_down(osd_uuid)
+        c.sh("ceph-disk --verbose trigger --sync " + lockbox_partition['path'])
+        c.wait_for_osd_up(osd_uuid)
+        c.destroy_osd(osd_uuid)
+
     def test_activate_no_journal(self):
         c = CephDisk()
         disk = c.unused_disks()[0]
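
For reference, the sequence exercised by the first case (test_trigger_dmcrypt_journal_lockbox) amounts to the following ceph-disk calls; this is only a sketch, and OSD_ID, JOURNAL_DEV and LOCKBOX_DEV are placeholders for the values the test obtains at runtime from "ceph osd create", get_osd_partition() and get_lockbox():

    # sketch of the journal-then-lockbox trigger sequence; variables are placeholders
    ceph-disk --verbose deactivate --once --deactivate-by-id "$OSD_ID"
    # expected to fail: the lockbox is not mounted yet
    ceph-disk --verbose trigger --sync "$JOURNAL_DEV" || true
    # after the lockbox partition is triggered the OSD is expected to come back up
    ceph-disk --verbose trigger --sync "$LOCKBOX_DEV"
    ceph osd tree   # the OSD should eventually be reported up

The data-then-lockbox case is the same with the data partition path in place of JOURNAL_DEV, and the lockbox-only case skips the failing trigger entirely.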