]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
tests: ceph-disk: add deactivate/reactivate/destroy test cases.
authorVicente Cheng <freeze.bilsted@gmail.com>
Thu, 15 Oct 2015 06:32:20 +0000 (14:32 +0800)
committerVicente Cheng <freeze.bilsted@gmail.com>
Tue, 17 Nov 2015 01:24:43 +0000 (09:24 +0800)
  - use the deactivate/destroy features to destroy an osd
  - test the reactivate option when the osd is deactivated
  - add check_osd_status to verify the osd status once the osd comes up

Signed-off-by: Vicente Cheng <freeze.bilsted@gmail.com>
qa/workunits/ceph-disk/ceph-disk-test.py

index 53f46128fd6d76aac9202e5ed2eee7151f950f00..3d7f71abddf67ac339d237f10997c17b3c4cf384 100644 (file)
@@ -112,30 +112,9 @@ class CephDisk:
 
     def destroy_osd(self, uuid):
         id = self.sh("ceph osd create " + uuid)
-        self.helper("control_osd stop " + id + " || true")
-        self.wait_for_osd_down(uuid)
-        try:
-            partition = self.get_journal_partition(uuid)
-            if partition:
-                if partition.get('mount'):
-                    self.sh("umount '" + partition['mount'] + "' || true")
-                if partition['dmcrypt']:
-                    holder = partition['dmcrypt']['holders'][0]
-                    self.sh("cryptsetup close $(cat /sys/block/" + holder + "/dm/name) || true")
-        except:
-            pass
-        try:
-            partition = self.get_osd_partition(uuid)
-            if partition.get('mount'):
-                self.sh("umount '" + partition['mount'] + "' || true")
-            if partition['dmcrypt']:
-                holder = partition['dmcrypt']['holders'][0]
-                self.sh("cryptsetup close $(cat /sys/block/" + holder + "/dm/name) || true")
-        except:
-            pass
         self.sh("""
         ceph-disk deactivate --deactivate-by-id {id}
-        ceph-disk destroy --destroy-by-id {id}
+        ceph-disk destroy --destroy-by-id {id} --zap
         """.format(id=id))
 
     @staticmethod
@@ -173,6 +152,13 @@ class CephDisk:
             time.sleep(delay)
         raise Exception('timeout waiting for osd ' + uuid + ' to be ' + info)
 
+    def check_osd_status(self, uuid, have_journal=False):
+        data_partition = self.get_osd_partition(uuid)
+        assert data_partition['type'] == 'data'
+        assert data_partition['state'] == 'active'
+        if have_journal:
+            journal_partition = self.get_journal_partition(uuid)
+            assert journal_partition
 
 class TestCephDisk(object):
 
@@ -191,7 +177,39 @@ class TestCephDisk(object):
                 del c.conf['global'][key]
         c.save_conf()
 
-    def test_destroy_osd(self):
+    def test_deactivate_reactivate_osd(self):
+        c = CephDisk()
+        have_journal=True
+        disk = c.unused_disks()[0]
+        osd_uuid = str(uuid.uuid1())
+        c.sh("ceph-disk zap " + disk)
+        c.sh("ceph-disk prepare --osd-uuid " + osd_uuid +
+             " " + disk)
+        c.wait_for_osd_up(osd_uuid)
+        device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
+        assert len(device['partitions']) == 2
+        c.check_osd_status(osd_uuid, have_journal)
+        data_partition = c.get_osd_partition(osd_uuid)
+        c.sh("ceph-disk deactivate " + data_partition['path'])
+        c.sh("ceph-disk activate " + data_partition['path'] + " --reactivate")
+        # check again
+        c.wait_for_osd_up(osd_uuid)
+        device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
+        assert len(device['partitions']) == 2
+        c.check_osd_status(osd_uuid, have_journal)
+        c.helper("pool_read_write")
+        c.destroy_osd(osd_uuid)
+
+    def test_destroy_osd_by_id(self):
+        c = CephDisk()
+        disk = c.unused_disks()[0]
+        osd_uuid = str(uuid.uuid1())
+        c.sh("ceph-disk prepare --osd-uuid " + osd_uuid + " " + disk)
+        c.wait_for_osd_up(osd_uuid)
+        c.check_osd_status(osd_uuid)
+        c.destroy_osd(osd_uuid)
+
+    def test_destroy_osd_by_dev_path(self):
         c = CephDisk()
         disk = c.unused_disks()[0]
         osd_uuid = str(uuid.uuid1())
@@ -200,13 +218,16 @@ class TestCephDisk(object):
         partition = c.get_osd_partition(osd_uuid)
         assert partition['type'] == 'data'
         assert partition['state'] == 'active'
-        c.destroy_osd(osd_uuid)
-        c.sh("ceph-disk zap " + disk)
+        c.sh("ceph-disk deactivate " + partition['path'])
+        c.sh("ceph-disk destroy " + partition['path'] + " --zap")
 
     def test_activate_dmcrypt_plain(self):
         c = CephDisk()
         c.conf['global']['osd dmcrypt type'] = 'plain'
         c.save_conf()
+
+    def test_deactivate_reactivate_dmcrypt_plain(self):
+        CephDisk.augtool("set /files/etc/ceph/ceph.conf/global/osd_dmcrypt_type plain")
         self.activate_dmcrypt('plain')
         c.save_conf()
 
@@ -214,8 +235,13 @@ class TestCephDisk(object):
         c = CephDisk()
         self.activate_dmcrypt('luks')
 
+    def test_deactivate_reactivate_dmcrypt_luks(self):
+        CephDisk.augtool("rm /files/etc/ceph/ceph.conf/global/osd_dmcrypt_type")
+        self.activate_dmcrypt('luks')
+
     def activate_dmcrypt(self, type):
         c = CephDisk()
+        have_journal = True
         disk = c.unused_disks()[0]
         osd_uuid = str(uuid.uuid1())
         journal_uuid = str(uuid.uuid1())
@@ -226,13 +252,41 @@ class TestCephDisk(object):
              " --dmcrypt " +
              " " + disk)
         c.wait_for_osd_up(osd_uuid)
+        c.check_osd_status(osd_uuid, have_journal)
         data_partition = c.get_osd_partition(osd_uuid)
-        assert data_partition['type'] == 'data'
-        assert data_partition['state'] == 'active'
-        journal_partition = c.get_journal_partition(osd_uuid)
-        assert journal_partition
+        c.sh("ceph-disk deactivate " + data_partition['path'])
+        c.sh("ceph-disk activate " + data_partition['path'] +
+             " --reactivate" + " --dmcrypt")
+        # check again
+        c.wait_for_osd_up(osd_uuid)
+        c.check_osd_status(osd_uuid, have_journal)
         c.destroy_osd(osd_uuid)
+
+
+    def test_activate_dmcrypt_plain(self):
+        CephDisk.augtool("set /files/etc/ceph/ceph.conf/global/osd_dmcrypt_type plain")
+        self.activate_dmcrypt('plain')
+        CephDisk.augtool("rm /files/etc/ceph/ceph.conf/global/osd_dmcrypt_type")
+
+    def test_activate_dmcrypt_luks(self):
+        CephDisk.augtool("rm /files/etc/ceph/ceph.conf/global/osd_dmcrypt_type")
+        self.activate_dmcrypt('luks')
+
+    def activate_dmcrypt(self, type):
+        c = CephDisk()
+        have_journal = True
+        disk = c.unused_disks()[0]
+        osd_uuid = str(uuid.uuid1())
+        journal_uuid = str(uuid.uuid1())
         c.sh("ceph-disk zap " + disk)
+        c.sh("ceph-disk -v prepare " +
+             " --osd-uuid " + osd_uuid +
+             " --journal-uuid " + journal_uuid +
+             " --dmcrypt " +
+             " " + disk)
+        c.wait_for_osd_up(osd_uuid)
+        c.check_osd_status(osd_uuid, have_journal)
+        c.destroy_osd(osd_uuid)
 
     def test_activate_no_journal(self):
         c = CephDisk()
@@ -257,6 +311,7 @@ class TestCephDisk(object):
 
     def test_activate_with_journal(self):
         c = CephDisk()
+        have_journal = True
         disk = c.unused_disks()[0]
         osd_uuid = str(uuid.uuid1())
         c.sh("ceph-disk zap " + disk)
@@ -265,14 +320,9 @@ class TestCephDisk(object):
         c.wait_for_osd_up(osd_uuid)
         device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
         assert len(device['partitions']) == 2
-        data_partition = c.get_osd_partition(osd_uuid)
-        assert data_partition['type'] == 'data'
-        assert data_partition['state'] == 'active'
-        journal_partition = c.get_journal_partition(osd_uuid)
-        assert journal_partition
+        c.check_osd_status(osd_uuid, have_journal)
         c.helper("pool_read_write")
         c.destroy_osd(osd_uuid)
-        c.sh("ceph-disk zap " + disk)
 
     def test_activate_separated_journal(self):
         c = CephDisk()
@@ -286,17 +336,14 @@ class TestCephDisk(object):
 
     def activate_separated_journal(self, data_disk, journal_disk):
         c = CephDisk()
+        have_journal = True
         osd_uuid = str(uuid.uuid1())
         c.sh("ceph-disk prepare --osd-uuid " + osd_uuid +
              " " + data_disk + " " + journal_disk)
         c.wait_for_osd_up(osd_uuid)
         device = json.loads(c.sh("ceph-disk list --format json " + data_disk))[0]
         assert len(device['partitions']) == 1
-        data_partition = c.get_osd_partition(osd_uuid)
-        assert data_partition['type'] == 'data'
-        assert data_partition['state'] == 'active'
-        journal_partition = c.get_journal_partition(osd_uuid)
-        assert journal_partition
+        c.check_osd_status(osd_uuid, have_journal)
         return osd_uuid
 
     #
@@ -349,9 +396,7 @@ class TestCephDisk(object):
         c.wait_for_osd_up(osd_uuid)
         device = json.loads(c.sh("ceph-disk list --format json " + data_disk))[0]
         assert len(device['partitions']) == 1
-        data_partition = c.get_osd_partition(osd_uuid)
-        assert data_partition['type'] == 'data'
-        assert data_partition['state'] == 'active'
+        c.check_osd_status(osd_uuid)
         journal_partition = c.get_journal_partition(osd_uuid)
         #
         # Verify the previous OSD partition has been reused
@@ -392,7 +437,6 @@ class TestCephDisk(object):
         assert journal_partition
         c.helper("pool_read_write")
         c.destroy_osd(osd_uuid)
-        c.sh("ceph-disk zap " + multipath)
         c.sh("udevadm settle")
         c.sh("multipath -F")
         c.unload_scsi_debug()