git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
ceph-disk: update/add ceph-disk test cases for bluestore 10135/head
author     Zhi Zhang <willzzhang@tencent.com>
           Wed, 14 Sep 2016 05:02:41 +0000 (13:02 +0800)
committer  Zhi Zhang <willzzhang@tencent.com>
           Tue, 20 Sep 2016 03:43:26 +0000 (11:43 +0800)
Signed-off-by: Zhi Zhang <zhangz.david@outlook.com>
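
A condensed sketch of the core scenario the new cases exercise (drawn from the test bodies below), assuming a bluestore objectstore and two unused disks; disk1, disk2 and osd_uuid are placeholders, and CephDisk and uuid come from qa/workunits/ceph-disk/ceph-disk-test.py:

    # Illustrative only: prepare a bluestore OSD whose block.db and block.wal
    # are placed on a second disk, then wait for the OSD to come up.
    c = CephDisk()
    disk1, disk2 = c.unused_disks()[0], c.unused_disks()[1]
    osd_uuid = str(uuid.uuid1())
    c.conf['global']['osd objectstore'] = 'bluestore'
    c.save_conf()
    c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " + osd_uuid +
         " " + disk1 + " --block.db " + disk2 + " --block.wal " + disk2)
    c.wait_for_osd_up(osd_uuid)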
qa/workunits/ceph-disk/ceph-disk-test.py

index 5fedde4d3638b99bc60d664781fab6f8c73f31a6..5c2003d6b33eea8a14f3da0cb90559cf161dc6aa 100644 (file)
@@ -144,6 +144,15 @@ class CephDisk:
     def get_journal_partition(self, uuid):
         return self.get_space_partition('journal', uuid)
 
+    def get_block_partition(self, uuid):
+        return self.get_space_partition('block', uuid)
+
+    def get_blockdb_partition(self, uuid):
+        return self.get_space_partition('block.db', uuid)
+
+    def get_blockwal_partition(self, uuid):
+        return self.get_space_partition('block.wal', uuid)
+
     def get_space_partition(self, name, uuid):
         data_partition = self.get_osd_partition(uuid)
         space_dev = data_partition[name + '_dev']
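
For reference, a minimal usage sketch for the new space-partition getters, assuming an activated bluestore OSD identified by osd_uuid (the fixture instance and uuid here are placeholders, not part of the patch):

    # Each getter resolves the named space device ('block', 'block.db',
    # 'block.wal') of the OSD's data partition and returns its partition dict.
    c = CephDisk()
    block = c.get_block_partition(osd_uuid)
    block_db = c.get_blockdb_partition(osd_uuid)
    block_wal = c.get_blockwal_partition(osd_uuid)
    print(block['path'], block_db['path'], block_wal['path'])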
@@ -455,10 +464,84 @@ class TestCephDisk(object):
              " " + disk)
         c.wait_for_osd_up(osd_uuid)
         device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
+        assert len(device['partitions']) == 4
+        c.check_osd_status(osd_uuid, 'block')
+        c.check_osd_status(osd_uuid, 'block.wal')
+        c.check_osd_status(osd_uuid, 'block.db')
+        c.helper("pool_read_write")
+        c.destroy_osd(osd_uuid)
+        c.sh("ceph-disk --verbose zap " + disk)
+
+    def test_activate_bluestore_separated_block_db_wal(self):
+        c = CephDisk()
+        disk1 = c.unused_disks()[0]
+        disk2 = c.unused_disks()[1]
+        osd_uuid = str(uuid.uuid1())
+        c.sh("ceph-disk --verbose zap " + disk1 + " " + disk2)
+        c.conf['global']['osd objectstore'] = 'bluestore'
+        c.save_conf()
+        c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " + osd_uuid +
+             " " + disk1 + " --block.db " + disk2 + " --block.wal " + disk2)
+        c.wait_for_osd_up(osd_uuid)
+        device = json.loads(c.sh("ceph-disk list --format json " + disk1))[0]
+        assert len(device['partitions']) == 2
+        device = json.loads(c.sh("ceph-disk list --format json " + disk2))[0]
         assert len(device['partitions']) == 2
         c.check_osd_status(osd_uuid, 'block')
+        c.check_osd_status(osd_uuid, 'block.wal')
+        c.check_osd_status(osd_uuid, 'block.db')
         c.helper("pool_read_write")
         c.destroy_osd(osd_uuid)
+        c.sh("ceph-disk --verbose zap " + disk1 + " " + disk2)
+
+    def test_activate_bluestore_reuse_db_wal_partition(self):
+        c = CephDisk()
+        disks = c.unused_disks()
+        block_disk = disks[0]
+        db_wal_disk = disks[1]
+        #
+        # Create an OSD with two disks (one for block,
+        # the other for block.db and block.wal) and then destroy the OSD.
+        #
+        osd_uuid1 = str(uuid.uuid1())
+        c.sh("ceph-disk --verbose zap " + block_disk + " " + db_wal_disk)
+        c.conf['global']['osd objectstore'] = 'bluestore'
+        c.save_conf()
+        c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " + 
+             osd_uuid1 + " " + block_disk + " --block.db " + db_wal_disk + 
+             " --block.wal " + db_wal_disk)
+        c.wait_for_osd_up(osd_uuid1)
+        blockdb_partition = c.get_blockdb_partition(osd_uuid1)
+        blockdb_path = blockdb_partition['path']
+        blockwal_partition = c.get_blockwal_partition(osd_uuid1)
+        blockwal_path = blockwal_partition['path']
+        c.destroy_osd(osd_uuid1)
+        c.sh("ceph-disk --verbose zap " + block_disk)
+        #
+        # Create another OSD with the block.db and block.wal partitions
+        # of the previous OSD.
+        #
+        osd_uuid2 = str(uuid.uuid1())
+        c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " + 
+             osd_uuid2 + " " + block_disk + " --block.db " + blockdb_path + 
+             " --block.wal " + blockwal_path)
+        c.wait_for_osd_up(osd_uuid2)
+        device = json.loads(c.sh("ceph-disk list --format json " + block_disk))[0]
+        assert len(device['partitions']) == 2
+        device = json.loads(c.sh("ceph-disk list --format json " + db_wal_disk))[0]
+        assert len(device['partitions']) == 2
+        c.check_osd_status(osd_uuid2, 'block')
+        c.check_osd_status(osd_uuid2, 'block.wal')
+        c.check_osd_status(osd_uuid2, 'block.db')
+        blockdb_partition = c.get_blockdb_partition(osd_uuid2)
+        blockwal_partition = c.get_blockwal_partition(osd_uuid2)
+        #
+        # Verify the partitions of the previous OSD have been reused.
+        #
+        assert blockdb_partition['path'] == blockdb_path
+        assert blockwal_partition['path'] == blockwal_path
+        c.destroy_osd(osd_uuid2)
+        c.sh("ceph-disk --verbose zap " + block_disk + " " + db_wal_disk)
 
     def test_activate_with_journal_dev_is_symlink(self):
         c = CephDisk()