def get_journal_partition(self, uuid):
return self.get_space_partition('journal', uuid)
+ def get_block_partition(self, uuid):
+ return self.get_space_partition('block', uuid)
+
+ def get_blockdb_partition(self, uuid):
+ return self.get_space_partition('block.db', uuid)
+
+ def get_blockwal_partition(self, uuid):
+ return self.get_space_partition('block.wal', uuid)
+
def get_space_partition(self, name, uuid):
data_partition = self.get_osd_partition(uuid)
space_dev = data_partition[name + '_dev']
" " + disk)
c.wait_for_osd_up(osd_uuid)
device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
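+ # the lone disk now carries four partitions: data, block, block.db and block.wal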
+ assert len(device['partitions']) == 4
+ c.check_osd_status(osd_uuid, 'block')
+ c.check_osd_status(osd_uuid, 'block.wal')
+ c.check_osd_status(osd_uuid, 'block.db')
+ c.helper("pool_read_write")
+ c.destroy_osd(osd_uuid)
+ c.sh("ceph-disk --verbose zap " + disk)
+
+ def test_activate_bluestore_separated_block_db_wal(self):
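+ # block.db and block.wal are placed on a second disk, separate from the data/block disk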
+ c = CephDisk()
+ disk1 = c.unused_disks()[0]
+ disk2 = c.unused_disks()[1]
+ osd_uuid = str(uuid.uuid1())
+ c.sh("ceph-disk --verbose zap " + disk1 + " " + disk2)
+ c.conf['global']['osd objectstore'] = 'bluestore'
+ c.save_conf()
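+ # prepare the OSD with data and block on disk1, block.db and block.wal on disk2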
+ c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " + osd_uuid +
+ " " + disk1 + " --block.db " + disk2 + " --block.wal " + disk2)
+ c.wait_for_osd_up(osd_uuid)
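+ # expect two partitions on each disk: data/block on disk1, block.db/block.wal on disk2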
+ device = json.loads(c.sh("ceph-disk list --format json " + disk1))[0]
+ assert len(device['partitions']) == 2
+ device = json.loads(c.sh("ceph-disk list --format json " + disk2))[0]
assert len(device['partitions']) == 2
c.check_osd_status(osd_uuid, 'block')
+ c.check_osd_status(osd_uuid, 'block.wal')
+ c.check_osd_status(osd_uuid, 'block.db')
c.helper("pool_read_write")
c.destroy_osd(osd_uuid)
+ c.sh("ceph-disk --verbose zap " + disk1 + " " + disk2)
+
+ def test_activate_bluestore_reuse_db_wal_partition(self):
+ c = CephDisk()
+ disks = c.unused_disks()
+ block_disk = disks[0]
+ db_wal_disk = disks[1]
+ #
+ # Create an OSD with two disks (one for block,
+ # the other for block.db and block.wal) and then destroy the OSD.
+ #
+ osd_uuid1 = str(uuid.uuid1())
+ c.sh("ceph-disk --verbose zap " + block_disk + " " + db_wal_disk)
+ c.conf['global']['osd objectstore'] = 'bluestore'
+ c.save_conf()
+ c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " +
+ osd_uuid1 + " " + block_disk + " --block.db " + db_wal_disk +
+ " --block.wal " + db_wal_disk)
+ c.wait_for_osd_up(osd_uuid1)
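+ # remember where block.db and block.wal ended up so they can be reused below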
+ blockdb_partition = c.get_blockdb_partition(osd_uuid1)
+ blockdb_path = blockdb_partition['path']
+ blockwal_partition = c.get_blockwal_partition(osd_uuid1)
+ blockwal_path = blockwal_partition['path']
+ c.destroy_osd(osd_uuid1)
+ c.sh("ceph-disk --verbose zap " + block_disk)
+ #
+ # Create another OSD with the block.db and block.wal partitions
+ # of the previous OSD
+ #
+ osd_uuid2 = str(uuid.uuid1())
+ c.sh("ceph-disk --verbose prepare --bluestore --osd-uuid " +
+ osd_uuid2 + " " + block_disk + " --block.db " + blockdb_path +
+ " --block.wal " + blockwal_path)
+ c.wait_for_osd_up(osd_uuid2)
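+ # reusing the db/wal partitions keeps both disks at exactly two partitions each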
+ device = json.loads(c.sh("ceph-disk list --format json " + block_disk))[0]
+ assert len(device['partitions']) == 2
+ device = json.loads(c.sh("ceph-disk list --format json " + db_wal_disk))[0]
+ assert len(device['partitions']) == 2
+ c.check_osd_status(osd_uuid2, 'block')
+ c.check_osd_status(osd_uuid2, 'block.wal')
+ c.check_osd_status(osd_uuid2, 'block.db')
+ blockdb_partition = c.get_blockdb_partition(osd_uuid2)
+ blockwal_partition = c.get_blockwal_partition(osd_uuid2)
+ #
+ # Verify that the partitions of the previous OSD have been reused
+ #
+ assert blockdb_partition['path'] == blockdb_path
+ assert blockwal_partition['path'] == blockwal_path
+ c.destroy_osd(osd_uuid2)
+ c.sh("ceph-disk --verbose zap " + block_disk + " " + db_wal_disk)
def test_activate_with_journal_dev_is_symlink(self):
c = CephDisk()