# When debugging these tests (must be root), here are a few useful commands:
#
# export PATH=..:$PATH
-# ln -sf /home/ubuntu/ceph/src/ceph-disk /usr/sbin/ceph-disk
+# ln -sf /home/ubuntu/ceph/src/ceph-disk $(which ceph-disk)
# ln -sf /home/ubuntu/ceph/udev/95-ceph-osd.rules /lib/udev/rules.d/95-ceph-osd.rules
# ln -sf /home/ubuntu/ceph/systemd/ceph-disk@.service /usr/lib/systemd/system/ceph-disk@.service
# ceph-disk.conf will be silently ignored if it is a symbolic link or a hard link
# check /var/log/upstart for logs
@staticmethod
def sh(command):
- output = subprocess.check_output(command, shell=True)
- LOG.debug("sh: " + command + ": " + output)
- return output.strip()
+ LOG.debug(":sh: " + command)
+ proc = subprocess.Popen(
+ args=command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ shell=True,
+ bufsize=1)
+ lines = []
+ with proc.stdout:
+ for line in iter(proc.stdout.readline, b''):
+ line = line.decode('utf-8')
+ lines.append(line)
+ LOG.debug(str(line.strip()))
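+ # mirror check_output: raise CalledProcessError when the command exits non-zero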
+ if proc.wait() != 0:
+ raise subprocess.CalledProcessError(
+ returncode=proc.returncode,
+ cmd=command
+ )
+ return "".join(lines)
def unused_disks(self, pattern='[vs]d.'):
names = filter(lambda x: re.match(pattern, x), os.listdir("/sys/block"))
raise Exception("journal for uuid = " + uuid + " not found in " + str(disks))
def destroy_osd(self, uuid):
- id = self.sh("ceph osd create " + uuid)
+ id = self.sh("ceph osd create " + uuid).strip()
self.helper("control_osd stop " + id + " || true")
self.wait_for_osd_down(uuid)
try:
def setup_class(self):
logging.basicConfig(level=logging.DEBUG)
c = CephDisk()
- if c.sh("lsb_release -si") == 'CentOS':
+ if c.sh("lsb_release -si").strip() == 'CentOS':
c.helper("install multipath-tools device-mapper-multipath")
c.conf['global']['osd journal size'] = 100
c.save_conf()
c = CephDisk()
disk = c.unused_disks()[0]
osd_uuid = str(uuid.uuid1())
- c.sh("ceph-disk prepare --osd-uuid " + osd_uuid + " " + disk)
+ c.sh("ceph-disk --verbose prepare --osd-uuid " + osd_uuid + " " + disk)
c.wait_for_osd_up(osd_uuid)
partition = c.get_osd_partition(osd_uuid)
assert partition['type'] == 'data'
assert partition['state'] == 'active'
c.destroy_osd(osd_uuid)
- c.sh("ceph-disk zap " + disk)
+ c.sh("ceph-disk --verbose zap " + disk)
def test_activate_dmcrypt_plain(self):
c = CephDisk()
disk = c.unused_disks()[0]
osd_uuid = str(uuid.uuid1())
journal_uuid = str(uuid.uuid1())
- c.sh("ceph-disk zap " + disk)
+ c.sh("ceph-disk --verbose zap " + disk)
c.sh("ceph-disk --verbose prepare " +
" --osd-uuid " + osd_uuid +
" --journal-uuid " + journal_uuid +
journal_partition = c.get_journal_partition(osd_uuid)
assert journal_partition
c.destroy_osd(osd_uuid)
- c.sh("ceph-disk zap " + disk)
+ c.sh("ceph-disk --verbose zap " + disk)
def test_activate_no_journal(self):
c = CephDisk()
disk = c.unused_disks()[0]
osd_uuid = str(uuid.uuid1())
- c.sh("ceph-disk zap " + disk)
+ c.sh("ceph-disk --verbose zap " + disk)
c.conf['global']['osd objectstore'] = 'memstore'
c.save_conf()
- c.sh("ceph-disk prepare --osd-uuid " + osd_uuid +
+ c.sh("ceph-disk --verbose prepare --osd-uuid " + osd_uuid +
" " + disk)
c.wait_for_osd_up(osd_uuid)
device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
assert 'journal_dev' not in partition
c.helper("pool_read_write")
c.destroy_osd(osd_uuid)
- c.sh("ceph-disk zap " + disk)
+ c.sh("ceph-disk --verbose zap " + disk)
c.save_conf()
def test_activate_with_journal(self):
c = CephDisk()
disk = c.unused_disks()[0]
osd_uuid = str(uuid.uuid1())
- c.sh("ceph-disk zap " + disk)
- c.sh("ceph-disk prepare --osd-uuid " + osd_uuid +
+ c.sh("ceph-disk --verbose zap " + disk)
+ c.sh("ceph-disk --verbose prepare --osd-uuid " + osd_uuid +
" " + disk)
c.wait_for_osd_up(osd_uuid)
device = json.loads(c.sh("ceph-disk list --format json " + disk))[0]
assert journal_partition
c.helper("pool_read_write")
c.destroy_osd(osd_uuid)
- c.sh("ceph-disk zap " + disk)
+ c.sh("ceph-disk --verbose zap " + disk)
def test_activate_separated_journal(self):
c = CephDisk()
osd_uuid = self.activate_separated_journal(data_disk, journal_disk)
c.helper("pool_read_write 1") # 1 == pool size
c.destroy_osd(osd_uuid)
- c.sh("ceph-disk zap " + data_disk + " " + journal_disk)
+ c.sh("ceph-disk --verbose zap " + data_disk + " " + journal_disk)
def activate_separated_journal(self, data_disk, journal_disk):
c = CephDisk()
osd_uuid = str(uuid.uuid1())
- c.sh("ceph-disk prepare --osd-uuid " + osd_uuid +
+ c.sh("ceph-disk --verbose prepare --osd-uuid " + osd_uuid +
" " + data_disk + " " + journal_disk)
c.wait_for_osd_up(osd_uuid)
device = json.loads(c.sh("ceph-disk list --format json " + data_disk))[0]
c.helper("pool_read_write 2") # 2 == pool size
c.destroy_osd(osd_uuid)
c.destroy_osd(other_osd_uuid)
- c.sh("ceph-disk zap " + data_disk + " " + journal_disk + " " + other_data_disk)
+ c.sh("ceph-disk --verbose zap " + data_disk + " " +
+ journal_disk + " " + other_data_disk)
#
# Create an OSD and reuse an existing journal partition
journal_partition = c.get_journal_partition(osd_uuid)
journal_path = journal_partition['path']
c.destroy_osd(osd_uuid)
- c.sh("ceph-disk zap " + data_disk)
+ c.sh("ceph-disk --verbose zap " + data_disk)
osd_uuid = str(uuid.uuid1())
#
# Create another OSD with the journal partition of the previous OSD
#
- c.sh("ceph-disk prepare --osd-uuid " + osd_uuid +
+ c.sh("ceph-disk --verbose prepare --osd-uuid " + osd_uuid +
" " + data_disk + " " + journal_path)
c.helper("pool_read_write 1") # 1 == pool size
c.wait_for_osd_up(osd_uuid)
#
assert journal_partition['path'] == journal_path
c.destroy_osd(osd_uuid)
- c.sh("ceph-disk zap " + data_disk + " " + journal_disk)
+ c.sh("ceph-disk --verbose zap " + data_disk + " " + journal_disk)
def test_activate_multipath(self):
c = CephDisk()
- if c.sh("lsb_release -si") != 'CentOS':
- pytest.skip("see issue https://bugs.launchpad.net/ubuntu/+source/multipath-tools/+bug/1488688")
+ if c.sh("lsb_release -si").strip() != 'CentOS':
+ pytest.skip(
+ "see issue https://bugs.launchpad.net/ubuntu/+source/multipath-tools/+bug/1488688")
c.ensure_sd()
#
# Figure out the name of the multipath device
# Prepare the multipath device
#
osd_uuid = str(uuid.uuid1())
- c.sh("ceph-disk zap " + multipath)
- c.sh("ceph-disk prepare --osd-uuid " + osd_uuid +
+ c.sh("ceph-disk --verbose zap " + multipath)
+ c.sh("ceph-disk --verbose prepare --osd-uuid " + osd_uuid +
" " + multipath)
c.wait_for_osd_up(osd_uuid)
device = json.loads(c.sh("ceph-disk list --format json " + multipath))[0]
assert journal_partition
c.helper("pool_read_write")
c.destroy_osd(osd_uuid)
- c.sh("ceph-disk zap " + multipath)
+ c.sh("ceph-disk --verbose zap " + multipath)
c.sh("udevadm settle")
c.sh("multipath -F")
c.unload_scsi_debug()