import argparse
import errno
import fcntl
+import json
import logging
import os
import os.path
raise Error('partition %d for %s does not appear to exist' % (pnum, dev))
-def list_all_partitions():
+def list_all_partitions(names):
"""
- Return a list of devices and partitions
+ Return a dict mapping each device to the list of its partitions;
+ only the devices in names are examined, or every device under
+ /sys/block when names is empty
"""
+ if not names:
+ names = os.listdir('/sys/block')
dev_part_list = {}
- for name in os.listdir('/sys/block'):
+ for name in names:
LOG.debug("list_all_partitions: " + name)
# /dev/fd0 may hang http://tracker.ceph.com/issues/6827
if re.match(r'^fd\d$', name):
return m.group(1).lower()
return None
-
-def more_osd_info(path, uuid_map):
- desc = []
- ceph_fsid = get_oneliner(path, 'ceph_fsid')
- if ceph_fsid:
- cluster = find_cluster_by_uuid(ceph_fsid)
- if cluster:
- desc.append('cluster ' + cluster)
- else:
- desc.append('unknown cluster ' + ceph_fsid)
-
- who = get_oneliner(path, 'whoami')
- if who:
- desc.append('osd.%s' % who)
-
- journal_uuid = get_oneliner(path, 'journal_uuid')
- if journal_uuid:
- journal_uuid = journal_uuid.lower()
- if journal_uuid in uuid_map:
- desc.append('journal %s' % uuid_map[journal_uuid])
-
- return desc
-
-def list_dev_osd(dev, uuid_map):
- path = is_mounted(dev)
- fs_type = get_dev_fs(dev)
- desc = []
- if path:
- desc.append('active')
- desc.extend(more_osd_info(path, uuid_map))
- elif fs_type:
+def more_osd_info(path, uuid_map, desc):
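+ """
+ Fill desc with the osd metadata found in the data directory at
+ path: ceph_fsid (and the cluster it belongs to), whoami and the
+ journal uuid, resolving journal_dev via uuid_map when known.
+ """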
+ desc['ceph_fsid'] = get_oneliner(path, 'ceph_fsid')
+ if desc['ceph_fsid']:
+ desc['cluster'] = find_cluster_by_uuid(desc['ceph_fsid'])
+ desc['whoami'] = get_oneliner(path, 'whoami')
+ desc['journal_uuid'] = get_oneliner(path, 'journal_uuid')
+ if desc['journal_uuid']:
+ desc['journal_uuid'] = desc['journal_uuid'].lower()
+ if desc['journal_uuid'] in uuid_map:
+ desc['journal_dev'] = uuid_map[desc['journal_uuid']]
+
+def list_dev_osd(dev, uuid_map, desc):
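+ """
+ Fill desc with the mount point, filesystem type and state of the
+ osd data device dev: 'active' if it is mounted, 'prepared' if a
+ temporary mount finds the magic file, 'unprepared' otherwise.
+ """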
+ desc['mount'] = is_mounted(dev)
+ desc['fs_type'] = get_dev_fs(dev)
+ desc['state'] = 'unprepared'
+ if desc['mount']:
+ desc['state'] = 'active'
+ more_osd_info(desc['mount'], uuid_map, desc)
+ elif desc['fs_type']:
try:
- tpath = mount(dev=dev, fstype=fs_type, options='')
+ tpath = mount(dev=dev, fstype=desc['fs_type'], options='')
if tpath:
try:
magic = get_oneliner(tpath, 'magic')
if magic is not None:
- desc.append('prepared')
- desc.extend(more_osd_info(tpath, uuid_map))
+ desc['magic'] = magic
+ desc['state'] = 'prepared'
+ more_osd_info(tpath, uuid_map, desc)
finally:
unmount(tpath)
except MountError:
pass
+
+def list_format_more_osd_info_plain(dev):
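+ """
+ Return a list of human readable strings for the osd metadata in
+ dev: cluster name (or 'unknown cluster'), osd id and journal
+ device, skipping the fields that are missing.
+ """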
+ desc = []
+ if dev.get('ceph_fsid'):
+ if dev.get('cluster'):
+ desc.append('cluster ' + dev['cluster'])
+ else:
+ desc.append('unknown cluster ' + dev['ceph_fsid'])
+ if dev.get('whoami'):
+ desc.append('osd.%s' % dev['whoami'])
+ if dev.get('journal_dev'):
+ desc.append('journal %s' % dev['journal_dev'])
return desc
+
+def list_format_dev_plain(dev, devices=[], prefix=''):
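+ """
+ Return a one line plain text description of the device or
+ partition dev, prefixed with prefix; devices is only used to look
+ up the dmcrypt holder of an encrypted data partition.
+ """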
+ desc = []
+ if dev['ptype'] == OSD_UUID:
+ desc = ['ceph data', dev['state']] + list_format_more_osd_info_plain(dev)
+ elif dev['ptype'] in (DMCRYPT_OSD_UUID,
+ DMCRYPT_LUKS_OSD_UUID):
+ dmcrypt = dev['dmcrypt']
+ if not dmcrypt['holders']:
+ desc = ['ceph data (dmcrypt %s)' % dmcrypt['type'], 'not currently mapped']
+ elif len(dmcrypt['holders']) == 1:
+ holder = '/dev/' + dmcrypt['holders'][0]
+ def lookup_dev(devices, path):
+ for device in devices:
+ if device['path'] == path:
+ return device
+ holder_dev = lookup_dev(devices, holder)
+ desc = ['ceph data (dmcrypt %s %s)' % (dmcrypt['type'], holder)] + list_format_more_osd_info_plain(holder_dev)
+ else:
+ desc = ['ceph data (dmcrypt %s)' % dmcrypt['type'], 'holders: ' + ','.join(dmcrypt['holders'])]
+ elif dev['ptype'] == JOURNAL_UUID:
+ desc.append('ceph journal')
+ if dev.get('journal_for'):
+ desc.append('for %s' % dev['journal_for'])
+ elif dev['ptype'] in (DMCRYPT_JOURNAL_UUID,
+ DMCRYPT_LUKS_JOURNAL_UUID):
+ dmcrypt = dev['dmcrypt']
+ if dmcrypt['holders'] and len(dmcrypt['holders']) == 1:
+ desc = ['ceph journal (dmcrypt %s /dev/%s)' % (dmcrypt['type'], dmcrypt['holders'][0])]
+ else:
+ desc = ['ceph journal (dmcrypt %s)' % dmcrypt['type']]
+ if dev.get('journal_for'):
+ desc.append('for %s' % dev['journal_for'])
+ else:
+ desc.append(dev['type'])
+ if dev.get('fs_type'):
+ desc.append(dev['fs_type'])
+ elif dev.get('ptype'):
+ desc.append(dev['ptype'])
+ if dev.get('mount'):
+ desc.append('mounted on %s' % dev['mount'])
+ return '%s%s %s' % (prefix, dev['path'], ', '.join(desc))
+
+def list_format_plain(devices):
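+ """
+ Return the plain text report for devices: one line per device or
+ partition, with partitions indented under the disk that holds
+ them.
+ """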
+ lines = []
+ for device in devices:
+ if device.get('partitions'):
+ lines.append('%s :' % device['path'])
+ for p in sorted(device['partitions']):
+ lines.append(list_format_dev_plain(dev=p,
+ devices=devices,
+ prefix=' '))
+ else:
+ lines.append(list_format_dev_plain(dev=device,
+ devices=devices,
+ prefix=''))
+ return "\n".join(lines)
+
def list_dev(dev, uuid_map, journal_map):
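+ """
+ Return a dict describing dev: path, partition type and uuid,
+ dmcrypt information and, for ceph data or journal partitions,
+ the osd metadata gathered from uuid_map and journal_map.
+ """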
- ptype = 'unknown'
- prefix = ''
- if is_partition(dev):
- ptype = get_partition_type(dev)
- prefix = ' '
+ info = {
+ 'path': dev,
+ 'dmcrypt': {},
+ }
- LOG.debug("list_dev(dev = " + dev + ", ptype = " + ptype + ")")
- desc = []
+ info['is_partition'] = is_partition(dev)
+ if info['is_partition']:
+ ptype = get_partition_type(dev)
+ info['uuid'] = get_partition_uuid(dev)
+ else:
+ ptype = 'unknown'
+ info['ptype'] = ptype
+ LOG.info("list_dev(dev = " + dev + ", ptype = " + str(ptype) + ")")
if ptype in (OSD_UUID, MPATH_OSD_UUID):
- desc = list_dev_osd(dev, uuid_map)
- if desc:
- desc = ['ceph data'] + desc
- else:
- desc = ['ceph data', 'unprepared']
+ info['type'] = 'data'
+ if ptype == MPATH_OSD_UUID:
+ info['multipath'] = True
+ list_dev_osd(dev, uuid_map, info)
elif ptype == DMCRYPT_OSD_UUID:
holders = is_held(dev)
- if not holders:
- desc = ['ceph data (dmcrypt plain)', 'not currently mapped']
- elif len(holders) == 1:
- holder = '/dev/' + holders[0]
- fs_desc = list_dev_osd(holder, uuid_map)
- desc = ['ceph data (dmcrypt plain %s)' % holder] + fs_desc
- else:
- desc = ['ceph data (dmcrypt plain)', 'holders: ' + ','.join(holders)]
+ info['type'] = 'data'
+ info['dmcrypt']['holders'] = holders
+ info['dmcrypt']['type'] = 'plain'
+ if len(holders) == 1:
+ list_dev_osd('/dev/' + holders[0], uuid_map, info)
elif ptype == DMCRYPT_LUKS_OSD_UUID:
holders = is_held(dev)
- if not holders:
- desc = ['ceph data (dmcrypt LUKS)', 'not currently mapped']
- elif len(holders) == 1:
- holder = '/dev/' + holders[0]
- fs_desc = list_dev_osd(holder, uuid_map)
- desc = ['ceph data (dmcrypt LUKS %s)' % holder] + fs_desc
- else:
- desc = ['ceph data (dmcrypt LUKS)', 'holders: ' + ','.join(holders)]
+ info['type'] = 'data'
+ info['dmcrypt']['holders'] = holders
+ info['dmcrypt']['type'] = 'LUKS'
+ if len(holders) == 1:
+ list_dev_osd('/dev/' + holders[0], uuid_map, info)
elif ptype in (JOURNAL_UUID, MPATH_JOURNAL_UUID):
- desc.append('ceph journal')
- part_uuid = get_partition_uuid(dev)
- if part_uuid and part_uuid in journal_map:
- desc.append('for %s' % journal_map[part_uuid])
+ info['type'] = 'journal'
+ if ptype == MPATH_JOURNAL_UUID:
+ info['multipath'] = True
+ if info.get('uuid') in journal_map:
+ info['journal_for'] = journal_map[info['uuid']]
elif ptype == DMCRYPT_JOURNAL_UUID:
holders = is_held(dev)
- if len(holders) == 1:
- desc = ['ceph journal (dmcrypt plain /dev/%s)' % holders[0]]
- else:
- desc = ['ceph journal (dmcrypt plain)']
- part_uuid = get_partition_uuid(dev)
- if part_uuid and part_uuid in journal_map:
- desc.append('for %s' % journal_map[part_uuid])
+ info['type'] = 'journal'
+ info['dmcrypt']['type'] = 'plain'
+ info['dmcrypt']['holders'] = holders
+ if info.get('uuid') in journal_map:
+ info['journal_for'] = journal_map[info['uuid']]
elif ptype == DMCRYPT_LUKS_JOURNAL_UUID:
holders = is_held(dev)
- if len(holders) == 1:
- desc = ['ceph journal (dmcrypt LUKS /dev/%s)' % holders[0]]
- else:
- desc = ['ceph journal (dmcrypt LUKS)']
- part_uuid = get_partition_uuid(dev)
- if part_uuid and part_uuid in journal_map:
- desc.append('for %s' % journal_map[part_uuid])
+ info['type'] = 'journal'
+ info['dmcrypt']['type'] = 'LUKS'
+ info['dmcrypt']['holders'] = holders
+ if info.get('uuid') in journal_map:
+ info['journal_for'] = journal_map[info['uuid']]
else:
path = is_mounted(dev)
fs_type = get_dev_fs(dev)
if is_swap(dev):
- desc.append('swap')
+ info['type'] = 'swap'
else:
- desc.append('other')
+ info['type'] = 'other'
if fs_type:
- desc.append(fs_type)
- elif ptype:
- desc.append(ptype)
+ info['fs_type'] = fs_type
if path:
- desc.append('mounted on %s' % path)
+ info['mount'] = path
- print '%s%s %s' % (prefix, dev, ', '.join(desc))
+ return info
-
-def main_list(args):
- partmap = list_all_partitions()
+def list_devices(args):
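+ """
+ Return a list of dicts, one per disk, each with its partitions
+ (as returned by list_dev) nested under it; args.path restricts
+ the scan to the given /sys/block names.
+ """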
+ partmap = list_all_partitions(args.path)
uuid_map = {}
journal_map = {}
if part_uuid:
uuid_map[part_uuid] = dev
ptype = get_partition_type(dev)
- LOG.debug("main_list: " + dev + " " + ptype + " " +
- str(part_uuid))
- if ptype == OSD_UUID:
- fs_type = get_dev_fs(dev)
+ LOG.debug("list_devices: " + dev +
+ " ptype = " + str(ptype) +
+ " uuid = " + str(part_uuid))
+ if ptype in (OSD_UUID,
+ DMCRYPT_OSD_UUID,
+ DMCRYPT_LUKS_OSD_UUID):
+ if ptype in (DMCRYPT_OSD_UUID,
+ DMCRYPT_LUKS_OSD_UUID):
+ holders = is_held(dev)
+ if len(holders) != 1:
+ continue
+ dev_to_mount = '/dev/' + holders[0]
+ else:
+ dev_to_mount = dev
+
+ fs_type = get_dev_fs(dev_to_mount)
if fs_type is not None:
try:
- tpath = mount(dev=dev, fstype=fs_type, options='')
+ tpath = mount(dev=dev_to_mount,
+ fstype=fs_type, options='')
try:
journal_uuid = get_oneliner(tpath, 'journal_uuid')
if journal_uuid:
unmount(tpath)
except MountError:
pass
- if ptype in (DMCRYPT_OSD_UUID,
- DMCRYPT_LUKS_OSD_UUID):
- holders = is_held(dev)
- if len(holders) == 1:
- holder = '/dev/' + holders[0]
- fs_type = get_dev_fs(holder)
- if fs_type is not None:
- try:
- tpath = mount(dev=holder, fstype=fs_type, options='')
- try:
- journal_uuid = get_oneliner(tpath, 'journal_uuid')
- if journal_uuid:
- journal_map[journal_uuid.lower()] = dev
- finally:
- unmount(tpath)
- except MountError:
- pass
-
- LOG.debug("main_list: " + str(partmap) + ", " +
- str(uuid_map) + ", " + str(journal_map))
+ LOG.debug("list_devices: " + str(partmap) + ", uuid_map = " +
+ str(uuid_map) + ", journal_map = " + str(journal_map))
+
+ devices = []
for base, parts in sorted(partmap.iteritems()):
if parts:
- print '%s :' % get_dev_path(base)
+ disk = { 'path': get_dev_path(base) }
+ partitions = []
for p in sorted(parts):
- list_dev(get_dev_path(p), uuid_map, journal_map)
+ partitions.append(list_dev(get_dev_path(p), uuid_map, journal_map))
+ disk['partitions'] = partitions
+ devices.append(disk)
else:
- list_dev(get_dev_path(base), uuid_map, journal_map)
+ device = list_dev(get_dev_path(base), uuid_map, journal_map)
+ device['path'] = get_dev_path(base)
+ devices.append(device)
+ LOG.debug("list_devices: " + str(devices))
+ return devices
+
+def main_list(args):
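+ """
+ Print the report built by list_devices, as JSON when --format
+ json is given and as plain text otherwise.
+ """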
+ devices = list_devices(args)
+ if args.format == 'json':
+ print json.dumps(devices)
+ else:
+ output = list_format_plain(devices)
+ if output:
+ print output
###########################
def make_list_parser(subparsers):
list_parser = subparsers.add_parser('list', help='List disks, partitions, and Ceph OSDs')
+ list_parser.add_argument(
+ '--format',
+ help='output format',
+ default='plain',
+ choices=['json','plain'],
+ )
+ list_parser.add_argument(
+ 'path',
+ metavar='PATH',
+ nargs='*',
+ help='path to block devices, relative to /sys/block',
+ )
list_parser.set_defaults(
func=main_list,
)
+from mock import patch, DEFAULT, Mock
+import argparse
+import pytest
import ceph_disk
-# This file tests nothing (yet) except for being able to import ceph_disk
-# correctly and thus ensuring somewhat that it will work under different Python
-# versions. You must write unittests here so that code has adequate coverage.
+def fail_to_mount(dev, fstype, options):
+ raise ceph_disk.MountError(dev + " mount fail")
class TestCephDisk(object):
- def test_basic(self):
- assert True
+ def setup_class(self):
+ ceph_disk.setup_logging(verbose=True, log_stdout=False)
+
+ def test_main_list_json(self, capsys):
+ args = ceph_disk.parse_args(['list', '--format', 'json'])
+ with patch.multiple(
+ ceph_disk,
+ list_devices=lambda args: {}):
+ ceph_disk.main_list(args)
+ out, err = capsys.readouterr()
+ assert '{}\n' == out
+
+ def test_main_list_plain(self, capsys):
+ args = ceph_disk.parse_args(['list'])
+ with patch.multiple(
+ ceph_disk,
+ list_devices=lambda args: {}):
+ ceph_disk.main_list(args)
+ out, err = capsys.readouterr()
+ assert '' == out
+
+ def test_list_format_more_osd_info_plain(self):
+ dev = {
+ 'ceph_fsid': 'UUID',
+ 'cluster': 'ceph',
+ 'whoami': '1234',
+ 'journal_dev': '/dev/Xda2',
+ }
+ out = ceph_disk.list_format_more_osd_info_plain(dev)
+ assert dev['cluster'] in " ".join(out)
+ assert dev['journal_dev'] in " ".join(out)
+ assert dev['whoami'] in " ".join(out)
+
+ dev = {
+ 'ceph_fsid': 'UUID',
+ 'whoami': '1234',
+ 'journal_dev': '/dev/Xda2',
+ }
+ out = ceph_disk.list_format_more_osd_info_plain(dev)
+ assert 'unknown cluster' in " ".join(out)
+
+ def test_list_format_plain(self):
+ payload = [{
+ 'path': '/dev/Xda',
+ 'ptype': 'unknown',
+ 'type': 'other',
+ 'mount': '/somewhere',
+ }]
+ out = ceph_disk.list_format_plain(payload)
+ assert payload[0]['path'] in out
+ assert payload[0]['type'] in out
+ assert payload[0]['mount'] in out
+
+ payload = [{
+ 'path': '/dev/Xda1',
+ 'ptype': 'unknown',
+ 'type': 'swap',
+ }]
+ out = ceph_disk.list_format_plain(payload)
+ assert payload[0]['path'] in out
+ assert payload[0]['type'] in out
+
+ payload = [{
+ 'path': '/dev/Xda',
+ 'partitions': [
+ {
+ 'dmcrypt': {},
+ 'ptype': 'whatever',
+ 'is_partition': True,
+ 'fs_type': 'ext4',
+ 'path': '/dev/Xda1',
+ 'mount': '/somewhere',
+ 'type': 'other',
+ }
+ ],
+ }]
+ out = ceph_disk.list_format_plain(payload)
+ assert payload[0]['path'] in out
+ assert payload[0]['partitions'][0]['path'] in out
+
+ def test_list_format_dev_plain(self):
+ #
+ # data
+ #
+ dev = {
+ 'path': '/dev/Xda1',
+ 'ptype': ceph_disk.OSD_UUID,
+ 'state': 'prepared',
+ 'whoami': '1234',
+ }
+ out = ceph_disk.list_format_dev_plain(dev)
+ assert 'data' in out
+ assert dev['whoami'] in out
+ assert dev['state'] in out
+ #
+ # journal
+ #
+ dev = {
+ 'path': '/dev/Xda2',
+ 'ptype': ceph_disk.JOURNAL_UUID,
+ 'journal_for': '/dev/Xda1',
+ }
+ out = ceph_disk.list_format_dev_plain(dev)
+ assert 'journal' in out
+ assert dev['journal_for'] in out
+
+ #
+ # dmcrypt data
+ #
+ ptype2type = {
+ ceph_disk.DMCRYPT_OSD_UUID: 'plain',
+ ceph_disk.DMCRYPT_LUKS_OSD_UUID: 'LUKS',
+ }
+ for (ptype, type) in ptype2type.iteritems():
+ for holders in ((), ("dm_0",), ("dm_0", "dm_1")):
+ devices = [{
+ 'path': '/dev/dm_0',
+ 'whoami': '1234',
+ }]
+ dev = {
+ 'dmcrypt': {
+ 'holders': holders,
+ 'type': type,
+ },
+ 'path': '/dev/Xda1',
+ 'ptype': ptype,
+ 'state': 'prepared',
+ }
+ out = ceph_disk.list_format_dev_plain(dev, devices)
+ assert 'data' in out
+ assert 'dmcrypt' in out
+ assert type in out
+ if len(holders) == 1:
+ assert devices[0]['whoami'] in out
+ for holder in holders:
+ assert holder in out
+
+ #
+ # dmcrypt journal
+ #
+ ptype2type = {
+ ceph_disk.DMCRYPT_JOURNAL_UUID: 'plain',
+ ceph_disk.DMCRYPT_LUKS_JOURNAL_UUID: 'LUKS',
+ }
+ for (ptype, type) in ptype2type.iteritems():
+ for holders in ((), ("dm_0",)):
+ dev = {
+ 'path': '/dev/Xda2',
+ 'ptype': ptype,
+ 'journal_for': '/dev/Xda1',
+ 'dmcrypt': {
+ 'holders': holders,
+ 'type': type,
+ },
+ }
+ out = ceph_disk.list_format_dev_plain(dev, devices)
+ assert 'journal' in out
+ assert 'dmcrypt' in out
+ assert type in out
+ assert dev['journal_for'] in out
+ if len(holders) == 1:
+ assert holders[0] in out
+
+ def test_list_dev_osd(self):
+ dev = "Xda"
+ mount_path = '/mount/path'
+ fs_type = 'ext4'
+ cluster = 'ceph'
+ uuid_map = {}
+ def more_osd_info(path, uuid_map, desc):
+ desc['cluster'] = cluster
+ #
+ # mounted therefore active
+ #
+ with patch.multiple(
+ ceph_disk,
+ is_mounted=lambda dev: mount_path,
+ get_dev_fs=lambda dev: fs_type,
+ more_osd_info=more_osd_info
+ ):
+ desc = {}
+ ceph_disk.list_dev_osd(dev, uuid_map, desc)
+ assert {'cluster': 'ceph',
+ 'fs_type': 'ext4',
+ 'mount': '/mount/path',
+ 'state': 'active'} == desc
+ #
+ # not mounted and cannot mount: unprepared
+ #
+ mount_path = None
+ with patch.multiple(
+ ceph_disk,
+ is_mounted=lambda dev: mount_path,
+ get_dev_fs=lambda dev: fs_type,
+ mount=fail_to_mount,
+ more_osd_info=more_osd_info
+ ):
+ desc = {}
+ ceph_disk.list_dev_osd(dev, uuid_map, desc)
+ assert {'fs_type': 'ext4',
+ 'mount': mount_path,
+ 'state': 'unprepared'} == desc
+ #
+ # not mounted and magic found: prepared
+ #
+ def get_oneliner(path, what):
+ if what == 'magic':
+ return ceph_disk.CEPH_OSD_ONDISK_MAGIC
+ else:
+ raise Exception('unknown ' + what)
+ with patch.multiple(
+ ceph_disk,
+ is_mounted=lambda dev: mount_path,
+ get_dev_fs=lambda dev: fs_type,
+ mount=DEFAULT,
+ unmount=DEFAULT,
+ get_oneliner=get_oneliner,
+ more_osd_info=more_osd_info
+ ):
+ desc = {}
+ ceph_disk.list_dev_osd(dev, uuid_map, desc)
+ assert {'cluster': 'ceph',
+ 'fs_type': 'ext4',
+ 'mount': mount_path,
+ 'magic': ceph_disk.CEPH_OSD_ONDISK_MAGIC,
+ 'state': 'prepared'} == desc
+
+ def test_list_all_partitions(self):
+ partition_uuid = "56244cf5-83ef-4984-888a-2d8b8e0e04b2"
+ disk = "Xda"
+ partition = "Xda1"
+
+ with patch(
+ 'ceph_disk.os',
+ listdir=lambda path: [disk],
+ ), patch.multiple(
+ ceph_disk,
+ list_partitions=lambda dev: [partition],
+ ):
+ assert {disk: [partition]} == ceph_disk.list_all_partitions([])
+
+ with patch.multiple(
+ ceph_disk,
+ list_partitions=lambda dev: [partition],
+ ):
+ assert {disk: [partition]} == ceph_disk.list_all_partitions([disk])
+
+ def test_list_data(self):
+ args = ceph_disk.parse_args(['list'])
+ #
+ # a data partition that fails to mount is silently
+ # ignored
+ #
+ partition_uuid = "56244cf5-83ef-4984-888a-2d8b8e0e04b2"
+ disk = "Xda"
+ partition = "Xda1"
+ fs_type = "ext4"
+
+ with patch.multiple(
+ ceph_disk,
+ list_all_partitions=lambda names: { disk: [partition] },
+ get_partition_uuid=lambda dev: partition_uuid,
+ get_partition_type=lambda dev: ceph_disk.OSD_UUID,
+ get_dev_fs=lambda dev: fs_type,
+ mount=fail_to_mount,
+ unmount=DEFAULT,
+ is_partition=lambda dev: True,
+ ):
+ expect = [{'path': '/dev/' + disk,
+ 'partitions': [{
+ 'dmcrypt': {},
+ 'fs_type': fs_type,
+ 'is_partition': True,
+ 'mount': None,
+ 'path': '/dev/' + partition,
+ 'ptype': ceph_disk.OSD_UUID,
+ 'state': 'unprepared',
+ 'type': 'data',
+ 'uuid': partition_uuid,
+ }]}]
+ assert expect == ceph_disk.list_devices(args)
+
+ def test_list_dmcrypt_data(self):
+ args = ceph_disk.parse_args(['list'])
+ partition_type2type = {
+ ceph_disk.DMCRYPT_OSD_UUID: 'plain',
+ ceph_disk.DMCRYPT_LUKS_OSD_UUID: 'LUKS',
+ }
+ for (partition_type, type) in partition_type2type.iteritems():
+ #
+ # dmcrypt data partition with one holder
+ #
+ partition_uuid = "56244cf5-83ef-4984-888a-2d8b8e0e04b2"
+ disk = "Xda"
+ partition = "Xda1"
+ holders = ["dm-0"]
+ with patch.multiple(
+ ceph_disk,
+ is_held=lambda dev: holders,
+ list_all_partitions=lambda names: { disk: [partition] },
+ get_partition_uuid=lambda dev: partition_uuid,
+ get_partition_type=lambda dev: partition_type,
+ is_partition=lambda dev: True,
+ ):
+ expect = [{'path': '/dev/' + disk,
+ 'partitions': [{
+ 'dmcrypt': {
+ 'holders': holders,
+ 'type': type,
+ },
+ 'fs_type': None,
+ 'is_partition': True,
+ 'mount': None,
+ 'path': '/dev/' + partition,
+ 'ptype': partition_type,
+ 'state': 'unprepared',
+ 'type': 'data',
+ 'uuid': partition_uuid,
+ }]}]
+ assert expect == ceph_disk.list_devices(args)
+ #
+ # dmcrypt data partition with two holders
+ #
+ partition_uuid = "56244cf5-83ef-4984-888a-2d8b8e0e04b2"
+ disk = "Xda"
+ partition = "Xda1"
+ holders = ["dm-0","dm-1"]
+ with patch.multiple(
+ ceph_disk,
+ is_held=lambda dev: holders,
+ list_all_partitions=lambda names: { disk: [partition] },
+ get_partition_uuid=lambda dev: partition_uuid,
+ get_partition_type=lambda dev: partition_type,
+ is_partition=lambda dev: True,
+ ):
+ expect = [{'path': '/dev/' + disk,
+ 'partitions': [{
+ 'dmcrypt': {
+ 'holders': holders,
+ 'type': type,
+ },
+ 'is_partition': True,
+ 'path': '/dev/' + partition,
+ 'ptype': partition_type,
+ 'type': 'data',
+ 'uuid': partition_uuid,
+ }]}]
+ assert expect == ceph_disk.list_devices(args)
+
+ def test_list_multipath(self):
+ args = ceph_disk.parse_args(['list'])
+ #
+ # multipath data partition
+ #
+ partition_uuid = "56244cf5-83ef-4984-888a-2d8b8e0e04b2"
+ disk = "Xda"
+ partition = "Xda1"
+ with patch.multiple(
+ ceph_disk,
+ list_all_partitions=lambda names: { disk: [partition] },
+ get_partition_uuid=lambda dev: partition_uuid,
+ get_partition_type=lambda dev: ceph_disk.MPATH_OSD_UUID,
+ is_partition=lambda dev: True,
+ ):
+ expect = [{'path': '/dev/' + disk,
+ 'partitions': [{
+ 'dmcrypt': {},
+ 'fs_type': None,
+ 'is_partition': True,
+ 'mount': None,
+ 'multipath': True,
+ 'path': '/dev/' + partition,
+ 'ptype': ceph_disk.MPATH_OSD_UUID,
+ 'state': 'unprepared',
+ 'type': 'data',
+ 'uuid': partition_uuid,
+ }]}]
+ assert expect == ceph_disk.list_devices(args)
+ #
+ # multipath journal partition
+ #
+ journal_partition_uuid = "2cc40457-259e-4542-b029-785c7cc37871"
+ with patch.multiple(
+ ceph_disk,
+ list_all_partitions=lambda names: { disk: [partition] },
+ get_partition_uuid=lambda dev: journal_partition_uuid,
+ get_partition_type=lambda dev: ceph_disk.MPATH_JOURNAL_UUID,
+ is_partition=lambda dev: True,
+ ):
+ expect = [{'path': '/dev/' + disk,
+ 'partitions': [{
+ 'dmcrypt': {},
+ 'is_partition': True,
+ 'multipath': True,
+ 'path': '/dev/' + partition,
+ 'ptype': ceph_disk.MPATH_JOURNAL_UUID,
+ 'type': 'journal',
+ 'uuid': journal_partition_uuid,
+ }]}]
+ assert expect == ceph_disk.list_devices(args)
+
+ def test_list_dmcrypt(self):
+ self.list(ceph_disk.DMCRYPT_OSD_UUID, ceph_disk.DMCRYPT_JOURNAL_UUID)
+ self.list(ceph_disk.DMCRYPT_LUKS_OSD_UUID, ceph_disk.DMCRYPT_LUKS_JOURNAL_UUID)
+
+ def test_list_normal(self):
+ self.list(ceph_disk.OSD_UUID, ceph_disk.JOURNAL_UUID)
+
+ def list(self, data_ptype, journal_ptype):
+ args = ceph_disk.parse_args(['--verbose', 'list'])
+ #
+ # a single disk has a data partition and a journal
+ # partition and the osd is active
+ #
+ data_uuid = "56244cf5-83ef-4984-888a-2d8b8e0e04b2"
+ disk = "Xda"
+ data = "Xda1"
+ data_holder = "dm-0"
+ journal = "Xda2"
+ journal_holder = "dm-0"
+ mount_path = '/mount/path'
+ fs_type = 'ext4'
+ journal_uuid = "7ad5e65a-0ca5-40e4-a896-62a74ca61c55"
+ ceph_fsid = "60a2ef70-d99b-4b9b-a83c-8a86e5e60091"
+ osd_id = '1234'
+ def get_oneliner(path, what):
+ if what == 'journal_uuid':
+ return journal_uuid
+ elif what == 'ceph_fsid':
+ return ceph_fsid
+ elif what == 'whoami':
+ return osd_id
+ else:
+ raise Exception('unknown ' + what)
+ def get_partition_uuid(dev):
+ if dev == '/dev/' + data:
+ return data_uuid
+ elif dev == '/dev/' + journal:
+ return journal_uuid
+ else:
+ raise Exception('unknown ' + dev)
+ def get_partition_type(dev):
+ if (dev == '/dev/' + data or
+ dev == '/dev/' + data_holder):
+ return data_ptype
+ elif (dev == '/dev/' + journal or
+ dev == '/dev/' + journal_holder):
+ return journal_ptype
+ else:
+ raise Exception('unknown ' + dev)
+ cluster = 'ceph'
+ if data_ptype == ceph_disk.OSD_UUID:
+ data_dmcrypt = {}
+ elif data_ptype == ceph_disk.DMCRYPT_OSD_UUID:
+ data_dmcrypt = {
+ 'type': 'plain',
+ 'holders': [data_holder],
+ }
+ elif data_ptype == ceph_disk.DMCRYPT_LUKS_OSD_UUID:
+ data_dmcrypt = {
+ 'type': 'LUKS',
+ 'holders': [data_holder],
+ }
+ else:
+ raise Exception('unknown ' + data_ptype)
+
+ if journal_ptype == ceph_disk.JOURNAL_UUID:
+ journal_dmcrypt = {}
+ elif journal_ptype == ceph_disk.DMCRYPT_JOURNAL_UUID:
+ journal_dmcrypt = {
+ 'type': 'plain',
+ 'holders': [journal_holder],
+ }
+ elif journal_ptype == ceph_disk.DMCRYPT_LUKS_JOURNAL_UUID:
+ journal_dmcrypt = {
+ 'type': 'LUKS',
+ 'holders': [journal_holder],
+ }
+ else:
+ raise Exception('unknown ' + journal_ptype)
+
+ if data_dmcrypt:
+ def is_held(dev):
+ if dev == '/dev/' + data:
+ return [data_holder]
+ elif dev == '/dev/' + journal:
+ return [journal_holder]
+ else:
+ raise Exception('unknown ' + dev)
+ else:
+ def is_held(dev):
+ return []
+
+ with patch.multiple(
+ ceph_disk,
+ list_all_partitions=lambda names: { disk: [data, journal] },
+ get_dev_fs=lambda dev: fs_type,
+ is_mounted=lambda dev: mount_path,
+ get_partition_uuid=get_partition_uuid,
+ get_partition_type=get_partition_type,
+ find_cluster_by_uuid=lambda ceph_fsid: cluster,
+ is_partition=lambda dev: True,
+ mount=DEFAULT,
+ unmount=DEFAULT,
+ get_oneliner=get_oneliner,
+ is_held=is_held,
+ ):
+ expect = [{'path': '/dev/' + disk,
+ 'partitions': [{
+ 'ceph_fsid': ceph_fsid,
+ 'cluster': cluster,
+ 'dmcrypt': data_dmcrypt,
+ 'fs_type': fs_type,
+ 'is_partition': True,
+ 'journal_dev': '/dev/' + journal,
+ 'journal_uuid': journal_uuid,
+ 'mount': mount_path,
+ 'path': '/dev/' + data,
+ 'ptype': data_ptype,
+ 'state': 'active',
+ 'type': 'data',
+ 'whoami': osd_id,
+ 'uuid': data_uuid,
+ }, {
+ 'dmcrypt': journal_dmcrypt,
+ 'is_partition': True,
+ 'journal_for': '/dev/' + data,
+ 'path': '/dev/' + journal,
+ 'ptype': journal_ptype,
+ 'type': 'journal',
+ 'uuid': journal_uuid,
+ },
+ ]}]
+ assert expect == ceph_disk.list_devices(args)
+
+ def test_list_other(self):
+ args = ceph_disk.parse_args(['list'])
+ #
+ # not swap, unknown fs type, not mounted, with uuid
+ #
+ partition_uuid = "56244cf5-83ef-4984-888a-2d8b8e0e04b2"
+ partition_type = "e51adfb9-e9fd-4718-9fc1-7a0cb03ea3f4"
+ disk = "Xda"
+ partition = "Xda1"
+ with patch.multiple(
+ ceph_disk,
+ list_all_partitions=lambda names: { disk: [partition] },
+ get_partition_uuid=lambda dev: partition_uuid,
+ get_partition_type=lambda dev: partition_type,
+ is_partition=lambda dev: True,
+ ):
+ expect = [{'path': '/dev/' + disk,
+ 'partitions': [{'dmcrypt': {},
+ 'is_partition': True,
+ 'path': '/dev/' + partition,
+ 'ptype': partition_type,
+ 'type': 'other',
+ 'uuid': partition_uuid}]}]
+ assert expect == ceph_disk.list_devices(args)
+ #
+ # not swap, mounted, ext4 fs type, with uuid
+ #
+ partition_uuid = "56244cf5-83ef-4984-888a-2d8b8e0e04b2"
+ partition_type = "e51adfb9-e9fd-4718-9fc1-7a0cb03ea3f4"
+ disk = "Xda"
+ partition = "Xda1"
+ mount_path = '/mount/path'
+ fs_type = 'ext4'
+ with patch.multiple(
+ ceph_disk,
+ list_all_partitions=lambda names: { disk: [partition] },
+ get_dev_fs=lambda dev: fs_type,
+ is_mounted=lambda dev: mount_path,
+ get_partition_uuid=lambda dev: partition_uuid,
+ get_partition_type=lambda dev: partition_type,
+ is_partition=lambda dev: True,
+ ):
+ expect = [{'path': '/dev/' + disk,
+ 'partitions': [{'dmcrypt': {},
+ 'is_partition': True,
+ 'mount': mount_path,
+ 'fs_type': fs_type,
+ 'path': '/dev/' + partition,
+ 'ptype': partition_type,
+ 'type': 'other',
+ 'uuid': partition_uuid,
+ }]}]
+ assert expect == ceph_disk.list_devices(args)
+
+ #
+ # swap, with uuid
+ #
+ partition_uuid = "56244cf5-83ef-4984-888a-2d8b8e0e04b2"
+ partition_type = "e51adfb9-e9fd-4718-9fc1-7a0cb03ea3f4"
+ disk = "Xda"
+ partition = "Xda1"
+ with patch.multiple(
+ ceph_disk,
+ list_all_partitions=lambda names: { disk: [partition] },
+ is_swap=lambda dev: True,
+ get_partition_uuid=lambda dev: partition_uuid,
+ get_partition_type=lambda dev: partition_type,
+ is_partition=lambda dev: True,
+ ):
+ expect = [{'path': '/dev/' + disk,
+ 'partitions': [{'dmcrypt': {},
+ 'is_partition': True,
+ 'path': '/dev/' + partition,
+ 'ptype': partition_type,
+ 'type': 'swap',
+ 'uuid': partition_uuid}]}]
+ assert expect == ceph_disk.list_devices(args)
+
+ #
+ # whole disk
+ #
+ partition_uuid = "56244cf5-83ef-4984-888a-2d8b8e0e04b2"
+ disk = "Xda"
+ partition = "Xda1"
+ with patch.multiple(
+ ceph_disk,
+ list_all_partitions=lambda names: { disk: [] },
+ is_partition=lambda dev: False,
+ ):
+ expect = [{'path': '/dev/' + disk,
+ 'dmcrypt': {},
+ 'is_partition': False,
+ 'ptype': 'unknown',
+ 'type': 'other'}]
+ assert expect == ceph_disk.list_devices(args)