From: Willem Jan Withagen
Date: Fri, 18 Oct 2019 14:21:09 +0000 (+0200)
Subject: ceph-volume-zfs: add the inventory command
X-Git-Tag: v15.1.0~1132^2
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=efe4eb61c14ef4d3dab05084023343027bdb7e0a;p=ceph-ci.git

ceph-volume-zfs: add the inventory command

```
usage: ceph-volume zfs inventory [-h] [--format {plain,json,json-pretty}]
                                 [path]

Generate an inventory of available devices

positional arguments:
  path                  Report on specific disk

optional arguments:
  -h, --help            show this help message and exit
  --format {plain,json,json-pretty}
                        Output format
```

Which generates:

```
wjw@zfstest.digiware.nl> ceph-volume zfs inventory

/dev/Device Path      Size             rotates Model name
/dev/ada0             232.89 GB        True    ST3250318AS
/dev/ada1             232.89 GB        True    ST3250318AS
/dev/ada2             223.57 GB        False   INTEL SSDSC2BB240G6
/dev/ada3             37.27 GB         False   Corsair CSSD-F40GB2
```

or:

```
wjw@zfstest.digiware.nl> ceph-volume zfs inventory --format json-pretty ada3
[
    {
        "abspath": "/dev/ada3",
        "available": true,
        "path": "/dev/ada3",
        "reject_reasons": [],
        "sys_api": {
            "descr": "Corsair CSSD-F40GB2",
            "fwheads": "16",
            "fwsectors": "63",
            "geomname": "ada3",
            "ident": "111465010000101800EC",
            "lunid": "5000000000000236",
            "mediasize": "40018599936 ",
            "mode": "r2w2e6",
            "name": "ada3",
            "rotationrate": "0",
            "sectorsize": "512",
            "stripeoffset": "0",
            "stripesize": "4096"
        }
    }
]
```

Short sketches of consuming the JSON report, of the geom parsing step, and of
the subcommand dispatch follow after the patch below.

Signed-off-by: Willem Jan Withagen
---

diff --git a/src/ceph-volume/plugin/zfs/ceph_volume_zfs/__init__.py b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/__init__.py
index d81455b15c1..0b0889f3639 100755
--- a/src/ceph-volume/plugin/zfs/ceph_volume_zfs/__init__.py
+++ b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/__init__.py
@@ -4,3 +4,10 @@
 __author__ = """Willem Jan Withagen"""
 __email__ = 'wjw@digiware.nl'
+
+import ceph_volume_zfs.zfs
+
+from collections import namedtuple
+
+sys_info = namedtuple('sys_info', ['devices'])
+sys_info.devices = dict()
diff --git a/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/__init__.py b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/__init__.py
index bad522f9a76..457418493d8 100755
--- a/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/__init__.py
+++ b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/__init__.py
@@ -1,2 +1,4 @@
 # -*- coding: utf-8 -*-
-from .main import ZFS
+
+import logging
+logger = logging.getLogger(__name__)
diff --git a/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/inventory.py b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/inventory.py
new file mode 100644
index 00000000000..be65e39acd1
--- /dev/null
+++ b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/inventory.py
@@ -0,0 +1,50 @@
+import argparse
+import json
+from textwrap import dedent
+
+# import ceph_volume.process
+
+from ceph_volume_zfs.util.disk import Disks
+
+class Inventory(object):
+
+    help = 'Generate a list of available devices'
+
+    def __init__(self, argv):
+        self.argv = argv
+
+    def format_report(self, inventory):
+        if self.args.format == 'json':
+            print(json.dumps(inventory.json_report()))
+        elif self.args.format == 'json-pretty':
+            print(json.dumps(inventory.json_report(), indent=4, sort_keys=True))
+        else:
+            print(inventory.pretty_report())
+
+    def main(self):
+        sub_command_help = dedent("""
+        Generate an inventory of available devices
+        """)
+        parser = argparse.ArgumentParser(
+            prog='ceph-volume zfs inventory',
+            description=sub_command_help,
+        )
+        parser.add_argument(
+            'path',
+            nargs='?',
+            default=None,
+            help=('Report on specific disk'),
+        )
+        parser.add_argument(
+            '--format',
+            choices=['plain', 'json', 'json-pretty'],
+            default='plain',
+            help='Output format',
+        )
+
+        self.args = parser.parse_args(self.argv)
+        if self.args.path:
+            self.format_report(Disks(self.args.path))
+        else:
+            self.format_report(Disks())
+
diff --git a/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/main.py b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/main.py
old mode 100755
new mode 100644
index 028d4876eda..073be6467dc
--- a/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/main.py
+++ b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/main.py
@@ -1,21 +1,23 @@
+# vim: expandtab smarttab shiftwidth=4 softtabstop=4
+
 import argparse
 from textwrap import dedent
 from ceph_volume import terminal
+from . import inventory
+from . import prepare
+from . import zap
 
-class ZFS(object):
+class ZFSDEV(object):
 
     help = 'Use ZFS to deploy OSDs'
 
     _help = dedent("""
-    Use ZFS to deploy OSDs
+    Use ZFS to deploy OSDs
 
-    {sub_help}
+    {sub_help}
     """)
 
-    mapper = {
-    }
-
     def __init__(self, argv):
         self.argv = argv
diff --git a/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/prepare.py b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/prepare.py
new file mode 100644
index 00000000000..7c075e86a55
--- /dev/null
+++ b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/prepare.py
@@ -0,0 +1,25 @@
+import argparse
+
+from textwrap import dedent
+# from ceph_volume.util import arg_validators
+
+class Prepare(object):
+
+    help = 'Prepare a device'
+
+    def __init__(self, argv):
+        self.argv = argv
+
+    def main(self):
+        sub_command_help = dedent("""
+        Prepare a device
+        """)
+        parser = argparse.ArgumentParser(
+            prog='ceph-volume zfs prepare',
+            description=sub_command_help,
+        )
+        if len(self.argv) == 0 or len(self.argv) > 0:
+            print("Prepare: Print Help")
+            print(sub_command_help)
+            return
+
diff --git a/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/zap.py b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/zap.py
new file mode 100644
index 00000000000..f5177d5f2e2
--- /dev/null
+++ b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/devices/zfs/zap.py
@@ -0,0 +1,34 @@
+import argparse
+
+from textwrap import dedent
+# from ceph_volume.util import arg_validators
+
+class Zap(object):
+
+    help = 'Zap a device'
+
+    def __init__(self, argv):
+        self.argv = argv
+
+    def main(self):
+        sub_command_help = dedent("""
+        Zap a device
+        """)
+        parser = argparse.ArgumentParser(
+            prog='ceph-volume zfs zap',
+            description=sub_command_help,
+        )
+        parser.add_argument(
+            'devices',
+            metavar='DEVICES',
+            nargs='*',
+            # type=arg_validators.ValidDevice(gpt_ok=True),
+            default=[],
+            help='Path to one or many lv (as vg/lv), partition (as /dev/sda1) or device (as /dev/sda)'
+        )
+
+        if len(self.argv) == 0 or len(self.argv) > 0:
+            print("Zap: Print Help")
+            print(sub_command_help)
+            return
+
diff --git a/src/ceph-volume/plugin/zfs/ceph_volume_zfs/main.py b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/main.py
deleted file mode 100755
index b4bcd48ece8..00000000000
--- a/src/ceph-volume/plugin/zfs/ceph_volume_zfs/main.py
+++ /dev/null
@@ -1,139 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from __future__ import print_function
-import argparse
-import os
-import sys
-import logging
-
-import main
-from ceph_volume import log, conf, configuration
-from ceph_volume import exceptions
-from ceph_volume import terminal
-
-if __name__ == '__main__':
-    main.ZFSVOL()
-
-
-class ZFSVOL(object):
-
-    help_menu = 'Deploy OSDs with ZFS'
-    _help = """
-Use ZFS as the underlying technology for OSDs
-
---verbose Increase the verbosity level
-    """
-    name = 'zfs'
-
-    def __init__(self, argv=None, parse=True):
-        self.mapper = {
-        }
-        if argv is None:
-            self.argv = sys.argv
-        else:
-            self.argv = argv
-        if parse:
-            self.main(self.argv)
-
-    def help(self, warning=False):
-        if warning:
-            warning = 'See "ceph-volume zfs --help" for full list of options.'
-        else:
-            warning = ''
-        return self._help.format(
-            warning=warning,
-            log_path=conf.log_path,
-            ceph_path=self.stat_ceph_conf(),
-            sub_help=terminal.subhelp(self.mapper),
-            environ_vars=self.get_environ_vars()
-        )
-
-    def get_environ_vars(self):
-        environ_vars = []
-        for key, value in os.environ.items():
-            if key.startswith('CEPH_'):
-                environ_vars.append("%s=%s" % (key, value))
-        if not environ_vars:
-            return ''
-        else:
-            environ_vars.insert(0, '\nEnviron Variables:')
-            return '\n'.join(environ_vars)
-
-    def load_ceph_conf_path(self, cluster_name='ceph'):
-        abspath = '/etc/ceph/%s.conf' % cluster_name
-        conf.path = os.getenv('CEPH_CONF', abspath)
-        conf.cluster = cluster_name
-
-    def stat_ceph_conf(self):
-        try:
-            configuration.load(conf.path)
-            return terminal.green(conf.path)
-        except exceptions.ConfigurationError as error:
-            return terminal.red(error)
-
-    def load_log_path(self):
-        conf.log_path = os.getenv('CEPH_VOLUME_LOG_PATH', '/var/log/ceph')
-
-    def _get_split_args(self):
-        subcommands = self.mapper.keys()
-        slice_on_index = len(self.argv) + 1
-        pruned_args = self.argv[1:]
-        for count, arg in enumerate(pruned_args):
-            if arg in subcommands:
-                slice_on_index = count
-                break
-        return pruned_args[:slice_on_index], pruned_args[slice_on_index:]
-
-    def main(self, argv):
-        self.load_ceph_conf_path()
-        # these need to be available for the help, which gets parsed super
-        # early
-        self.load_ceph_conf_path()
-        self.load_log_path()
-        main_args, subcommand_args = self._get_split_args()
-        # no flags where passed in, return the help menu instead of waiting for
-        # argparse which will end up complaning that there are no args
-        if len(argv) <= 1:
-            print(self.help(warning=True))
-            return
-        parser = argparse.ArgumentParser(
-            prog='ceph-volume-zfs',
-            formatter_class=argparse.RawDescriptionHelpFormatter,
-            description=self.help(),
-        )
-        parser.add_argument(
-            '--cluster',
-            default='ceph',
-            help='Cluster name (defaults to "ceph")',
-        )
-        parser.add_argument(
-            '--log-level',
-            default='debug',
-            help='Change the file log level (defaults to debug)',
-        )
-        parser.add_argument(
-            '--log-path',
-            default='/var/log/ceph/',
-            help='Change the log path (defaults to /var/log/ceph)',
-        )
-        args = parser.parse_args(main_args)
-        conf.log_path = args.log_path
-        if os.path.isdir(conf.log_path):
-            conf.log_path = os.path.join(args.log_path, 'ceph-volume-zfs.log')
-        log.setup()
-        logger = logging.getLogger(__name__)
-        logger.info("Running command: ceph-volume-zfs %s %s",
-                    " ".join(main_args), " ".join(subcommand_args))
-        # set all variables from args and load everything needed according to
-        # them
-        self.load_ceph_conf_path(cluster_name=args.cluster)
-        try:
-            conf.ceph = configuration.load(conf.path)
-        except exceptions.ConfigurationError as error:
-            # we warn only here, because it is possible that the configuration
-            # file is not needed, or that it will be loaded by some other means
-            # (like reading from zfs tags)
-            logger.exception('ignoring inability to load ceph.conf')
-            terminal.red(error)
-        # dispatch to sub-commands
-        terminal.dispatch(self.mapper, subcommand_args)
diff --git a/src/ceph-volume/plugin/zfs/ceph_volume_zfs/util/disk.py b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/util/disk.py
new file mode 100644
index 00000000000..b666aa7d543
--- /dev/null
+++ b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/util/disk.py
@@ -0,0 +1,148 @@
+import re
+
+from ceph_volume.util.disk import human_readable_size
+from ceph_volume import process
+from ceph_volume import sys_info
+
+report_template = """
+/dev/{geomname:<16} {mediasize:<16} {rotational!s:<7} {descr}"""
+# {geomname:<25} {mediasize:<12} {rotational!s:<7} {mode!s:<9} {descr}"""
+
+def geom_disk_parser(block):
+    """
+    Parses lines in `geom disk list` output.
+
+    Geom name: ada3
+    Providers:
+    1. Name: ada3
+       Mediasize: 40018599936 (37G)
+       Sectorsize: 512
+       Stripesize: 4096
+       Stripeoffset: 0
+       Mode: r2w2e4
+       descr: Corsair CSSD-F40GB2
+       lunid: 5000000000000236
+       ident: 111465010000101800EC
+       rotationrate: 0
+       fwsectors: 63
+       fwheads: 16
+
+    :param block: A string with the full block from `geom disk list`
+    """
+    pairs = block.split(';')
+    parsed = {}
+    for pair in pairs:
+        if 'Providers' in pair:
+            continue
+        try:
+            column, value = pair.split(':')
+        except ValueError:
+            continue
+        # fixup
+        column = re.sub(r"\s+", "", column)
+        column = re.sub(r"^[0-9]+\.", "", column)
+        value = value.strip()
+        value = re.sub(r'\([0-9A-Z]+\)', '', value)
+        parsed[column.lower()] = value
+    return parsed
+
+def get_disk(diskname):
+    """
+    Captures all available info from geom
+    along with interesting metadata like sectors, size, vendor,
+    solid/rotational, etc...
+
+    Returns a dictionary, with all the geom fields as keys.
+    """
+
+    command = ['/sbin/geom', 'disk', 'list', re.sub('/dev/', '', diskname)]
+    out, err, rc = process.call(command)
+    geom_block = ""
+    for line in out:
+        line = line.strip()
+        geom_block += ";" + line
+    disk = geom_disk_parser(geom_block)
+    return disk
+
+def get_disks():
+    command = ['/sbin/geom', 'disk', 'status', '-s']
+    out, err, rc = process.call(command)
+    disks = {}
+    for path in out:
+        dsk, rest1, rest2 = path.split()
+        disk = get_disk(dsk)
+        disks['/dev/' + dsk] = disk
+    return disks
+
+class Disks(object):
+
+    def __init__(self, path=None):
+        if not sys_info.devices:
+            sys_info.devices = get_disks()
+        self.disks = {}
+        for k in sys_info.devices:
+            if path is not None:
+                if path in k:
+                    self.disks[k] = Disk(k)
+            else:
+                self.disks[k] = Disk(k)
+
+    def pretty_report(self, all=True):
+        output = [
+            report_template.format(
+                geomname='Device Path',
+                mediasize='Size',
+                rotational='rotates',
+                descr='Model name',
+                mode='available',
+            )]
+        for disk in sorted(self.disks):
+            output.append(self.disks[disk].report())
+        return ''.join(output)
+
+    def json_report(self):
+        output = []
+        for disk in sorted(self.disks):
+            output.append(self.disks[disk].json_report())
+        return output
+
+
+class Disk(object):
+
+    report_fields = [
+        'rejected_reasons',
+        'available',
+        'path',
+        'sys_api',
+    ]
+    pretty_report_sys_fields = [
+        'human_readable_size',
+        'model',
+        'removable',
+        'ro',
+        'rotational',
+        'sas_address',
+        'scheduler_mode',
+        'vendor',
+    ]
+
+    def __init__(self, path):
+        self.abspath = path
+        self.path = path
+        self.reject_reasons = []
+        self.available = True
+        self.sys_api = sys_info.devices.get(path)
+
+    def report(self):
+        return report_template.format(
+            geomname=self.sys_api.get('geomname'),
+            mediasize=human_readable_size(int(self.sys_api.get('mediasize'))),
+            rotational=int(self.sys_api.get('rotationrate')) != 0,
+            mode=self.sys_api.get('mode'),
+            descr=self.sys_api.get('descr')
+        )
+
+    def json_report(self):
+        output = {k.strip('_'): v for k, v in vars(self).items()}
+        return output
+
diff --git a/src/ceph-volume/plugin/zfs/ceph_volume_zfs/zfs.py b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/zfs.py
new file mode 100755
index 00000000000..e9911c75ed2
--- /dev/null
+++ b/src/ceph-volume/plugin/zfs/ceph_volume_zfs/zfs.py
@@ -0,0 +1,152 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import print_function
+import argparse
+import os
+import sys
+import logging
+
+from textwrap import dedent
+from ceph_volume import log, conf, configuration
+from ceph_volume import exceptions
+from ceph_volume import terminal
+
+# The ceph-volume-zfs specific code
+import ceph_volume_zfs.zfs
+from ceph_volume_zfs import devices
+# from ceph_volume_zfs.util import device
+from ceph_volume_zfs.devices import zfs
+
+# the supported actions
+from ceph_volume_zfs.devices.zfs import inventory
+from ceph_volume_zfs.devices.zfs import prepare
+from ceph_volume_zfs.devices.zfs import zap
+
+
+if __name__ == '__main__':
+    zfs.ZFS()
+
+
+class ZFS(object):
+
+    # help info for subcommands
+    help = "Use ZFS as the underlying technology for OSDs"
+
+    # help info for the plugin
+    help_menu = "Deploy OSDs with ZFS"
+    _help = dedent("""
+    Use ZFS as the underlying technology for OSDs
+
+    {sub_zfshelp}
+    """)
+    name = 'zfs'
+
+    def __init__(self, argv=None, parse=True):
+        self.zfs_mapper = {
+            'inventory': inventory.Inventory,
+            'prepare': prepare.Prepare,
+            'zap': zap.Zap,
+        }
+        if argv is None:
+            self.argv = sys.argv
+        else:
+            self.argv = argv
+        if parse:
+            self.main(self.argv)
+
+    def print_help(self, warning=False):
+        return self._help.format(
+            sub_zfshelp=terminal.subhelp(self.zfs_mapper)
+        )
+
+    def get_environ_vars(self):
+        environ_vars = []
+        for key, value in os.environ.items():
+            if key.startswith('CEPH_'):
+                environ_vars.append("%s=%s" % (key, value))
+        if not environ_vars:
+            return ''
+        else:
+            environ_vars.insert(0, '\nEnviron Variables:')
+            return '\n'.join(environ_vars)
+
+    def load_ceph_conf_path(self, cluster_name='ceph'):
+        abspath = '/etc/ceph/%s.conf' % cluster_name
+        conf.path = os.getenv('CEPH_CONF', abspath)
+        conf.cluster = cluster_name
+
+    def stat_ceph_conf(self):
+        try:
+            configuration.load(conf.path)
+            return terminal.green(conf.path)
+        except exceptions.ConfigurationError as error:
+            return terminal.red(error)
+
+    def load_log_path(self):
+        conf.log_path = os.getenv('CEPH_VOLUME_LOG_PATH', '/var/log/ceph')
+
+    def _get_split_args(self):
+        subcommands = self.zfs_mapper.keys()
+        slice_on_index = len(self.argv)
+        pruned_args = self.argv
+        for count, arg in enumerate(pruned_args):
+            if arg in subcommands:
+                slice_on_index = count
+                break
+        return pruned_args[:slice_on_index], pruned_args[slice_on_index:]
+
+    def main(self, argv=None):
+        if argv is None:
+            return
+        self.load_ceph_conf_path()
+        # these need to be available for the help, which gets parsed super
+        # early
+        self.load_ceph_conf_path()
+        self.load_log_path()
+        main_args, subcommand_args = self._get_split_args()
+        # no flags were passed in, return the help menu instead of waiting for
+        # argparse which will end up complaining that there are no args
+        if len(argv) < 1:
+            print(self.print_help(warning=True))
+            return
+        parser = argparse.ArgumentParser(
+            prog='ceph-volume-zfs',
+            formatter_class=argparse.RawDescriptionHelpFormatter,
+            description=self.print_help(),
+        )
+        parser.add_argument(
+            '--cluster',
+            default='ceph',
+            help='Cluster name (defaults to "ceph")',
+        )
+        parser.add_argument(
+            '--log-level',
+            default='debug',
+            help='Change the file log level (defaults to debug)',
+        )
+        parser.add_argument(
+            '--log-path',
+            default='/var/log/ceph/',
+            help='Change the log path (defaults to /var/log/ceph)',
+        )
+        args = parser.parse_args(main_args)
+        conf.log_path = args.log_path
+        if os.path.isdir(conf.log_path):
+            conf.log_path = os.path.join(args.log_path, 'ceph-volume-zfs.log')
+        log.setup()
+        logger = logging.getLogger(__name__)
+        logger.info("Running command: ceph-volume-zfs %s %s",
+                    " ".join(main_args), " ".join(subcommand_args))
+        # set all variables from args and load everything needed according to
+        # them
+        self.load_ceph_conf_path(cluster_name=args.cluster)
+        try:
+            conf.ceph = configuration.load(conf.path)
+        except exceptions.ConfigurationError as error:
+            # we warn only here, because it is possible that the configuration
+            # file is not needed, or that it will be loaded by some other means
+            # (like reading from zfs tags)
+            logger.exception('ignoring inability to load ceph.conf')
+            terminal.red(error)
+        # dispatch to sub-commands
+        terminal.dispatch(self.zfs_mapper, subcommand_args)
diff --git a/src/ceph-volume/plugin/zfs/setup.py b/src/ceph-volume/plugin/zfs/setup.py
index fe2e3e27a5f..31f6998f9f4 100644
--- a/src/ceph-volume/plugin/zfs/setup.py
+++ b/src/ceph-volume/plugin/zfs/setup.py
@@ -23,8 +23,6 @@ setup(
         "Programming Language :: Python :: 2",
         'Programming Language :: Python :: 2.7',
         'Programming Language :: Python :: 3',
-        'Programming Language :: Python :: 3.4',
-        'Programming Language :: Python :: 3.5',
         'Programming Language :: Python :: 3.6',
     ],
     description="Manage Ceph OSDs on ZFS pool/volume/filesystem",
@@ -34,14 +32,13 @@ setup(
     keywords='ceph-volume-zfs',
     name='ceph-volume-zfs',
     packages=find_packages(include=['ceph_volume_zfs']),
-    scripts=['bin/ceph-volume-zfs'],
     setup_requires=setup_requirements,
     url='https://github.com/ceph/ceph/src/ceph-volume/plugin/zfs',
     version='0.1.0',
     zip_safe=False,
     entry_points = dict(
         ceph_volume_handlers = [
-            'zfs = ceph_volume_zfs.main:ZFSVOL',
+            'zfs = ceph_volume_zfs.zfs:ZFS',
         ],
     ),
 )
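
For scripting on top of the new command, the JSON output is the practical
interface. Below is a minimal sketch, assuming `ceph-volume zfs inventory` is
installed and on the PATH (and run with enough privilege to query geom); the
helper name and the solid-state filter are only illustrative, not part of the
patch.

```python
import json
import subprocess


def available_ssd_paths():
    # Ask the plugin for a machine-readable inventory: --format json prints
    # one JSON list with an entry per disk that geom reported.
    out = subprocess.check_output(
        ['ceph-volume', 'zfs', 'inventory', '--format', 'json'])
    report = json.loads(out.decode('utf-8'))
    paths = []
    for entry in report:
        sys_api = entry.get('sys_api') or {}
        # geom reports rotationrate as a string; "0" means solid state.
        if entry.get('available') and sys_api.get('rotationrate') == '0':
            paths.append(entry['path'])
    return paths


if __name__ == '__main__':
    print(available_ssd_paths())
```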
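
The inventory itself is driven by running `/sbin/geom disk list` and
normalising its `key: value` lines, as in `geom_disk_parser()` above. The
following self-contained sketch mirrors that parsing step against canned geom
output, so it runs anywhere; the sample text is taken from the docstring in
util/disk.py and `parse_geom_block` is an illustrative stand-in, not the
patched function.

```python
import re

SAMPLE = """\
Geom name: ada3
Providers:
1. Name: ada3
   Mediasize: 40018599936 (37G)
   Sectorsize: 512
   Mode: r2w2e4
   descr: Corsair CSSD-F40GB2
   rotationrate: 0
"""


def parse_geom_block(text):
    # Mirror of geom_disk_parser(): join the lines with ';', then normalise
    # each 'key: value' pair into a lower-case key.
    parsed = {}
    for pair in ';'.join(text.splitlines()).split(';'):
        if 'Providers' in pair:
            continue
        try:
            column, value = pair.split(':')
        except ValueError:
            continue
        column = re.sub(r"\s+", "", column)        # drop embedded spaces
        column = re.sub(r"^[0-9]+\.", "", column)  # drop the "1." prefix
        value = re.sub(r'\([0-9A-Z]+\)', '', value.strip())  # drop "(37G)"
        parsed[column.lower()] = value
    return parsed


if __name__ == '__main__':
    disk = parse_geom_block(SAMPLE)
    # e.g. {'geomname': 'ada3', 'mediasize': '40018599936 ', ...}
    print(disk['geomname'], int(disk['mediasize']), disk['rotationrate'])
```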
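
Finally, zfs.py locates the first recognised subcommand in the argument list,
treats everything before it as global flags, and hands the rest to the mapped
class. A toy sketch of that split-and-dispatch pattern, with plain functions
standing in for the subcommand classes (all names here are illustrative, and
the comments only approximate what `terminal.dispatch()` does):

```python
import sys


def inventory(args):
    print('inventory called with', args)


def zap(args):
    print('zap called with', args)


MAPPER = {'inventory': inventory, 'zap': zap}


def split_args(argv):
    # Mirrors ZFS._get_split_args(): cut the argument list at the first
    # token that names a known subcommand.
    idx = len(argv)
    for count, arg in enumerate(argv):
        if arg in MAPPER:
            idx = count
            break
    return argv[:idx], argv[idx:]


def dispatch(argv):
    main_args, sub_args = split_args(argv)
    if not sub_args:
        print('no subcommand; global args were', main_args)
        return
    # The subcommand name selects the handler, which receives the remaining
    # arguments (roughly what terminal.dispatch() does with the mapper).
    MAPPER[sub_args[0]](sub_args[1:])


if __name__ == '__main__':
    # e.g. python dispatch_sketch.py --log-level debug inventory ada3
    dispatch(sys.argv[1:])
```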