import fnmatch
try:
- from typing import Optional, List, Dict
+ from typing import Optional, List, Dict, Any
except ImportError:
pass
import six
specification structure.
"""
- def __init__(self, paths=None, id_model=None, size=None, rotates=None, count=None):
- # type: (List[str], str, str, bool, int) -> None
+ _supported_filters = [
+ "paths", "size", "vendor", "model", "rotational", "limit", "all"
+ ]
+
+ def __init__(self,
+ paths=None, # type: Optional[List[str]]
+ model=None, # type: Optional[str]
+ size=None, # type: Optional[str]
+ rotational=None, # type: Optional[bool]
+ limit=None, # type: Optional[int]
+ vendor=None, # type: Optional[str]
+ all=False, # type: bool
+ ):
"""
ephemeral drive group device specification
"""
- if paths is None:
- paths = []
-
#: List of absolute paths to the devices.
- self.paths = paths # type: List[str]
+ self.paths = [] if paths is None else paths # type: List[str]
+
+ #: A wildcard string. e.g: "SDD*" or "SanDisk SD8SN8U5"
+ self.model = model
- #: A wildcard string. e.g: "SDD*"
- self.id_model = id_model
+ #: Match on the VENDOR property of the drive
+ self.vendor = vendor
#: Size specification of format LOW:HIGH.
#: Can also take the the form :HIGH, LOW:
self.size = size
#: is the drive rotating or not
- self.rotates = rotates
+ self.rotational = rotational
+
+ #: Limit the number of devices added to this Drive Group. Devices
+ #: are used from top to bottom in the output of ``ceph-volume inventory``
+ self.limit = limit
+
+ #: Matches all devices. Can only be used for data devices
+ self.all = all
- #: if this is present limit the number of drives to this number.
- self.count = count
self.validate()
def validate(self):
- props = [self.id_model, self.size, self.rotates, self.count]
+ # type: () -> None
+ props = [self.model, self.vendor, self.size, self.rotational] # type: List[Any]
if self.paths and any(p is not None for p in props):
raise DriveGroupValidationError(
'DeviceSelection: `paths` and other parameters are mutually exclusive')
- if not any(p is not None for p in [self.paths] + props):
+ is_empty = not any(p is not None and p != [] for p in [self.paths] + props)
+ if not self.all and is_empty:
raise DriveGroupValidationError('DeviceSelection cannot be empty')
+ if self.all and not is_empty:
+ raise DriveGroupValidationError(
+ 'DeviceSelection `all` and other parameters are mutually exclusive. {}'.format(
+ repr(self)))
+
@classmethod
def from_json(cls, device_spec):
+ # type: (dict) -> DeviceSelection
+ for applied_filter in list(device_spec.keys()):
+ if applied_filter not in cls._supported_filters:
+ raise DriveGroupValidationError(
+ "Filtering for <{}> is not supported".format(applied_filter))
+
return cls(**device_spec)
+ def __repr__(self):
+ keys = [
+ key for key in self._supported_filters + ['limit'] if getattr(self, key) is not None
+ ]
+ if 'paths' in keys and self.paths == []:
+ keys.remove('paths')
+ return "DeviceSelection({})".format(
+ ', '.join('{}={}'.format(key, repr(getattr(self, key))) for key in keys)
+ )
+
+ def __eq__(self, other):
+ return repr(self) == repr(other)
+
class DriveGroupValidationError(Exception):
    """Raised when a Drive Group specification fails validation.

    NOTE: defining the exception in this module is somewhat problematic,
    because it cannot be caught properly when raised from a different
    mgr module.
    """

    def __init__(self, msg):
        prefixed = 'Failed to validate Drive Group: ' + msg
        super(DriveGroupValidationError, self).__init__(prefixed)
understands.
"""
+ _supported_features = [
+ "encrypted", "block_wal_size", "osds_per_device",
+ "db_slots", "wal_slots", "block_db_size", "host_pattern",
+ "data_devices", "db_devices", "wal_devices", "journal_devices",
+ "data_directories", "osds_per_device", "objectstore", "osd_id_claims",
+ "journal_size"
+ ]
+
def __init__(self,
host_pattern, # type: str
data_devices=None, # type: Optional[DeviceSelection]
db_slots=None, # type: Optional[int]
wal_slots=None, # type: Optional[int]
osd_id_claims=None, # type: Optional[Dict[str, DeviceSelection]]
+ block_db_size=None, # type: Optional[int]
+ block_wal_size=None, # type: Optional[int]
+ journal_size=None, # type: Optional[int]
):
# concept of applying a drive group to a (set) of hosts is tightly
#: An fnmatch pattern to select hosts. Can also be a single host.
self.host_pattern = host_pattern
- #: A :class:`orchestrator.DeviceSelection`
+ #: A :class:`ceph.deployment.drive_group.DeviceSelection`
self.data_devices = data_devices
- #: A :class:`orchestrator.DeviceSelection`
+ #: A :class:`ceph.deployment.drive_group.DeviceSelection`
self.db_devices = db_devices
- #: A :class:`orchestrator.DeviceSelection`
+ #: A :class:`ceph.deployment.drive_group.DeviceSelection`
self.wal_devices = wal_devices
- #: A :class:`orchestrator.DeviceSelection`
+ #: A :class:`ceph.deployment.drive_group.DeviceSelection`
self.journal_devices = journal_devices
+ #: Set (or override) the "bluestore_block_wal_size" value, in bytes
+ self.block_wal_size = block_wal_size
+
+ #: Set (or override) the "bluestore_block_db_size" value, in bytes
+ self.block_db_size = block_db_size
+
+ #: set journal_size is bytes
+ self.journal_size = journal_size
+
#: Number of osd daemons per "DATA" device.
#: To fully utilize nvme devices multiple osds are required.
self.osds_per_device = osds_per_device
self.osd_id_claims = osd_id_claims
@classmethod
- def from_json(self, json_drive_group):
+ def from_json(cls, json_drive_group):
+ # type: (dict) -> DriveGroupSpec
"""
Initialize 'Drive group' structure
:param json_drive_group: A valid json string with a Drive Group
specification
"""
- args = {k: (DeviceSelection.from_json(v) if k.endswith('_devices') else v) for k, v in
- json_drive_group.items()}
- return DriveGroupSpec(**args)
+ for applied_filter in list(json_drive_group.keys()):
+ if applied_filter not in cls._supported_features:
+ raise DriveGroupValidationError(
+ "Feature <{}> is not supported".format(applied_filter))
+
+ for key in ('block_wal_size', 'block_db_size', 'journal_size'):
+ if key in json_drive_group:
+ if isinstance(json_drive_group[key], six.string_types):
+ from ceph.deployment.drive_selection import SizeMatcher
+ json_drive_group[key] = SizeMatcher.str_to_byte(json_drive_group[key])
+
+ try:
+ args = {k: (DeviceSelection.from_json(v) if k.endswith('_devices') else v) for k, v in
+ json_drive_group.items()}
+ return DriveGroupSpec(**args)
+ except (KeyError, TypeError) as e:
+ raise DriveGroupValidationError(str(e))
def hosts(self, all_hosts):
+ # type: (List[str]) -> List[str]
return fnmatch.filter(all_hosts, self.host_pattern)
def validate(self, all_hosts):
+ # type: (List[str]) -> None
if not isinstance(self.host_pattern, six.string_types):
raise DriveGroupValidationError('host_pattern must be of type string')
specs = [self.data_devices, self.db_devices, self.wal_devices, self.journal_devices]
for s in filter(None, specs):
s.validate()
+ for s in filter(None, [self.db_devices, self.wal_devices, self.journal_devices]):
+ if s.all:
+ raise DriveGroupValidationError("`all` is only allowed for data_devices")
+
if self.objectstore not in ('filestore', 'bluestore'):
raise DriveGroupValidationError("objectstore not in ('filestore', 'bluestore')")
if not self.hosts(all_hosts):
raise DriveGroupValidationError(
"host_pattern '{}' does not match any hosts".format(self.host_pattern))
+
+ if self.block_wal_size is not None and type(self.block_wal_size) != int:
+ raise DriveGroupValidationError('block_wal_size must be of type int')
+ if self.block_db_size is not None and type(self.block_db_size) != int:
+ raise DriveGroupValidationError('block_db_size must be of type int')
+
+ def __repr__(self):
+ keys = [
+ key for key in self._supported_features if getattr(self, key) is not None
+ ]
+ if 'encrypted' in keys and not self.encrypted:
+ keys.remove('encrypted')
+ if 'objectstore' in keys and self.objectstore == 'bluestore':
+ keys.remove('objectstore')
+ return "DriveGroupSpec({})".format(
+ ', '.join('{}={}'.format(key, repr(getattr(self, key))) for key in keys)
+ )
+
+ def __eq__(self, other):
+ return repr(self) == repr(other)
--- /dev/null
+from .selector import DriveSelection # NOQA
+from .matchers import Matcher, SubstringMatcher, EqualityMatcher, AllMatcher, SizeMatcher # NOQA
+from .filter import Filter # NOQA
--- /dev/null
+# default:
+# target: 'data*'
+# data_devices:
+# size: 20G
+# db_devices:
+# size: 10G
+# rotational: 1
+# allflash:
+# target: 'fast_nodes*'
+# data_devices:
+# size: 100G
+# db_devices:
+# size: 50G
+# rotational: 0
+
+# This is the default configuration and
+# will create an OSD on all available drives
+default:
+ target: 'fnmatch_target'
+ data_devices:
+ all: true
--- /dev/null
+# -*- coding: utf-8 -*-
+
+import logging
+
+from ceph.deployment.drive_group import DeviceSelection
+
+try:
+ from typing import Optional, Generator
+except ImportError:
+ pass
+
+from .matchers import Matcher, SubstringMatcher, AllMatcher, SizeMatcher, EqualityMatcher
+
+logger = logging.getLogger(__name__)
+
+
class Filter(object):
    """ Filter class to assign properties to bare filters.

    This is a utility class that tries to simplify working
    with information coming from a textfile (drive_group.yaml)
    """

    def __init__(self,
                 name,  # type: str
                 matcher,  # type: Optional[Matcher]
                 ):
        """
        :param name: The filter's name (e.g. 'size', 'model', 'limit')
        :param matcher: A Matcher instance, or None for filters that
                        cannot be applied to an individual disk
        """
        self.name = str(name)
        self.matcher = matcher
        logger.debug("Initializing {} filter <{}>".format(
            self.matcher.__class__.__name__, self.name))

    @property
    def is_matchable(self):
        # type: () -> bool
        """ A property to indicate if a Filter has a matcher

        Some filters, i.e. 'limit' or 'osds_per_device', are valid filter
        attributes but cannot be applied to a disk set; those carry no
        matcher.

        :return: True if a matcher is present, False otherwise
        :rtype: bool
        """
        return self.matcher is not None

    def __repr__(self):
        """ Visual representation of the filter
        """
        return 'Filter<{}>'.format(self.name)
+
+
class FilterGenerator(object):
    """Translate a DeviceSelection into the corresponding Filter objects."""

    def __init__(self, device_filter):
        # type: (DeviceSelection) -> None
        self.device_filter = device_filter

    def __iter__(self):
        # type: () -> Generator[Filter, None, None]
        selection = self.device_filter
        if selection.size:
            yield Filter('size', SizeMatcher('size', selection.size))
        if selection.model:
            yield Filter('model', SubstringMatcher('model', selection.model))
        if selection.vendor:
            yield Filter('vendor', SubstringMatcher('vendor', selection.vendor))
        if selection.rotational is not None:
            # ceph-volume reports 'rotational' as the strings '1'/'0'
            yield Filter('rotational',
                         EqualityMatcher('rotational',
                                         '1' if selection.rotational else '0'))
        if selection.all:
            yield Filter('all', AllMatcher('all', str(selection.all)))
--- /dev/null
+# -*- coding: utf-8 -*-
+
+try:
+ from typing import Tuple, Optional, Any
+except ImportError:
+ pass
+
+from ceph.deployment.inventory import Device
+
+import re
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+# pylint: disable=too-few-public-methods
class Matcher(object):
    """ The base class of all Matchers

    Holds the shared initialization and the _get_disk_key utility used
    to pull a named value out of a disk report.
    """

    def __init__(self, key, value):
        # type: (str, Any) -> None
        """ Initialization of the base class

        :param str key: Attribute like 'model, size or vendor'
        :param str value: Value of attribute like 'X123, 5G or samsung'
        """
        self.key = key
        self.value = value
        self.fallback_key = ''  # type: Optional[str]

    def _get_disk_key(self, device):
        # type: (Device) -> Any
        """ Safely extract a value from the disk report

        There is a 'key' and an _optional_ 'fallback' key that can be used.
        The reason for this is that the output of ceph-volume is not always
        consistent (due to a bug currently, but you never know).
        There is also a safety measure for a disk_key not existing on
        virtual environments. ceph-volume apparently sources its information
        from udev which seems to not populate certain fields on VMs.

        :raises: A generic Exception when no disk_key could be found.
        :return: A disk value
        :rtype: str
        """
        # the report is nested, so search it recursively as a plain dict
        report = device.to_json()

        def _collect(node, wanted):
            """ Depth-first collection of every value stored under *wanted* """
            found = []
            if isinstance(node, list):
                for entry in node:
                    found.extend(_collect(entry, wanted))
            elif isinstance(node, dict):
                if wanted in node:
                    found.append(node[wanted])
                for child in node.values():
                    found.extend(_collect(child, wanted))
            return found

        matches = _collect(report, self.key)
        if not matches and self.fallback_key:
            matches = _collect(report, self.fallback_key)

        if not matches:
            raise Exception("No value found for {} or {}".format(
                self.key, self.fallback_key))
        # first occurrence wins, mirroring the report's own ordering
        return matches[0]

    def compare(self, disk):
        # type: (Device) -> bool
        """ Implements a valid comparison method for a SubMatcher
        This will get overwritten by the individual classes

        :param dict disk: A disk representation
        """
        raise NotImplementedError
+
+
+# pylint: disable=too-few-public-methods
# pylint: disable=too-few-public-methods
class SubstringMatcher(Matcher):
    """ Match when the filter value occurs as a substring
    """

    def __init__(self, key, value, fallback_key=None):
        # type: (str, str, Optional[str]) -> None
        Matcher.__init__(self, key, value)
        self.fallback_key = fallback_key

    def compare(self, disk):
        # type: (Device) -> bool
        """ Substring comparison against the disk's reported value

        :param dict disk: A disk representation (see base for examples)
        :return: True/False if the match succeeded
        :rtype: bool
        """
        if not disk:
            return False
        reported = self._get_disk_key(disk)
        return str(self.value) in reported
+
+
+# pylint: disable=too-few-public-methods
# pylint: disable=too-few-public-methods
class AllMatcher(Matcher):
    """ Accept-everything matcher
    """

    def __init__(self, key, value, fallback_key=None):
        # type: (str, Any, Optional[str]) -> None
        Matcher.__init__(self, key, value)
        self.fallback_key = fallback_key

    def compare(self, disk):
        # type: (Device) -> bool
        """ Accept any disk regardless of the configured value

        :param dict disk: A disk representation (see base for examples)
        :return: True for every non-empty disk
        :rtype: bool
        """
        return bool(disk)
+
+
+# pylint: disable=too-few-public-methods
# pylint: disable=too-few-public-methods
class EqualityMatcher(Matcher):
    """ Match on exact value equality
    """

    def __init__(self, key, value):
        # type: (str, Any) -> None
        Matcher.__init__(self, key, value)

    def compare(self, disk):
        # type: (Device) -> bool
        """ Equality comparison against the disk's reported value

        :param dict disk: A disk representation
        :return: True/False if the match succeeded
        :rtype: bool
        """
        if not disk:
            return False
        reported = self._get_disk_key(disk)
        matched = reported == self.value
        if not matched:
            logger.debug('{} != {}'.format(reported, self.value))
        return matched
+
+
class SizeMatcher(Matcher):
    """ Size matcher subclass

    Parses a user supplied size filter ('10G', '10G:50G', ':50G', '50G:')
    once at construction time and compares disks against it.
    """

    #: Parallel columns: long suffixes, short suffixes, byte factors.
    SUFFIXES = (
        ["MB", "GB", "TB"],
        ["M", "G", "T"],
        [1e+6, 1e+9, 1e+12]
    )

    supported_suffixes = SUFFIXES[0] + SUFFIXES[1]

    # pylint: disable=too-many-instance-attributes
    def __init__(self, key, value):
        # type: (str, str) -> None

        # The 'key' value is overwritten here because
        # the user-defined attribute does not necessarily
        # correspond to the desired attribute
        # requested from the inventory output
        Matcher.__init__(self, key, value)
        self.key = "human_readable_size"
        self.fallback_key = "size"
        self._high = None  # type: Optional[str]
        self._high_suffix = None  # type: Optional[str]
        self._low = None  # type: Optional[str]
        self._low_suffix = None  # type: Optional[str]
        self._exact = None  # type: Optional[str]
        self._exact_suffix = None  # type: Optional[str]
        self._parse_filter()

    @property
    def low(self):
        # type: () -> Tuple[Optional[str], Optional[str]]
        """ Getter for 'low' matchers
        """
        return self._low, self._low_suffix

    @low.setter
    def low(self, low):
        # type: (Tuple[str, str]) -> None
        """ Setter for 'low' matchers
        """
        self._low, self._low_suffix = low

    @property
    def high(self):
        # type: () -> Tuple[Optional[str], Optional[str]]
        """ Getter for 'high' matchers
        """
        return self._high, self._high_suffix

    @high.setter
    def high(self, high):
        # type: (Tuple[str, str]) -> None
        """ Setter for 'high' matchers
        """
        self._high, self._high_suffix = high

    @property
    def exact(self):
        # type: () -> Tuple[Optional[str], Optional[str]]
        """ Getter for 'exact' matchers
        """
        return self._exact, self._exact_suffix

    @exact.setter
    def exact(self, exact):
        # type: (Tuple[str, str]) -> None
        """ Setter for 'exact' matchers
        """
        self._exact, self._exact_suffix = exact

    @classmethod
    def _normalize_suffix(cls, suffix):
        # type: (str) -> str
        """ Normalize any supported suffix

        Since the Drive Groups are user facing, we simply
        can't make sure that all users type in the requested
        form. That's why we have to internally agree on one format.
        It also checks if any of the supported suffixes was used
        and raises an Exception otherwise.

        :param str suffix: A suffix ('G') or ('M')
        :return: A normalized output
        :rtype: str
        """
        suffix = suffix.upper()
        if suffix not in cls.supported_suffixes:
            raise ValueError("Unit '{}' not supported".format(suffix))
        # map short form ('G') to long form ('GB'); long forms pass through
        return dict(zip(
            cls.SUFFIXES[1],
            cls.SUFFIXES[0],
        )).get(suffix, suffix)

    @classmethod
    def _parse_suffix(cls, obj):
        # type: (str) -> str
        """ Wrapper method to find and normalize a suffix

        :param str obj: A size filtering string ('10G')
        :return: A normalized unit ('GB')
        :rtype: str
        """
        return cls._normalize_suffix(re.findall(r"[a-zA-Z]+", obj)[0])

    @classmethod
    def _get_k_v(cls, data):
        # type: (str) -> Tuple[str, str]
        """ Helper method to extract data from a string

        It uses regex to extract all digits and calls _parse_suffix
        which also uses a regex to extract all letters and normalizes
        the resulting suffix.

        :param str data: A size filtering string ('10G')
        :return: A Tuple with normalized output (10, 'GB')
        :rtype: tuple
        """
        return re.findall(r"\d+", data)[0], cls._parse_suffix(data)

    def _parse_filter(self):
        # type: () -> None
        """ Identifies which type of 'size' filter is applied

        There are four different filtering modes:

        1) 10G:50G (high-low)
           At least 10G but at max 50G of size

        2) :60G
           At max 60G of size

        3) 50G:
           At least 50G of size

        4) 20G
           Exactly 20G in size

        This method uses regex to identify and extract this information
        and raises if none could be found.
        """
        low_high = re.match(r"\d+[A-Z]{1,2}:\d+[A-Z]{1,2}", self.value)
        if low_high:
            low, high = low_high.group().split(":")
            self.low = self._get_k_v(low)
            self.high = self._get_k_v(high)

        low = re.match(r"\d+[A-Z]{1,2}:$", self.value)
        if low:
            self.low = self._get_k_v(low.group())

        high = re.match(r"^:\d+[A-Z]{1,2}", self.value)
        if high:
            self.high = self._get_k_v(high.group())

        exact = re.match(r"^\d+[A-Z]{1,2}$", self.value)
        if exact:
            self.exact = self._get_k_v(exact.group())

        if not self.low and not self.high and not self.exact:
            raise Exception("Couldn't parse {}".format(self.value))

    @staticmethod
    # pylint: disable=inconsistent-return-statements
    def to_byte(tpl):
        # type: (Tuple[Optional[str], Optional[str]]) -> float

        """ Convert any supported unit to bytes

        NOTE(review): an unrecognized suffix silently multiplies by 0.0
        instead of raising — confirm whether that is intended.

        :param tuple tpl: A tuple with ('10', 'GB')
        :return: The converted byte value
        :rtype: float
        """
        val_str, suffix = tpl
        value = float(val_str) if val_str is not None else 0.0
        return dict(zip(
            SizeMatcher.SUFFIXES[0],
            SizeMatcher.SUFFIXES[2],
        )).get(str(suffix), 0.00) * value

    @staticmethod
    def str_to_byte(input):
        # type: (str) -> float
        # NOTE(review): the parameter name shadows the builtin `input`
        return SizeMatcher.to_byte(SizeMatcher._get_k_v(input))

    # pylint: disable=inconsistent-return-statements, too-many-return-statements
    def compare(self, disk):
        # type: (Device) -> bool
        """ Convert MB/GB/TB down to bytes and compare

        1) Extracts information from the to-be-inspected disk.
        2) Depending on the mode (high/low, low, high, exact), apply
           the corresponding check and return.

        NOTE(review): this doesn't seem very solid and _may_ be
        re-factored.
        """
        if not disk:
            return False
        disk_value = self._get_disk_key(disk)
        # This doesn't necessarily have to be a float.
        # The current output from ceph-volume gives a float..
        # This may change in the future..
        # todo: harden this paragraph
        if not disk_value:
            logger.warning("Could not retrieve value for disk")
            return False

        # NOTE(review): this regex requires a float-formatted size such as
        # "50.00 GB"; a plain integer ("50 GB") would raise IndexError.
        disk_size = re.findall(r"\d+\.\d+", disk_value)[0]
        disk_suffix = self._parse_suffix(disk_value)
        disk_size_in_byte = self.to_byte((disk_size, disk_suffix))

        if all(self.high) and all(self.low):
            if disk_size_in_byte <= self.to_byte(
                    self.high) and disk_size_in_byte >= self.to_byte(self.low):
                return True
            # is a else: return False neccessary here?
            # (and in all other branches)
            logger.debug("Disk didn't match for 'high/low' filter")

        elif all(self.low) and not all(self.high):
            if disk_size_in_byte >= self.to_byte(self.low):
                return True
            logger.debug("Disk didn't match for 'low' filter")

        elif all(self.high) and not all(self.low):
            if disk_size_in_byte <= self.to_byte(self.high):
                return True
            logger.debug("Disk didn't match for 'high' filter")

        elif all(self.exact):
            if disk_size_in_byte == self.to_byte(self.exact):
                return True
            logger.debug("Disk didn't match for 'exact' filter")
        else:
            logger.debug("Neither high, low, nor exact was given")
            raise Exception("No filters applied")
        return False
--- /dev/null
+import logging
+
+try:
+ from typing import List, Optional
+except ImportError:
+ pass
+
+from ceph.deployment.inventory import Device
+
+from ..inventory import Devices
+from ..drive_group import DriveGroupSpec, DeviceSelection
+
+from .filter import FilterGenerator
+
+logger = logging.getLogger(__name__)
+
+
class DriveSelection(object):
    """Resolve a :class:`DriveGroupSpec` against a disk inventory.

    On construction each device section of the spec is matched against the
    given disks.  Disks are claimed in order (data, wal, db, journal) and a
    claimed disk is never re-assigned to a later section.
    """

    def __init__(self,
                 spec,  # type: DriveGroupSpec
                 disks,  # type: Devices
                 ):
        # work on a copy so claimed disks can be removed without mutating
        # the caller's inventory
        self.disks = disks.copy()
        self.spec = spec

        # assignment order matters: data first, then wal, db and journal
        self._data = self.assign_devices(self.spec.data_devices)
        self._wal = self.assign_devices(self.spec.wal_devices)
        self._db = self.assign_devices(self.spec.db_devices)
        self._journal = self.assign_devices(self.spec.journal_devices)

    def data_devices(self):
        # type: () -> List[Device]
        return self._data

    def wal_devices(self):
        # type: () -> List[Device]
        return self._wal

    def db_devices(self):
        # type: () -> List[Device]
        return self._db

    def journal_devices(self):
        # type: () -> List[Device]
        return self._journal

    @staticmethod
    def _limit_reached(device_filter, len_devices,
                       disk_path):
        # type: (DeviceSelection, int, str) -> bool
        """ Check for the <limit> property and apply logic

        If a limit is set in 'device_attrs' we have to stop adding
        disks at some point.

        If limit is set (>0) and len(devices) >= limit

        :param int len_devices: Length of the already populated device set/list
        :param str disk_path: The disk identifier (for logging purposes)
        :return: True/False if the device should be added to the list of devices
        :rtype: bool
        """
        limit = device_filter.limit or 0

        if limit > 0 and len_devices >= limit:
            logger.info("Refuse to add {} due to limit policy of <{}>".format(
                disk_path, limit))
            return True
        return False

    @staticmethod
    def _has_mandatory_idents(disk):
        # type: (Device) -> bool
        """ Check for mandatory identification fields
        """
        if disk.path:
            logger.debug("Found matching disk: {}".format(disk.path))
            return True
        else:
            raise Exception(
                "Disk {} doesn't have a 'path' identifier".format(disk))

    def assign_devices(self, device_filter):
        # type: (Optional[DeviceSelection]) -> List[Device]
        """ Assign drives based on used filters

        Do not add disks when:

        1) Filter didn't match
        2) Disk doesn't have a mandatory identification item (path)
        3) The set :limit was reached

        After the disk was added we make sure not to re-assign this disk
        for another defined type[wal/db/journal devices]

        return a sorted(by path) list of devices
        """
        if device_filter is None:
            logger.debug('device_filter is None')
            return []
        devices = []  # type: List[Device]
        for _filter in FilterGenerator(device_filter):
            if not _filter.is_matchable:
                logger.debug(
                    "Ignoring disk {}. Filter is not matchable".format(
                        device_filter))
                continue

            for disk in self.disks.devices:
                logger.debug("Processing disk {}".format(disk.path))

                # continue criteria
                assert _filter.matcher is not None
                if not _filter.matcher.compare(disk):
                    logger.debug(
                        "Ignoring disk {}. Filter did not match".format(
                            disk.path))
                    continue

                if not self._has_mandatory_idents(disk):
                    logger.debug(
                        "Ignoring disk {}. Missing mandatory idents".format(
                            disk.path))
                    continue

                # break on this condition.
                if self._limit_reached(device_filter, len(devices), disk.path):
                    logger.debug("Ignoring disk {}. Limit reached".format(
                        disk.path))
                    break

                if disk not in devices:
                    logger.debug('Adding disk {}'.format(disk.path))
                    devices.append(disk)

        # Claimed disks must not be re-assigned to a later device section.
        for taken_device in devices:
            if taken_device in self.disks.devices:
                self.disks.devices.remove(taken_device)

        return sorted(devices, key=lambda dev: dev.path)
--- /dev/null
+try:
+ from typing import List, Optional, Dict, Any
+except ImportError:
+ pass # for type checking
+
+
class Devices(object):
    """
    A container for Device instances with reporting
    """

    def __init__(self, devices):
        # type: (List[Device]) -> None
        self.devices = devices  # type: List[Device]

    def __eq__(self, other):
        # containers compare equal when their serialized forms are equal
        return self.to_json() == other.to_json()

    def to_json(self):
        # type: () -> List[dict]
        return [device.to_json() for device in self.devices]

    @classmethod
    def from_json(cls, input):
        # type: (List[Dict[str, Any]]) -> Devices
        parsed = [Device.from_json(entry) for entry in input]
        return cls(parsed)

    def copy(self):
        # shallow copy: a fresh container over the same Device objects
        return Devices(devices=self.devices[:])
+
+
class Device(object):
    """A single disk as reported by ``ceph-volume inventory``."""

    # attributes included in to_json(); 'human_readable_type' is a derived
    # property and therefore excluded again by from_json()
    report_fields = [
        'rejected_reasons',
        'available',
        'path',
        'sys_api',
        'lvs',
        'human_readable_type',
        'device_id'
    ]

    def __init__(self,
                 path,  # type: str
                 sys_api=None,  # type: Optional[Dict[str, Any]]
                 available=None,  # type: Optional[bool]
                 rejected_reasons=None,  # type: Optional[List[str]]
                 lvs=None,  # type: Optional[List[str]]
                 device_id=None,  # type: Optional[str]
                 ):
        self.path = path
        self.sys_api = sys_api
        self.available = available
        self.rejected_reasons = rejected_reasons
        self.lvs = lvs
        self.device_id = device_id

    def to_json(self):
        # type: () -> dict
        """Serialize all report fields (including derived ones) to a dict."""
        return {
            k: getattr(self, k) for k in self.report_fields
        }

    @classmethod
    def from_json(cls, input):
        # type: (Dict[str, Any]) -> Device
        """Rebuild a Device from a to_json()-style dict.

        Missing keys default to None; the derived 'human_readable_type'
        key is skipped since it is not a constructor argument.
        """
        ret = cls(
            **{
                key: input.get(key, None)
                for key in Device.report_fields
                if key != 'human_readable_type'
            }
        )
        return ret

    @property
    def human_readable_type(self):
        # type: () -> str
        """Coarse device class derived from the udev 'rotational' flag."""
        if self.sys_api is None or 'rotational' not in self.sys_api:
            return "unknown"
        # bugfix: this used to read 'sdd/nvme' (typo for 'ssd')
        return 'hdd' if self.sys_api["rotational"] == "1" else 'ssd/nvme'
--- /dev/null
+from ceph.deployment.inventory import Device
+
+
class InventoryFactory(object):
    """Produce ``ceph-volume inventory``-style dicts for tests.

    Paths are handed out as /dev/sdb, /dev/sdc, ... and already-taken
    paths are tracked so successive calls yield distinct disks.
    """

    def __init__(self):
        self.taken_paths = []

    def _make_path(self, ident='b'):
        """Build a device path such as /dev/sdb."""
        return "/dev/{}{}".format(self.prefix, ident)

    def _find_new_path(self):
        """Advance ``self.path`` past any already-taken paths.

        Only single-character suffixes (b..z) are implemented.
        """
        cnt = 0
        if len(self.taken_paths) >= 25:
            # bugfix: message had a typo and a missing space at the
            # implicit string concatenation boundary
            raise Exception(
                "Double-character disks are not implemented. Maximum amount "
                "of disks reached.")

        while self.path in self.taken_paths:
            ident = chr(ord('b') + cnt)
            self.path = "/dev/{}{}".format(self.prefix, ident)
            cnt += 1

    def assemble(self):
        """Build one inventory dict for the currently configured disk.

        Returns {} for 'empty' or unavailable disks; available disks are
        recorded in taken_paths so they are not handed out twice.
        """
        if self.empty:
            return {}
        self._find_new_path()
        inventory_sample = {
            'available': self.available,
            'lvs': [],
            'path': self.path,
            'rejected_reasons': self.rejected_reason,
            'sys_api': {
                'human_readable_size': self.human_readable_size,
                'locked': 1,
                'model': self.model,
                'nr_requests': '256',
                'partitions':
                {  # partitions are not as relevant for now, todo for later
                    'sda1': {
                        'sectors': '41940992',
                        'sectorsize': 512,
                        'size': self.human_readable_size,
                        'start': '2048'
                    }
                },
                'path': self.path,
                'removable': '0',
                'rev': '',
                'ro': '0',
                'rotational': str(self.rotational),
                'sas_address': '',
                'sas_device_handle': '',
                'scheduler_mode': 'mq-deadline',
                'sectors': 0,
                'sectorsize': '512',
                'size': self.size,
                'support_discard': '',
                'vendor': self.vendor
            }
        }

        if self.available:
            self.taken_paths.append(self.path)
            return inventory_sample
        return {}

    def _init(self, **kwargs):
        """(Re-)apply per-call disk attributes while keeping taken_paths."""
        self.prefix = 'sd'
        self.path = kwargs.get('path', self._make_path())
        self.human_readable_size = kwargs.get('human_readable_size',
                                              '50.00 GB')
        self.vendor = kwargs.get('vendor', 'samsung')
        self.model = kwargs.get('model', '42-RGB')
        self.available = kwargs.get('available', True)
        self.rejected_reason = kwargs.get('rejected_reason', [''])
        self.rotational = kwargs.get('rotational', '1')
        if not self.available:
            self.rejected_reason = ['locked']
        self.empty = kwargs.get('empty', False)
        self.size = kwargs.get('size', 5368709121)

    def produce(self, pieces=1, **kwargs):
        """Return *pieces* inventory dicts built from **kwargs**."""
        if kwargs.get('path') and pieces > 1:
            raise Exception("/path/ and /pieces/ are mutually exclusive")
        # Move to custom init to track _taken_paths.
        # class is invoked once in each context.
        # if disks with different properties are being created
        # we'd have to re-init the class and lose track of the
        # taken_paths
        self._init(**kwargs)
        return [self.assemble() for x in range(0, pieces)]
+
+
class DeviceFactory(object):
    """Produce ``Device`` objects from a simple setup dict for tests."""

    def __init__(self, device_setup):
        self.device_setup = device_setup
        self.pieces = device_setup.get('pieces', 1)
        self.device_conf = device_setup.get('device_config', {})

    def produce(self):
        return [Device(**self.device_conf) for _ in range(self.pieces)]
--- /dev/null
+import pytest
+
+from ceph.deployment.inventory import Devices, Device
+
+from ceph.deployment.drive_group import DriveGroupSpec, DriveGroupValidationError, DeviceSelection
+
+try:
+ from unittest.mock import patch
+except ImportError:
+ from mock import patch # type: ignore
+
+from ceph.deployment import drive_selection
+from ceph.tests.factories import InventoryFactory
+
+
class TestMatcher(object):
    """ Test Matcher base class
    """

    def test_get_disk_key_3(self):
        """
        virtual is False
        key is found
        return value of key is expected
        """
        disk_map = Device(path='/dev/vdb', sys_api={'foo': 'bar'})
        ret = drive_selection.Matcher('foo', 'bar')._get_disk_key(disk_map)
        assert ret is disk_map.sys_api.get('foo')

    def test_get_disk_key_4(self):
        """
        virtual is False
        key is not found
        expect raise Exception
        """
        disk_map = Device(path='/dev/vdb')
        # bugfix: a pytest.fail() inside the `with pytest.raises(Exception)`
        # block is itself swallowed (Failed derives from Exception), which
        # masked the very failure it was meant to report. The context
        # manager alone asserts that the call raises.
        with pytest.raises(Exception):
            drive_selection.Matcher('bar', 'foo')._get_disk_key(disk_map)
+
+
class TestSubstringMatcher(object):
    def test_compare(self):
        device = Device(path='/dev/vdb', sys_api=dict(model='samsung'))
        matcher = drive_selection.SubstringMatcher('model', 'samsung')
        assert matcher.compare(device) is True

    def test_compare_false(self):
        device = Device(path='/dev/vdb', sys_api=dict(model='nothing_matching'))
        matcher = drive_selection.SubstringMatcher('model', 'samsung')
        assert matcher.compare(device) is False
+
+
class TestEqualityMatcher(object):
    def test_compare(self):
        device = Device(path='/dev/vdb', sys_api=dict(rotates='1'))
        matcher = drive_selection.EqualityMatcher('rotates', '1')
        assert matcher.compare(device) is True

    def test_compare_false(self):
        device = Device(path='/dev/vdb', sys_api=dict(rotates='1'))
        matcher = drive_selection.EqualityMatcher('rotates', '0')
        assert matcher.compare(device) is False
+
+
class TestAllMatcher(object):
    """Tests for drive_selection.AllMatcher (matches every device)."""

    def test_compare(self):
        # AllMatcher matches regardless of the device's properties.
        device = Device(path='/dev/vdb')
        assert drive_selection.AllMatcher('all', 'True').compare(device) is True

    def test_compare_value_not_true(self):
        # Even a non-"True" filter value still matches everything.
        device = Device(path='/dev/vdb')
        assert drive_selection.AllMatcher('all', 'False').compare(device) is True
+
+
class TestSizeMatcher(object):
    """Tests for drive_selection.SizeMatcher: parsing of size filter
    expressions (exact, LOW:HIGH, :HIGH, LOW:) and device comparison.
    """

    def test_parse_filter_exact(self):
        """ Testing exact notation with 20G """
        matcher = drive_selection.SizeMatcher('size', '20G')
        assert isinstance(matcher.exact, tuple)
        assert matcher.exact[0] == '20'
        assert matcher.exact[1] == 'GB'

    def test_parse_filter_exact_GB_G(self):
        """ Testing exact notation with 20GB """
        matcher = drive_selection.SizeMatcher('size', '20GB')
        assert isinstance(matcher.exact, tuple)
        assert matcher.exact[0] == '20'
        assert matcher.exact[1] == 'GB'

    def test_parse_filter_high_low(self):
        """ Testing high-low notation with 20G:50G """

        matcher = drive_selection.SizeMatcher('size', '20G:50G')
        # Check the attributes a LOW:HIGH filter actually populates
        # (was a copy-pasted check of ``exact``).
        assert isinstance(matcher.low, tuple)
        assert isinstance(matcher.high, tuple)
        assert matcher.low[0] == '20'
        assert matcher.high[0] == '50'
        assert matcher.low[1] == 'GB'
        assert matcher.high[1] == 'GB'

    def test_parse_filter_max_high(self):
        """ Testing high notation with :50G """

        matcher = drive_selection.SizeMatcher('size', ':50G')
        assert isinstance(matcher.high, tuple)
        assert matcher.high[0] == '50'
        assert matcher.high[1] == 'GB'

    def test_parse_filter_min_low(self):
        """ Testing low notation with 50G: """

        matcher = drive_selection.SizeMatcher('size', '50G:')
        assert isinstance(matcher.low, tuple)
        assert matcher.low[0] == '50'
        assert matcher.low[1] == 'GB'

    def test_to_byte_GB(self):
        """ 10 GB -> 10 * 1e9 bytes """

        ret = drive_selection.SizeMatcher('size', '10G').to_byte(('10', 'GB'))
        assert ret == 10 * 1e+9

    def test_to_byte_MB(self):
        """ 10 MB -> 10 * 1e6 bytes """

        ret = drive_selection.SizeMatcher('size', '10M').to_byte(('10', 'MB'))
        assert ret == 10 * 1e+6

    def test_to_byte_TB(self):
        """ 10 TB -> 10 * 1e12 bytes """

        ret = drive_selection.SizeMatcher('size', '10T').to_byte(('10', 'TB'))
        assert ret == 10 * 1e+12

    def test_to_byte_PB(self):
        """ PB is not a supported unit; expect a ValueError """

        # pytest.raises verifies the exception by itself; the previous
        # trailing ``assert '...'`` on a non-empty string was always true.
        with pytest.raises(ValueError):
            drive_selection.SizeMatcher('size', '10P').to_byte(('10', 'PB'))

    def test_compare_exact(self):

        matcher = drive_selection.SizeMatcher('size', '20GB')
        disk_dict = Device(path='/dev/vdb', sys_api=dict(size='20.00 GB'))
        ret = matcher.compare(disk_dict)
        assert ret is True

    @pytest.mark.parametrize("test_input,expected", [
        ("1.00 GB", False),
        ("20.00 GB", True),
        ("50.00 GB", True),
        ("100.00 GB", True),
        ("101.00 GB", False),
        ("1101.00 GB", False),
    ])
    def test_compare_high_low(self, test_input, expected):

        matcher = drive_selection.SizeMatcher('size', '20GB:100GB')
        disk_dict = Device(path='/dev/vdb', sys_api=dict(size=test_input))
        ret = matcher.compare(disk_dict)
        assert ret is expected

    @pytest.mark.parametrize("test_input,expected", [
        ("1.00 GB", True),
        ("20.00 GB", True),
        ("50.00 GB", True),
        ("100.00 GB", False),
        ("101.00 GB", False),
        ("1101.00 GB", False),
    ])
    def test_compare_high(self, test_input, expected):

        matcher = drive_selection.SizeMatcher('size', ':50GB')
        disk_dict = Device(path='/dev/vdb', sys_api=dict(size=test_input))
        ret = matcher.compare(disk_dict)
        assert ret is expected

    @pytest.mark.parametrize("test_input,expected", [
        ("1.00 GB", False),
        ("20.00 GB", False),
        ("50.00 GB", True),
        ("100.00 GB", True),
        ("101.00 GB", True),
        ("1101.00 GB", True),
    ])
    def test_compare_low(self, test_input, expected):

        matcher = drive_selection.SizeMatcher('size', '50GB:')
        disk_dict = Device(path='/dev/vdb', sys_api=dict(size=test_input))
        ret = matcher.compare(disk_dict)
        assert ret is expected

    @pytest.mark.parametrize("test_input,expected", [
        ("1.00 GB", False),
        ("20.00 GB", False),
        ("50.00 GB", False),
        ("100.00 GB", False),
        ("101.00 GB", False),
        ("1101.00 GB", True),
        ("9.10 TB", True),
    ])
    def test_compare_at_least_1TB(self, test_input, expected):

        matcher = drive_selection.SizeMatcher('size', '1TB:')
        disk_dict = Device(path='/dev/sdz', sys_api=dict(size=test_input))
        ret = matcher.compare(disk_dict)
        assert ret is expected

    def test_compare_raise(self):

        # 'None' is not a parsable size expression -> compare must raise.
        matcher = drive_selection.SizeMatcher('size', 'None')
        disk_dict = Device(path='/dev/vdb', sys_api=dict(size='20.00 GB'))
        with pytest.raises(Exception):
            matcher.compare(disk_dict)

    @pytest.mark.parametrize("test_input,expected", [
        ("10G", ('10', 'GB')),
        ("20GB", ('20', 'GB')),
        ("10g", ('10', 'GB')),
        ("20gb", ('20', 'GB')),
    ])
    def test_get_k_v(self, test_input, expected):
        assert drive_selection.SizeMatcher('size', '10G')._get_k_v(test_input) == expected

    @pytest.mark.parametrize("test_input,expected", [
        ("10G", ('GB')),
        ("10g", ('GB')),
        ("20GB", ('GB')),
        ("20gb", ('GB')),
        ("20TB", ('TB')),
        ("20tb", ('TB')),
        ("20T", ('TB')),
        ("20t", ('TB')),
        ("20MB", ('MB')),
        ("20mb", ('MB')),
        ("20M", ('MB')),
        ("20m", ('MB')),
    ])
    def test_parse_suffix(self, test_input, expected):
        assert drive_selection.SizeMatcher('size', '10G')._parse_suffix(test_input) == expected

    @pytest.mark.parametrize("test_input,expected", [
        ("G", 'GB'),
        ("GB", 'GB'),
        ("TB", 'TB'),
        ("T", 'TB'),
        ("MB", 'MB'),
        ("M", 'MB'),
    ])
    def test_normalize_suffix(self, test_input, expected):

        # Arguments were swapped here ('10G', 'size'); the matcher takes
        # (key, value) like every other call site in this module.
        assert drive_selection.SizeMatcher('size', '10G')._normalize_suffix(test_input) == expected

    def test_normalize_suffix_raises(self):

        with pytest.raises(ValueError):
            drive_selection.SizeMatcher('size', '10P')._normalize_suffix("P")
+
+
class TestDriveGroup(object):
    """Tests for DriveGroupSpec: JSON parsing, derived properties and the
    device-filter helpers.
    """

    @pytest.fixture(scope='class')
    def test_fix(self, empty=None):
        # Factory fixture: returns a callable that builds a DriveGroupSpec
        # from a raw dict (bluestore by default, filestore on request, or a
        # minimal host-pattern-only spec when ``empty`` is truthy).
        def make_sample_data(empty=empty,
                             data_limit=0,
                             wal_limit=0,
                             db_limit=0,
                             osds_per_device='',
                             disk_format='bluestore'):
            raw_sample_bluestore = {
                'host_pattern': 'data*',
                'data_devices': {
                    'size': '30G:50G',
                    'model': '42-RGB',
                    'vendor': 'samsung',
                    'limit': data_limit
                },
                'wal_devices': {
                    'model': 'fast',
                    'limit': wal_limit
                },
                'db_devices': {
                    'size': ':20G',
                    'limit': db_limit
                },
                'db_slots': 5,
                'wal_slots': 5,
                'block_wal_size': '5G',
                'block_db_size': '10G',
                'objectstore': disk_format,
                'osds_per_device': osds_per_device,
                'encrypted': True,
            }
            raw_sample_filestore = {
                'host_pattern': 'data*',
                'objectstore': 'filestore',
                'data_devices': {
                    'size': '30G:50G',
                    'model': 'foo',
                    'vendor': '1x',
                    'limit': data_limit
                },
                'journal_devices': {
                    'size': ':20G'
                },
                'journal_size': '5G',
                'osds_per_device': osds_per_device,
                'encrypted': True,
            }
            if disk_format == 'filestore':
                raw_sample = raw_sample_filestore
            else:
                raw_sample = raw_sample_bluestore

            if empty:
                # Minimal valid spec: only a host pattern, no device filters.
                raw_sample = {'host_pattern': 'data*'}

            dgo = DriveGroupSpec.from_json(raw_sample)
            return dgo

        return make_sample_data

    def test_encryption_prop(self, test_fix):
        test_fix = test_fix()
        assert test_fix.encrypted is True

    def test_encryption_prop_empty(self, test_fix):
        # Unset in the raw spec -> defaults to False.
        test_fix = test_fix(empty=True)
        assert test_fix.encrypted is False

    def test_db_slots_prop(self, test_fix):
        test_fix = test_fix()
        assert test_fix.db_slots == 5

    def test_db_slots_prop_empty(self, test_fix):
        test_fix = test_fix(empty=True)
        assert test_fix.db_slots is None

    def test_wal_slots_prop(self, test_fix):
        test_fix = test_fix()
        assert test_fix.wal_slots == 5

    def test_wal_slots_prop_empty(self, test_fix):
        test_fix = test_fix(empty=True)
        assert test_fix.wal_slots is None

    def test_block_wal_size_prop(self, test_fix):
        # '5G' in the raw spec is parsed into bytes.
        test_fix = test_fix()
        assert test_fix.block_wal_size == 5000000000

    def test_block_wal_size_prop_empty(self, test_fix):
        test_fix = test_fix(empty=True)
        assert test_fix.block_wal_size is None

    def test_block_db_size_prop(self, test_fix):
        # '10G' in the raw spec is parsed into bytes.
        test_fix = test_fix()
        assert test_fix.block_db_size == 10000000000

    def test_block_db_size_prop_empty(self, test_fix):
        test_fix = test_fix(empty=True)
        assert test_fix.block_db_size is None

    def test_data_devices_prop(self, test_fix):
        # Relies on DeviceSelection equality comparing the filter fields.
        test_fix = test_fix()
        assert test_fix.data_devices == DeviceSelection(
            model='42-RGB',
            size='30G:50G',
            vendor='samsung',
            limit=0,
        )

    def test_data_devices_prop_empty(self, test_fix):
        test_fix = test_fix(empty=True)
        assert test_fix.data_devices is None

    def test_db_devices_prop(self, test_fix):
        test_fix = test_fix()
        assert test_fix.db_devices == DeviceSelection(
            size=':20G',
            limit=0,
        )

    def test_db_devices_prop_empty(self, test_fix):
        test_fix = test_fix(empty=True)
        assert test_fix.db_devices is None

    def test_wal_device_prop(self, test_fix):
        test_fix = test_fix()
        assert test_fix.wal_devices == DeviceSelection(
            model='fast',
            limit=0,
        )

    def test_journal_device_prop(self, test_fix):
        # journal_devices only exist on filestore specs.
        test_fix = test_fix(disk_format='filestore')
        assert test_fix.journal_devices == DeviceSelection(
            size=':20G'
        )

    def test_wal_device_prop_empty(self, test_fix):
        test_fix = test_fix(empty=True)
        assert test_fix.wal_devices is None

    def test_filestore_format_prop(self, test_fix):
        test_fix = test_fix(disk_format='filestore')
        assert test_fix.objectstore == 'filestore'

    def test_bluestore_format_prop(self, test_fix):
        test_fix = test_fix(disk_format='bluestore')
        assert test_fix.objectstore == 'bluestore'

    def test_default_format_prop(self, test_fix):
        # Objectstore defaults to bluestore when unspecified.
        test_fix = test_fix(empty=True)
        assert test_fix.objectstore == 'bluestore'

    def test_journal_size(self, test_fix):
        test_fix = test_fix(disk_format='filestore')
        assert test_fix.journal_size == 5000000000

    def test_osds_per_device(self, test_fix):
        test_fix = test_fix(osds_per_device='3')
        assert test_fix.osds_per_device == '3'

    def test_osds_per_device_default(self, test_fix):
        test_fix = test_fix()
        assert test_fix.osds_per_device == ''

    def test_journal_size_empty(self, test_fix):
        test_fix = test_fix(empty=True)
        assert test_fix.journal_size is None

    @pytest.fixture
    def inventory(self, available=True):
        # Factory fixture: builds a Devices collection with configurable
        # counts of data/wal/db disks via the InventoryFactory.
        def make_sample_data(available=available,
                             data_devices=10,
                             wal_devices=0,
                             db_devices=2,
                             human_readable_size_data='50.00 GB',
                             human_readable_size_wal='20.00 GB',
                             size=5368709121,
                             human_readable_size_db='20.00 GB'):
            factory = InventoryFactory()
            inventory_sample = []
            data_disks = factory.produce(
                pieces=data_devices,
                available=available,
                size=size,
                human_readable_size=human_readable_size_data)
            wal_disks = factory.produce(
                pieces=wal_devices,
                human_readable_size=human_readable_size_wal,
                rotational='0',
                model='ssd_type_model',
                size=size,
                available=available)
            db_disks = factory.produce(
                pieces=db_devices,
                human_readable_size=human_readable_size_db,
                rotational='0',
                size=size,
                model='ssd_type_model',
                available=available)
            inventory_sample.extend(data_disks)
            inventory_sample.extend(wal_disks)
            inventory_sample.extend(db_disks)

            return Devices(devices=inventory_sample)

        return make_sample_data

    # NOTE(review): the whole _filter_devices suite below is deliberately
    # disabled with ``if False:`` — presumably pending an API change to
    # DriveGroupSpec._filter_devices; confirm before re-enabling or deleting.
    if False:
        def test_filter_devices_10_size_min_max(self, test_fix, inventory):
            """ Test_fix's data_device_attrs is configured to take any disk from
            30G - 50G or with vendor samsung or with model 42-RGB
            The default inventory setup is configured to have 10 data devices(50G)
            and 2 wal devices(20G).
            The expected match is 12
            """
            inventory()
            test_fix = test_fix()
            ret = test_fix._filter_devices(test_fix.data_device_attrs)
            assert len(ret) == 12

        def test_filter_devices_size_exact(self, test_fix, inventory):
            """
            Configure to only take disks with 20G (exact)
            """
            inventory()
            test_fix = test_fix()
            ret = test_fix._filter_devices(dict(size='20G'))
            assert len(ret) == 2

        def test_filter_devices_2_max(self, test_fix, inventory):
            """
            Configure to only take disks with a max of 30G
            """
            inventory()
            test_fix = test_fix()
            ret = test_fix._filter_devices(dict(size=':30G'))
            assert len(ret) == 2

        def test_filter_devices_0_max(self, test_fix, inventory):
            """
            Configure to only take disks with a max of 10G
            """
            inventory()
            test_fix = test_fix()
            ret = test_fix._filter_devices(dict(size=':10G'))
            assert len(ret) == 0

        def test_filter_devices_12_min(self, test_fix, inventory):
            """
            Configure to only take disks with a min of 10G
            """
            inventory()
            test_fix = test_fix()
            ret = test_fix._filter_devices(dict(size='10G:'))
            assert len(ret) == 12

        def test_filter_devices_12_min_20G(self, test_fix, inventory):
            """
            Configure to only take disks with a min of 20G
            """
            inventory()
            test_fix = test_fix()
            ret = test_fix._filter_devices(dict(size='20G:'))
            assert len(ret) == 12

        def test_filter_devices_0_model(self, test_fix, inventory):
            """
            Configure to only take disks with a model of modelA
            """
            inventory()
            test_fix = test_fix()
            ret = test_fix._filter_devices(dict(model='unknown'))
            assert len(ret) == 0

        def test_filter_devices_2_model(self, test_fix, inventory):
            """
            Configure to only take disks with a model of model*(wildcard)
            """
            inventory()
            test_fix = test_fix()
            ret = test_fix._filter_devices(dict(model='ssd_type_model'))
            assert len(ret) == 2

        def test_filter_devices_12_vendor(self, test_fix, inventory):
            """
            Configure to only take disks with a vendor of samsung
            """
            inventory()
            test_fix = test_fix()
            ret = test_fix._filter_devices(dict(vendor='samsung'))
            assert len(ret) == 12

        def test_filter_devices_2_rotational(self, test_fix, inventory):
            """
            Configure to only take disks with a rotational flag of 0
            """
            inventory()
            test_fix = test_fix()
            ret = test_fix._filter_devices(dict(rotational='0'))
            assert len(ret) == 2

        def test_filter_devices_10_rotational(self, test_fix, inventory):
            """
            Configure to only take disks with a rotational flag of 1
            """
            inventory()
            test_fix = test_fix()
            ret = test_fix._filter_devices(dict(rotational='1'))
            assert len(ret) == 10

        def test_filter_devices_limit(self, test_fix, inventory):
            """
            Configure to only take disks with a rotational flag of 1
            This should take two disks, but limit=1 is in place
            """
            inventory()
            test_fix = test_fix()
            ret = test_fix._filter_devices(dict(rotational='1', limit=1))
            assert len(ret) == 1

        def test_filter_devices_all_limit_2(self, test_fix, inventory):
            """
            Configure to take all disks
            limiting to two
            """
            inventory()
            test_fix = test_fix()
            ret = test_fix._filter_devices(dict(all=True, limit=2))
            assert len(ret) == 2

        def test_filter_devices_empty_list_eq_matcher(self, test_fix, inventory):
            """
            Configure to only take disks with a rotational flag of 1
            This should take 10 disks, but limit=1 is in place
            Available is set to False. No disks are assigned
            """
            inventory(available=False)
            test_fix = test_fix()
            ret = test_fix._filter_devices(dict(rotational='1', limit=1))
            assert len(ret) == 0

        def test_filter_devices_empty_string_matcher(self, test_fix, inventory):
            """
            Configure to only take disks with a rotational flag of 1
            This should take two disks, but limit=1 is in place
            Available is set to False. No disks are assigned
            """
            inventory(available=False)
            test_fix = test_fix()
            ret = test_fix._filter_devices(dict(vendor='samsung', limit=1))
            assert len(ret) == 0

        def test_filter_devices_empty_size_matcher(self, test_fix, inventory):
            """
            Configure to only take disks with a rotational flag of 1
            This should take two disks, but limit=1 is in place
            Available is set to False. No disks are assigned
            """
            inventory(available=False)
            test_fix = test_fix()
            ret = test_fix._filter_devices(dict(size='10G:100G', limit=1))
            assert len(ret) == 0

        def test_filter_devices_empty_all_matcher(self, test_fix, inventory):
            """
            Configure to only take disks with a rotational flag of 1
            This should take two disks, but limit=1 is in place
            Available is set to False. No disks are assigned
            """
            inventory(available=False)
            test_fix = test_fix()
            ret = test_fix._filter_devices(dict(all=True, limit=1))
            assert len(ret) == 0

    @patch('ceph.deployment.drive_selection.DriveGroup._check_filter')
    def test_check_filter_support(self, check_filter_mock, test_fix):
        # _check_filter_support should delegate to _check_filter per filter.
        test_fix = test_fix()
        test_fix._check_filter_support()
        check_filter_mock.assert_called

    def test_check_filter(self, test_fix):
        # A supported filter key ('model') passes silently (returns None).
        test_fix = test_fix()
        ret = test_fix._check_filter(dict(model='foo'))
        assert ret is None

    def test_check_filter_raise(self, test_fix):
        # An unsupported filter key must raise a validation error.
        test_fix = test_fix()
        with pytest.raises(DriveGroupValidationError):
            test_fix._check_filter(dict(unknown='foo'))
            pytest.fail("Filter unknown is not supported")

    def test_list_devices(self):
        # Placeholder; not implemented yet.
        pass
+
+
class TestFilter(object):
    """Tests for drive_selection.Filter."""

    def test_is_matchable(self):
        # Without a matcher attached, a Filter can never match.
        flt = drive_selection.Filter(name='name', matcher=None)
        assert flt.is_matchable is False
+
+
def _mk_device(rotational=True, locked=False):
    """Return a one-element list holding a synthetic test Device.

    The device mimics a 394 GB disk; ``locked`` marks it unavailable and
    records 'locked' as the rejection reason.
    """
    sys_api = {
        "rotational": '1' if rotational else '0',
        "vendor": "Vendor",
        "human_readable_size": "394.27 GB",
        "partitions": {},
        "locked": int(locked),
        "sectorsize": "512",
        "removable": "0",
        "path": "??",
        "support_discard": "",
        "model": "Model",
        "ro": "0",
        "nr_requests": "128",
        "size": 423347879936
    }
    device = Device(
        path='??',
        sys_api=sys_api,
        available=not locked,
        rejected_reasons=['locked'] if locked else [],
        lvs=[],
        device_id="Model-Vendor-foobar"
    )
    return [device]
+
+
def _mk_inventory(devices):
    """Clone ``devices`` into a Devices collection with unique /dev/sdX paths.

    Each clone gets a distinct path suffix 'a'..'z'. Note that ``zip`` stops
    at the shorter iterable, so more than 26 input devices would be silently
    dropped.
    """
    devs = []
    # ord('z') + 1 so the letter 'z' itself is usable; the previous
    # ``range(ord('a'), ord('z'))`` stopped at 'y' (off-by-one).
    for dev_, name in zip(devices, map(chr, range(ord('a'), ord('z') + 1))):
        dev = Device.from_json(dev_.to_json())
        dev.path = '/dev/sd' + name
        dev.sys_api = dict(dev_.sys_api, path='/dev/sd' + name)
        devs.append(dev)
    return Devices(devices=devs)
+
+
class TestDriveSelection(object):
    """End-to-end tests: a DriveGroupSpec applied to a synthetic inventory."""

    # Each entry: (spec, inventory, expected data paths, expected db paths).
    testdata = [
        (
            DriveGroupSpec(host_pattern='*', data_devices=DeviceSelection(all=True)),
            _mk_inventory(_mk_device() * 5),
            ['/dev/sda', '/dev/sdb', '/dev/sdc', '/dev/sdd', '/dev/sde'], []
        ),
        (
            DriveGroupSpec(
                host_pattern='*',
                data_devices=DeviceSelection(all=True, limit=3),
                db_devices=DeviceSelection(all=True)
            ),
            _mk_inventory(_mk_device() * 5),
            ['/dev/sda', '/dev/sdb', '/dev/sdc'], ['/dev/sdd', '/dev/sde']
        ),
        (
            DriveGroupSpec(
                host_pattern='*',
                data_devices=DeviceSelection(rotational=True),
                db_devices=DeviceSelection(rotational=False)
            ),
            _mk_inventory(_mk_device(rotational=False) + _mk_device(rotational=True)),
            ['/dev/sdb'], ['/dev/sda']
        ),
        (
            DriveGroupSpec(
                host_pattern='*',
                data_devices=DeviceSelection(rotational=True),
                db_devices=DeviceSelection(rotational=False)
            ),
            _mk_inventory(_mk_device(rotational=True)*2 + _mk_device(rotational=False)),
            ['/dev/sda', '/dev/sdb'], ['/dev/sdc']
        ),
        (
            DriveGroupSpec(
                host_pattern='*',
                data_devices=DeviceSelection(rotational=True),
                db_devices=DeviceSelection(rotational=False)
            ),
            # No non-rotational disks present -> db selection stays empty.
            _mk_inventory(_mk_device(rotational=True)*2),
            ['/dev/sda', '/dev/sdb'], []
        ),
    ]

    @pytest.mark.parametrize("spec,inventory,expected_data,expected_db", testdata)
    def test_disk_selection(self, spec, inventory, expected_data, expected_db):
        # DriveSelection partitions the inventory per the spec's filters.
        sel = drive_selection.DriveSelection(spec, inventory)
        assert [d.path for d in sel.data_devices()] == expected_data
        assert [d.path for d in sel.db_devices()] == expected_db
def test_DriveGroup_fail():
    # An empty spec dict is invalid; from_json raises the library's own
    # validation error (not a bare TypeError).
    with pytest.raises(DriveGroupValidationError):
        DriveGroupSpec.from_json({})
def test_drivegroup_pattern():
    # hosts() filters candidate names against the fnmatch-style host_pattern.
    dg = DriveGroupSpec('node[1-3]', DeviceSelection(all=True))
    assert dg.hosts(['node{}'.format(i) for i in range(10)]) == ['node1', 'node2', 'node3']
assert spec.data_devices.paths == ['/dev/sda']
with pytest.raises(DriveGroupValidationError, match='exclusive'):
- DeviceSelection(paths=['/dev/sda'], rotates=False)
+ DeviceSelection(paths=['/dev/sda'], rotational=False)
[tool:pytest]
-addopts = -vv
+addopts = -vv --log-cli-level=DEBUG
[aliases]
test=pytest
# Test dependencies differ by interpreter major version.
if sys.version_info >= (3,0):
    # Python 3: mypy/pytest-mypy are usable; no upper bound on pytest.
    mypy = ['mypy', 'pytest-mypy']
    pytest = ['pytest >=2.1.3']
else:
    # Python 2: no mypy; pytest 5 dropped py2 support, and ``mock`` is
    # needed because unittest.mock is py3-only.
    mypy = []
    pytest = ['pytest >=2.1.3,<5', 'mock']
with open("README.rst", "r") as fh:
'six',
),
tests_require=[
- pytest,
'tox',
- ] + mypy,
+ 'pyyaml'
+ ] + mypy + pytest,
classifiers = [
'Intended Audience :: Developer',
'Operating System :: POSIX :: Linux',