spec = j_c.pop('spec')
j_c.update(spec)
j_c.pop('objectstore', None)
+ j_c.pop('filter_logic', None)
return j_c
assert spec_json == convert_to_old_style_json(spec.to_json())
"db_slots", "wal_slots", "block_db_size", "placement", "service_id", "service_type",
"data_devices", "db_devices", "wal_devices", "journal_devices",
"data_directories", "osds_per_device", "objectstore", "osd_id_claims",
- "journal_size", "unmanaged"
+ "journal_size", "unmanaged", "filter_logic"
]
def __init__(self,
journal_size=None, # type: Optional[int]
service_type=None, # type: Optional[str]
unmanaged=False, # type: bool
+ filter_logic='AND' # type: str
):
assert service_type is None or service_type == 'osd'
super(DriveGroupSpec, self).__init__('osd', service_id=service_id,
#: See :ref:`orchestrator-osd-replace`
self.osd_id_claims = osd_id_claims or dict()
+ #: The logic gate we use to match disks with filters.
+ #: defaults to 'AND'
+ self.filter_logic = filter_logic.upper()
+
@classmethod
def _from_json_impl(cls, json_drive_group):
# type: (dict) -> DriveGroupSpec
if self.block_db_size is not None and type(self.block_db_size) != int:
raise DriveGroupValidationError('block_db_size must be of type int')
+ if self.filter_logic not in ['AND', 'OR']:
+ raise DriveGroupValidationError('filter_logic must be either <AND> or <OR>')
+
def __repr__(self):
keys = [
key for key in self._supported_features if getattr(self, key) is not None
if disk in devices:
continue
- if not all(m.compare(disk) for m in FilterGenerator(device_filter)):
- logger.debug(
- "Ignoring disk {}. Filter did not match".format(
- disk.path))
- continue
+ if self.spec.filter_logic == 'AND':
+ if not all(m.compare(disk) for m in FilterGenerator(device_filter)):
+ logger.debug(
+ "Ignoring disk {}. Not all filter did match the disk".format(
+ disk.path))
+ continue
+
+ if self.spec.filter_logic == 'OR':
+ if not any(m.compare(disk) for m in FilterGenerator(device_filter)):
+ logger.debug(
+ "Ignoring disk {}. No filter matched the disk".format(
+ disk.path))
+ continue
logger.debug('Adding disk {}'.format(disk.path))
devices.append(disk)
host_pattern: '*'
data_devices:
limit: 1
+"""),
+
+ yaml.safe_load("""
+service_type: osd
+service_id: mydg
+placement:
+ host_pattern: '*'
+data_devices:
+ all: True
+filter_logic: XOR
""")
)
])
sel = drive_selection.DriveSelection(spec, inventory)
cmd = translate.to_ceph_volume(sel, ['0', '1']).run()
assert cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --osd-ids 0 1 --yes --no-systemd'
+
+
+def test_ceph_volume_command_8():
+ # NOTE(review): exercises the new filter_logic='OR' mode — a device is
+ # accepted when ANY filter in the DeviceSelection matches, instead of the
+ # default 'AND' (all filters must match, see the selector.py hunk above
+ # in this patch).
+ # data_devices: rotational=True matches sda, model='INTEL SSDS' matches
+ # sdb — OR selects both. db_devices: model='INTEL SSDP' matches only sdc.
+ spec = DriveGroupSpec(placement=PlacementSpec(host_pattern='*'),
+ data_devices=DeviceSelection(rotational=True, model='INTEL SSDS'),
+ db_devices=DeviceSelection(model='INTEL SSDP'),
+ filter_logic='OR',
+ osd_id_claims={}
+ )
+ inventory = _mk_inventory(_mk_device(rotational=True, size='1.82 TB', model='ST2000DM001-1ER1') + # data
+ _mk_device(rotational=False, size="223.0 GB", model='INTEL SSDSC2KG24') + # data
+ _mk_device(rotational=False, size="349.0 GB", model='INTEL SSDPED1K375GA') # wal/db
+ )
+ sel = drive_selection.DriveSelection(spec, inventory)
+ cmd = translate.to_ceph_volume(sel, []).run()
+ # Empty osd-id list: expect no --osd-ids flag in the generated command.
+ assert cmd == 'lvm batch --no-auto /dev/sda /dev/sdb --db-devices /dev/sdc --yes --no-systemd'
model: MC-55-44-XZ
db_devices:
model: SSD-123-foo
+ filter_logic: AND
objectstore: bluestore
wal_devices:
model: NVME-QQQQ-987
def _mk_device(rotational=True,
locked=False,
- size="394.27 GB"):
+ size="394.27 GB",
+ vendor='Vendor',
+ model='Model'):
return [Device(
path='??',
sys_api={
"rotational": '1' if rotational else '0',
- "vendor": "Vendor",
+ "vendor": vendor,
"human_readable_size": size,
"partitions": {},
"locked": int(locked),
"removable": "0",
"path": "??",
"support_discard": "",
- "model": "Model",
+ "model": model,
"ro": "0",
"nr_requests": "128",
"size": 423347879936 # ignore coversion from human_readable_size