_run_cephadm.assert_called_with(
'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'])
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm")
def test_apply_osd_save_non_collocated(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
    """Apply a non-collocated OSD drivegroup (separate data/db/wal paths)
    and verify the resulting ceph-volume invocation.

    The spec is scheduled, the host is given an inventory that matches the
    three requested devices, and one serve-loop pass is run; ceph-volume
    must then be called with ``lvm batch --no-auto`` plus the explicit
    ``--db-devices``/``--wal-devices`` lists, followed by an ``lvm list``
    refresh.
    """
    # _run_cephadm returns (stdout-lines, stderr, exit-code).  Use a list
    # for stdout so the mock matches the real return type and the
    # `_run_cephadm('{}')` helper used by the other tests in this file
    # (was inconsistently a bare string here, a list below).
    _run_cephadm.return_value = (['{}'], '', 0)
    with with_host(cephadm_module, 'test'):

        # Non-collocated layout: data, db and wal each on their own device.
        spec = DriveGroupSpec(
            service_id='noncollocated',
            placement=PlacementSpec(
                hosts=['test']
            ),
            data_devices=DeviceSelection(paths=['/dev/sdb']),
            db_devices=DeviceSelection(paths=['/dev/sdc']),
            wal_devices=DeviceSelection(paths=['/dev/sdd'])
        )

        c = cephadm_module.apply([spec])
        assert wait(cephadm_module, c) == ['Scheduled osd.noncollocated update...']

        # Fake a host inventory containing exactly the three devices the
        # spec asks for, so the drivegroup matches this host.
        inventory = Devices([
            Device('/dev/sdb', available=True),
            Device('/dev/sdc', available=True),
            Device('/dev/sdd', available=True)
        ])

        cephadm_module.cache.update_host_devices_networks('test', inventory.devices, {})

        _run_cephadm.return_value = (['{}'], '', 0)

        # One pass of the serve loop; False means nothing is left to do.
        assert CephadmServe(cephadm_module)._apply_all_services() is False

        # The OSD creation call: lvm batch with explicit db/wal device
        # lists and the OSDSPEC affinity env var naming the service_id.
        _run_cephadm.assert_any_call(
            'test', 'osd', 'ceph-volume',
            ['--config-json', '-', '--', 'lvm', 'batch',
             '--no-auto', '/dev/sdb', '--db-devices', '/dev/sdc',
             '--wal-devices', '/dev/sdd', '--yes', '--no-systemd'],
            env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=noncollocated'],
            error_ok=True, stdin='{"config": "", "keyring": ""}')
        # The very last call must be the post-create `lvm list` refresh.
        _run_cephadm.assert_called_with(
            'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'])
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
@mock.patch("cephadm.module.SpecStore.save")
def test_apply_osd_save_placement(self, _save_spec, cephadm_module):
out = wait(cephadm_module, c)
assert out == "Created no osd(s) on host test; already created?"
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
def test_create_noncollocated_osd(self, cephadm_module):
    """create_osds() on a drivegroup whose data-device path matches
    nothing on the host reports that no OSDs were created."""
    with with_host(cephadm_module, 'test'):
        # An empty path can never match a real device on the host.
        drive_group = DriveGroupSpec(
            placement=PlacementSpec(host_pattern='test'),
            data_devices=DeviceSelection(paths=['']),
        )
        completion = cephadm_module.create_osds(drive_group)
        result = wait(cephadm_module, completion)
        assert result == "Created no osd(s) on host test; already created?"
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
def test_prepare_drivegroup(self, cephadm_module):
with with_host(cephadm_module, 'test'):
self.spec = spec
self.existing_daemons = existing_daemons or 0
- if self.spec.data_devices.paths: # type: ignore
- # re: type: ignore there is *always* a path attribute assigned to DeviceSelection
- # it's just None if actual drivegroups are used
- self._data = self.spec.data_devices.paths # type: ignore
- self._db = [] # type: List
- self._wal = [] # type: List
- self._journal = [] # type: List
- else:
- self._data = self.assign_devices(self.spec.data_devices)
- self._wal = self.assign_devices(self.spec.wal_devices)
- self._db = self.assign_devices(self.spec.db_devices)
- self._journal = self.assign_devices(self.spec.journal_devices)
+ self._data = self.assign_devices(self.spec.data_devices)
+ self._wal = self.assign_devices(self.spec.wal_devices)
+ self._db = self.assign_devices(self.spec.db_devices)
+ self._journal = self.assign_devices(self.spec.journal_devices)
def data_devices(self):
# type: () -> List[Device]
logger.debug('data_devices is None')
return []
+ if device_filter.paths:
+ logger.debug('device filter is using explicit paths')
+ return device_filter.paths
+
devices = list() # type: List[Device]
for disk in self.disks:
logger.debug("Processing disk {}".format(disk.path))