if replace_osd_ids is None:
replace_osd_ids = OsdIdClaims(self.mgr).filtered_by_host(host)
assert replace_osd_ids is not None
- # check result
+
+ # check result: lvm
osds_elems: dict = CephadmServe(self.mgr)._run_cephadm_json(
host, 'osd', 'ceph-volume',
[
daemon_spec,
osd_uuid_map=osd_uuid_map)
+ # check result: raw
+ raw_elems: dict = CephadmServe(self.mgr)._run_cephadm_json(
+ host, 'osd', 'ceph-volume',
+ [
+ '--',
+ 'raw', 'list',
+ '--format', 'json',
+ ])
+ for osd_uuid, osd in raw_elems.items():
+ if osd.get('ceph_fsid') != fsid:
+ continue
+ osd_id = str(osd.get('osd_id', '-1'))
+ if osd_id in before_osd_uuid_map and osd_id not in replace_osd_ids:
+            # skip: this OSD already existed before and is not part of a
+            # replacement operation (replaced OSDs must be redeployed)
+ continue
+ if osd_id not in osd_uuid_map:
+ logger.debug('osd id {} does not exist in cluster'.format(osd_id))
+ continue
+ if osd_uuid_map.get(osd_id) != osd_uuid:
+ logger.debug('mismatched osd uuid (cluster has %s, osd '
+ 'has %s)' % (osd_uuid_map.get(osd_id), osd_uuid))
+ continue
+ if osd_id in created:
+ continue
+
+ created.append(osd_id)
+ daemon_spec = CephadmDaemonDeploySpec(
+ service_name=service_name,
+ daemon_id=osd_id,
+ host=host,
+ daemon_type='osd',
+ )
+ daemon_spec.final_config, daemon_spec.deps = self.generate_config(daemon_spec)
+ CephadmServe(self.mgr)._create_daemon(
+ daemon_spec,
+ osd_uuid_map=osd_uuid_map)
+
if created:
self.mgr.cache.invalidate_host_devices(host)
self.mgr.cache.invalidate_autotune(host)
import json
+import logging
from contextlib import contextmanager
['--config-json', '-', '--', 'lvm', 'batch',
'--no-auto', '/dev/sdb', '--yes', '--no-systemd'],
env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=foo'], error_ok=True, stdin='{"config": "", "keyring": ""}')
- _run_cephadm.assert_called_with(
+ _run_cephadm.assert_any_call(
'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'], image='', no_fsid=False)
+ _run_cephadm.assert_any_call(
+ 'test', 'osd', 'ceph-volume', ['--', 'raw', 'list', '--format', 'json'], image='', no_fsid=False)
@mock.patch("cephadm.serve.CephadmServe._run_cephadm")
def test_apply_osd_save_non_collocated(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
'--wal-devices', '/dev/sdd', '--yes', '--no-systemd'],
env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=noncollocated'],
error_ok=True, stdin='{"config": "", "keyring": ""}')
- _run_cephadm.assert_called_with(
+ _run_cephadm.assert_any_call(
'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'], image='', no_fsid=False)
+ _run_cephadm.assert_any_call(
+ 'test', 'osd', 'ceph-volume', ['--', 'raw', 'list', '--format', 'json'], image='', no_fsid=False)
@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
@mock.patch("cephadm.module.SpecStore.save")
]
})
- ceph_volume_lvm_list = {
- '1': [{
- 'tags': {
- 'ceph.cluster_fsid': cephadm_module._cluster_fsid,
- 'ceph.osd_fsid': 'uuid'
- },
- 'type': 'data'
- }]
- }
- _run_cephadm.return_value = (json.dumps(ceph_volume_lvm_list), '', 0)
- _run_cephadm.reset_mock()
+ def _ceph_volume_list(s, host, entity, cmd, **kwargs):
+ logging.info(f'ceph-volume cmd: {cmd}')
+ if 'raw' in cmd:
+ return json.dumps({
+ "21a4209b-f51b-4225-81dc-d2dca5b8b2f5": {
+ "ceph_fsid": "64c84f19-fe1d-452a-a731-ab19dc144aa8",
+ "device": "/dev/loop0",
+ "osd_id": 21,
+ "osd_uuid": "21a4209b-f51b-4225-81dc-d2dca5b8b2f5",
+ "type": "bluestore"
+ },
+ }), '', 0
+ if 'lvm' in cmd:
+ return json.dumps({
+ '1': [{
+ 'tags': {
+ 'ceph.cluster_fsid': cephadm_module._cluster_fsid,
+ 'ceph.osd_fsid': 'uuid'
+ },
+ 'type': 'data'
+ }]
+ }), '', 0
+ return '{}', '', 0
+ _run_cephadm.reset_mock(return_value=True)
+ _run_cephadm.side_effect = _ceph_volume_list
assert cephadm_module._osd_activate(
['test']).stdout == "Created osd(s) 1 on host 'test'"
assert _run_cephadm.mock_calls == [
['--name', 'osd.1', '--meta-json', mock.ANY,
'--config-json', '-', '--osd-fsid', 'uuid'],
stdin=mock.ANY, image=''),
+ mock.call('test', 'osd', 'ceph-volume',
+ ['--', 'raw', 'list', '--format', 'json'], no_fsid=False, image=''),
]
@mock.patch("cephadm.serve.CephadmServe._run_cephadm")
]
})
- ceph_volume_lvm_list = {
- '1': [{
- 'tags': {
- 'ceph.cluster_fsid': cephadm_module._cluster_fsid,
- 'ceph.osd_fsid': 'uuid'
- },
- 'type': 'data'
- }, {
- 'tags': {
- 'ceph.cluster_fsid': cephadm_module._cluster_fsid,
- 'ceph.osd_fsid': 'uuid'
- },
- 'type': 'db'
- }]
- }
- _run_cephadm.return_value = (json.dumps(ceph_volume_lvm_list), '', 0)
- _run_cephadm.reset_mock()
+ def _ceph_volume_list(s, host, entity, cmd, **kwargs):
+ logging.info(f'ceph-volume cmd: {cmd}')
+ if 'raw' in cmd:
+ return json.dumps({
+ "21a4209b-f51b-4225-81dc-d2dca5b8b2f5": {
+ "ceph_fsid": "64c84f19-fe1d-452a-a731-ab19dc144aa8",
+ "device": "/dev/loop0",
+ "osd_id": 21,
+ "osd_uuid": "21a4209b-f51b-4225-81dc-d2dca5b8b2f5",
+ "type": "bluestore"
+ },
+ }), '', 0
+ if 'lvm' in cmd:
+ return json.dumps({
+ '1': [{
+ 'tags': {
+ 'ceph.cluster_fsid': cephadm_module._cluster_fsid,
+ 'ceph.osd_fsid': 'uuid'
+ },
+ 'type': 'data'
+ }, {
+ 'tags': {
+ 'ceph.cluster_fsid': cephadm_module._cluster_fsid,
+ 'ceph.osd_fsid': 'uuid'
+ },
+ 'type': 'db'
+ }]
+ }), '', 0
+ return '{}', '', 0
+ _run_cephadm.reset_mock(return_value=True)
+ _run_cephadm.side_effect = _ceph_volume_list
assert cephadm_module._osd_activate(
['test']).stdout == "Created osd(s) 1 on host 'test'"
assert _run_cephadm.mock_calls == [
['--name', 'osd.1', '--meta-json', mock.ANY,
'--config-json', '-', '--osd-fsid', 'uuid'],
stdin=mock.ANY, image=''),
+ mock.call('test', 'osd', 'ceph-volume',
+ ['--', 'raw', 'list', '--format', 'json'], no_fsid=False, image=''),
]