cephadm bootstrap -c /root/ceph.conf ...
+
This can easily be done with other filters, like `size` or `vendor` as well.
+
+Activate existing OSDs
+======================
+
+If the operating system of a host has been reinstalled, the OSDs that already
+exist on its disks need to be activated again. For this use case, cephadm
+provides a wrapper for :ref:`ceph-volume-lvm-activate` that activates all
+existing OSDs on a host.
+
+.. prompt:: bash #
+
+   ceph cephadm osd activate <host>...
+
+This command scans all existing disks for OSDs and deploys the corresponding
+daemons.
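+
+For example, to activate the OSDs of a freshly reinstalled host named
+``host1`` (the host name is just an illustration):
+
+.. prompt:: bash #
+
+   ceph cephadm osd activate host1
+
+On success, the command prints a summary such as
+``Created osd(s) 1 on host 'host1'``.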
return False
return conf.last_modified > dt
+ @orchestrator._cli_write_command(
+ 'cephadm osd activate'
+ )
+ def _osd_activate(self, host: List[str]) -> HandleCommandResult:
+ """
+ Start OSD containers for existing OSDs
+ """
+
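+        # `forall_hosts` runs the inner function once for each host given on
+        # the command line and collects the per-host result strings.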
+ @forall_hosts
+ def run(h: str) -> str:
+ return self.osd_service.deploy_osd_daemons_for_existing_osds(h, 'osd')
+
+ return HandleCommandResult(stdout='\n'.join(run(host)))
+
def _get_connection(self, host: str) -> Tuple['remoto.backends.BaseConnection',
'remoto.backends.LegacyModuleExecute']:
"""
raise RuntimeError(
'cephadm exited with an error code: %d, stderr:%s' % (
code, '\n'.join(err)))
+ return self.deploy_osd_daemons_for_existing_osds(host, drive_group.service_name(),
+ replace_osd_ids)
+
+    def deploy_osd_daemons_for_existing_osds(self, host: str, service_name: str,
+ replace_osd_ids: Optional[List[str]] = None) -> str:
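+        """
+        Deploy an OSD daemon for every OSD that ceph-volume reports as present
+        on the given host. Used by both the drive-group creation path and
+        ``ceph cephadm osd activate``.
+        """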
+
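+        # If no explicit ids were passed in, reuse the ids of OSDs previously
+        # marked 'destroyed' on this host so that replacements keep their ids.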
+ if replace_osd_ids is None:
+ replace_osd_ids = self.find_destroyed_osds().get(host, [])
+ assert replace_osd_ids is not None
# check result
osds_elems: dict = CephadmServe(self.mgr)._run_cephadm_json(
host, 'osd', 'ceph-volume',
created.append(osd_id)
daemon_spec: CephadmDaemonDeploySpec = CephadmDaemonDeploySpec(
- service_name=drive_group.service_name(),
+ service_name=service_name,
daemon_id=osd_id,
host=host,
daemon_type='osd',
['--', 'inventory', '--format=json'], image='',
no_fsid=False),
]
+
+ @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
+ def test_osd_activate(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
+ _run_cephadm.return_value = ('{}', '', 0)
+ with with_host(cephadm_module, 'test', refresh_hosts=False):
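+            # Pretend the cluster's OSDMap already knows about OSD 1.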
+ cephadm_module.mock_store_set('_ceph_get', 'osd_map', {
+ 'osds': [
+ {
+ 'osd': 1,
+ 'up_from': 0,
+ 'uuid': 'uuid'
+ }
+ ]
+ })
+
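+            # Simulated `ceph-volume lvm list` output: one existing OSD whose
+            # cluster fsid matches this cluster, so activate should redeploy it.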
+ ceph_volume_lvm_list = {
+ '1': [{
+ 'tags': {
+ 'ceph.cluster_fsid': cephadm_module._cluster_fsid,
+ 'ceph.osd_fsid': 'uuid'
+ }
+ }]
+ }
+ _run_cephadm.return_value = (json.dumps(ceph_volume_lvm_list), '', 0)
+ assert cephadm_module._osd_activate(
+ ['test']).stdout == "Created osd(s) 1 on host 'test'"