From: Sebastian Wagner
Date: Tue, 23 Feb 2021 11:24:06 +0000 (+0100)
Subject: mgr/cephadm: Activate existing OSDs
X-Git-Tag: v17.1.0~2730^2~1
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=50295a65a177dbc0803fe5af92459d51d9aff71a;p=ceph.git

mgr/cephadm: Activate existing OSDs

Adds `ceph cephadm osd activate`

Fixes: https://tracker.ceph.com/issues/49159
Signed-off-by: Sebastian Wagner
---

diff --git a/doc/cephadm/operations.rst b/doc/cephadm/operations.rst
index 5433303eb21..ea629f3432f 100644
--- a/doc/cephadm/operations.rst
+++ b/doc/cephadm/operations.rst
@@ -217,3 +217,4 @@ Then, run bootstrap referencing this file::
 
   cephadm bootstrap -c /root/ceph.conf ...
 
+
diff --git a/doc/cephadm/osd.rst b/doc/cephadm/osd.rst
index 2d3e52ad037..8ff9a6c700c 100644
--- a/doc/cephadm/osd.rst
+++ b/doc/cephadm/osd.rst
@@ -668,3 +668,16 @@ It is also possible to specify directly device paths in specific hosts like the
 
 
 This can easily be done with other filters, like `size` or `vendor` as well.
+
+Activate existing OSDs
+======================
+
+If the OS of a host has been reinstalled, its existing OSDs need to be activated
+again. For this use case, cephadm provides a wrapper for :ref:`ceph-volume-lvm-activate` that
+activates all existing OSDs on a host.
+
+.. prompt:: bash #
+
+  ceph cephadm osd activate <host>...
+
+This will scan all existing disks for OSDs and deploy the corresponding daemons.
diff --git a/src/pybind/mgr/cephadm/module.py b/src/pybind/mgr/cephadm/module.py
index afd9314dead..2ba739b3c11 100644
--- a/src/pybind/mgr/cephadm/module.py
+++ b/src/pybind/mgr/cephadm/module.py
@@ -1092,6 +1092,20 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
             return False
         return conf.last_modified > dt
 
+    @orchestrator._cli_write_command(
+        'cephadm osd activate'
+    )
+    def _osd_activate(self, host: List[str]) -> HandleCommandResult:
+        """
+        Start OSD containers for existing OSDs
+        """
+
+        @forall_hosts
+        def run(h: str) -> str:
+            return self.osd_service.deploy_osd_daemons_for_existing_osds(h, 'osd')
+
+        return HandleCommandResult(stdout='\n'.join(run(host)))
+
     def _get_connection(self, host: str) -> Tuple['remoto.backends.BaseConnection',
                                                   'remoto.backends.LegacyModuleExecute']:
         """
diff --git a/src/pybind/mgr/cephadm/services/osd.py b/src/pybind/mgr/cephadm/services/osd.py
index 732a416870d..24622c4cf98 100644
--- a/src/pybind/mgr/cephadm/services/osd.py
+++ b/src/pybind/mgr/cephadm/services/osd.py
@@ -84,7 +84,15 @@ class OSDService(CephService):
                 raise RuntimeError(
                     'cephadm exited with an error code: %d, stderr:%s' % (
                         code, '\n'.join(err)))
+        return self.deploy_osd_daemons_for_existing_osds(host, drive_group.service_name(),
+                                                         replace_osd_ids)
 
+    def deploy_osd_daemons_for_existing_osds(self, host: str, service_name: str,
+                                             replace_osd_ids: Optional[List[str]] = None) -> str:
+
+        if replace_osd_ids is None:
+            replace_osd_ids = self.find_destroyed_osds().get(host, [])
+            assert replace_osd_ids is not None
         # check result
         osds_elems: dict = CephadmServe(self.mgr)._run_cephadm_json(
             host, 'osd', 'ceph-volume',
@@ -117,7 +125,7 @@
             created.append(osd_id)
 
             daemon_spec: CephadmDaemonDeploySpec = CephadmDaemonDeploySpec(
-                service_name=drive_group.service_name(),
+                service_name=service_name,
                 daemon_id=osd_id,
                 host=host,
                 daemon_type='osd',
diff --git a/src/pybind/mgr/cephadm/tests/test_cephadm.py b/src/pybind/mgr/cephadm/tests/test_cephadm.py
index 5ba22769fed..cc93ec8dacb 100644
--- a/src/pybind/mgr/cephadm/tests/test_cephadm.py
+++ b/src/pybind/mgr/cephadm/tests/test_cephadm.py
@@ -1088,3 +1088,29 @@ Traceback (most recent call last):
                     ['--', 'inventory', '--format=json'],
                     image='', no_fsid=False),
             ]
+
+    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
+    def test_osd_activate(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
+        _run_cephadm.return_value = ('{}', '', 0)
+        with with_host(cephadm_module, 'test', refresh_hosts=False):
+            cephadm_module.mock_store_set('_ceph_get', 'osd_map', {
+                'osds': [
+                    {
+                        'osd': 1,
+                        'up_from': 0,
+                        'uuid': 'uuid'
+                    }
+                ]
+            })
+
+            ceph_volume_lvm_list = {
+                '1': [{
+                    'tags': {
+                        'ceph.cluster_fsid': cephadm_module._cluster_fsid,
+                        'ceph.osd_fsid': 'uuid'
+                    }
+                }]
+            }
+            _run_cephadm.return_value = (json.dumps(ceph_volume_lvm_list), '', 0)
+            assert cephadm_module._osd_activate(
+                ['test']).stdout == "Created osd(s) 1 on host 'test'"
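For context on the test fixture above: `_osd_activate` fans out over the given hosts, and per host `deploy_osd_daemons_for_existing_osds` reads the ceph-volume LVM listing (mocked as `ceph_volume_lvm_list` in `test_osd_activate`) and deploys an OSD daemon for every entry whose `ceph.cluster_fsid` tag matches the cluster. The snippet below is a minimal standalone sketch of that id/fsid matching, not code from the patch; the helper name `osd_ids_for_cluster` is made up for illustration.

    import json
    from typing import Dict, List

    def osd_ids_for_cluster(lvm_list_json: str, cluster_fsid: str) -> List[str]:
        """Return the OSD ids whose LVs carry this cluster's fsid tag.

        Illustration only: the input mirrors the ``ceph_volume_lvm_list``
        fixture above, a mapping of osd id -> list of LV entries, each with
        ``ceph.cluster_fsid`` and ``ceph.osd_fsid`` tags.
        """
        lvm_list: Dict[str, list] = json.loads(lvm_list_json)
        matched: List[str] = []
        for osd_id, lvs in lvm_list.items():
            # Keep an OSD only if one of its LVs belongs to this cluster.
            if any(lv.get('tags', {}).get('ceph.cluster_fsid') == cluster_fsid
                   for lv in lvs):
                matched.append(osd_id)
        return matched

    # Same shape as the fixture used in test_osd_activate:
    fixture = {'1': [{'tags': {'ceph.cluster_fsid': 'fsid',
                               'ceph.osd_fsid': 'uuid'}}]}
    assert osd_ids_for_cluster(json.dumps(fixture), 'fsid') == ['1']

On a live cluster the wrapper is simply run per host, e.g. `ceph cephadm osd activate host1 host2` (host names are placeholders); `_osd_activate` joins each host's "Created osd(s) ... on host '...'" result line into the command output.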