From ec3c92d5ae3dbf15e2ae8f32b2df0acfb4f8af85 Mon Sep 17 00:00:00 2001
From: Sebastian Wagner
Date: Tue, 23 Feb 2021 12:24:06 +0100
Subject: [PATCH] mgr/cephadm: Activate existing OSDs

Adds `ceph cephadm osd activate`

Fixes: https://tracker.ceph.com/issues/49159
Signed-off-by: Sebastian Wagner
(cherry picked from commit 50295a65a177dbc0803fe5af92459d51d9aff71a)
---
 doc/cephadm/operations.rst                   |  1 +
 doc/cephadm/osd.rst                          | 13 ++++++++++
 src/pybind/mgr/cephadm/module.py             | 14 +++++++++++
 src/pybind/mgr/cephadm/services/osd.py       | 10 +++++++-
 src/pybind/mgr/cephadm/tests/test_cephadm.py | 26 ++++++++++++++++++++
 5 files changed, 63 insertions(+), 1 deletion(-)

diff --git a/doc/cephadm/operations.rst b/doc/cephadm/operations.rst
index 5433303eb21da..ea629f3432fad 100644
--- a/doc/cephadm/operations.rst
+++ b/doc/cephadm/operations.rst
@@ -217,3 +217,4 @@ Then, run bootstrap referencing this file::
 
   cephadm bootstrap -c /root/ceph.conf ...
 
+
diff --git a/doc/cephadm/osd.rst b/doc/cephadm/osd.rst
index 2d3e52ad03705..8ff9a6c700ce6 100644
--- a/doc/cephadm/osd.rst
+++ b/doc/cephadm/osd.rst
@@ -668,3 +668,16 @@ It is also possible to specify directly device paths in specific hosts like the
 
 This can easily be done with other filters, like `size` or `vendor` as well.
 
+
+Activate existing OSDs
+======================
+
+In case the OS of a host was reinstalled, existing OSDs need to be activated
+again. For this use case, cephadm provides a wrapper for :ref:`ceph-volume-lvm-activate` that
+activates all existing OSDs on a host.
+
+.. prompt:: bash #
+
+  ceph cephadm osd activate ...
+
+This will scan all existing disks for OSDs and deploy corresponding daemons.
diff --git a/src/pybind/mgr/cephadm/module.py b/src/pybind/mgr/cephadm/module.py
index 25ba55e11d772..4b162b2a50959 100644
--- a/src/pybind/mgr/cephadm/module.py
+++ b/src/pybind/mgr/cephadm/module.py
@@ -1061,6 +1061,20 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
             return False
         return conf.last_modified > dt
 
+    @orchestrator._cli_write_command(
+        'cephadm osd activate'
+    )
+    def _osd_activate(self, host: List[str]) -> HandleCommandResult:
+        """
+        Start OSD containers for existing OSDs
+        """
+
+        @forall_hosts
+        def run(h: str) -> str:
+            return self.osd_service.deploy_osd_daemons_for_existing_osds(h, 'osd')
+
+        return HandleCommandResult(stdout='\n'.join(run(host)))
+
     def _get_connection(self, host: str) -> Tuple['remoto.backends.BaseConnection',
                                                   'remoto.backends.LegacyModuleExecute']:
         """
diff --git a/src/pybind/mgr/cephadm/services/osd.py b/src/pybind/mgr/cephadm/services/osd.py
index 732a416870dfd..24622c4cf9862 100644
--- a/src/pybind/mgr/cephadm/services/osd.py
+++ b/src/pybind/mgr/cephadm/services/osd.py
@@ -84,7 +84,15 @@ class OSDService(CephService):
             raise RuntimeError(
                 'cephadm exited with an error code: %d, stderr:%s' % (
                     code, '\n'.join(err)))
+        return self.deploy_osd_daemons_for_existing_osds(host, drive_group.service_name(),
+                                                         replace_osd_ids)
 
+    def deploy_osd_daemons_for_existing_osds(self, host: str, service_name: str,
+                                             replace_osd_ids: Optional[List[str]] = None) -> str:
+
+        if replace_osd_ids is None:
+            replace_osd_ids = self.find_destroyed_osds().get(host, [])
+            assert replace_osd_ids is not None
         # check result
         osds_elems: dict = CephadmServe(self.mgr)._run_cephadm_json(
             host, 'osd', 'ceph-volume',
@@ -117,7 +125,7 @@ class OSDService(CephService):
 
             created.append(osd_id)
             daemon_spec: CephadmDaemonDeploySpec = CephadmDaemonDeploySpec(
-                service_name=drive_group.service_name(),
+                service_name=service_name,
                 daemon_id=osd_id,
                 host=host,
                 daemon_type='osd',
diff --git a/src/pybind/mgr/cephadm/tests/test_cephadm.py b/src/pybind/mgr/cephadm/tests/test_cephadm.py
index 91ebe6c0d07c1..3e9acc4e7ecb8 100644
--- a/src/pybind/mgr/cephadm/tests/test_cephadm.py
+++ b/src/pybind/mgr/cephadm/tests/test_cephadm.py
@@ -1168,3 +1168,29 @@ Traceback (most recent call last):
                 ['--', 'inventory', '--format=json'],
                 image='', no_fsid=False),
         ]
+
+    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
+    def test_osd_activate(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
+        _run_cephadm.return_value = ('{}', '', 0)
+        with with_host(cephadm_module, 'test', refresh_hosts=False):
+            cephadm_module.mock_store_set('_ceph_get', 'osd_map', {
+                'osds': [
+                    {
+                        'osd': 1,
+                        'up_from': 0,
+                        'uuid': 'uuid'
+                    }
+                ]
+            })
+
+            ceph_volume_lvm_list = {
+                '1': [{
+                    'tags': {
+                        'ceph.cluster_fsid': cephadm_module._cluster_fsid,
+                        'ceph.osd_fsid': 'uuid'
+                    }
+                }]
+            }
+            _run_cephadm.return_value = (json.dumps(ceph_volume_lvm_list), '', 0)
+            assert cephadm_module._osd_activate(
+                ['test']).stdout == "Created osd(s) 1 on host 'test'"
-- 
2.39.5
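
Usage sketch for the command added by this patch (not part of the patch itself):
host1 and host2 are hypothetical hostnames, and it is assumed the reinstalled
host has already been re-added to the cluster and is reachable by cephadm.

    # Redeploy OSD containers for the existing LVM-based OSDs found on each listed host
    ceph cephadm osd activate host1 host2

    # Verify that the activated OSDs are up again
    ceph osd tree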