mgr/cephadm: Activate existing OSDs
author     Sebastian Wagner <sebastian.wagner@suse.com>
           Tue, 23 Feb 2021 11:24:06 +0000 (12:24 +0100)
committer  Sebastian Wagner <sebastian.wagner@suse.com>
           Tue, 2 Mar 2021 11:20:32 +0000 (12:20 +0100)
Adds `ceph cephadm osd activate`

Fixes: https://tracker.ceph.com/issues/49159
Signed-off-by: Sebastian Wagner <sebastian.wagner@suse.com>
doc/cephadm/operations.rst
doc/cephadm/osd.rst
src/pybind/mgr/cephadm/module.py
src/pybind/mgr/cephadm/services/osd.py
src/pybind/mgr/cephadm/tests/test_cephadm.py

index 5433303eb21da1705c99bf07d56bc6b54122706e..ea629f3432fadd426c42ff593488be0a41c2b94d 100644 (file)
@@ -217,3 +217,4 @@ Then, run bootstrap referencing this file::
 
   cephadm bootstrap -c /root/ceph.conf ...
 
+
index 2d3e52ad03705e613b37fceca9e3531a08e50df1..8ff9a6c700ce6f606658c08691e2b4f715ccacb5 100644 (file)
@@ -668,3 +668,16 @@ It is also possible to specify directly device paths in specific hosts like the
 
 
 This can easily be done with other filters, like `size` or `vendor` as well.
+
+Activate existing OSDs
+======================
+
+If the operating system of a host was reinstalled, its existing OSDs need to be
+activated again. For this use case, cephadm provides a wrapper for
+:ref:`ceph-volume-lvm-activate` that activates all existing OSDs on a host.
+
+.. prompt:: bash #
+
+   ceph cephadm osd activate <host>...
+
+This will scan all existing disks for OSDs and deploy corresponding daemons.
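For context, and not part of the commit itself, here is a minimal sketch of how the new command could be scripted against a couple of freshly reinstalled hosts. The helper name `activate_osds` and the host names `host1`/`host2` are placeholders; only the `ceph cephadm osd activate <host>...` syntax comes from the documentation added above.

#!/usr/bin/env python3
"""Hypothetical wrapper around `ceph cephadm osd activate`."""
import subprocess
import sys


def activate_osds(hosts):
    """Re-activate OSDs on one or more hosts whose OS was reinstalled."""
    result = subprocess.run(
        ['ceph', 'cephadm', 'osd', 'activate', *hosts],
        capture_output=True, text=True, check=False)
    if result.returncode != 0:
        sys.exit('activation failed: %s' % result.stderr.strip())
    # The command prints one status line per host, e.g.
    # "Created osd(s) 1 on host 'host1'".
    print(result.stdout.strip())


if __name__ == '__main__':
    activate_osds(['host1', 'host2'])  # placeholder host names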
index afd9314dead8ca0af4d7492ce2beaa298bffb2b6..2ba739b3c116ceba4cf80527cbda8e24f74ae9db 100644 (file)
@@ -1092,6 +1092,20 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
             return False
         return conf.last_modified > dt
 
+    @orchestrator._cli_write_command(
+        'cephadm osd activate'
+    )
+    def _osd_activate(self, host: List[str]) -> HandleCommandResult:
+        """
+        Start OSD containers for existing OSDs
+        """
+
+        @forall_hosts
+        def run(h: str) -> str:
+            return self.osd_service.deploy_osd_daemons_for_existing_osds(h, 'osd')
+
+        return HandleCommandResult(stdout='\n'.join(run(host)))
+
     def _get_connection(self, host: str) -> Tuple['remoto.backends.BaseConnection',
                                                   'remoto.backends.LegacyModuleExecute']:
         """
index 732a416870dfd67412174546a9829f90aedf06cb..24622c4cf9862ea835e85fcc4b57a1658b37a77d 100644 (file)
@@ -84,7 +84,15 @@ class OSDService(CephService):
             raise RuntimeError(
                 'cephadm exited with an error code: %d, stderr:%s' % (
                     code, '\n'.join(err)))
+        return self.deploy_osd_daemons_for_existing_osds(host, drive_group.service_name(),
+                                                         replace_osd_ids)
 
+    def deploy_osd_daemons_for_existing_osds(self, host: str, service_name: str,
+                                             replace_osd_ids: Optional[List[str]] = None) -> str:
+
+        if replace_osd_ids is None:
+            replace_osd_ids = self.find_destroyed_osds().get(host, [])
+            assert replace_osd_ids is not None
         # check result
         osds_elems: dict = CephadmServe(self.mgr)._run_cephadm_json(
             host, 'osd', 'ceph-volume',
@@ -117,7 +125,7 @@ class OSDService(CephService):
 
                 created.append(osd_id)
                 daemon_spec: CephadmDaemonDeploySpec = CephadmDaemonDeploySpec(
-                    service_name=drive_group.service_name(),
+                    service_name=service_name,
                     daemon_id=osd_id,
                     host=host,
                     daemon_type='osd',
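To make the data flow concrete, here is a standalone sketch (not code from the commit) of the selection step that `deploy_osd_daemons_for_existing_osds` performs: it parses the JSON produced by `ceph-volume lvm list` on the host and considers only OSDs whose LVM tags reference this cluster, then creates a daemon spec per matching OSD id. The function `osd_ids_for_cluster` and the example payload are illustrative; the payload is shaped like the fixture used in `test_osd_activate` below.

import json
from typing import List


def osd_ids_for_cluster(lvm_list_json: str, cluster_fsid: str) -> List[str]:
    """Return OSD ids from `ceph-volume lvm list --format json` output
    whose LVM tags reference the given cluster fsid."""
    osds = json.loads(lvm_list_json)
    matched = []
    for osd_id, devices in osds.items():
        if any(dev.get('tags', {}).get('ceph.cluster_fsid') == cluster_fsid
               for dev in devices):
            matched.append(osd_id)
    return matched


example = json.dumps({'1': [{'tags': {'ceph.cluster_fsid': 'fsid',
                                      'ceph.osd_fsid': 'uuid'}}]})
print(osd_ids_for_cluster(example, 'fsid'))  # ['1']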
index 5ba22769feda7d8a0c4114d3d6061785fafeefeb..cc93ec8dacbb8be78b85234c3040bfa0a8904354 100644 (file)
@@ -1088,3 +1088,29 @@ Traceback (most recent call last):
                           ['--', 'inventory', '--format=json'], image='',
                           no_fsid=False),
             ]
+
+    @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
+    def test_osd_activate(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
+        _run_cephadm.return_value = ('{}', '', 0)
+        with with_host(cephadm_module, 'test', refresh_hosts=False):
+            cephadm_module.mock_store_set('_ceph_get', 'osd_map', {
+                'osds': [
+                    {
+                        'osd': 1,
+                        'up_from': 0,
+                        'uuid': 'uuid'
+                    }
+                ]
+            })
+
+            ceph_volume_lvm_list = {
+                '1': [{
+                    'tags': {
+                        'ceph.cluster_fsid': cephadm_module._cluster_fsid,
+                        'ceph.osd_fsid': 'uuid'
+                    }
+                }]
+            }
+            _run_cephadm.return_value = (json.dumps(ceph_volume_lvm_list), '', 0)
+            assert cephadm_module._osd_activate(
+                ['test']).stdout == "Created osd(s) 1 on host 'test'"