mgr/cephadm: drop osdspec_affinity tracking
author    Sage Weil <sage@newdream.net>
          Tue, 23 Nov 2021 18:38:50 +0000 (13:38 -0500)
committer Sebastian Wagner <sewagner@redhat.com>
          Mon, 3 Jan 2022 13:59:55 +0000 (14:59 +0100)
We identify which drivespec legacy OSDs belong(ed) to by metadata they
report to the mgr.  Modern cephadm does this instead by looking at the
'service' property in the unit.meta file.  Having cephadm query the osd
metadata is expensive for large clusters, so let's avoid this and rely
entirely on unit.meta.
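
A minimal sketch of the unit.meta approach (the path layout and the exact
key name are assumptions for illustration, not cephadm's real on-disk
schema):

    import json
    from pathlib import Path

    def service_of(fsid: str, daemon_name: str) -> str:
        # e.g. daemon_name == 'osd.3'; cephadm writes unit.meta at deploy time
        # (the /var/lib/ceph layout here is an assumption for this sketch)
        meta_path = Path('/var/lib/ceph') / fsid / daemon_name / 'unit.meta'
        try:
            meta = json.loads(meta_path.read_text())
        except FileNotFoundError:
            # a legacy daemon never (re)deployed by cephadm has no unit.meta
            return 'osd'
        # fall back to the bare daemon type if no spec name was recorded
        return meta.get('service', 'osd')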

Worst case, some upgraded clusters will show OSDs under the generic service
'osd' instead of the per-spec service 'osd.<name>' for the drivespec that
created them.
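
That fallback boils down to something like the following hypothetical helper
(the real decision is made by DaemonDescription.service_name(), which the
diff below calls as dd.service_name()):

    def displayed_service(daemon_type: str, recorded_service: str = '') -> str:
        # hypothetical: with no spec name recorded in unit.meta, an upgraded
        # legacy OSD is simply grouped under its bare daemon type
        return recorded_service or daemon_type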

Signed-off-by: Sage Weil <sage@newdream.net>
(cherry picked from commit 9c2b8f557ef374212b5d11464b04f443cb8d84e6)

Conflicts:
    src/pybind/mgr/cephadm/module.py
    src/pybind/mgr/cephadm/serve.py

Signed-off-by: Sebastian Wagner <sewagner@redhat.com>
src/pybind/mgr/cephadm/module.py
src/pybind/mgr/cephadm/serve.py
src/pybind/mgr/cephadm/services/osd.py
src/pybind/mgr/cephadm/tests/fixtures.py

diff --git a/src/pybind/mgr/cephadm/module.py b/src/pybind/mgr/cephadm/module.py
index 3343c344e9e9d9cf98f6929f458bd52af0efe95c..b61cc54df068c6f1d6d4b51f4bd0251482f8f2b7 100644
--- a/src/pybind/mgr/cephadm/module.py
+++ b/src/pybind/mgr/cephadm/module.py
@@ -2068,7 +2068,7 @@ Then run the following:
             for h, dm in self.cache.get_daemons_with_volatile_status():
                 osds_to_remove = []
                 for name, dd in dm.items():
-                    if dd.daemon_type == 'osd' and (dd.service_name() == service_name or not dd.osdspec_affinity):
+                    if dd.daemon_type == 'osd' and dd.service_name() == service_name:
                         osds_to_remove.append(str(dd.daemon_id))
                 if osds_to_remove:
                     osds_msg[h] = osds_to_remove
diff --git a/src/pybind/mgr/cephadm/serve.py b/src/pybind/mgr/cephadm/serve.py
index 6849fae142f01d2fb107002d345a187e84dd8f3b..26a92571da133bc788d2dd030842dcc34d2f2666 100644
--- a/src/pybind/mgr/cephadm/serve.py
+++ b/src/pybind/mgr/cephadm/serve.py
@@ -411,8 +411,6 @@ class CephadmServe:
             sd.rank = int(d['rank']) if d.get('rank') is not None else None
             sd.rank_generation = int(d['rank_generation']) if d.get(
                 'rank_generation') is not None else None
-            if sd.daemon_type == 'osd':
-                sd.osdspec_affinity = self.mgr.osd_service.get_osdspec_affinity(sd.daemon_id)
             if 'state' in d:
                 sd.status_desc = d['state']
                 sd.status = {
diff --git a/src/pybind/mgr/cephadm/services/osd.py b/src/pybind/mgr/cephadm/services/osd.py
index f485257f2b47027411e87be5f10e4f1a36237357..dc4476b709a1cd0ab498450ec39fc299aca9eed8 100644
--- a/src/pybind/mgr/cephadm/services/osd.py
+++ b/src/pybind/mgr/cephadm/services/osd.py
@@ -311,9 +311,6 @@ class OSDService(CephService):
             error_ok=True)
         return out, err, code
 
-    def get_osdspec_affinity(self, osd_id: str) -> str:
-        return self.mgr.get('osd_metadata').get(osd_id, {}).get('osdspec_affinity', '')
-
     def post_remove(self, daemon: DaemonDescription, is_failed_deploy: bool) -> None:
         # Do not remove the osd.N keyring, if we failed to deploy the OSD, because
         # we cannot recover from it. The OSD keys are created by ceph-volume and not by
diff --git a/src/pybind/mgr/cephadm/tests/fixtures.py b/src/pybind/mgr/cephadm/tests/fixtures.py
index 3a3d7c139063347f9f561bb53f1adf8660231863..b0ce44225ebffea10b657dffda94708830bd148e 100644
--- a/src/pybind/mgr/cephadm/tests/fixtures.py
+++ b/src/pybind/mgr/cephadm/tests/fixtures.py
@@ -42,7 +42,6 @@ def with_cephadm_module(module_options=None, store=None):
     with mock.patch("cephadm.module.CephadmOrchestrator.get_ceph_option", get_ceph_option),\
             mock.patch("cephadm.services.osd.RemoveUtil._run_mon_cmd"), \
             mock.patch("cephadm.module.CephadmOrchestrator.get_osdmap"), \
-            mock.patch("cephadm.services.osd.OSDService.get_osdspec_affinity", return_value='test_spec'), \
             mock.patch("cephadm.module.CephadmOrchestrator.remote"):
 
         m = CephadmOrchestrator.__new__(CephadmOrchestrator)