mgr/cephadm: do not place osds on _no_schedule hosts
author     Sage Weil <sage@newdream.net>
           Tue, 20 Apr 2021 14:08:56 +0000 (10:08 -0400)
committer  Sage Weil <sage@newdream.net>
           Tue, 4 May 2021 16:18:00 +0000 (11:18 -0500)
Signed-off-by: Sage Weil <sage@newdream.net>
(cherry picked from commit f5cee666d2277fcf06c15930dc00617b86aa0c0e)
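
In short: cephadm treats the reserved `_no_schedule` host label as "do not place new daemons here" (for example while a host is being drained), and this commit makes OSD placement honour that label. Below is a minimal, self-contained sketch of the filtering idea using a plain dict instead of the real Inventory/HostSpec types; it is illustrative only, and the actual helper used in the diff is the mgr's _schedulable_hosts().

# Illustrative sketch only -- not code from this commit.  It shows the
# intended behaviour: hosts labelled _no_schedule are dropped from the
# set of candidates that OSD placement is allowed to use.
def schedulable_hosts(inventory: dict) -> list:
    """inventory maps hostname -> {'labels': [...], ...}."""
    return [
        host for host, meta in inventory.items()
        if '_no_schedule' not in meta.get('labels', [])
    ]

inventory = {
    'host1': {'labels': ['osd']},
    'host2': {'labels': ['osd', '_no_schedule']},  # e.g. being drained
}
print(schedulable_hosts(inventory))  # ['host1']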

src/pybind/mgr/cephadm/inventory.py
src/pybind/mgr/cephadm/services/osd.py

diff --git a/src/pybind/mgr/cephadm/inventory.py b/src/pybind/mgr/cephadm/inventory.py
index 11ed505b334d972cb83cb19d7b68376e50cfc408..df9381a3dcdf9c675665e823c003b478e7026e26 100644
--- a/src/pybind/mgr/cephadm/inventory.py
+++ b/src/pybind/mgr/cephadm/inventory.py
@@ -82,6 +82,12 @@ class Inventory:
             self._inventory[host]['labels'].remove(label)
         self.save()
 
+    def has_label(self, host: str, label: str) -> bool:
+        return (
+            host in self._inventory
+            and label in self._inventory[host].get('labels', [])
+        )
+
     def get_addr(self, host: str) -> str:
         self.assert_host(host)
         return self._inventory[host].get('addr', host)
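
Two details of the new helper are worth noting, shown here with a toy stand-in for the Inventory class (not the real implementation): a missing 'labels' key is treated as "no labels", and an unknown host simply yields False rather than raising, unlike get_addr() above, which first calls assert_host().

# Toy stand-in that copies only the has_label() logic added above.
class MiniInventory:
    def __init__(self, inventory: dict) -> None:
        self._inventory = inventory

    def has_label(self, host: str, label: str) -> bool:
        return (
            host in self._inventory
            and label in self._inventory[host].get('labels', [])
        )

inv = MiniInventory({'host1': {'labels': ['_no_schedule']},
                     'host2': {'addr': '10.0.0.2'}})
print(inv.has_label('host1', '_no_schedule'))  # True
print(inv.has_label('host2', '_no_schedule'))  # False: host has no labels key
print(inv.has_label('ghost', '_no_schedule'))  # False: host not in inventory, no exception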
diff --git a/src/pybind/mgr/cephadm/services/osd.py b/src/pybind/mgr/cephadm/services/osd.py
index b776a88f454736d2fe5307d38f2a321574aad286..73e0bd591aca4e0bfc06e9430faf8dd99144fb30 100644
--- a/src/pybind/mgr/cephadm/services/osd.py
+++ b/src/pybind/mgr/cephadm/services/osd.py
@@ -42,6 +42,9 @@ class OSDService(CephService):
                 self.mgr.log.debug("skipping apply of %s on %s (no change)" % (
                     host, drive_group))
                 return None
+            # skip this host if we cannot schedule here
+            if self.mgr.inventory.has_label(host, '_no_schedule'):
+                return None
 
             cmd = self.driveselection_to_ceph_volume(drive_selection,
                                                      osd_id_claims.get(host, []))
@@ -144,7 +147,7 @@ class OSDService(CephService):
     def prepare_drivegroup(self, drive_group: DriveGroupSpec) -> List[Tuple[str, DriveSelection]]:
         # 1) use fn_filter to determine matching_hosts
         matching_hosts = drive_group.placement.filter_matching_hostspecs(
-            self.mgr.inventory.all_specs())
+            self.mgr._schedulable_hosts())
         # 2) Map the inventory to the InventoryHost object
         host_ds_map = []
 
@@ -253,7 +256,7 @@ class OSDService(CephService):
         if not osdspecs:
             self.mgr.log.debug("No OSDSpecs found")
             return []
-        return sum([spec.placement.filter_matching_hostspecs(self.mgr.inventory.all_specs()) for spec in osdspecs], [])
+        return sum([spec.placement.filter_matching_hostspecs(self.mgr._schedulable_hosts()) for spec in osdspecs], [])
 
     def resolve_osdspecs_for_host(self, host: str,
                                   specs: Optional[List[DriveGroupSpec]] = None) -> List[DriveGroupSpec]:
@@ -263,7 +266,7 @@ class OSDService(CephService):
             specs = [cast(DriveGroupSpec, spec) for (sn, spec) in self.mgr.spec_store.spec_preview.items()
                      if spec.service_type == 'osd']
         for spec in specs:
-            if host in spec.placement.filter_matching_hostspecs(self.mgr.inventory.all_specs()):
+            if host in spec.placement.filter_matching_hostspecs(self.mgr._schedulable_hosts()):
                 self.mgr.log.debug(f"Found OSDSpecs for host: <{host}> -> <{spec}>")
                 matching_specs.append(spec)
         return matching_specs
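
All three call sites above make the same substitution: the candidate list handed to the placement's filter_matching_hostspecs() is now self.mgr._schedulable_hosts() instead of the full inventory. The implementation of _schedulable_hosts() is not part of this diff; presumably it returns the host specs minus those labelled _no_schedule, i.e. the same filtering idea sketched after the commit message. The toy example below illustrates why that pre-filtering is needed: a placement that selects hosts by label would otherwise still match a draining host.

# Toy illustration only -- placement and host specs are reduced to a label
# check over (hostname, labels) tuples; the real PlacementSpec/HostSpec types
# live in ceph.deployment and are not reimplemented by this diff.
def filter_matching_hostnames(placement_label: str, hosts) -> list:
    """hosts: iterable of (hostname, labels); returns matching hostnames."""
    return [name for name, labels in hosts if placement_label in labels]

all_hosts = [('osd-1', ['osd']), ('osd-2', ['osd', '_no_schedule'])]
schedulable = [(n, l) for n, l in all_hosts if '_no_schedule' not in l]

print(filter_matching_hostnames('osd', all_hosts))    # ['osd-1', 'osd-2']
print(filter_matching_hostnames('osd', schedulable))  # ['osd-1'] -- the desired result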