mgr/rook: PlacementSpec->host matching adaptation
author Joshua Schmid <jschmid@suse.de>
Tue, 12 May 2020 08:24:25 +0000 (10:24 +0200)
committer Sebastian Wagner <sebastian.wagner@suse.com>
Mon, 8 Jun 2020 11:52:24 +0000 (13:52 +0200)
Signed-off-by: Joshua Schmid <jschmid@suse.de>
(cherry picked from commit d488de90ad7fbcf4ea838d8ca33511fdae7823d8)

src/pybind/mgr/rook/module.py
src/pybind/mgr/rook/rook_cluster.py

diff --git a/src/pybind/mgr/rook/module.py b/src/pybind/mgr/rook/module.py
index 3459f2ce62136079f8884a5d88e20c33d4c41a5a..44228731ba0085646798b0858ad2312a9cf6e1a1 100644
--- a/src/pybind/mgr/rook/module.py
+++ b/src/pybind/mgr/rook/module.py
@@ -478,7 +478,7 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
         )
 
     def create_osds(self, drive_group):
-        # type: (DriveGroupSpec) -> RookCompletion
+        # type: (DriveGroupSpec) -> orchestrator.Completion
         """ Creates OSDs from a drive group specification.
 
         $: ceph orch osd create -i <dg.file>
@@ -492,10 +492,9 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
         if drive_group.data_directories:
             targets += drive_group.data_directories
 
-        def execute(all_hosts_):
-            # type: (List[orchestrator.HostSpec]) -> orchestrator.Completion
-            all_hosts = [h.hostname for h in all_hosts_]
-            matching_hosts = drive_group.placement.pattern_matches_hosts(all_hosts)
+        def execute():
+            # type: () -> orchestrator.Completion
+            matching_hosts = drive_group.placement.filter_matching_hosts(self.get_hosts)
 
             assert len(matching_hosts) == 1
 
@@ -514,13 +513,12 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
                         matching_hosts,
                         targets),
                 mgr=self,
-                on_complete=lambda _:self.rook_cluster.add_osds(drive_group, all_hosts),
-                calc_percent=lambda: has_osds(all_hosts)
+                on_complete=lambda _:self.rook_cluster.add_osds(drive_group, matching_hosts),
+                calc_percent=lambda: has_osds(matching_hosts)
             )
 
         @deferred_read
-        def has_osds(all_hosts):
-            matching_hosts = drive_group.placement.pattern_matches_hosts(all_hosts)
+        def has_osds(matching_hosts):
 
             # Find OSD pods on this host
             pod_osd_ids = set()
@@ -551,7 +549,7 @@ class RookOrchestrator(MgrModule, orchestrator.Orchestrator):
 
             return found is not None
 
-        c = self.get_hosts().then(execute)
+        c = execute()
         return c
 
     def blink_device_light(self, ident_fault: str, on: bool, locs: List[orchestrator.DeviceLightLoc]) -> RookCompletion:
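For context, the filter_matching_hosts() call introduced in this hunk moves host matching into the placement spec itself. Below is a minimal sketch, assuming a simplified PlacementSpec that only glob-matches hostnames (the real spec also supports explicit host lists, labels and counts), of the shape the patched create_osds() now relies on; class bodies here are illustrative, not the actual ceph implementation.

# Minimal sketch (assumption, not the real ceph code): filter_matching_hosts()
# takes a callable that returns the known hosts and yields only the matching
# hostnames, so create_osds() can resolve hosts synchronously instead of
# chaining get_hosts().then(execute).
import fnmatch
from typing import Callable, Iterable, List


class HostSpec:
    def __init__(self, hostname: str) -> None:
        self.hostname = hostname


class PlacementSpec:
    def __init__(self, host_pattern: str) -> None:
        self.host_pattern = host_pattern

    def filter_matching_hosts(self, get_hosts: Callable[[], Iterable[HostSpec]]) -> List[str]:
        # Assumed behaviour: call the supplied host provider and keep the
        # hostnames that match the placement pattern.
        return [h.hostname for h in get_hosts()
                if fnmatch.fnmatch(h.hostname, self.host_pattern)]


# Mirrors the patched create_osds(): the orchestrator passes its own host
# provider and gets back the already-filtered hostnames.
get_hosts = lambda: [HostSpec('node1'), HostSpec('node2')]
matching_hosts = PlacementSpec('node1').filter_matching_hosts(get_hosts)
assert matching_hosts == ['node1']

With the matching hostnames resolved up front, the on_complete and calc_percent closures can capture matching_hosts directly, and the deferred c = self.get_hosts().then(execute) chain collapses to c = execute().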
diff --git a/src/pybind/mgr/rook/rook_cluster.py b/src/pybind/mgr/rook/rook_cluster.py
index cd4b58990039c88b2d659a5f7d33d389bd724582..05c533ae85762a0ea78c381120df0df1b0083651 100644
--- a/src/pybind/mgr/rook/rook_cluster.py
+++ b/src/pybind/mgr/rook/rook_cluster.py
@@ -521,7 +521,7 @@ class RookCluster(object):
             return new
         return self._patch(cnfs.CephNFS, 'cephnfses',svc_id, _update_nfs_count)
 
-    def add_osds(self, drive_group, all_hosts):
+    def add_osds(self, drive_group, matching_hosts):
         # type: (DriveGroupSpec, List[str]) -> str
         """
         Rook currently (0.8) can only do single-drive OSDs, so we
@@ -543,7 +543,7 @@ class RookCluster(object):
                 new_cluster.spec.storage.nodes = ccl.NodesList()
 
             current_nodes = getattr(current_cluster.spec.storage, 'nodes', ccl.NodesList())
-            matching_host = drive_group.placement.pattern_matches_hosts(all_hosts)[0]
+            matching_host = matching_hosts[0]
 
             if matching_host not in [n.name for n in current_nodes]:
                 pd = ccl.NodesItem(
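On the RookCluster side the contract changes accordingly: add_osds() now receives the already-resolved hostnames instead of the full host list and simply takes the first one, since Rook at this point only drives single-drive OSD creation (per the docstring above). A hedged sketch of the new calling convention; the class name and return string below are illustrative only.

# Illustrative only: the caller resolves matching hosts once and hands the
# list to add_osds(), which no longer re-runs pattern matching itself.
from typing import List


class RookClusterSketch:
    def add_osds(self, drive_group: object, matching_hosts: List[str]) -> str:
        # As in the patched hunk: only the first matching host is used.
        matching_host = matching_hosts[0]
        return "Creating OSDs on %s" % matching_host


cluster = RookClusterSketch()
print(cluster.add_osds(drive_group=None, matching_hosts=['node1']))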