git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
mgr/cephadm: PEP8tify schedule.py
author     Sebastian Wagner <sebastian.wagner@suse.com>
           Tue, 25 Aug 2020 13:30:51 +0000 (15:30 +0200)
committer  Sebastian Wagner <sebastian.wagner@suse.com>
           Tue, 25 Aug 2020 13:30:51 +0000 (15:30 +0200)
Signed-off-by: Sebastian Wagner <sebastian.wagner@suse.com>
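
The change is a whitespace-only PEP8 cleanup: two blank lines before top-level classes, two spaces before inline type comments, trailing whitespace stripped, continuation lines aligned with the opening parenthesis, and an over-long error message wrapped. A minimal verification sketch follows; the commit does not say which checker was used, so pycodestyle and the 100-column limit below are assumptions for illustration only.

# Hedged verification sketch -- pycodestyle and the line-length limit are
# assumptions; the commit itself does not name a tool or a column limit.
import pycodestyle

def count_pep8_violations(path="src/pybind/mgr/cephadm/schedule.py"):
    # StyleGuide/check_files is the documented pycodestyle API; the returned
    # report exposes total_errors, the number of remaining violations.
    style = pycodestyle.StyleGuide(max_line_length=100)
    report = style.check_files([path])
    return report.total_errors

if __name__ == "__main__":
    print("remaining PEP8 issues:", count_pep8_violations())
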
src/pybind/mgr/cephadm/schedule.py

index 17fbe6bcb24766c5266fe0bff1aba59236db5e2c..5fada81961c1a71efa6da5a05e7723b529968922 100644
@@ -10,6 +10,7 @@ from orchestrator import OrchestratorValidationError
 logger = logging.getLogger(__name__)
 T = TypeVar('T')
 
+
 class BaseScheduler(object):
     """
     Base Scheduler Interface
@@ -34,6 +35,7 @@ class SimpleScheduler(BaseScheduler):
     1) Shuffle the provided host_pool
     2) Select from list up to :count
     """
+
     def __init__(self, spec):
         super(SimpleScheduler, self).__init__(spec)
 
@@ -54,8 +56,8 @@ class HostAssignment(object):
     def __init__(self,
                  spec,  # type: ServiceSpec
                  get_hosts_func,  # type: Callable
-                 get_daemons_func, # type: Callable[[str],List[orchestrator.DaemonDescription]]
-                 filter_new_host=None, # type: Optional[Callable[[str],bool]]
+                 get_daemons_func,  # type: Callable[[str],List[orchestrator.DaemonDescription]]
+                 filter_new_host=None,  # type: Optional[Callable[[str],bool]]
                  scheduler=None,  # type: Optional[BaseScheduler]
                  ):
         assert spec and get_hosts_func and get_daemons_func
@@ -145,7 +147,7 @@ class HostAssignment(object):
             # if a host already has the anticipated daemon, merge it with the candidates
             # to get a list of HostPlacementSpec that can be deployed on.
             return list(merge_hostspecs(hosts_with_daemons, others))
-    
+
     def get_hosts_with_active_daemon(self, hosts: List[HostPlacementSpec]) -> List[HostPlacementSpec]:
         active_hosts: List['HostPlacementSpec'] = []
         for daemon in self.daemons:
@@ -155,7 +157,7 @@ class HostAssignment(object):
                         active_hosts.append(h)
         # remove duplicates before returning
         return list(dict.fromkeys(active_hosts))
-    
+
     def prefer_hosts_with_active_daemons(self, hosts: List[HostPlacementSpec], count) -> List[HostPlacementSpec]:
         # try to prefer host with active daemon if possible
         active_hosts = self.get_hosts_with_active_daemon(hosts)
@@ -166,7 +168,7 @@ class HostAssignment(object):
                 return self.scheduler.place(active_hosts, count)
             else:
                 return list(merge_hostspecs(self.scheduler.place(active_hosts, count),
-                            self.scheduler.place(hosts, count - len(active_hosts))))
+                                            self.scheduler.place(hosts, count - len(active_hosts))))
         # ask the scheduler to return a set of hosts with a up to the value of <count>
         return self.scheduler.place(hosts, count)
 
@@ -201,7 +203,8 @@ class HostAssignment(object):
             ]
         # If none of the above and also no <count>
         if self.spec.placement.count is None:
-            raise OrchestratorValidationError("placement spec is empty: no hosts, no label, no pattern, no count")
+            raise OrchestratorValidationError(
+                "placement spec is empty: no hosts, no label, no pattern, no count")
         # backward compatibility: consider an empty placements to be the same pattern = *
         return [
             HostPlacementSpec(x, '', '')
@@ -249,4 +252,3 @@ def difference_hostspecs(l: List[HostPlacementSpec], r: List[HostPlacementSpec])
     """
     r_names = {h.hostname for h in r}
     return [h for h in l if h.hostname not in r_names]
-
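
For context on the final hunk: difference_hostspecs filters the left-hand list by hostname only, ignoring the network and name fields of HostPlacementSpec. A self-contained sketch of that behaviour, modelling HostPlacementSpec as a plain namedtuple with the (hostname, network, name) shape implied by the HostPlacementSpec(x, '', '') call in the diff (the real class lives elsewhere in the Ceph tree):

# Stand-in for illustration only; mirrors the fields used in the diff above.
from collections import namedtuple
from typing import List

HostPlacementSpec = namedtuple('HostPlacementSpec', ['hostname', 'network', 'name'])

def difference_hostspecs(l: List[HostPlacementSpec],
                         r: List[HostPlacementSpec]) -> List[HostPlacementSpec]:
    # Same logic as the hunk above: keep entries of `l` whose hostname
    # does not appear in `r`, comparing by hostname only.
    r_names = {h.hostname for h in r}
    return [h for h in l if h.hostname not in r_names]

left = [HostPlacementSpec('host1', '', ''), HostPlacementSpec('host2', '10.0.0.0/24', 'a')]
right = [HostPlacementSpec('host2', '', '')]
assert difference_hostspecs(left, right) == [HostPlacementSpec('host1', '', '')]
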