git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
Merge PR #32003 into master
author    Sage Weil <sage@redhat.com>
          Thu, 12 Dec 2019 23:05:11 +0000 (17:05 -0600)
committer Sage Weil <sage@redhat.com>
          Thu, 12 Dec 2019 23:05:11 +0000 (17:05 -0600)
* refs/pull/32003/head:
mgr/ssh fix tests for update_mds/mgr/mds/rgw/rbd-mirror
Allow List[HostSpec] in PlacementSpec
mgr/rook: adapt to arg passing change in orchestrator_cli.py
mgr/ssh: Add SimpleScheduler and streamline arg passing
orch_cli: adapt to changes in PlacementSpec and ServiceSpec
orch: extend PlacementSpec and add StatefulServiceSpec

Reviewed-by: Sebastian Wagner <swagner@suse.com>
src/pybind/mgr/cephadm/module.py
src/pybind/mgr/orchestrator_cli/module.py

index 51d0b2a56c47c04432fe5dff4eab99f2061f3b80,40a09d95a4cd424c79e4a43e65529f061c29606c..07f2d24d0cdc9705d900b049687c2b3dcdc0bed9
@@@ -1475,4 -1472,119 +1472,120 @@@ class CephadmOrchestrator(MgrModule, or
                      'current_name': s.container_image_name,
                      'current_id': s.container_image_id,
                  }
 -        return trivial_result(json.dumps(r, indent=4))
 +        return trivial_result(json.dumps(r, indent=4, sort_keys=True))
+
+ class BaseScheduler(object):
+     """
+     Base Scheduler Interface
+
+     * requires a placement_spec
+
+     `place(host_pool)` needs to return a List[HostSpec, ..]
+     """
+
+     def __init__(self, placement_spec):
+         # type: (orchestrator.PlacementSpec) -> None
+         self.placement_spec = placement_spec
+
+     def place(self, host_pool, count=None):
+         raise NotImplementedError
+
+ class SimpleScheduler(BaseScheduler):
+     """
+     The simplest way to pick/schedule a set of hosts:
+     1) Shuffle the provided host_pool
+     2) Select from the list up to :count
+     """
+
+     def __init__(self, placement_spec):
+         super(SimpleScheduler, self).__init__(placement_spec)
+
+     def place(self, host_pool, count=None):
+         # type: (List, Optional[int]) -> List[HostSpec]
+         if not host_pool:
+             raise Exception('List of host candidates is empty')
+         host_pool = [HostSpec(x, '', '') for x in host_pool]
+         # shuffle for pseudo-random selection
+         random.shuffle(host_pool)
+         return host_pool[:count]
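
A quick illustration (not part of this commit): SimpleScheduler can be exercised on its own. place() only looks at the host pool and count; the placement spec is stored but not consulted, and the host names below are invented.

    sched = SimpleScheduler(placement_spec=None)   # spec is stored but unused by place()
    picked = sched.place(['node0', 'node1', 'node2'], count=2)
    # `picked` holds two HostSpec entries chosen pseudo-randomly from the pool
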
+
+ class NodeAssignment(object):
+     """
+     A class to detect whether nodes are being passed imperatively or declaratively.
+
+     If the spec is populated via the `nodes/hosts` field (imperative), it will not load
+     any additional nodes into the list.
+     If the spec isn't populated, i.e. when only `count` or `label` is present (declarative),
+     it will use the provided `get_hosts_func` to load nodes from the inventory.
+
+     Schedulers can be assigned to pick hosts from the pool.
+     """
+
+     def __init__(self,
+                  spec=None,  # type: orchestrator.ServiceSpec
+                  scheduler=None,  # type: BaseScheduler
+                  get_hosts_func=None,  # type: Callable
+                  service_type=None,  # type: str
+                  ):
+         assert spec and get_hosts_func and service_type
+         self.spec = spec
+         self.scheduler = scheduler if scheduler else SimpleScheduler(self.spec.placement)
+         self.get_hosts_func = get_hosts_func
+         self.service_type = service_type
+
+     def load(self):
+         # type: () -> orchestrator.ServiceSpec
+         """
+         Load nodes into the spec.placement.nodes container.
+         """
+         self.load_labeled_nodes()
+         self.assign_nodes()
+         return self.spec
+
+     def load_labeled_nodes(self):
+         # type: () -> None
+         """
+         Assign nodes based on their label.
+         """
+         # Querying for labeled nodes doesn't work currently.
+         # Leaving this open for the next iteration.
+         # NOTE: This currently queries for all hosts without label restriction.
+         if self.spec.placement.label:
+             logger.info("Found labels. Assigning nodes that match the label")
+             candidates = [HostSpec(x[0], '', '') for x in self.get_hosts_func()]  # TODO: query for labels
+             logger.info('Assigning nodes to spec: {}'.format(candidates))
+             self.spec.placement.set_nodes(candidates)
+
+     def assign_nodes(self):
+         # type: () -> None
+         """
+         Use the assigned scheduler to load nodes into the spec.placement.nodes container.
+         """
+         # If there are no imperative or declarative host assignments, use the scheduler
+         # to pick from the host pool (assuming `count` is set)
+         if not self.spec.placement.label and not self.spec.placement.nodes and self.spec.placement.count:
+             logger.info("Found count spec. Looking for labeled nodes.")
+             # TODO: actually query for labels (self.service_type)
+             candidates = self.scheduler.place([x[0] for x in self.get_hosts_func()],
+                                               count=self.spec.placement.count)
+             # Not enough labeled nodes to deploy on
+             if len(candidates) != self.spec.placement.count:
+                 logger.warning("Did not find enough labeled nodes to "
+                                "scale to <{}> services. Falling back to unlabeled nodes.".
+                                format(self.spec.placement.count))
+             else:
+                 logger.info('Assigning nodes to spec: {}'.format(candidates))
+                 self.spec.placement.set_nodes(candidates)
+                 return None
+
+             # Fall back to the full (unlabeled) host pool
+             candidates = self.scheduler.place([x[0] for x in self.get_hosts_func()],
+                                               count=self.spec.placement.count)
+             # Still not enough nodes to deploy on
+             if len(candidates) != self.spec.placement.count:
+                 raise OrchestratorValidationError("Cannot place {} services on {} hosts.".
+                                                   format(self.spec.placement.count, len(candidates)))
+
+             logger.info('Assigning nodes to spec: {}'.format(candidates))
+             self.spec.placement.set_nodes(candidates)
+             return None
++
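
To make the imperative/declarative split concrete, here is a usage sketch (not part of this commit). ExamplePlacement, ExampleSpec and get_hosts are invented stand-ins for a ServiceSpec-like object and the mgr inventory callback; they model only the fields the code above actually touches.

    class ExamplePlacement(object):
        # stand-in for the real PlacementSpec: just the members NodeAssignment uses
        def __init__(self, count=None, label=None):
            self.count, self.label, self.nodes = count, label, []

        def set_nodes(self, nodes):
            self.nodes = nodes

    class ExampleSpec(object):
        # stand-in for a ServiceSpec carrying a placement
        def __init__(self, count):
            self.placement = ExamplePlacement(count=count)

    def get_hosts():
        # stand-in for the mgr inventory callback; yields (hostname, ...) tuples
        return [('node0',), ('node1',), ('node2',)]

    # Declarative placement: only `count` is set, so assign_nodes() has SimpleScheduler
    # pick two hosts from the pool and write them back via set_nodes().
    spec = NodeAssignment(spec=ExampleSpec(count=2),
                          get_hosts_func=get_hosts,
                          service_type='mds').load()
    assert len(spec.placement.nodes) == 2

An imperative spec, by contrast, arrives with placement.nodes already filled in; in that case neither load_labeled_nodes() nor assign_nodes() changes it.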