From: Sage Weil
Date: Thu, 12 Dec 2019 23:05:11 +0000 (-0600)
Subject: Merge PR #32003 into master
X-Git-Tag: v15.1.0~524
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=b640a76e588a6a80c3c71128ba4ba2b1cb050cea;p=ceph.git

Merge PR #32003 into master

* refs/pull/32003/head:
    mgr/ssh fix tests for update_mds/mgr/mds/rgw/rbd-mirror
    Allow List[HostSpec] in PlacementSpec
    mgr/rook: adapt to arg passing change in orchestrator_cli.py
    mgr/ssh: Add SimpleScheduler and streamline arg passing
    orch_cli: adapt to changes in PlacementSpec and ServiceSpec
    orch: extend PlacementSpec and add StatefulServiceSpec

Reviewed-by: Sebastian Wagner
---

b640a76e588a6a80c3c71128ba4ba2b1cb050cea
diff --cc src/pybind/mgr/cephadm/module.py
index 51d0b2a56c47,40a09d95a4cd..07f2d24d0cdc
--- a/src/pybind/mgr/cephadm/module.py
+++ b/src/pybind/mgr/cephadm/module.py
@@@ -1475,4 -1472,119 +1472,120 @@@ class CephadmOrchestrator(MgrModule, or
              'current_name': s.container_image_name,
              'current_id': s.container_image_id,
          }
-         return trivial_result(json.dumps(r, indent=4))
+         return trivial_result(json.dumps(r, indent=4, sort_keys=True))
+
+
+ class BaseScheduler(object):
+     """
+     Base Scheduler Interface
+
+     * requires a placement_spec
+
+     `place(host_pool)` needs to return a List[HostSpec]
+     """
+
+     def __init__(self, placement_spec):
+         # type: (orchestrator.PlacementSpec) -> None
+         self.placement_spec = placement_spec
+
+     def place(self, host_pool, count=None):
+         raise NotImplementedError
+
+
+ class SimpleScheduler(BaseScheduler):
+     """
+     The simplest way to pick/schedule a set of hosts:
+     1) Shuffle the provided host_pool
+     2) Select from the list up to :count
+     """
+     def __init__(self, placement_spec):
+         super(SimpleScheduler, self).__init__(placement_spec)
+
+     def place(self, host_pool, count=None):
+         # type: (List, Optional[int]) -> List
+         if not host_pool:
+             raise Exception('List of host candidates is empty')
+         host_pool = [HostSpec(x, '', '') for x in host_pool]
+         # shuffle for pseudo random selection
+         random.shuffle(host_pool)
+         return host_pool[:count]
+
+
+ class NodeAssignment(object):
+     """
+     A class to detect whether nodes are being passed imperatively or
+     declaratively.
+     If the spec is populated via the `nodes/hosts` field it will not load
+     any nodes into the list.
+     If the spec isn't populated, i.e. when only count or label is present
+     (declarative), it will use the provided `get_hosts_func` to load hosts
+     from the inventory.
+
+     Schedulers can be assigned to pick hosts from the pool.
+     """
+
+     def __init__(self,
+                  spec=None,  # type: orchestrator.PlacementSpec
+                  scheduler=None,  # type: BaseScheduler
+                  get_hosts_func=None,  # type: Callable
+                  service_type=None,  # type: str
+                  ):
+         assert spec and get_hosts_func and service_type
+         self.spec = spec
+         self.scheduler = scheduler if scheduler else SimpleScheduler(self.spec.placement)
+         self.get_hosts_func = get_hosts_func
+         self.service_type = service_type
+
+     def load(self):
+         # type: () -> orchestrator.PlacementSpec
+         """
+         Load nodes into the spec.placement.nodes container.
+         """
+         self.load_labeled_nodes()
+         self.assign_nodes()
+         return self.spec
+
+     def load_labeled_nodes(self):
+         # type: () -> None
+         """
+         Assign nodes based on their label
+         """
+         # Querying for labeled nodes doesn't work currently.
+         # Leaving this open for the next iteration
+         # NOTE: This currently queries for all hosts without label restriction
+         if self.spec.placement.label:
+             logger.info("Found labels. Assigning nodes that match the label")
+             candidates = [HostSpec(x[0], '', '') for x in self.get_hosts_func()]  # TODO: query for labels
+             logger.info('Assigning nodes to spec: {}'.format(candidates))
+             self.spec.placement.set_nodes(candidates)
+
+     def assign_nodes(self):
+         # type: () -> None
+         """
+         Use the assigned scheduler to load nodes into the spec.placement.nodes container
+         """
+         # If no imperative or declarative host assignments were given, use the
+         # scheduler to pick from the host pool (assuming `count` is set)
+         if not self.spec.placement.label and not self.spec.placement.nodes and self.spec.placement.count:
+             logger.info("Found count spec. Using {} to assign nodes.".format(self.scheduler))
+             # TODO: actually query for labels (self.service_type)
+             candidates = self.scheduler.place([x[0] for x in self.get_hosts_func()],
+                                               count=self.spec.placement.count)
+             # Not enough nodes to deploy on
+             if len(candidates) != self.spec.placement.count:
+                 logger.warning("Did not find enough labeled nodes to "
+                                "scale to <{}> services. Falling back to unlabeled nodes.".
+                                format(self.spec.placement.count))
+             else:
+                 logger.info('Assigning nodes to spec: {}'.format(candidates))
+                 self.spec.placement.set_nodes(candidates)
+                 return None
+
+         candidates = self.scheduler.place([x[0] for x in self.get_hosts_func()], count=self.spec.placement.count)
+         # Not enough nodes to deploy on
+         if len(candidates) != self.spec.placement.count:
+             raise OrchestratorValidationError("Cannot place {} services on {} hosts.".
+                                               format(self.spec.placement.count, len(candidates)))
+
+         logger.info('Assigning nodes to spec: {}'.format(candidates))
+         self.spec.placement.set_nodes(candidates)
+         return None
++
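
Editor's note: for readers tracing the new scheduling path, below is a minimal,
self-contained sketch of how SimpleScheduler and a declarative PlacementSpec are
meant to interact. It is not part of the commit: HostSpec and PlacementSpec are
reduced here to stand-ins carrying only the fields the diff touches, and
get_hosts_func fakes the mgr inventory callback that the real module wires in.

    import random

    class HostSpec(object):
        # Stand-in for orchestrator.HostSpec; the diff only ever passes
        # (hostname, '', ''), so network/name are accepted and ignored.
        def __init__(self, hostname, network='', name=''):
            self.hostname = hostname

        def __repr__(self):
            return 'HostSpec(%r)' % self.hostname

    class PlacementSpec(object):
        # Stand-in exposing only the fields NodeAssignment reads/writes.
        def __init__(self, label=None, nodes=None, count=None):
            self.label = label
            self.nodes = nodes or []
            self.count = count

        def set_nodes(self, nodes):
            self.nodes = nodes

    class SimpleScheduler(object):
        # Same selection logic as the diff: shuffle the pool, slice to count.
        def __init__(self, placement_spec):
            self.placement_spec = placement_spec

        def place(self, host_pool, count=None):
            if not host_pool:
                raise Exception('List of host candidates is empty')
            host_pool = [HostSpec(x, '', '') for x in host_pool]
            # shuffle for pseudo random selection
            random.shuffle(host_pool)
            return host_pool[:count]

    def get_hosts_func():
        # Fakes the inventory query; the real callback yields host tuples.
        return [('node1',), ('node2',), ('node3',)]

    # Declarative placement: neither a label nor explicit nodes, only a
    # count, so assign_nodes() hands the whole pool to the scheduler.
    spec = PlacementSpec(count=2)
    scheduler = SimpleScheduler(spec)
    candidates = scheduler.place([x[0] for x in get_hosts_func()], count=spec.count)
    spec.set_nodes(candidates)
    print(spec.nodes)   # e.g. [HostSpec('node2'), HostSpec('node1')]

Shuffling before slicing keeps the pick cheap and roughly uniform; anything
smarter (labels, rack awareness, utilization) is meant to subclass
BaseScheduler and override place(), which is why NodeAssignment accepts the
scheduler as an injectable argument and only defaults to SimpleScheduler.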