From: Sage Weil
Date: Wed, 10 Mar 2021 00:01:39 +0000 (-0500)
Subject: mgr/cephadm/schedule: calculate additions/removals in place()
X-Git-Tag: v17.1.0~2613^2~4
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=0000b21b71b2eb7dd2710a81fbf2e8f245f367fd;p=ceph.git

mgr/cephadm/schedule: calculate additions/removals in place()

We already have to examine the existing daemons in order to choose
placements. There is no reason to make the caller invoke separate
methods to (re)calculate what the net additions and removals are.

Signed-off-by: Sage Weil
---

diff --git a/src/pybind/mgr/cephadm/migrations.py b/src/pybind/mgr/cephadm/migrations.py
index b48165836c75..cf30d15c28e7 100644
--- a/src/pybind/mgr/cephadm/migrations.py
+++ b/src/pybind/mgr/cephadm/migrations.py
@@ -90,7 +90,7 @@ class Migrations:
         def convert_to_explicit(spec: ServiceSpec) -> None:
             existing_daemons = self.mgr.cache.get_daemons_by_service(spec.service_name())
-            placements = HostAssignment(
+            placements, to_add, to_remove = HostAssignment(
                 spec=spec,
                 hosts=self.mgr.inventory.all_specs(),
                 daemons=existing_daemons,
diff --git a/src/pybind/mgr/cephadm/module.py b/src/pybind/mgr/cephadm/module.py
index 6e2c735a59ab..d9cdd8f88ab2 100644
--- a/src/pybind/mgr/cephadm/module.py
+++ b/src/pybind/mgr/cephadm/module.py
@@ -2093,16 +2093,13 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule,
             allow_colo=self.cephadm_services[spec.service_type].allow_colo(),
         )
         ha.validate()
-        hosts = ha.place()
-
-        add_daemon_hosts = ha.add_daemon_hosts(hosts)
-        remove_daemon_hosts = ha.remove_daemon_hosts(hosts)
+        hosts, to_add, to_remove = ha.place()

         return {
             'service_name': spec.service_name(),
             'service_type': spec.service_type,
-            'add': [hs.hostname for hs in add_daemon_hosts],
-            'remove': [d.hostname for d in remove_daemon_hosts]
+            'add': [hs.hostname for hs in to_add],
+            'remove': [d.hostname for d in to_remove]
         }

     @handle_orch_error
diff --git a/src/pybind/mgr/cephadm/schedule.py b/src/pybind/mgr/cephadm/schedule.py
index 31ce9b9a6265..6f33856f9ce5 100644
--- a/src/pybind/mgr/cephadm/schedule.py
+++ b/src/pybind/mgr/cephadm/schedule.py
@@ -1,6 +1,6 @@
 import logging
 import random
-from typing import List, Optional, Callable, Iterable, TypeVar, Set
+from typing import List, Optional, Callable, TypeVar, Set, Tuple

 import orchestrator
 from ceph.deployment.service_spec import HostPlacementSpec, ServiceSpec
@@ -109,7 +109,7 @@ class HostAssignment(object):
                 f'hosts for label {self.spec.placement.label}')

     def place(self):
-        # type: () -> List[HostPlacementSpec]
+        # type: () -> Tuple[List[HostPlacementSpec], List[HostPlacementSpec], List[orchestrator.DaemonDescription]]
         """
         Generate a list of HostPlacementSpec taking into account:
@@ -125,20 +125,15 @@
         # get candidate hosts based on [hosts, label, host_pattern]
         candidates = self.get_candidates()  # type: List[HostPlacementSpec]

-        if not candidates:
-            return []  # sigh
-
-        # If we don't have <count> the list of candidates is definitive.
+        # consider enough slots to fulfill target count-per-host or count
         if count is None:
-            logger.debug('Provided hosts: %s' % candidates)
             if self.spec.placement.count_per_host:
                 per_host = self.spec.placement.count_per_host
             else:
                 per_host = 1
-            return candidates * per_host
-
-        if self.allow_colo:
-            # consider enough slots to fit target count
+            candidates = candidates * per_host
+        elif self.allow_colo and candidates:
             per_host = 1 + ((count - 1) // len(candidates))
             candidates = candidates * per_host
@@ -151,37 +146,48 @@
         # sort candidates into existing/used slots that already have a
         # daemon, and others (the rest)
-        existing_active: List[HostPlacementSpec] = []
-        existing_standby: List[HostPlacementSpec] = []
-        existing: List[HostPlacementSpec] = []
-        others = candidates
+        existing_active: List[orchestrator.DaemonDescription] = []
+        existing_standby: List[orchestrator.DaemonDescription] = []
+        existing_slots: List[HostPlacementSpec] = []
+        to_remove: List[orchestrator.DaemonDescription] = []
+        others = candidates.copy()
         for d in daemons:
             hs = d.get_host_placement()
+            found = False
             for i in others:
                 if i == hs:
                     others.remove(i)
                     if d.is_active:
-                        existing_active.append(hs)
+                        existing_active.append(d)
                     else:
-                        existing_standby.append(hs)
-                    existing.append(hs)
+                        existing_standby.append(d)
+                    existing_slots.append(hs)
+                    found = True
                     break
-        logger.debug('Hosts with existing active daemons: {}'.format(existing_active))
-        logger.debug('Hosts with existing standby daemons: {}'.format(existing_standby))
+            if not found:
+                to_remove.append(d)
+
+        existing = existing_active + existing_standby
+
+        # If we don't have <count> the list of candidates is definitive.
+        if count is None:
+            logger.debug('Provided hosts: %s' % candidates)
+            return candidates, others, to_remove

         # The number of new slots that need to be selected in order to fulfill count
         need = count - len(existing)

         # we don't need any additional placements
         if need <= 0:
-            del existing[count:]
-            return existing
+            to_remove.extend(existing[count:])
+            del existing_slots[count:]
+            return existing_slots, [], to_remove

         # ask the scheduler to select additional slots
-        chosen = self.scheduler.place(others, need)
+        to_add = self.scheduler.place(others, need)
         logger.debug('Combine hosts with existing daemons %s + new hosts %s' % (
-            existing, chosen))
-        return existing + chosen
+            existing, to_add))
+        return existing_slots + to_add, to_add, to_remove

     def add_daemon_hosts(self, host_pool: List[HostPlacementSpec]) -> List[HostPlacementSpec]:
         hosts_with_daemons = {d.hostname for d in self.daemons}
@@ -239,37 +245,3 @@
         random.Random(seed).shuffle(hosts)
         return hosts
-
-
-def merge_hostspecs(
-    lh: List[HostPlacementSpec],
-    rh: List[HostPlacementSpec]
-) -> Iterable[HostPlacementSpec]:
-    """
-    Merge two lists of HostPlacementSpec by hostname. Always returns `lh` first.
-
-    >>> list(merge_hostspecs([HostPlacementSpec(hostname='h', name='x', network='')],
-    ...                      [HostPlacementSpec(hostname='h', name='y', network='')]))
-    [HostPlacementSpec(hostname='h', network='', name='x')]
-
-    """
-    lh_names = {h.hostname for h in lh}
-    yield from lh
-    yield from (h for h in rh if h.hostname not in lh_names)
-
-
-def difference_hostspecs(
-    lh: List[HostPlacementSpec],
-    rh: List[HostPlacementSpec]
-) -> List[HostPlacementSpec]:
-    """
-    Returns lh "minus" rh by hostname.
-
-    >>> list(difference_hostspecs([HostPlacementSpec(hostname='h1', name='x', network=''),
-    ...                            HostPlacementSpec(hostname='h2', name='y', network='')],
-    ...                           [HostPlacementSpec(hostname='h2', name='', network='')]))
-    [HostPlacementSpec(hostname='h1', network='', name='x')]
-
-    """
-    rh_names = {h.hostname for h in rh}
-    return [h for h in lh if h.hostname not in rh_names]
diff --git a/src/pybind/mgr/cephadm/serve.py b/src/pybind/mgr/cephadm/serve.py
index 920fccb37761..54a0f07744b2 100644
--- a/src/pybind/mgr/cephadm/serve.py
+++ b/src/pybind/mgr/cephadm/serve.py
@@ -2,7 +2,7 @@ import json
 import logging
 from collections import defaultdict
 from contextlib import contextmanager
-from typing import TYPE_CHECKING, Optional, List, cast, Set, Dict, Any, Union, Tuple, Iterator
+from typing import TYPE_CHECKING, Optional, List, cast, Dict, Any, Union, Tuple, Iterator

 from cephadm import remotes
@@ -561,8 +561,8 @@ class CephadmServe:
         )

         try:
-            hosts: List[HostPlacementSpec] = ha.place()
-            self.log.debug('Usable hosts: %s' % hosts)
+            all_slots, slots_to_add, daemons_to_remove = ha.place()
+            self.log.debug('Add %s, remove %s' % (slots_to_add, daemons_to_remove))
         except OrchestratorError as e:
             self.log.error('Failed to apply %s spec %s: %s' % (
                 spec.service_name(), spec, e))
@@ -572,23 +572,21 @@
         r = None

         # sanity check
-        if service_type in ['mon', 'mgr'] and len(hosts) < 1:
-            self.log.debug('cannot scale mon|mgr below 1 (hosts=%s)' % hosts)
+        final_count = len(daemons) + len(slots_to_add) - len(daemons_to_remove)
+        if service_type in ['mon', 'mgr'] and final_count < 1:
+            self.log.debug('cannot scale mon|mgr below 1')
             return False

         # add any?
         did_config = False
-        add_daemon_hosts: List[HostPlacementSpec] = ha.add_daemon_hosts(hosts)
-        self.log.debug('Hosts that will receive new daemons: %s' % add_daemon_hosts)
-
-        remove_daemons: Set[orchestrator.DaemonDescription] = ha.remove_daemon_hosts(hosts)
-        self.log.debug('Daemons that will be removed: %s' % remove_daemons)
+        self.log.debug('Hosts that will receive new daemons: %s' % slots_to_add)
+        self.log.debug('Daemons that will be removed: %s' % daemons_to_remove)

         if service_type == 'ha-rgw':
-            spec = self.update_ha_rgw_definitive_hosts(spec, hosts, add_daemon_hosts)
+            spec = self.update_ha_rgw_definitive_hosts(spec, all_slots, slots_to_add)

-        for host, network, name in add_daemon_hosts:
+        for host, network, name in slots_to_add:
             for daemon_type in service_to_daemon_types(service_type):
                 daemon_id = self.mgr.get_unique_name(daemon_type, host, daemons,
                                                      prefix=spec.service_id,
@@ -626,17 +624,17 @@
                 daemons.append(sd)

         # remove any?
-        def _ok_to_stop(remove_daemons: Set[orchestrator.DaemonDescription]) -> bool:
+        def _ok_to_stop(remove_daemons: List[orchestrator.DaemonDescription]) -> bool:
             daemon_ids = [d.daemon_id for d in remove_daemons]
             assert None not in daemon_ids
             # setting force flag retains previous behavior
             r = svc.ok_to_stop(cast(List[str], daemon_ids), force=True)
             return not r.retval

-        while remove_daemons and not _ok_to_stop(remove_daemons):
+        while daemons_to_remove and not _ok_to_stop(daemons_to_remove):
             # let's find a subset that is ok-to-stop
-            remove_daemons.pop()
-        for d in remove_daemons:
+            daemons_to_remove.pop()
+        for d in daemons_to_remove:
             r = True

             # NOTE: we are passing the 'force' flag here, which means
             # we can delete a mon instance's data.
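To make the caller-side change above concrete, here is a minimal sketch of the
before/after calling pattern (argument lists abbreviated; the real calls in
module.py and serve.py above pass additional parameters):

    # before: place() returned only the combined slot list, and the
    # caller derived the deltas with two follow-up calls
    ha = HostAssignment(spec=spec, hosts=hosts, daemons=daemons)
    slots = ha.place()
    to_add = ha.add_daemon_hosts(slots)
    to_remove = ha.remove_daemon_hosts(slots)

    # after: a single call returns the slots plus both deltas
    all_slots, to_add, to_remove = HostAssignment(
        spec=spec, hosts=hosts, daemons=daemons,
    ).place()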
diff --git a/src/pybind/mgr/cephadm/tests/test_scheduling.py b/src/pybind/mgr/cephadm/tests/test_scheduling.py
index 990e84cc0766..49c420c6ba3b 100644
--- a/src/pybind/mgr/cephadm/tests/test_scheduling.py
+++ b/src/pybind/mgr/cephadm/tests/test_scheduling.py
@@ -127,7 +127,7 @@ def run_scheduler_test(results, mk_spec, hosts, daemons, key_elems):
     except IndexError:
         try:
             spec = mk_spec()
-            host_res = HostAssignment(
+            host_res, to_add, to_remove = HostAssignment(
                 spec=spec,
                 hosts=hosts,
                 daemons=daemons,
@@ -142,7 +142,7 @@
     for _ in range(10):  # scheduler has a random component
         try:
             spec = mk_spec()
-            host_res = HostAssignment(
+            host_res, to_add, to_remove = HostAssignment(
                 spec=spec,
                 hosts=hosts,
                 daemons=daemons
@@ -328,9 +328,11 @@ class NodeAssignmentTest(NamedTuple):
    hosts: List[str]
    daemons: List[DaemonDescription]
    expected: List[str]
+   expected_add: List[str]
+   expected_remove: List[DaemonDescription]


-@pytest.mark.parametrize("service_type,placement,hosts,daemons,expected",
+@pytest.mark.parametrize("service_type,placement,hosts,daemons,expected,expected_add,expected_remove",
    [   # noqa: E128
        # just hosts
        NodeAssignmentTest(
@@ -338,7 +340,7 @@ class NodeAssignmentTest(NamedTuple):
            PlacementSpec(hosts=['smithi060:[v2:172.21.15.60:3301,v1:172.21.15.60:6790]=c']),
            ['smithi060'],
            [],
-           ['smithi060']
+           ['smithi060'], ['smithi060'], []
        ),
        # all_hosts
        NodeAssignmentTest(
@@ -349,7 +351,7 @@ class NodeAssignmentTest(NamedTuple):
            'host1 host2 host3'.split(),
            [
                DaemonDescription('mgr', 'a', 'host1'),
                DaemonDescription('mgr', 'b', 'host2'),
            ],
-           ['host1', 'host2', 'host3']
+           ['host1', 'host2', 'host3'], ['host3'], []
        ),
        # all_hosts + count_per_host
        NodeAssignmentTest(
@@ -360,7 +362,9 @@ class NodeAssignmentTest(NamedTuple):
            'host1 host2 host3'.split(),
            [
                DaemonDescription('mds', 'a', 'host1'),
                DaemonDescription('mds', 'b', 'host2'),
            ],
-           ['host1', 'host2', 'host3', 'host1', 'host2', 'host3']
+           ['host1', 'host2', 'host3', 'host1', 'host2', 'host3'],
+           ['host3', 'host1', 'host2', 'host3'],
+           []
        ),
        # count that is bigger than the number of hosts. Truncate to len(hosts);
        # mgrs should not be co-located with each other.
        NodeAssignmentTest(
@@ -369,7 +373,7 @@ class NodeAssignmentTest(NamedTuple):
            PlacementSpec(count=4),
            'host1 host2 host3'.split(),
            [],
-           ['host1', 'host2', 'host3']
+           ['host1', 'host2', 'host3'], ['host1', 'host2', 'host3'], []
        ),
        # count that is bigger than the number of hosts; wrap around.
        NodeAssignmentTest(
@@ -377,7 +381,9 @@ class NodeAssignmentTest(NamedTuple):
            PlacementSpec(count=6),
            'host1 host2 host3'.split(),
            [],
-           ['host1', 'host2', 'host3', 'host1', 'host2', 'host3']
+           ['host1', 'host2', 'host3', 'host1', 'host2', 'host3'],
+           ['host1', 'host2', 'host3', 'host1', 'host2', 'host3'],
+           []
        ),
        # count + partial host list
        NodeAssignmentTest(
@@ -388,7 +394,7 @@ class NodeAssignmentTest(NamedTuple):
            'host1 host2 host3'.split(),
            [
                DaemonDescription('mgr', 'a', 'host1'),
                DaemonDescription('mgr', 'b', 'host2'),
            ],
-           ['host3']
+           ['host3'], ['host3'], ['mgr.a', 'mgr.b']
        ),
        # count + partial host list (with colo)
        NodeAssignmentTest(
@@ -399,7 +405,7 @@ class NodeAssignmentTest(NamedTuple):
            'host1 host2 host3'.split(),
            [
                DaemonDescription('mgr', 'a', 'host1'),
                DaemonDescription('mgr', 'b', 'host2'),
            ],
-           ['host3', 'host3', 'host3']
+           ['host3', 'host3', 'host3'], ['host3', 'host3', 'host3'], ['mgr.a', 'mgr.b']
        ),
        # count 1 + partial host list
        NodeAssignmentTest(
@@ -410,7 +416,7 @@ class NodeAssignmentTest(NamedTuple):
            'host1 host2 host3'.split(),
            [
                DaemonDescription('mgr', 'a', 'host1'),
                DaemonDescription('mgr', 'b', 'host2'),
            ],
-           ['host3']
+           ['host3'], ['host3'], ['mgr.a', 'mgr.b']
        ),
        # count + partial host list + existing
        NodeAssignmentTest(
@@ -420,7 +426,7 @@ class NodeAssignmentTest(NamedTuple):
            'host1 host2 host3'.split(),
            [
                DaemonDescription('mgr', 'a', 'host1'),
            ],
-           ['host3']
+           ['host3'], ['host3'], ['mgr.a']
        ),
        # count + partial host list + existing (deterministic)
        NodeAssignmentTest(
@@ -430,7 +436,7 @@ class NodeAssignmentTest(NamedTuple):
            'host1 host2 host3'.split(),
            [
                DaemonDescription('mgr', 'a', 'host1'),
            ],
-           ['host1']
+           ['host1'], [], []
        ),
        # count + partial host list + existing (deterministic)
        NodeAssignmentTest(
@@ -440,7 +446,7 @@ class NodeAssignmentTest(NamedTuple):
            'host1 host2 host3'.split(),
            [
                DaemonDescription('mgr', 'a', 'host2'),
            ],
-           ['host1']
+           ['host1'], ['host1'], ['mgr.a']
        ),
        # label only
        NodeAssignmentTest(
@@ -448,7 +454,7 @@ class NodeAssignmentTest(NamedTuple):
            PlacementSpec(label='foo'),
            'host1 host2 host3'.split(),
            [],
-           ['host1', 'host2', 'host3']
+           ['host1', 'host2', 'host3'], ['host1', 'host2', 'host3'], []
        ),
        # label + count (truncate to host list)
        NodeAssignmentTest(
@@ -456,7 +462,7 @@ class NodeAssignmentTest(NamedTuple):
            PlacementSpec(count=4, label='foo'),
            'host1 host2 host3'.split(),
            [],
-           ['host1', 'host2', 'host3']
+           ['host1', 'host2', 'host3'], ['host1', 'host2', 'host3'], []
        ),
        # label + count (with colo)
        NodeAssignmentTest(
@@ -464,7 +470,9 @@ class NodeAssignmentTest(NamedTuple):
            PlacementSpec(count=6, label='foo'),
            'host1 host2 host3'.split(),
            [],
-           ['host1', 'host2', 'host3', 'host1', 'host2', 'host3']
+           ['host1', 'host2', 'host3', 'host1', 'host2', 'host3'],
+           ['host1', 'host2', 'host3', 'host1', 'host2', 'host3'],
+           []
        ),
        # label only + count_per_host
        NodeAssignmentTest(
@@ -473,7 +481,10 @@ class NodeAssignmentTest(NamedTuple):
            'host1 host2 host3'.split(),
            [],
            ['host1', 'host2', 'host3', 'host1', 'host2', 'host3',
-            'host1', 'host2', 'host3']
+            'host1', 'host2', 'host3'],
+           ['host1', 'host2', 'host3', 'host1', 'host2', 'host3',
+            'host1', 'host2', 'host3'],
+           []
        ),
        # host_pattern
        NodeAssignmentTest(
@@ -481,7 +492,7 @@ class NodeAssignmentTest(NamedTuple):
            PlacementSpec(host_pattern='mgr*'),
            'mgrhost1 mgrhost2 datahost'.split(),
            [],
-           ['mgrhost1', 'mgrhost2']
+           ['mgrhost1', 'mgrhost2'], ['mgrhost1', 'mgrhost2'], []
        ),
        # host_pattern + count_per_host
        NodeAssignmentTest(
@@ -489,10 +500,13 @@ class NodeAssignmentTest(NamedTuple):
            PlacementSpec(host_pattern='mds*', count_per_host=3),
            'mdshost1 mdshost2 datahost'.split(),
            [],
-           ['mdshost1', 'mdshost2', 'mdshost1', 'mdshost2', 'mdshost1',
'mdshost2'] + ['mdshost1', 'mdshost2', 'mdshost1', 'mdshost2', 'mdshost1', 'mdshost2'], + ['mdshost1', 'mdshost2', 'mdshost1', 'mdshost2', 'mdshost1', 'mdshost2'], + [] ), ]) -def test_node_assignment(service_type, placement, hosts, daemons, expected): +def test_node_assignment(service_type, placement, hosts, daemons, + expected, expected_add, expected_remove): service_id = None allow_colo = False if service_type == 'rgw': @@ -505,13 +519,32 @@ def test_node_assignment(service_type, placement, hosts, daemons, expected): service_id=service_id, placement=placement) - hosts = HostAssignment( + all_slots, to_add, to_remove = HostAssignment( spec=spec, hosts=[HostSpec(h, labels=['foo']) for h in hosts], daemons=daemons, allow_colo=allow_colo ).place() - assert sorted([h.hostname for h in hosts]) == sorted(expected) + + got = [hs.hostname for hs in all_slots] + num_wildcard = 0 + for i in expected: + if i == '*': + num_wildcard += 1 + else: + got.remove(i) + assert num_wildcard == len(got) + + got = [hs.hostname for hs in to_add] + num_wildcard = 0 + for i in expected_add: + if i == '*': + num_wildcard += 1 + else: + got.remove(i) + assert num_wildcard == len(got) + + assert sorted([d.name() for d in to_remove]) == sorted(expected_remove) class NodeAssignmentTest2(NamedTuple): @@ -586,7 +619,7 @@ class NodeAssignmentTest2(NamedTuple): ]) def test_node_assignment2(service_type, placement, hosts, daemons, expected_len, in_set): - hosts = HostAssignment( + hosts, to_add, to_remove = HostAssignment( spec=ServiceSpec(service_type, placement=placement), hosts=[HostSpec(h, labels=['foo']) for h in hosts], daemons=daemons, @@ -619,7 +652,7 @@ def test_node_assignment2(service_type, placement, hosts, ]) def test_node_assignment3(service_type, placement, hosts, daemons, expected_len, must_have): - hosts = HostAssignment( + hosts, to_add, to_remove = HostAssignment( spec=ServiceSpec(service_type, placement=placement), hosts=[HostSpec(h) for h in hosts], daemons=daemons, @@ -681,7 +714,7 @@ class NodeAssignmentTestBadSpec(NamedTuple): ]) def test_bad_specs(service_type, placement, hosts, daemons, expected): with pytest.raises(OrchestratorValidationError) as e: - hosts = HostAssignment( + hosts, to_add, to_remove = HostAssignment( spec=ServiceSpec(service_type, placement=placement), hosts=[HostSpec(h) for h in hosts], daemons=daemons, @@ -695,9 +728,11 @@ class ActiveAssignmentTest(NamedTuple): hosts: List[str] daemons: List[DaemonDescription] expected: List[List[str]] + expected_add: List[List[str]] + expected_remove: List[List[str]] -@pytest.mark.parametrize("service_type,placement,hosts,daemons,expected", +@pytest.mark.parametrize("service_type,placement,hosts,daemons,expected,expected_add,expected_remove", [ ActiveAssignmentTest( 'mgr', @@ -708,7 +743,9 @@ class ActiveAssignmentTest(NamedTuple): DaemonDescription('mgr', 'b', 'host2'), DaemonDescription('mgr', 'c', 'host3'), ], - [['host1', 'host2'], ['host1', 'host3']] + [['host1', 'host2'], ['host1', 'host3']], + [[]], + [['mgr.b'], ['mgr.c']] ), ActiveAssignmentTest( 'mgr', @@ -719,7 +756,9 @@ class ActiveAssignmentTest(NamedTuple): DaemonDescription('mgr', 'b', 'host2'), DaemonDescription('mgr', 'c', 'host3', is_active=True), ], - [['host1', 'host3'], ['host2', 'host3']] + [['host1', 'host3'], ['host2', 'host3']], + [[]], + [['mgr.a'], ['mgr.b']] ), ActiveAssignmentTest( 'mgr', @@ -730,7 +769,9 @@ class ActiveAssignmentTest(NamedTuple): DaemonDescription('mgr', 'b', 'host2', is_active=True), DaemonDescription('mgr', 'c', 'host3'), ], - [['host2']] + 
[['host2']], + [[]], + [['mgr.a', 'mgr.c']] ), ActiveAssignmentTest( 'mgr', @@ -741,7 +782,9 @@ class ActiveAssignmentTest(NamedTuple): DaemonDescription('mgr', 'b', 'host2'), DaemonDescription('mgr', 'c', 'host3', is_active=True), ], - [['host3']] + [['host3']], + [[]], + [['mgr.a', 'mgr.b']] ), ActiveAssignmentTest( 'mgr', @@ -752,7 +795,9 @@ class ActiveAssignmentTest(NamedTuple): DaemonDescription('mgr', 'b', 'host2'), DaemonDescription('mgr', 'c', 'host3', is_active=True), ], - [['host1'], ['host3']] + [['host1'], ['host3']], + [[]], + [['mgr.a', 'mgr.b'], ['mgr.b', 'mgr.c']] ), ActiveAssignmentTest( 'mgr', @@ -763,7 +808,9 @@ class ActiveAssignmentTest(NamedTuple): DaemonDescription('mgr', 'b', 'host2', is_active=True), DaemonDescription('mgr', 'c', 'host3', is_active=True), ], - [['host2', 'host3']] + [['host2', 'host3']], + [[]], + [['mgr.a']] ), ActiveAssignmentTest( 'mgr', @@ -774,7 +821,9 @@ class ActiveAssignmentTest(NamedTuple): DaemonDescription('mgr', 'b', 'host2', is_active=True), DaemonDescription('mgr', 'c', 'host3', is_active=True), ], - [['host1'], ['host2'], ['host3']] + [['host1'], ['host2'], ['host3']], + [[]], + [['mgr.a', 'mgr.b'], ['mgr.b', 'mgr.c'], ['mgr.a', 'mgr.c']] ), ActiveAssignmentTest( 'mgr', @@ -786,7 +835,9 @@ class ActiveAssignmentTest(NamedTuple): DaemonDescription('mgr', 'b', 'host2'), DaemonDescription('mgr', 'c', 'host3'), ], - [['host1']] + [['host1']], + [[]], + [['mgr.a2', 'mgr.b', 'mgr.c']] ), ActiveAssignmentTest( 'mgr', @@ -798,7 +849,9 @@ class ActiveAssignmentTest(NamedTuple): DaemonDescription('mgr', 'b', 'host2'), DaemonDescription('mgr', 'c', 'host3'), ], - [['host1']] + [['host1']], + [[]], + [['mgr.a', 'mgr.b', 'mgr.c'], ['mgr.a2', 'mgr.b', 'mgr.c']] ), ActiveAssignmentTest( 'mgr', @@ -810,7 +863,9 @@ class ActiveAssignmentTest(NamedTuple): DaemonDescription('mgr', 'b', 'host2'), DaemonDescription('mgr', 'c', 'host3', is_active=True), ], - [['host1', 'host3']] + [['host1', 'host3']], + [[]], + [['mgr.a2', 'mgr.b']] ), # Explicit placement should override preference for active daemon ActiveAssignmentTest( @@ -822,19 +877,23 @@ class ActiveAssignmentTest(NamedTuple): DaemonDescription('mgr', 'b', 'host2'), DaemonDescription('mgr', 'c', 'host3', is_active=True), ], - [['host1']] + [['host1']], + [[]], + [['mgr.b', 'mgr.c']] ), ]) -def test_active_assignment(service_type, placement, hosts, daemons, expected): +def test_active_assignment(service_type, placement, hosts, daemons, expected, expected_add, expected_remove): spec = ServiceSpec(service_type=service_type, service_id=None, placement=placement) - hosts = HostAssignment( + hosts, to_add, to_remove = HostAssignment( spec=spec, hosts=[HostSpec(h) for h in hosts], daemons=daemons, ).place() assert sorted([h.hostname for h in hosts]) in expected + assert sorted([h.hostname for h in to_add]) in expected_add + assert sorted([h.name() for h in to_remove]) in expected_remove
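For readers following along outside the tree, the bookkeeping that place() now
performs can be approximated with a self-contained sketch. This is a
simplification, not the real implementation: `Slot` and `Daemon` here stand in
for HostPlacementSpec and DaemonDescription, there is no colocation or
active/standby handling, and the scheduler's random selection is replaced with
a deterministic slice:

    from typing import List, NamedTuple, Tuple

    class Slot(NamedTuple):
        hostname: str

    class Daemon(NamedTuple):
        name: str
        hostname: str

    def place(candidates: List[Slot], daemons: List[Daemon],
              count: int) -> Tuple[List[Slot], List[Slot], List[Daemon]]:
        # partition candidate slots into those already backed by a daemon
        # and free ones; daemons on non-candidate hosts are slated for removal
        existing_slots: List[Slot] = []
        existing_daemons: List[Daemon] = []
        to_remove: List[Daemon] = []
        others = candidates.copy()
        for d in daemons:
            slot = Slot(d.hostname)
            if slot in others:
                others.remove(slot)
                existing_slots.append(slot)
                existing_daemons.append(d)
            else:
                to_remove.append(d)
        need = count - len(existing_slots)
        if need <= 0:
            # at or above count: the daemons behind surplus slots are removed
            to_remove.extend(existing_daemons[count:])
            return existing_slots[:count], [], to_remove
        # below count: claim additional free slots (the real scheduler's
        # selection has a random component)
        to_add = others[:need]
        return existing_slots + to_add, to_add, to_remove

For example, place([Slot('host1'), Slot('host2'), Slot('host3')],
[Daemon('mgr.a', 'host1')], count=2) returns
([Slot('host1'), Slot('host2')], [Slot('host2')], []) -- the same
(slots, to_add, to_remove) shape the new tests assert on.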