From: Sage Weil
Date: Thu, 4 Mar 2021 23:30:35 +0000 (-0500)
Subject: mgr/cephadm: do not worry about even # of monitors
X-Git-Tag: v16.2.0~106^2~44
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=2cdffcbbcd78d67a932b9ac3fb2a41baf4da32ad;p=ceph.git

mgr/cephadm: do not worry about even # of monitors

Ceph works just fine with an even number of monitors.

Upside: more copies of critical cluster data.
Downside: we can tolerate only the same number of down mons as with N-1
monitors, and now we are slightly more likely to have a failing mon because
we have 1 more that might fail.

On balance it's not clear that having one fewer mon is any better, so avoid
the confusion and complexity of second-guessing ourselves.

Signed-off-by: Sage Weil
(cherry picked from commit ba88a8e5bf81431c37011ba96ffc843eb970683f)
---

diff --git a/src/pybind/mgr/cephadm/schedule.py b/src/pybind/mgr/cephadm/schedule.py
index 7d2e2308bae6..f63a1a2029ed 100644
--- a/src/pybind/mgr/cephadm/schedule.py
+++ b/src/pybind/mgr/cephadm/schedule.py
@@ -122,13 +122,6 @@ class HostAssignment(object):
         # If we don't have <count> the list of candidates is definitive.
         if count is None:
             logger.debug('Provided hosts: %s' % candidates)
-            # if asked to place even number of mons, deploy 1 less
-            if self.spec.service_type == 'mon' and (len(candidates) % 2) == 0:
-                count = len(candidates) - 1
-                logger.info("Deploying %s monitor(s) instead of %s so monitors may achieve consensus" % (
-                    count, len(candidates)))
-                return candidates[0:count]
-
             # do not deploy ha-rgw on hosts that don't support virtual ips
             if self.spec.service_type == 'ha-rgw' and self.filter_new_host:
                 old = candidates
@@ -139,23 +132,6 @@ class HostAssignment(object):
                 logger.info('filtered %s down to %s' % (old, candidates))
             return candidates
 
-        # if asked to place even number of mons, deploy 1 less
-        if self.spec.service_type == 'mon':
-            # if count >= number of candidates then number of candidates
-            # is determining factor in how many mons will be placed
-            if count >= len(candidates):
-                if (len(candidates) % 2) == 0:
-                    logger.info("Deploying %s monitor(s) instead of %s so monitors may achieve consensus" % (
-                        len(candidates) - 1, len(candidates)))
-                    count = len(candidates) - 1
-            # if count < number of candidates then count is determining
-            # factor in how many mons will get placed
-            else:
-                if (count % 2) == 0:
-                    logger.info(
-                        "Deploying %s monitor(s) instead of %s so monitors may achieve consensus" % (count - 1, count))
-                    count = count - 1
-
         # prefer hosts that already have services.
         # this avoids re-assigning to _new_ hosts
         # and constant re-distribution of hosts when new nodes are
diff --git a/src/pybind/mgr/cephadm/tests/test_scheduling.py b/src/pybind/mgr/cephadm/tests/test_scheduling.py
index d59b2ed9377a..5066c7dab1ac 100644
--- a/src/pybind/mgr/cephadm/tests/test_scheduling.py
+++ b/src/pybind/mgr/cephadm/tests/test_scheduling.py
@@ -763,115 +763,3 @@ def test_active_assignment(service_type, placement, hosts, daemons, expected):
         hosts=[HostSpec(h) for h in hosts],
         get_daemons_func=lambda _: daemons).place()
     assert sorted([h.hostname for h in hosts]) in expected
-
-
-class OddMonsTest(NamedTuple):
-    service_type: str
-    placement: PlacementSpec
-    hosts: List[str]
-    daemons: List[DaemonDescription]
-    expected_count: int
-
-
-@pytest.mark.parametrize("service_type,placement,hosts,daemons,expected_count",
-                         [
-                             OddMonsTest(
-                                 'mon',
-                                 PlacementSpec(count=5),
-                                 'host1 host2 host3 host4'.split(),
-                                 [],
-                                 3
-                             ),
-                             OddMonsTest(
-                                 'mon',
-                                 PlacementSpec(count=4),
-                                 'host1 host2 host3 host4 host5'.split(),
-                                 [],
-                                 3
-                             ),
-                             OddMonsTest(
-                                 'mon',
-                                 PlacementSpec(count=5),
-                                 'host1 host2 host3 host4 host5'.split(),
-                                 [],
-                                 5
-                             ),
-                             OddMonsTest(
-                                 'mon',
-                                 PlacementSpec(hosts='host1 host2 host3 host4'.split()),
-                                 'host1 host2 host3 host4 host5'.split(),
-                                 [],
-                                 3
-                             ),
-                             OddMonsTest(
-                                 'mon',
-                                 PlacementSpec(hosts='host1 host2 host3 host4 host5'.split()),
-                                 'host1 host2 host3 host4 host5'.split(),
-                                 [],
-                                 5
-                             ),
-                             OddMonsTest(
-                                 'mon',
-                                 PlacementSpec(host_pattern='*'),
-                                 'host1 host2 host3 host4'.split(),
-                                 [],
-                                 3
-                             ),
-                             OddMonsTest(
-                                 'mon',
-                                 PlacementSpec(count=5, hosts='host1 host2 host3 host4'.split()),
-                                 'host1 host2 host3 host4 host5'.split(),
-                                 [],
-                                 3
-                             ),
-                             OddMonsTest(
-                                 'mon',
-                                 PlacementSpec(count=2, hosts='host1 host2 host3'.split()),
-                                 'host1 host2 host3 host4 host5'.split(),
-                                 [],
-                                 1
-                             ),
-                             OddMonsTest(
-                                 'mon',
-                                 PlacementSpec(count=5),
-                                 'host1 host2 host3 host4'.split(),
-                                 [
-                                     DaemonDescription('mon', 'a', 'host1'),
-                                     DaemonDescription('mon', 'b', 'host2'),
-                                     DaemonDescription('mon', 'c', 'host3'),
-                                 ],
-                                 3
-                             ),
-                             OddMonsTest(
-                                 'mon',
-                                 PlacementSpec(count=5),
-                                 'host1 host2 host3 host4'.split(),
-                                 [
-                                     DaemonDescription('mon', 'a', 'host1'),
-                                     DaemonDescription('mon', 'b', 'host2'),
-                                 ],
-                                 3
-                             ),
-                             OddMonsTest(
-                                 'mon',
-                                 PlacementSpec(hosts='host1 host2 host3 host4'.split()),
-                                 'host1 host2 host3 host4 host5'.split(),
-                                 [
-                                     DaemonDescription('mon', 'a', 'host1'),
-                                     DaemonDescription('mon', 'b', 'host2'),
-                                     DaemonDescription('mon', 'c', 'host3'),
-                                 ],
-                                 3
-                             ),
-                         ])
-def test_odd_mons(service_type, placement, hosts, daemons, expected_count):
-
-    spec = ServiceSpec(service_type=service_type,
-                       service_id=None,
-                       placement=placement)
-
-    hosts = HostAssignment(
-        spec=spec,
-        hosts=[HostSpec(h) for h in hosts],
-        get_daemons_func=lambda _: daemons).place()
-    assert len(hosts) == expected_count
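
Illustration (not part of the patch): a minimal sketch of the quorum arithmetic behind the
rationale above, assuming the standard majority-quorum rule used by the Ceph monitors; the
helper name failures_tolerated is hypothetical.

    # A monitor quorum needs a strict majority: floor(N/2) + 1 of N mons.
    # So an even monitor count tolerates no more failures than one fewer mon would.
    def failures_tolerated(num_mons: int) -> int:
        quorum = num_mons // 2 + 1       # smallest strict majority
        return num_mons - quorum         # mons that can be down while quorum survives

    for n in range(3, 7):
        print('%d mons: quorum=%d, tolerates %d down' % (n, n // 2 + 1, failures_tolerated(n)))

    # 3 mons: quorum=2, tolerates 1 down
    # 4 mons: quorum=3, tolerates 1 down   (no better than 3 mons)
    # 5 mons: quorum=3, tolerates 2 down
    # 6 mons: quorum=4, tolerates 2 down   (no better than 5 mons)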