From: Sebastian Wagner
Date: Fri, 24 Jul 2020 14:20:22 +0000 (+0200)
Subject: mgr/cephadm: Add migration to keep the service names consistent
X-Git-Tag: v16.1.0~1563^2
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=refs%2Fpull%2F36284%2Fhead;p=ceph.git

mgr/cephadm: Add migration to keep the service names consistent

After 15.2.4, we unified some service IDs: MONs, MGRs, etc. no longer
have a service id, which means the service names changed:

    mon.foo -> mon
    mgr.foo -> mgr

This migration restores the consistency of the stored data structures.

Signed-off-by: Sebastian Wagner
---

diff --git a/src/pybind/mgr/cephadm/migrations.py b/src/pybind/mgr/cephadm/migrations.py
index 0c964c7d4bc20..55468d96a2353 100644
--- a/src/pybind/mgr/cephadm/migrations.py
+++ b/src/pybind/mgr/cephadm/migrations.py
@@ -9,7 +9,7 @@ from orchestrator import OrchestratorError
 
 if TYPE_CHECKING:
     from .module import CephadmOrchestrator
 
-LAST_MIGRATION = 1
+LAST_MIGRATION = 2
 
 logger = logging.getLogger(__name__)
 
@@ -53,6 +53,10 @@ class Migrations:
             if self.migrate_0_1():
                 self.set(1)
 
+        if self.mgr.migration_current == 1:
+            if self.migrate_1_2():
+                self.set(2)
+
     def migrate_0_1(self) -> bool:
         """
         Migration 0 -> 1
@@ -126,3 +130,29 @@ class Migrations:
                     convert_to_explicit(spec)
 
         return True
+
+    def migrate_1_2(self) -> bool:
+        """
+        After 15.2.4, we unified some service IDs: MONs, MGRs, etc. no
+        longer have a service id, which means the service names changed:
+
+        mon.foo -> mon
+        mgr.foo -> mgr
+
+        This migration restores the consistency of the stored data structures.
+        """
+        bad_specs = {}
+        for name, spec in self.mgr.spec_store.specs.items():
+            if name != spec.service_name():
+                bad_specs[name] = (spec.service_name(), spec)
+
+        for old, (new, old_spec) in bad_specs.items():
+            if new not in self.mgr.spec_store.specs:
+                spec = old_spec
+            else:
+                spec = self.mgr.spec_store.specs[new]
+            spec.unmanaged = True
+            self.mgr.spec_store.save(spec)
+            self.mgr.spec_store.rm(old)
+
+        return True
diff --git a/src/pybind/mgr/cephadm/tests/test_migration.py b/src/pybind/mgr/cephadm/tests/test_migration.py
index 5b78987c72878..5c917071064a2 100644
--- a/src/pybind/mgr/cephadm/tests/test_migration.py
+++ b/src/pybind/mgr/cephadm/tests/test_migration.py
@@ -1,7 +1,9 @@
+import json
 from datetime import datetime
 
 from ceph.deployment.service_spec import PlacementSpec, ServiceSpec, HostPlacementSpec
 from cephadm import CephadmOrchestrator
+from cephadm.inventory import SPEC_STORE_PREFIX, DATEFMT
 from cephadm.tests.fixtures import _run_cephadm, cephadm_module, wait, with_host
 from tests import mock
 
@@ -38,8 +40,105 @@ def test_migrate_scheduler(cephadm_module: CephadmOrchestrator):
         cephadm_module.cache.last_daemon_update['host2'] = datetime.now()
 
         cephadm_module.migration.migrate()
-        assert cephadm_module.migration_current == 1
+        assert cephadm_module.migration_current == 2
 
         out = [o.spec.placement for o in wait(cephadm_module, cephadm_module.describe_service())]
         assert out == [PlacementSpec(count=2, hosts=[HostPlacementSpec(hostname='host1', network='', name=''),
                                                     HostPlacementSpec(hostname='host2', network='', name='')])]
+
+
+@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
+def test_migrate_service_id_mon_one(cephadm_module: CephadmOrchestrator):
+    with with_host(cephadm_module, 'host1'):
+        cephadm_module.set_store(SPEC_STORE_PREFIX + 'mon.wrong', json.dumps({
+            'spec': {
+                'service_type': 'mon',
+                'service_id': 'wrong',
+                'placement': {
+                    'hosts': ['host1']
+                }
+            },
+            'created': datetime.utcnow().strftime(DATEFMT),
+        }, sort_keys=True),
+        )
+
+        cephadm_module.spec_store.load()
+
+        assert len(cephadm_module.spec_store.specs) == 1
+        assert cephadm_module.spec_store.specs['mon.wrong'].service_name() == 'mon'
+
+        cephadm_module.migration_current = 1
+        cephadm_module.migration.migrate()
+        assert cephadm_module.migration_current == 2
+
+        assert len(cephadm_module.spec_store.specs) == 1
+        assert cephadm_module.spec_store.specs['mon'] == ServiceSpec(
+            service_type='mon',
+            unmanaged=True,
+            placement=PlacementSpec(hosts=['host1'])
+        )
+
+
+@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
+def test_migrate_service_id_mon_two(cephadm_module: CephadmOrchestrator):
+    with with_host(cephadm_module, 'host1'):
+        cephadm_module.set_store(SPEC_STORE_PREFIX + 'mon', json.dumps({
+            'spec': {
+                'service_type': 'mon',
+                'placement': {
+                    'count': 5,
+                }
+            },
+            'created': datetime.utcnow().strftime(DATEFMT),
+        }, sort_keys=True),
+        )
+        cephadm_module.set_store(SPEC_STORE_PREFIX + 'mon.wrong', json.dumps({
+            'spec': {
+                'service_type': 'mon',
+                'service_id': 'wrong',
+                'placement': {
+                    'hosts': ['host1']
+                }
+            },
+            'created': datetime.utcnow().strftime(DATEFMT),
+        }, sort_keys=True),
+        )
+
+        cephadm_module.spec_store.load()
+
+        assert len(cephadm_module.spec_store.specs) == 2
+        assert cephadm_module.spec_store.specs['mon.wrong'].service_name() == 'mon'
+        assert cephadm_module.spec_store.specs['mon'].service_name() == 'mon'
+
+        cephadm_module.migration_current = 1
+        cephadm_module.migration.migrate()
+        assert cephadm_module.migration_current == 2
+
+        assert len(cephadm_module.spec_store.specs) == 1
+        assert cephadm_module.spec_store.specs['mon'] == ServiceSpec(
+            service_type='mon',
+            unmanaged=True,
+            placement=PlacementSpec(count=5)
+        )
+
+
+@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
+def test_migrate_service_id_mds_one(cephadm_module: CephadmOrchestrator):
+    with with_host(cephadm_module, 'host1'):
+        cephadm_module.set_store(SPEC_STORE_PREFIX + 'mds', json.dumps({
+            'spec': {
+                'service_type': 'mds',
+                'placement': {
+                    'hosts': ['host1']
+                }
+            },
+            'created': datetime.utcnow().strftime(DATEFMT),
+        }, sort_keys=True),
+        )
+
+        cephadm_module.spec_store.load()
+
+        # there is nothing to migrate: an MDS spec without a service_id is
+        # invalid, so spec_store.load() already dropped it.
+        assert len(cephadm_module.spec_store.specs) == 0
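
For readers who want the renaming rule from migrate_1_2 in isolation: below is a
minimal, self-contained sketch that replays the same logic against a plain dict
standing in for cephadm's SpecStore. The service_name() helper, TYPES_WITHOUT_ID
set, and the example spec values are simplified stand-ins for illustration, not
the real ServiceSpec/SpecStore API.

    # Sketch only: a plain dict plays the role of the SpecStore, and
    # service_name() approximates ServiceSpec.service_name() after 15.2.4.

    TYPES_WITHOUT_ID = {'mon', 'mgr'}  # illustrative subset

    def service_name(spec: dict) -> str:
        # mon/mgr specs no longer carry a service id in their name.
        if spec['service_type'] in TYPES_WITHOUT_ID or not spec.get('service_id'):
            return spec['service_type']
        return f"{spec['service_type']}.{spec['service_id']}"

    def migrate_1_2(specs: dict) -> dict:
        # Collect entries whose stored key disagrees with the spec's service name.
        bad_specs = {name: (service_name(spec), spec)
                     for name, spec in specs.items()
                     if name != service_name(spec)}

        for old, (new, old_spec) in bad_specs.items():
            # Prefer a spec already stored under the new name; otherwise rename.
            spec = specs.get(new, old_spec)
            # Like the migration, mark the result unmanaged so cephadm does not
            # start scheduling against a freshly renamed spec.
            spec['unmanaged'] = True
            specs[new] = spec  # save under the consistent name ...
            del specs[old]     # ... and drop the stale 'mon.foo'-style key
        return specs

    store = {'mon.wrong': {'service_type': 'mon', 'service_id': 'wrong'}}
    print(migrate_1_2(store))
    # {'mon': {'service_type': 'mon', 'service_id': 'wrong', 'unmanaged': True}}

As in the patch, when both 'mon' and 'mon.wrong' exist, the spec already stored
under the consistent name wins and the stale entry is simply removed.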