]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
mgr/cephadm: Add migration to keep the service names consistent 36284/head
authorSebastian Wagner <sebastian.wagner@suse.com>
Fri, 24 Jul 2020 14:20:22 +0000 (16:20 +0200)
committerSebastian Wagner <sebastian.wagner@suse.com>
Thu, 30 Jul 2020 12:31:39 +0000 (14:31 +0200)
After 15.2.4, we unified some service IDs: MONs, MGRs, etc. no longer have a service id.
This means the service names changed:

mon.foo -> mon
mgr.foo -> mgr

This fixes the resulting data structure inconsistency

Signed-off-by: Sebastian Wagner <sebastian.wagner@suse.com>
src/pybind/mgr/cephadm/migrations.py
src/pybind/mgr/cephadm/tests/test_migration.py

index 0c964c7d4bc20809b89a50751fa7b418e457c3b8..55468d96a23538e89ef3395b07b0b9344d1cf4df 100644 (file)
@@ -9,7 +9,7 @@ from orchestrator import OrchestratorError
 if TYPE_CHECKING:
     from .module import CephadmOrchestrator
 
-LAST_MIGRATION = 1
+LAST_MIGRATION = 2
 
 logger = logging.getLogger(__name__)
 
@@ -53,6 +53,10 @@ class Migrations:
             if self.migrate_0_1():
                 self.set(1)
 
+        if self.mgr.migration_current == 1:
+            if self.migrate_1_2():
+                self.set(2)
+
     def migrate_0_1(self) -> bool:
         """
         Migration 0 -> 1
@@ -126,3 +130,29 @@ class Migrations:
             convert_to_explicit(spec)
 
         return True
+
+    def migrate_1_2(self) -> bool:
+        """
+        After 15.2.4, we unified some service IDs: MONs, MGRs etc no longer have a service id.
+        Which means, the service names changed:
+
+        mon.foo -> mon
+        mgr.foo -> mgr
+
+        This fixes the data structure consistency
+        """
+        bad_specs = {}
+        for name, spec in self.mgr.spec_store.specs.items():
+            if name != spec.service_name():
+                bad_specs[name] = (spec.service_name(), spec)
+
+        for old, (new, old_spec) in bad_specs.items():
+            if new not in self.mgr.spec_store.specs:
+                spec = old_spec
+            else:
+                spec = self.mgr.spec_store.specs[new]
+            spec.unmanaged = True
+            self.mgr.spec_store.save(spec)
+            self.mgr.spec_store.rm(old)
+
+        return True
index 5b78987c7287859ff9806d43bdac7f253056aab3..5c917071064a2dba7268d42cbcf51b6bf0ba3599 100644 (file)
@@ -1,7 +1,9 @@
+import json
 from datetime import datetime
 
 from ceph.deployment.service_spec import PlacementSpec, ServiceSpec, HostPlacementSpec
 from cephadm import CephadmOrchestrator
+from cephadm.inventory import SPEC_STORE_PREFIX, DATEFMT
 from cephadm.tests.fixtures import _run_cephadm, cephadm_module, wait, with_host
 from tests import mock
 
@@ -38,8 +40,105 @@ def test_migrate_scheduler(cephadm_module: CephadmOrchestrator):
             cephadm_module.cache.last_daemon_update['host2'] = datetime.now()
 
             cephadm_module.migration.migrate()
-            assert cephadm_module.migration_current == 1
+            assert cephadm_module.migration_current == 2
 
             out = [o.spec.placement for o in wait(cephadm_module, cephadm_module.describe_service())]
             assert out == [PlacementSpec(count=2, hosts=[HostPlacementSpec(hostname='host1', network='', name=''), HostPlacementSpec(hostname='host2', network='', name='')])]
 
+
+@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
+def test_migrate_service_id_mon_one(cephadm_module: CephadmOrchestrator):
+    with with_host(cephadm_module, 'host1'):
+        cephadm_module.set_store(SPEC_STORE_PREFIX + 'mon.wrong',
+            json.dumps({
+                'spec': {
+                    'service_type': 'mon',
+                    'service_id': 'wrong',
+                    'placement': {
+                        'hosts': ['host1']
+                    }
+                },
+                'created': datetime.utcnow().strftime(DATEFMT),
+            }, sort_keys=True),
+        )
+
+        cephadm_module.spec_store.load()
+
+        assert len(cephadm_module.spec_store.specs) == 1
+        assert cephadm_module.spec_store.specs['mon.wrong'].service_name() == 'mon'
+
+        cephadm_module.migration_current = 1
+        cephadm_module.migration.migrate()
+        assert cephadm_module.migration_current == 2
+
+        assert len(cephadm_module.spec_store.specs) == 1
+        assert cephadm_module.spec_store.specs['mon'] == ServiceSpec(
+            service_type='mon',
+            unmanaged=True,
+            placement=PlacementSpec(hosts=['host1'])
+        )
+
+@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
+def test_migrate_service_id_mon_two(cephadm_module: CephadmOrchestrator):
+    with with_host(cephadm_module, 'host1'):
+        cephadm_module.set_store(SPEC_STORE_PREFIX + 'mon',
+            json.dumps({
+                'spec': {
+                    'service_type': 'mon',
+                    'placement': {
+                        'count': 5,
+                    }
+                },
+                'created': datetime.utcnow().strftime(DATEFMT),
+            }, sort_keys=True),
+        )
+        cephadm_module.set_store(SPEC_STORE_PREFIX + 'mon.wrong',
+            json.dumps({
+                'spec': {
+                    'service_type': 'mon',
+                    'service_id': 'wrong',
+                    'placement': {
+                        'hosts': ['host1']
+                    }
+                },
+                'created': datetime.utcnow().strftime(DATEFMT),
+            }, sort_keys=True),
+        )
+
+        cephadm_module.spec_store.load()
+
+        assert len(cephadm_module.spec_store.specs) == 2
+        assert cephadm_module.spec_store.specs['mon.wrong'].service_name() == 'mon'
+        assert cephadm_module.spec_store.specs['mon'].service_name() == 'mon'
+
+        cephadm_module.migration_current = 1
+        cephadm_module.migration.migrate()
+        assert cephadm_module.migration_current == 2
+
+        assert len(cephadm_module.spec_store.specs) == 1
+        assert cephadm_module.spec_store.specs['mon'] == ServiceSpec(
+            service_type='mon',
+            unmanaged=True,
+            placement=PlacementSpec(count=5)
+        )
+
+@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('[]'))
+def test_migrate_service_id_mds_one(cephadm_module: CephadmOrchestrator):
+    with with_host(cephadm_module, 'host1'):
+        cephadm_module.set_store(SPEC_STORE_PREFIX + 'mds',
+            json.dumps({
+                'spec': {
+                    'service_type': 'mds',
+                    'placement': {
+                        'hosts': ['host1']
+                    }
+                },
+                'created': datetime.utcnow().strftime(DATEFMT),
+            }, sort_keys=True),
+        )
+
+        cephadm_module.spec_store.load()
+
+        # there is nothing to migrate, as the spec is gone now.
+        assert len(cephadm_module.spec_store.specs) == 0
+