]> git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
mgr/nfs: migrate pre-pacific nfs.ganesha-foo clusters to nfs.foo
authorSage Weil <sage@newdream.net>
Mon, 12 Jul 2021 18:57:10 +0000 (14:57 -0400)
committerSage Weil <sage@newdream.net>
Wed, 14 Jul 2021 20:20:11 +0000 (16:20 -0400)
In octopus, the NFS service is named nfs.ganesha-$id instead of nfs.$id.

Signed-off-by: Sage Weil <sage@newdream.net>
src/pybind/mgr/cephadm/migrations.py
src/pybind/mgr/cephadm/tests/test_migration.py

index f5c0e007b2ae738dc7a4ef925fd4e58e3033e570..e2bccf040a0ede455bb9a70e912667040d94534d 100644 (file)
@@ -181,6 +181,11 @@ class Migrations:
         return True
 
     def migrate_nfs_spec(self, service_id: str, pool: str, ns: Optional[str]) -> None:
+        renamed = False
+        if service_id.startswith('ganesha-'):
+            service_id = service_id[8:]
+            renamed = True
+
         self.mgr.log.info(
             f'Migrating nfs.{service_id} from legacy pool {pool} namespace {ns}'
         )
@@ -201,6 +206,25 @@ class Migrations:
                 break
         self.mgr.log.info(f'Found {len(exports)} exports for legacy nfs.{service_id}')
 
+        if renamed:
+            # rename from nfs.ganesha-* to nfs.*.  This will destroy old daemons and
+            # deploy new ones.
+            self.mgr.log.info(f'Replacing nfs.ganesha-{service_id} with nfs.{service_id}')
+            spec = self.mgr.spec_store[f'nfs.ganesha-{service_id}'].spec
+            self.mgr.spec_store.rm(f'nfs.ganesha-{service_id}')
+            spec.service_id = service_id
+            self.mgr.spec_store.save(spec, True)
+        else:
+            # redeploy all ganesha daemons to ensure that the daemon
+            # cephx keys are correct AND container configs are set up properly
+            daemons = [d.name() for d in self.mgr.cache.get_daemons_by_service(f'nfs.{service_id}')]
+            self.mgr.log.info(f'Removing old nfs.{service_id} daemons {daemons}')
+            self.mgr.remove_daemons(daemons)
+
+            # re-save service spec (without pool and namespace properties!)
+            spec = self.mgr.spec_store[f'nfs.{service_id}'].spec
+            self.mgr.spec_store.save(spec)
+
         # import exports
         for export in exports:
             ex = ''
@@ -219,15 +243,7 @@ class Migrations:
             if ret:
                 self.mgr.log.warning(f'Failed to migrate export ({ret}): {err}\nExport was:\n{ex}')
 
-        # redeploy all ganesha daemons to ensures that the daemon
-        # cephx are correct AND container configs are set up properly
-        daemons = [d.name() for d in self.mgr.cache.get_daemons_by_service(f'nfs.{service_id}')]
-        self.mgr.log.info(f'Removing old daemons {daemons}')
-        self.mgr.remove_daemons(daemons)
 
-        # re-save service spec (without pool and namespace properties!)
-        spec = self.mgr.spec_store[f'nfs.{service_id}'].spec
-        self.mgr.spec_store.save(spec)
 
 
 def queue_migrate_nfs_spec(mgr: "CephadmOrchestrator", spec_dict: Dict[Any, Any]) -> None:
index d69628ea9067a6d4790760279038604b7af521ef..fa90ae70c08179df10c6c7b5d6235d73ef1499f2 100644 (file)
@@ -182,3 +182,36 @@ def test_migrate_nfs_initial(cephadm_module: CephadmOrchestrator):
 
         cephadm_module.migration.migrate()
         assert cephadm_module.migration_current == 3
+
+
+@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
+def test_migrate_nfs_initial_octopus(cephadm_module: CephadmOrchestrator):
+    with with_host(cephadm_module, 'host1'):
+        cephadm_module.set_store(
+            SPEC_STORE_PREFIX + 'mds',
+            json.dumps({
+                'spec': {
+                    'service_type': 'nfs',
+                    'service_id': 'ganesha-foo',
+                    'placement': {
+                        'hosts': ['host1']
+                    },
+                    'spec': {
+                        'pool': 'mypool',
+                        'namespace': 'foons',
+                    },
+                },
+                'created': datetime_to_str(datetime_now()),
+            }, sort_keys=True),
+        )
+        cephadm_module.migration_current = 1
+        cephadm_module.spec_store.load()
+
+        ls = json.loads(cephadm_module.get_store('nfs_migration_queue'))
+        assert ls == [['ganesha-foo', 'mypool', 'foons']]
+
+        cephadm_module.migration.migrate(True)
+        assert cephadm_module.migration_current == 2
+
+        cephadm_module.migration.migrate()
+        assert cephadm_module.migration_current == 3