From: Sage Weil
Date: Mon, 12 Jul 2021 18:57:10 +0000 (-0400)
Subject: mgr/nfs: migrate pre-pacific nfs.ganesha-foo clusters to nfs.foo
X-Git-Tag: v17.1.0~1390^2~5
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=bd85c678a574a61d249f78034cf5f83a4668ae7d;p=ceph.git

mgr/nfs: migrate pre-pacific nfs.ganesha-foo clusters to nfs.foo

In octopus, the service is named nfs.ganesha-$id instead of nfs.$id.

Signed-off-by: Sage Weil
---

diff --git a/src/pybind/mgr/cephadm/migrations.py b/src/pybind/mgr/cephadm/migrations.py
index f5c0e007b2ae..e2bccf040a0e 100644
--- a/src/pybind/mgr/cephadm/migrations.py
+++ b/src/pybind/mgr/cephadm/migrations.py
@@ -181,6 +181,11 @@ class Migrations:
         return True
 
     def migrate_nfs_spec(self, service_id: str, pool: str, ns: Optional[str]) -> None:
+        renamed = False
+        if service_id.startswith('ganesha-'):
+            service_id = service_id[8:]
+            renamed = True
+
         self.mgr.log.info(
             f'Migrating nfs.{service_id} from legacy pool {pool} namespace {ns}'
         )
@@ -201,6 +206,25 @@ class Migrations:
                 break
         self.mgr.log.info(f'Found {len(exports)} exports for legacy nfs.{service_id}')
 
+        if renamed:
+            # rename from nfs.ganesha-* to nfs.*. This will destroy old daemons and
+            # deploy new ones.
+            self.mgr.log.info(f'Replacing nfs.ganesha-{service_id} with nfs.{service_id}')
+            spec = self.mgr.spec_store[f'nfs.ganesha-{service_id}'].spec
+            self.mgr.spec_store.rm(f'nfs.ganesha-{service_id}')
+            spec.service_id = service_id
+            self.mgr.spec_store.save(spec, True)
+        else:
+            # redeploy all ganesha daemons to ensure that the daemon
+            # cephx are correct AND container configs are set up properly
+            daemons = [d.name() for d in self.mgr.cache.get_daemons_by_service(f'nfs.{service_id}')]
+            self.mgr.log.info(f'Removing old nfs.{service_id} daemons {daemons}')
+            self.mgr.remove_daemons(daemons)
+
+            # re-save service spec (without pool and namespace properties!)
+            spec = self.mgr.spec_store[f'nfs.{service_id}'].spec
+            self.mgr.spec_store.save(spec)
+
         # import exports
         for export in exports:
             ex = ''
@@ -219,15 +243,7 @@ class Migrations:
             if ret:
                 self.mgr.log.warning(f'Failed to migrate export ({ret}): {err}\nExport was:\n{ex}')
 
-        # redeploy all ganesha daemons to ensures that the daemon
-        # cephx are correct AND container configs are set up properly
-        daemons = [d.name() for d in self.mgr.cache.get_daemons_by_service(f'nfs.{service_id}')]
-        self.mgr.log.info(f'Removing old daemons {daemons}')
-        self.mgr.remove_daemons(daemons)
 
-        # re-save service spec (without pool and namespace properties!)
-        spec = self.mgr.spec_store[f'nfs.{service_id}'].spec
-        self.mgr.spec_store.save(spec)
 
 
 def queue_migrate_nfs_spec(mgr: "CephadmOrchestrator", spec_dict: Dict[Any, Any]) -> None:
diff --git a/src/pybind/mgr/cephadm/tests/test_migration.py b/src/pybind/mgr/cephadm/tests/test_migration.py
index d69628ea9067..fa90ae70c081 100644
--- a/src/pybind/mgr/cephadm/tests/test_migration.py
+++ b/src/pybind/mgr/cephadm/tests/test_migration.py
@@ -182,3 +182,36 @@ def test_migrate_nfs_initial(cephadm_module: CephadmOrchestrator):
 
         cephadm_module.migration.migrate()
         assert cephadm_module.migration_current == 3
+
+
+@mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
+def test_migrate_nfs_initial_octopus(cephadm_module: CephadmOrchestrator):
+    with with_host(cephadm_module, 'host1'):
+        cephadm_module.set_store(
+            SPEC_STORE_PREFIX + 'mds',
+            json.dumps({
+                'spec': {
+                    'service_type': 'nfs',
+                    'service_id': 'ganesha-foo',
+                    'placement': {
+                        'hosts': ['host1']
+                    },
+                    'spec': {
+                        'pool': 'mypool',
+                        'namespace': 'foons',
+                    },
+                },
+                'created': datetime_to_str(datetime_now()),
+            }, sort_keys=True),
+        )
+        cephadm_module.migration_current = 1
+        cephadm_module.spec_store.load()
+
+        ls = json.loads(cephadm_module.get_store('nfs_migration_queue'))
+        assert ls == [['ganesha-foo', 'mypool', 'foons']]
+
+        cephadm_module.migration.migrate(True)
+        assert cephadm_module.migration_current == 2
+
+        cephadm_module.migration.migrate()
+        assert cephadm_module.migration_current == 3
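For reference, a minimal standalone sketch of the rename rule migrate_nfs_spec()
applies above (the helper name migrate_service_id and the asserts are
illustrative, not part of the patch):

    # A pre-pacific (octopus) cluster names the service nfs.ganesha-$id; the
    # migration strips the 'ganesha-' prefix so the service becomes nfs.$id.
    def migrate_service_id(service_id: str) -> str:
        prefix = 'ganesha-'
        if service_id.startswith(prefix):
            return service_id[len(prefix):]  # e.g. 'ganesha-foo' -> 'foo'
        return service_id

    assert migrate_service_id('ganesha-foo') == 'foo'  # octopus-style id renamed
    assert migrate_service_id('foo') == 'foo'          # pacific-style id unchanged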