From 2cb2abb385f97714cbe59371aa788394799db925 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 15 Jun 2021 12:34:47 -0500 Subject: [PATCH] mgr/nfs: merge 'nfs export {update,import}' -> 'nfs export apply' The only thing we lose is a strict 'update' that errors out if the export didn't already exist, and we don't have any known need for that. Fixes: https://tracker.ceph.com/issues/51265 Signed-off-by: Sage Weil --- src/pybind/mgr/nfs/export.py | 31 ++++------ src/pybind/mgr/nfs/module.py | 20 +++---- src/pybind/mgr/nfs/tests/test_nfs.py | 89 +++++++++++++++++++++++++++- 3 files changed, 105 insertions(+), 35 deletions(-) diff --git a/src/pybind/mgr/nfs/export.py b/src/pybind/mgr/nfs/export.py index 8fc932273aa..5b257154932 100644 --- a/src/pybind/mgr/nfs/export.py +++ b/src/pybind/mgr/nfs/export.py @@ -8,7 +8,7 @@ from os.path import normpath from rados import TimedOut, ObjectNotFound from .export_utils import GaneshaConfParser, Export, RawBlock, CephFSFSAL, RGWFSAL -from .exception import NFSException, NFSInvalidOperation, NFSObjectNotFound, FSNotFound, \ +from .exception import NFSException, NFSInvalidOperation, FSNotFound, \ ClusterNotFound from .utils import POOL_NAME, available_clusters, check_fs, restart_nfs_service @@ -344,27 +344,17 @@ class ExportMgr: except Exception as e: return exception_handler(e, f"Failed to get {pseudo_path} export for {cluster_id}") - def update_export(self, cluster_id: str, export_config: str) -> Tuple[int, str, str]: - try: - new_export = json.loads(export_config) - # check export type - return FSExport(self).update_export_1(cluster_id, new_export, False) - except NotImplementedError: - return 0, " Manual Restart of NFS PODS required for successful update of exports", "" - except Exception as e: - return exception_handler(e, f'Failed to update export: {e}') - - def import_export(self, cluster_id: str, export_config: str) -> Tuple[int, str, str]: + def apply_export(self, cluster_id: str, export_config: str) -> Tuple[int, str, 
str]: try: if not export_config: raise NFSInvalidOperation("Empty Config!!") new_export = json.loads(export_config) # check export type - return FSExport(self).update_export_1(cluster_id, new_export, True) + return FSExport(self).update_export_1(cluster_id, new_export) except NotImplementedError: return 0, " Manual Restart of NFS PODS required for successful update of exports", "" except Exception as e: - return exception_handler(e, f'Failed to import export: {e}') + return exception_handler(e, f'Failed to update export: {e}') class FSExport(ExportMgr): @@ -492,7 +482,7 @@ class FSExport(ExportMgr): ret, out, err = self._exec(['radosgw-admin', 'user', 'create', '--uid', uid, '--display-name', uid]) if ret: - raise NFSException(f'Failed to create user {uid}') + raise NFSException(-1, f'Failed to create user {uid}') j = json.loads(out) # FIXME: make this more tolerate of unexpected output? access_key = j['keys'][0]['access_key'] @@ -535,7 +525,6 @@ class FSExport(ExportMgr): self, cluster_id: str, new_export: Dict, - can_create: bool ) -> Tuple[int, str, str]: for k in ['cluster_id', 'path', 'pseudo']: if k not in new_export: @@ -550,8 +539,11 @@ class FSExport(ExportMgr): new_export['pseudo']) if old_export: # Check if export id matches - if old_export.export_id != new_export.get('export_id'): - raise NFSInvalidOperation('Export ID changed, Cannot update export') + if new_export.get('export_id'): + if old_export.export_id != new_export.get('export_id'): + raise NFSInvalidOperation('Export ID changed, Cannot update export') + else: + new_export['export_id'] = old_export.export_id elif new_export.get('export_id'): old_export = self._fetch_export_obj(cluster_id, new_export['export_id']) if old_export: @@ -559,9 +551,6 @@ class FSExport(ExportMgr): old_export = self._fetch_export(cluster_id, old_export.pseudo) self.mgr.log.debug(f"export {old_export.export_id} pseudo {old_export.pseudo} -> {new_export_dict['pseudo']}") - if not old_export and not can_create: - raise 
NFSObjectNotFound('Export does not exist') - new_export = Export.from_dict( new_export.get('export_id', self._gen_export_id(cluster_id)), new_export diff --git a/src/pybind/mgr/nfs/module.py b/src/pybind/mgr/nfs/module.py index 4a84eda3670..aedabc05e88 100644 --- a/src/pybind/mgr/nfs/module.py +++ b/src/pybind/mgr/nfs/module.py @@ -58,13 +58,6 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule): read_only=readonly, squash='none', addr=addr) - @CLICommand('nfs export import', perm='rw') - def _cmd_nfs_export_import(self, - cluster_id: str, - inbuf: str) -> Tuple[int, str, str]: - """Create one or more exports from JSON specification""" - return self.export_mgr.import_export(cluster_id, inbuf) - @CLICommand('nfs export rm', perm='rw') def _cmd_nfs_export_rm(self, cluster_id: str, binding: str) -> Tuple[int, str, str]: """Remove a cephfs export""" @@ -85,12 +78,13 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule): """Fetch a export of a NFS cluster given the pseudo path/binding""" return self.export_mgr.get_export(cluster_id=cluster_id, pseudo_path=binding) - @CLICommand('nfs export update', perm='rw') - @CLICheckNonemptyFileInput(desc='CephFS Export configuration') - def _cmd_nfs_export_update(self, cluster_id: str, inbuf: str) -> Tuple[int, str, str]: - """Update an export of a NFS cluster by `-i <json_file>`""" - # The export is passed to -i and it's processing is handled by the Ceph CLI. - return self.export_mgr.update_export(cluster_id, export_config=inbuf) + @CLICommand('nfs export apply', perm='rw') + @CLICheckNonemptyFileInput(desc='Export JSON specification') + def _cmd_nfs_export_apply(self, cluster_id: str, inbuf: str) -> Tuple[int, str, str]: + """Create or update an export by `-i <json_file>`""" + # The export is passed to -i and its processing + # is handled by the Ceph CLI. 
+ return self.export_mgr.apply_export(cluster_id, export_config=inbuf) @CLICommand('nfs cluster create', perm='rw') def _cmd_nfs_cluster_create(self, diff --git a/src/pybind/mgr/nfs/tests/test_nfs.py b/src/pybind/mgr/nfs/tests/test_nfs.py index c826a4c0364..fe1776f52a0 100644 --- a/src/pybind/mgr/nfs/tests/test_nfs.py +++ b/src/pybind/mgr/nfs/tests/test_nfs.py @@ -610,7 +610,7 @@ NFS_CORE_PARAM { def _do_test_update_export(self, cluster_id, expected_exports): nfs_mod = Module('nfs', '', '') conf = ExportMgr(nfs_mod) - r = conf.update_export(cluster_id, json.dumps({ + r = conf.apply_export(cluster_id, json.dumps({ 'export_id': 2, 'daemons': expected_exports[2], 'path': 'bucket', @@ -653,6 +653,93 @@ NFS_CORE_PARAM { assert export.clients[0].access_type is None assert export.cluster_id == cluster_id + # do it again, with changes + r = conf.apply_export(cluster_id, json.dumps({ + 'export_id': 2, + 'daemons': expected_exports[2], + 'path': 'newbucket', + 'pseudo': '/rgw/bucket', + 'cluster_id': cluster_id, + 'tag': 'bucket_tag', + 'access_type': 'RO', + 'squash': 'root', + 'security_label': False, + 'protocols': [4], + 'transports': ['TCP'], + 'clients': [{ + 'addresses': ["192.168.10.0/16"], + 'access_type': None, + 'squash': None + }], + 'fsal': { + 'name': 'RGW', + 'user_id': 'testuser', + 'access_key_id': 'access_key', + 'secret_access_key': 'secret_key', + } + })) + assert r[0] == 0 + + export = conf._fetch_export('foo', '/rgw/bucket') + assert export.export_id == 2 + assert export.path == "newbucket" + assert export.pseudo == "/rgw/bucket" + assert export.access_type == "RO" + assert export.squash == "root" + assert export.protocols == [4] + assert export.transports == ["TCP"] + assert export.fsal.name == "RGW" + assert export.fsal.user_id == "testuser" + assert export.fsal.access_key_id == "access_key" + assert export.fsal.secret_access_key == "secret_key" + assert len(export.clients) == 1 + assert export.clients[0].squash is None + assert 
export.clients[0].access_type is None + assert export.cluster_id == cluster_id + + # again, but without export_id + r = conf.apply_export(cluster_id, json.dumps({ + 'daemons': expected_exports[2], + 'path': 'newestbucket', + 'pseudo': '/rgw/bucket', + 'cluster_id': cluster_id, + 'tag': 'bucket_tag', + 'access_type': 'RW', + 'squash': 'root', + 'security_label': False, + 'protocols': [4], + 'transports': ['TCP'], + 'clients': [{ + 'addresses': ["192.168.10.0/16"], + 'access_type': None, + 'squash': None + }], + 'fsal': { + 'name': 'RGW', + 'user_id': 'testuser', + 'access_key_id': 'access_key', + 'secret_access_key': 'secret_key', + } + })) + assert r[0] == 0 + + export = conf._fetch_export('foo', '/rgw/bucket') + assert export.export_id == 2 + assert export.path == "newestbucket" + assert export.pseudo == "/rgw/bucket" + assert export.access_type == "RW" + assert export.squash == "root" + assert export.protocols == [4] + assert export.transports == ["TCP"] + assert export.fsal.name == "RGW" + assert export.fsal.user_id == "testuser" + assert export.fsal.access_key_id == "access_key" + assert export.fsal.secret_access_key == "secret_key" + assert len(export.clients) == 1 + assert export.clients[0].squash is None + assert export.clients[0].access_type is None + assert export.cluster_id == cluster_id + def test_remove_export(self) -> None: with self._mock_orchestrator(True), mock.patch('nfs.module.ExportMgr._exec'): for cluster_id, info in self.clusters.items(): -- 2.39.5