git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
mgr/nfs: merge 'nfs export {update,import}' -> 'nfs export apply'
author: Sage Weil <sage@newdream.net>
Tue, 15 Jun 2021 17:34:47 +0000 (12:34 -0500)
committer: Sage Weil <sage@newdream.net>
Mon, 21 Jun 2021 18:13:15 +0000 (14:13 -0400)
The only thing we lose is a strict 'update' that errors out if the
export didn't already exist, and we don't have any known need for that.

Fixes: https://tracker.ceph.com/issues/51265
Signed-off-by: Sage Weil <sage@newdream.net>
src/pybind/mgr/nfs/export.py
src/pybind/mgr/nfs/module.py
src/pybind/mgr/nfs/tests/test_nfs.py

index 8fc932273aaaa032dd7aa3baba573243253de7f4..5b257154932cff212aa947f4bc92e51f1705e3ec 100644 (file)
@@ -8,7 +8,7 @@ from os.path import normpath
 from rados import TimedOut, ObjectNotFound
 
 from .export_utils import GaneshaConfParser, Export, RawBlock, CephFSFSAL, RGWFSAL
-from .exception import NFSException, NFSInvalidOperation, NFSObjectNotFound, FSNotFound, \
+from .exception import NFSException, NFSInvalidOperation, FSNotFound, \
     ClusterNotFound
 from .utils import POOL_NAME, available_clusters, check_fs, restart_nfs_service
 
@@ -344,27 +344,17 @@ class ExportMgr:
         except Exception as e:
             return exception_handler(e, f"Failed to get {pseudo_path} export for {cluster_id}")
 
-    def update_export(self, cluster_id: str, export_config: str) -> Tuple[int, str, str]:
-        try:
-            new_export = json.loads(export_config)
-            # check export type
-            return FSExport(self).update_export_1(cluster_id, new_export, False)
-        except NotImplementedError:
-            return 0, " Manual Restart of NFS PODS required for successful update of exports", ""
-        except Exception as e:
-            return exception_handler(e, f'Failed to update export: {e}')
-
-    def import_export(self, cluster_id: str, export_config: str) -> Tuple[int, str, str]:
+    def apply_export(self, cluster_id: str, export_config: str) -> Tuple[int, str, str]:
         try:
             if not export_config:
                 raise NFSInvalidOperation("Empty Config!!")
             new_export = json.loads(export_config)
             # check export type
-            return FSExport(self).update_export_1(cluster_id, new_export, True)
+            return FSExport(self).update_export_1(cluster_id, new_export)
         except NotImplementedError:
             return 0, " Manual Restart of NFS PODS required for successful update of exports", ""
         except Exception as e:
-            return exception_handler(e, f'Failed to import export: {e}')
+            return exception_handler(e, f'Failed to update export: {e}')
 
 
 class FSExport(ExportMgr):
@@ -492,7 +482,7 @@ class FSExport(ExportMgr):
                 ret, out, err = self._exec(['radosgw-admin', 'user', 'create', '--uid', uid,
                                             '--display-name', uid])
                 if ret:
-                    raise NFSException(f'Failed to create user {uid}')
+                    raise NFSException(-1, f'Failed to create user {uid}')
             j = json.loads(out)
             # FIXME: make this more tolerate of unexpected output?
             access_key = j['keys'][0]['access_key']
@@ -535,7 +525,6 @@ class FSExport(ExportMgr):
             self,
             cluster_id: str,
             new_export: Dict,
-            can_create: bool
     ) -> Tuple[int, str, str]:
         for k in ['cluster_id', 'path', 'pseudo']:
             if k not in new_export:
@@ -550,8 +539,11 @@ class FSExport(ExportMgr):
                                         new_export['pseudo'])
         if old_export:
             # Check if export id matches
-            if old_export.export_id != new_export.get('export_id'):
-                raise NFSInvalidOperation('Export ID changed, Cannot update export')
+            if new_export.get('export_id'):
+                if old_export.export_id != new_export.get('export_id'):
+                    raise NFSInvalidOperation('Export ID changed, Cannot update export')
+            else:
+                new_export['export_id'] = old_export.export_id
         elif new_export.get('export_id'):
             old_export = self._fetch_export_obj(cluster_id, new_export['export_id'])
             if old_export:
@@ -559,9 +551,6 @@ class FSExport(ExportMgr):
                 old_export = self._fetch_export(cluster_id, old_export.pseudo)
                 self.mgr.log.debug(f"export {old_export.export_id} pseudo {old_export.pseudo} -> {new_export_dict['pseudo']}")
 
-        if not old_export and not can_create:
-            raise NFSObjectNotFound('Export does not exist')
-
         new_export = Export.from_dict(
             new_export.get('export_id', self._gen_export_id(cluster_id)),
             new_export
index 4a84eda3670c79752735a0e87a292e45dcea3111..aedabc05e88dfb57f21823d815606bf484fb030b 100644 (file)
@@ -58,13 +58,6 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
                                              read_only=readonly, squash='none',
                                              addr=addr)
 
-    @CLICommand('nfs export import', perm='rw')
-    def _cmd_nfs_export_import(self,
-                               cluster_id: str,
-                               inbuf: str) -> Tuple[int, str, str]:
-        """Create one or more exports from JSON specification"""
-        return self.export_mgr.import_export(cluster_id, inbuf)
-
     @CLICommand('nfs export rm', perm='rw')
     def _cmd_nfs_export_rm(self, cluster_id: str, binding: str) -> Tuple[int, str, str]:
         """Remove a cephfs export"""
@@ -85,12 +78,13 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
         """Fetch a export of a NFS cluster given the pseudo path/binding"""
         return self.export_mgr.get_export(cluster_id=cluster_id, pseudo_path=binding)
 
-    @CLICommand('nfs export update', perm='rw')
-    @CLICheckNonemptyFileInput(desc='CephFS Export configuration')
-    def _cmd_nfs_export_update(self, cluster_id: str, inbuf: str) -> Tuple[int, str, str]:
-        """Update an export of a NFS cluster by `-i <json_file>`"""
-        # The export <json_file> is passed to -i and it's processing is handled by the Ceph CLI.
-        return self.export_mgr.update_export(cluster_id, export_config=inbuf)
+    @CLICommand('nfs export apply', perm='rw')
+    @CLICheckNonemptyFileInput(desc='Export JSON specification')
+    def _cmd_nfs_export_apply(self, cluster_id: str, inbuf: str) -> Tuple[int, str, str]:
+        """Create or update an export by `-i <json_file>`"""
+        # The export <json_file> is passed to -i and it's processing
+        # is handled by the Ceph CLI.
+        return self.export_mgr.apply_export(cluster_id, export_config=inbuf)
 
     @CLICommand('nfs cluster create', perm='rw')
     def _cmd_nfs_cluster_create(self,
index c826a4c03644b266c8d8415bc497e93c3629d5aa..fe1776f52a066ef05b0d9b36f80d549d2d77b544 100644 (file)
@@ -610,7 +610,7 @@ NFS_CORE_PARAM {
     def _do_test_update_export(self, cluster_id, expected_exports):
         nfs_mod = Module('nfs', '', '')
         conf = ExportMgr(nfs_mod)
-        r = conf.update_export(cluster_id, json.dumps({
+        r = conf.apply_export(cluster_id, json.dumps({
             'export_id': 2,
             'daemons': expected_exports[2],
             'path': 'bucket',
@@ -653,6 +653,93 @@ NFS_CORE_PARAM {
         assert export.clients[0].access_type is None
         assert export.cluster_id == cluster_id
 
+        # do it again, with changes
+        r = conf.apply_export(cluster_id, json.dumps({
+            'export_id': 2,
+            'daemons': expected_exports[2],
+            'path': 'newbucket',
+            'pseudo': '/rgw/bucket',
+            'cluster_id': cluster_id,
+            'tag': 'bucket_tag',
+            'access_type': 'RO',
+            'squash': 'root',
+            'security_label': False,
+            'protocols': [4],
+            'transports': ['TCP'],
+            'clients': [{
+                'addresses': ["192.168.10.0/16"],
+                'access_type': None,
+                'squash': None
+            }],
+            'fsal': {
+                'name': 'RGW',
+                'user_id': 'testuser',
+                'access_key_id': 'access_key',
+                'secret_access_key': 'secret_key',
+            }
+        }))
+        assert r[0] == 0
+
+        export = conf._fetch_export('foo', '/rgw/bucket')
+        assert export.export_id == 2
+        assert export.path == "newbucket"
+        assert export.pseudo == "/rgw/bucket"
+        assert export.access_type == "RO"
+        assert export.squash == "root"
+        assert export.protocols == [4]
+        assert export.transports == ["TCP"]
+        assert export.fsal.name == "RGW"
+        assert export.fsal.user_id == "testuser"
+        assert export.fsal.access_key_id == "access_key"
+        assert export.fsal.secret_access_key == "secret_key"
+        assert len(export.clients) == 1
+        assert export.clients[0].squash is None
+        assert export.clients[0].access_type is None
+        assert export.cluster_id == cluster_id
+
+        # again, but without export_id
+        r = conf.apply_export(cluster_id, json.dumps({
+            'daemons': expected_exports[2],
+            'path': 'newestbucket',
+            'pseudo': '/rgw/bucket',
+            'cluster_id': cluster_id,
+            'tag': 'bucket_tag',
+            'access_type': 'RW',
+            'squash': 'root',
+            'security_label': False,
+            'protocols': [4],
+            'transports': ['TCP'],
+            'clients': [{
+                'addresses': ["192.168.10.0/16"],
+                'access_type': None,
+                'squash': None
+            }],
+            'fsal': {
+                'name': 'RGW',
+                'user_id': 'testuser',
+                'access_key_id': 'access_key',
+                'secret_access_key': 'secret_key',
+            }
+        }))
+        assert r[0] == 0
+
+        export = conf._fetch_export('foo', '/rgw/bucket')
+        assert export.export_id == 2
+        assert export.path == "newestbucket"
+        assert export.pseudo == "/rgw/bucket"
+        assert export.access_type == "RW"
+        assert export.squash == "root"
+        assert export.protocols == [4]
+        assert export.transports == ["TCP"]
+        assert export.fsal.name == "RGW"
+        assert export.fsal.user_id == "testuser"
+        assert export.fsal.access_key_id == "access_key"
+        assert export.fsal.secret_access_key == "secret_key"
+        assert len(export.clients) == 1
+        assert export.clients[0].squash is None
+        assert export.clients[0].access_type is None
+        assert export.cluster_id == cluster_id
+
     def test_remove_export(self) -> None:
         with self._mock_orchestrator(True), mock.patch('nfs.module.ExportMgr._exec'):
             for cluster_id, info in self.clusters.items():