git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
mgr/dashboard: NFS 'create export' form: fixes  (43682/head)
author     Alfonso Martínez <almartin@redhat.com>
           Thu, 4 Nov 2021 13:56:37 +0000 (14:56 +0100)
committer  Alfonso Martínez <almartin@redhat.com>
           Thu, 4 Nov 2021 16:33:21 +0000 (17:33 +0100)
* Do not allow a pseudo that is already in use by another export (a minimal sketch of this check follows this list).
* Create mode form: prefill dropdown selectors when at least one option is available.
* Edit mode form: do not reset field values that depend on other values being edited (unlike Create mode).
* Fix broken link: cluster service.
* Fix error message style for a non-existent CephFS path.
* nfs.service.ts: lsDir: throw an error if no volume is provided.
* File renaming: nfsganesha.py => nfs.py; test_ganesha.py => test_nfs.py.
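
The duplicate-pseudo check from the first item is enforced in the backend controller before the export is handed to mgr/nfs. Below is a minimal, self-contained sketch of that flow; FakeExportMgr is an invented stand-in for the export manager the controller obtains via mgr.remote('nfs', 'fetch_nfs_export_obj'), so names and signatures here are illustrative only.

import json
from typing import Any, Dict, Optional, Tuple


class FakeExportMgr:
    """Invented stand-in for the object returned by mgr.remote('nfs', 'fetch_nfs_export_obj')."""

    def __init__(self) -> None:
        self._exports: Dict[Tuple[str, str], Dict[str, Any]] = {}

    def get_export_by_pseudo(self, cluster_id: str, pseudo: str) -> Optional[Dict[str, Any]]:
        return self._exports.get((cluster_id, pseudo))

    def apply_export(self, cluster_id: str, export_config: str) -> Tuple[int, str, str]:
        export = json.loads(export_config)
        self._exports[(cluster_id, export['pseudo'])] = export
        return 0, '', ''


def create_export(export_mgr: FakeExportMgr, cluster_id: str, pseudo: str,
                  **raw_ex: Any) -> Dict[str, Any]:
    # Same guard the controller applies before handing the export to mgr/nfs.
    if export_mgr.get_export_by_pseudo(cluster_id, pseudo):
        raise ValueError(f'Pseudo {pseudo} is already in use.')
    raw_ex.update({'cluster_id': cluster_id, 'pseudo': pseudo})
    ret, _, err = export_mgr.apply_export(cluster_id, json.dumps(raw_ex))
    if ret != 0:
        raise RuntimeError(f'Export creation failed {err}')
    return export_mgr.get_export_by_pseudo(cluster_id, pseudo)


export_mgr = FakeExportMgr()
create_export(export_mgr, 'mynfs', '/bk-ps', path='bk1', access_type='RO')
try:
    create_export(export_mgr, 'mynfs', '/bk-ps', path='bk1', access_type='RO')
except ValueError as err:
    print(err)  # Pseudo /bk-ps is already in use.

In the real code path, a duplicate pseudo raises a DashboardException, which the frontend maps to the new 'pseudoAlreadyExists' form error (see the nfs-form.component.ts changes below).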

Fixes: https://tracker.ceph.com/issues/53083
Signed-off-by: Alfonso Martínez <almartin@redhat.com>
(cherry picked from commit d817a24e345516229bc637e2c675d12e6bfcc456)

 Conflicts:
    src/pybind/mgr/dashboard/tests/test_ganesha.py
      - Deleted the file: it has been renamed to test_nfs.py.

12 files changed:
src/pybind/mgr/dashboard/controllers/nfs.py [new file with mode: 0644]
src/pybind/mgr/dashboard/controllers/nfsganesha.py [deleted file]
src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/nfs-form/nfs-form.component.html
src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/nfs-form/nfs-form.component.spec.ts
src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/nfs-form/nfs-form.component.ts
src/pybind/mgr/dashboard/frontend/src/app/shared/api/nfs.service.spec.ts
src/pybind/mgr/dashboard/frontend/src/app/shared/api/nfs.service.ts
src/pybind/mgr/dashboard/plugins/feature_toggles.py
src/pybind/mgr/dashboard/tests/test_auth.py
src/pybind/mgr/dashboard/tests/test_ganesha.py [deleted file]
src/pybind/mgr/dashboard/tests/test_nfs.py [new file with mode: 0644]
src/pybind/mgr/nfs/export.py

diff --git a/src/pybind/mgr/dashboard/controllers/nfs.py b/src/pybind/mgr/dashboard/controllers/nfs.py
new file mode 100644 (file)
index 0000000..9985ba1
--- /dev/null
@@ -0,0 +1,288 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+import json
+import logging
+import os
+from functools import partial
+from typing import Any, Dict, List, Optional
+
+import cephfs
+from mgr_module import NFS_GANESHA_SUPPORTED_FSALS
+
+from .. import mgr
+from ..security import Scope
+from ..services.cephfs import CephFS
+from ..services.exception import DashboardException, serialize_dashboard_exception
+from . import APIDoc, APIRouter, BaseController, Endpoint, EndpointDoc, \
+    ReadPermission, RESTController, Task, UIRouter
+from ._version import APIVersion
+
+logger = logging.getLogger('controllers.nfs')
+
+
+class NFSException(DashboardException):
+    def __init__(self, msg):
+        super(NFSException, self).__init__(component="nfs", msg=msg)
+
+
+# documentation helpers
+EXPORT_SCHEMA = {
+    'export_id': (int, 'Export ID'),
+    'path': (str, 'Export path'),
+    'cluster_id': (str, 'Cluster identifier'),
+    'pseudo': (str, 'Pseudo FS path'),
+    'access_type': (str, 'Export access type'),
+    'squash': (str, 'Export squash policy'),
+    'security_label': (str, 'Security label'),
+    'protocols': ([int], 'List of protocol types'),
+    'transports': ([str], 'List of transport types'),
+    'fsal': ({
+        'name': (str, 'name of FSAL'),
+        'fs_name': (str, 'CephFS filesystem name', True),
+        'sec_label_xattr': (str, 'Name of xattr for security label', True),
+        'user_id': (str, 'User id', True)
+    }, 'FSAL configuration'),
+    'clients': ([{
+        'addresses': ([str], 'list of IP addresses'),
+        'access_type': (str, 'Client access type'),
+        'squash': (str, 'Client squash policy')
+    }], 'List of client configurations'),
+}
+
+
+CREATE_EXPORT_SCHEMA = {
+    'path': (str, 'Export path'),
+    'cluster_id': (str, 'Cluster identifier'),
+    'pseudo': (str, 'Pseudo FS path'),
+    'access_type': (str, 'Export access type'),
+    'squash': (str, 'Export squash policy'),
+    'security_label': (str, 'Security label'),
+    'protocols': ([int], 'List of protocol types'),
+    'transports': ([str], 'List of transport types'),
+    'fsal': ({
+        'name': (str, 'name of FSAL'),
+        'fs_name': (str, 'CephFS filesystem name', True),
+        'sec_label_xattr': (str, 'Name of xattr for security label', True)
+    }, 'FSAL configuration'),
+    'clients': ([{
+        'addresses': ([str], 'list of IP addresses'),
+        'access_type': (str, 'Client access type'),
+        'squash': (str, 'Client squash policy')
+    }], 'List of client configurations')
+}
+
+
+# pylint: disable=not-callable
+def NfsTask(name, metadata, wait_for):  # noqa: N802
+    def composed_decorator(func):
+        return Task("nfs/{}".format(name), metadata, wait_for,
+                    partial(serialize_dashboard_exception,
+                            include_http_status=True))(func)
+    return composed_decorator
+
+
+@APIRouter('/nfs-ganesha', Scope.NFS_GANESHA)
+@APIDoc("NFS-Ganesha Cluster Management API", "NFS-Ganesha")
+class NFSGanesha(RESTController):
+
+    @EndpointDoc("Status of NFS-Ganesha management feature",
+                 responses={200: {
+                     'available': (bool, "Is API available?"),
+                     'message': (str, "Error message")
+                 }})
+    @Endpoint()
+    @ReadPermission
+    def status(self):
+        status = {'available': True, 'message': None}
+        try:
+            mgr.remote('nfs', 'cluster_ls')
+        except ImportError as error:
+            logger.exception(error)
+            status['available'] = False
+            status['message'] = str(error)  # type: ignore
+
+        return status
+
+
+@APIRouter('/nfs-ganesha/cluster', Scope.NFS_GANESHA)
+@APIDoc(group="NFS-Ganesha")
+class NFSGaneshaCluster(RESTController):
+    @ReadPermission
+    @RESTController.MethodMap(version=APIVersion.EXPERIMENTAL)
+    def list(self):
+        return mgr.remote('nfs', 'cluster_ls')
+
+
+@APIRouter('/nfs-ganesha/export', Scope.NFS_GANESHA)
+@APIDoc(group="NFS-Ganesha")
+class NFSGaneshaExports(RESTController):
+    RESOURCE_ID = "cluster_id/export_id"
+
+    @staticmethod
+    def _get_schema_export(export: Dict[str, Any]) -> Dict[str, Any]:
+        """
+        Method that avoids returning export info not exposed in the export schema
+        e.g., rgw user access/secret keys.
+        """
+        schema_fsal_info = {}
+        for key in export['fsal'].keys():
+            if key in EXPORT_SCHEMA['fsal'][0].keys():  # type: ignore
+                schema_fsal_info[key] = export['fsal'][key]
+        export['fsal'] = schema_fsal_info
+        return export
+
+    @EndpointDoc("List all NFS-Ganesha exports",
+                 responses={200: [EXPORT_SCHEMA]})
+    def list(self) -> List[Dict[str, Any]]:
+        exports = []
+        for export in mgr.remote('nfs', 'export_ls'):
+            exports.append(self._get_schema_export(export))
+
+        return exports
+
+    @NfsTask('create', {'path': '{path}', 'fsal': '{fsal.name}',
+                        'cluster_id': '{cluster_id}'}, 2.0)
+    @EndpointDoc("Creates a new NFS-Ganesha export",
+                 parameters=CREATE_EXPORT_SCHEMA,
+                 responses={201: EXPORT_SCHEMA})
+    @RESTController.MethodMap(version=APIVersion(2, 0))  # type: ignore
+    def create(self, path, cluster_id, pseudo, access_type,
+               squash, security_label, protocols, transports, fsal, clients) -> Dict[str, Any]:
+        export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj')
+        if export_mgr.get_export_by_pseudo(cluster_id, pseudo):
+            raise DashboardException(msg=f'Pseudo {pseudo} is already in use.',
+                                     component='nfs')
+        if hasattr(fsal, 'user_id'):
+            fsal.pop('user_id')  # mgr/nfs does not let you customize user_id
+        raw_ex = {
+            'path': path,
+            'pseudo': pseudo,
+            'cluster_id': cluster_id,
+            'access_type': access_type,
+            'squash': squash,
+            'security_label': security_label,
+            'protocols': protocols,
+            'transports': transports,
+            'fsal': fsal,
+            'clients': clients
+        }
+        ret, _, err = export_mgr.apply_export(cluster_id, json.dumps(raw_ex))
+        if ret == 0:
+            return self._get_schema_export(
+                export_mgr.get_export_by_pseudo(cluster_id, pseudo))
+        raise NFSException(f"Export creation failed {err}")
+
+    @EndpointDoc("Get an NFS-Ganesha export",
+                 parameters={
+                     'cluster_id': (str, 'Cluster identifier'),
+                     'export_id': (str, "Export ID")
+                 },
+                 responses={200: EXPORT_SCHEMA})
+    def get(self, cluster_id, export_id) -> Optional[Dict[str, Any]]:
+        export_id = int(export_id)
+        export = mgr.remote('nfs', 'export_get', cluster_id, export_id)
+        if export:
+            export = self._get_schema_export(export)
+
+        return export
+
+    @NfsTask('edit', {'cluster_id': '{cluster_id}', 'export_id': '{export_id}'},
+             2.0)
+    @EndpointDoc("Updates an NFS-Ganesha export",
+                 parameters=dict(export_id=(int, "Export ID"),
+                                 **CREATE_EXPORT_SCHEMA),
+                 responses={200: EXPORT_SCHEMA})
+    @RESTController.MethodMap(version=APIVersion(2, 0))  # type: ignore
+    def set(self, cluster_id, export_id, path, pseudo, access_type,
+            squash, security_label, protocols, transports, fsal, clients) -> Dict[str, Any]:
+
+        if hasattr(fsal, 'user_id'):
+            fsal.pop('user_id')  # mgr/nfs does not let you customize user_id
+        raw_ex = {
+            'path': path,
+            'pseudo': pseudo,
+            'cluster_id': cluster_id,
+            'export_id': export_id,
+            'access_type': access_type,
+            'squash': squash,
+            'security_label': security_label,
+            'protocols': protocols,
+            'transports': transports,
+            'fsal': fsal,
+            'clients': clients
+        }
+
+        export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj')
+        ret, _, err = export_mgr.apply_export(cluster_id, json.dumps(raw_ex))
+        if ret == 0:
+            return self._get_schema_export(
+                export_mgr.get_export_by_pseudo(cluster_id, pseudo))
+        raise NFSException(f"Failed to update export: {err}")
+
+    @NfsTask('delete', {'cluster_id': '{cluster_id}',
+                        'export_id': '{export_id}'}, 2.0)
+    @EndpointDoc("Deletes an NFS-Ganesha export",
+                 parameters={
+                     'cluster_id': (str, 'Cluster identifier'),
+                     'export_id': (int, "Export ID")
+                 })
+    @RESTController.MethodMap(version=APIVersion(2, 0))  # type: ignore
+    def delete(self, cluster_id, export_id):
+        export_id = int(export_id)
+
+        export = mgr.remote('nfs', 'export_get', cluster_id, export_id)
+        if not export:
+            raise DashboardException(
+                http_status_code=404,
+                msg=f'Export with id {export_id} not found.',
+                component='nfs')
+        mgr.remote('nfs', 'export_rm', cluster_id, export['pseudo'])
+
+
+@UIRouter('/nfs-ganesha', Scope.NFS_GANESHA)
+class NFSGaneshaUi(BaseController):
+    @Endpoint('GET', '/fsals')
+    @ReadPermission
+    def fsals(self):
+        return NFS_GANESHA_SUPPORTED_FSALS
+
+    @Endpoint('GET', '/lsdir')
+    @ReadPermission
+    def lsdir(self, fs_name, root_dir=None, depth=1):  # pragma: no cover
+        if root_dir is None:
+            root_dir = "/"
+        if not root_dir.startswith('/'):
+            root_dir = '/{}'.format(root_dir)
+        root_dir = os.path.normpath(root_dir)
+
+        try:
+            depth = int(depth)
+            error_msg = ''
+            if depth < 0:
+                error_msg = '`depth` must be greater or equal to 0.'
+            if depth > 5:
+                logger.warning("Limiting depth to maximum value of 5: "
+                               "input depth=%s", depth)
+                depth = 5
+        except ValueError:
+            error_msg = '`depth` must be an integer.'
+        finally:
+            if error_msg:
+                raise DashboardException(code=400,
+                                         component='nfs',
+                                         msg=error_msg)
+
+        try:
+            cfs = CephFS(fs_name)
+            paths = [root_dir]
+            paths.extend([p['path'].rstrip('/')
+                          for p in cfs.ls_dir(root_dir, depth)])
+        except (cephfs.ObjectNotFound, cephfs.PermissionError):
+            paths = []
+        return {'paths': paths}
+
+    @Endpoint('GET', '/cephfs/filesystems')
+    @ReadPermission
+    def filesystems(self):
+        return CephFS.list_filesystems()
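
For context, a hedged usage sketch of the new export-creation endpoint from outside the dashboard UI. DASHBOARD_URL, TOKEN and the payload values are placeholders; the versioned Accept header follows the dashboard's usual application/vnd.ceph.api.vX.Y+json convention (an assumption, not something this commit defines), and the body fields mirror CREATE_EXPORT_SCHEMA above.

# Illustrative only: DASHBOARD_URL and TOKEN are placeholders, not values from this commit.
import requests

DASHBOARD_URL = 'https://dashboard.example:8443'
TOKEN = '<auth token obtained from the dashboard auth endpoint>'

export = {
    'path': '/volumes/group1/subvol1',
    'cluster_id': 'mynfs',
    'pseudo': '/cephfs-export',
    'access_type': 'RW',
    'squash': 'no_root_squash',
    'security_label': False,
    'protocols': [4],
    'transports': ['TCP', 'UDP'],
    'fsal': {'name': 'CEPH', 'fs_name': 'a'},
    'clients': [],
}

resp = requests.post(
    f'{DASHBOARD_URL}/api/nfs-ganesha/export',
    json=export,
    headers={
        # Version 2.0 of the endpoint, per @RESTController.MethodMap above (assumed header format).
        'Accept': 'application/vnd.ceph.api.v2.0+json',
        'Authorization': f'Bearer {TOKEN}',
    },
    verify=False,  # e.g. self-signed certificates in test setups
)
resp.raise_for_status()
print(resp.json()['export_id'])

A second POST with the same pseudo is answered with HTTP 400 and a detail message containing "Pseudo ... is already in use.", which is what the frontend's setFormErrors() looks for.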
diff --git a/src/pybind/mgr/dashboard/controllers/nfsganesha.py b/src/pybind/mgr/dashboard/controllers/nfsganesha.py
deleted file mode 100644 (file)
index 7d16b91..0000000
+++ /dev/null
@@ -1,284 +0,0 @@
-# -*- coding: utf-8 -*-
-from __future__ import absolute_import
-
-import json
-import logging
-import os
-from functools import partial
-from typing import Any, Dict, List, Optional
-
-import cephfs
-import cherrypy
-from mgr_module import NFS_GANESHA_SUPPORTED_FSALS
-
-from .. import mgr
-from ..security import Scope
-from ..services.cephfs import CephFS
-from ..services.exception import DashboardException, serialize_dashboard_exception
-from . import APIDoc, APIRouter, BaseController, Endpoint, EndpointDoc, \
-    ReadPermission, RESTController, Task, UIRouter
-from ._version import APIVersion
-
-logger = logging.getLogger('controllers.nfs')
-
-
-class NFSException(DashboardException):
-    def __init__(self, msg):
-        super(NFSException, self).__init__(component="nfs", msg=msg)
-
-
-# documentation helpers
-EXPORT_SCHEMA = {
-    'export_id': (int, 'Export ID'),
-    'path': (str, 'Export path'),
-    'cluster_id': (str, 'Cluster identifier'),
-    'pseudo': (str, 'Pseudo FS path'),
-    'access_type': (str, 'Export access type'),
-    'squash': (str, 'Export squash policy'),
-    'security_label': (str, 'Security label'),
-    'protocols': ([int], 'List of protocol types'),
-    'transports': ([str], 'List of transport types'),
-    'fsal': ({
-        'name': (str, 'name of FSAL'),
-        'fs_name': (str, 'CephFS filesystem name', True),
-        'sec_label_xattr': (str, 'Name of xattr for security label', True),
-        'user_id': (str, 'User id', True)
-    }, 'FSAL configuration'),
-    'clients': ([{
-        'addresses': ([str], 'list of IP addresses'),
-        'access_type': (str, 'Client access type'),
-        'squash': (str, 'Client squash policy')
-    }], 'List of client configurations'),
-}
-
-
-CREATE_EXPORT_SCHEMA = {
-    'path': (str, 'Export path'),
-    'cluster_id': (str, 'Cluster identifier'),
-    'pseudo': (str, 'Pseudo FS path'),
-    'access_type': (str, 'Export access type'),
-    'squash': (str, 'Export squash policy'),
-    'security_label': (str, 'Security label'),
-    'protocols': ([int], 'List of protocol types'),
-    'transports': ([str], 'List of transport types'),
-    'fsal': ({
-        'name': (str, 'name of FSAL'),
-        'fs_name': (str, 'CephFS filesystem name', True),
-        'sec_label_xattr': (str, 'Name of xattr for security label', True)
-    }, 'FSAL configuration'),
-    'clients': ([{
-        'addresses': ([str], 'list of IP addresses'),
-        'access_type': (str, 'Client access type'),
-        'squash': (str, 'Client squash policy')
-    }], 'List of client configurations')
-}
-
-
-# pylint: disable=not-callable
-def NfsTask(name, metadata, wait_for):  # noqa: N802
-    def composed_decorator(func):
-        return Task("nfs/{}".format(name), metadata, wait_for,
-                    partial(serialize_dashboard_exception,
-                            include_http_status=True))(func)
-    return composed_decorator
-
-
-@APIRouter('/nfs-ganesha', Scope.NFS_GANESHA)
-@APIDoc("NFS-Ganesha Cluster Management API", "NFS-Ganesha")
-class NFSGanesha(RESTController):
-
-    @EndpointDoc("Status of NFS-Ganesha management feature",
-                 responses={200: {
-                     'available': (bool, "Is API available?"),
-                     'message': (str, "Error message")
-                 }})
-    @Endpoint()
-    @ReadPermission
-    def status(self):
-        status = {'available': True, 'message': None}
-        try:
-            mgr.remote('nfs', 'cluster_ls')
-        except ImportError as error:
-            logger.exception(error)
-            status['available'] = False
-            status['message'] = str(error)  # type: ignore
-
-        return status
-
-
-@APIRouter('/nfs-ganesha/cluster', Scope.NFS_GANESHA)
-@APIDoc(group="NFS-Ganesha")
-class NFSGaneshaCluster(RESTController):
-    @ReadPermission
-    @RESTController.MethodMap(version=APIVersion.EXPERIMENTAL)
-    def list(self):
-        return mgr.remote('nfs', 'cluster_ls')
-
-
-@APIRouter('/nfs-ganesha/export', Scope.NFS_GANESHA)
-@APIDoc(group="NFS-Ganesha")
-class NFSGaneshaExports(RESTController):
-    RESOURCE_ID = "cluster_id/export_id"
-
-    @staticmethod
-    def _get_schema_export(export: Dict[str, Any]) -> Dict[str, Any]:
-        """
-        Method that avoids returning export info not exposed in the export schema
-        e.g., rgw user access/secret keys.
-        """
-        schema_fsal_info = {}
-        for key in export['fsal'].keys():
-            if key in EXPORT_SCHEMA['fsal'][0].keys():  # type: ignore
-                schema_fsal_info[key] = export['fsal'][key]
-        export['fsal'] = schema_fsal_info
-        return export
-
-    @EndpointDoc("List all NFS-Ganesha exports",
-                 responses={200: [EXPORT_SCHEMA]})
-    def list(self) -> List[Dict[str, Any]]:
-        exports = []
-        for export in mgr.remote('nfs', 'export_ls'):
-            exports.append(self._get_schema_export(export))
-
-        return exports
-
-    @NfsTask('create', {'path': '{path}', 'fsal': '{fsal.name}',
-                        'cluster_id': '{cluster_id}'}, 2.0)
-    @EndpointDoc("Creates a new NFS-Ganesha export",
-                 parameters=CREATE_EXPORT_SCHEMA,
-                 responses={201: EXPORT_SCHEMA})
-    @RESTController.MethodMap(version=APIVersion(2, 0))  # type: ignore
-    def create(self, path, cluster_id, pseudo, access_type,
-               squash, security_label, protocols, transports, fsal, clients) -> Dict[str, Any]:
-
-        if hasattr(fsal, 'user_id'):
-            fsal.pop('user_id')  # mgr/nfs does not let you customize user_id
-        raw_ex = {
-            'path': path,
-            'pseudo': pseudo,
-            'cluster_id': cluster_id,
-            'access_type': access_type,
-            'squash': squash,
-            'security_label': security_label,
-            'protocols': protocols,
-            'transports': transports,
-            'fsal': fsal,
-            'clients': clients
-        }
-        export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj')
-        ret, _, err = export_mgr.apply_export(cluster_id, json.dumps(raw_ex))
-        if ret == 0:
-            return self._get_schema_export(
-                export_mgr._get_export_dict(cluster_id, pseudo))  # pylint: disable=W0212
-        raise NFSException(f"Export creation failed {err}")
-
-    @EndpointDoc("Get an NFS-Ganesha export",
-                 parameters={
-                     'cluster_id': (str, 'Cluster identifier'),
-                     'export_id': (str, "Export ID")
-                 },
-                 responses={200: EXPORT_SCHEMA})
-    def get(self, cluster_id, export_id) -> Optional[Dict[str, Any]]:
-        export_id = int(export_id)
-        export = mgr.remote('nfs', 'export_get', cluster_id, export_id)
-        if export:
-            export = self._get_schema_export(export)
-
-        return export
-
-    @NfsTask('edit', {'cluster_id': '{cluster_id}', 'export_id': '{export_id}'},
-             2.0)
-    @EndpointDoc("Updates an NFS-Ganesha export",
-                 parameters=dict(export_id=(int, "Export ID"),
-                                 **CREATE_EXPORT_SCHEMA),
-                 responses={200: EXPORT_SCHEMA})
-    @RESTController.MethodMap(version=APIVersion(2, 0))  # type: ignore
-    def set(self, cluster_id, export_id, path, pseudo, access_type,
-            squash, security_label, protocols, transports, fsal, clients) -> Dict[str, Any]:
-
-        if hasattr(fsal, 'user_id'):
-            fsal.pop('user_id')  # mgr/nfs does not let you customize user_id
-        raw_ex = {
-            'path': path,
-            'pseudo': pseudo,
-            'cluster_id': cluster_id,
-            'export_id': export_id,
-            'access_type': access_type,
-            'squash': squash,
-            'security_label': security_label,
-            'protocols': protocols,
-            'transports': transports,
-            'fsal': fsal,
-            'clients': clients
-        }
-
-        export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj')
-        ret, _, err = export_mgr.apply_export(cluster_id, json.dumps(raw_ex))
-        if ret == 0:
-            return self._get_schema_export(
-                export_mgr._get_export_dict(cluster_id, pseudo))  # pylint: disable=W0212
-        raise NFSException(f"Failed to update export: {err}")
-
-    @NfsTask('delete', {'cluster_id': '{cluster_id}',
-                        'export_id': '{export_id}'}, 2.0)
-    @EndpointDoc("Deletes an NFS-Ganesha export",
-                 parameters={
-                     'cluster_id': (str, 'Cluster identifier'),
-                     'export_id': (int, "Export ID")
-                 })
-    @RESTController.MethodMap(version=APIVersion(2, 0))  # type: ignore
-    def delete(self, cluster_id, export_id):
-        export_id = int(export_id)
-
-        export = mgr.remote('nfs', 'export_get', cluster_id, export_id)
-        if not export:
-            raise cherrypy.HTTPError(404)  # pragma: no cover - the handling is too obvious
-        mgr.remote('nfs', 'export_rm', cluster_id, export['pseudo'])
-
-
-@UIRouter('/nfs-ganesha', Scope.NFS_GANESHA)
-class NFSGaneshaUi(BaseController):
-    @Endpoint('GET', '/fsals')
-    @ReadPermission
-    def fsals(self):
-        return NFS_GANESHA_SUPPORTED_FSALS
-
-    @Endpoint('GET', '/lsdir')
-    @ReadPermission
-    def lsdir(self, fs_name, root_dir=None, depth=1):  # pragma: no cover
-        if root_dir is None:
-            root_dir = "/"
-        if not root_dir.startswith('/'):
-            root_dir = '/{}'.format(root_dir)
-        root_dir = os.path.normpath(root_dir)
-
-        try:
-            depth = int(depth)
-            error_msg = ''
-            if depth < 0:
-                error_msg = '`depth` must be greater or equal to 0.'
-            if depth > 5:
-                logger.warning("Limiting depth to maximum value of 5: "
-                               "input depth=%s", depth)
-                depth = 5
-        except ValueError:
-            error_msg = '`depth` must be an integer.'
-        finally:
-            if error_msg:
-                raise DashboardException(code=400,
-                                         component='nfsganesha',
-                                         msg=error_msg)
-
-        try:
-            cfs = CephFS(fs_name)
-            paths = [root_dir]
-            paths.extend([p['path'].rstrip('/')
-                          for p in cfs.ls_dir(root_dir, depth)])
-        except (cephfs.ObjectNotFound, cephfs.PermissionError):
-            paths = []
-        return {'paths': paths}
-
-    @Endpoint('GET', '/cephfs/filesystems')
-    @ReadPermission
-    def filesystems(self):
-        return CephFS.list_filesystems()
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/nfs-form/nfs-form.component.html b/src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/nfs-form/nfs-form.component.html
index 3e390db7335e90ea480c302e177734b702248732..97a59ae32606577f5020a660591bc7629d0f1ab3 100644 (file)
@@ -39,7 +39,7 @@
             <span class="invalid-feedback"
                   *ngIf="nfsForm.showError('cluster_id', formDir, 'required') || allClusters?.length === 0"
                   i18n>This field is required.
-                       To create a new NFS cluster, <a routerLink="/services/create"
+                       To create a new NFS cluster, <a [routerLink]="['/services', {outlets: {modal: ['create']}}]"
                                                        class="btn-link">add a new NFS Service</a>.</span>
           </div>
         </div>
@@ -79,7 +79,7 @@
             </div>
           </div>
 
-          <!-- CephFS fs_name -->
+          <!-- CephFS Volume -->
           <div class="form-group row"
                *ngIf="nfsForm.getValue('name') === 'CEPH'">
             <label class="cd-col-form-label required"
             <span class="invalid-feedback"
                   *ngIf="nfsForm.showError('path', formDir, 'pattern')"
                   i18n>Path need to start with a '/' and can be followed by a word</span>
-            <span class="form-text text-muted"
+            <span class="invalid-feedback"
                   *ngIf="nfsForm.showError('path', formDir, 'pathNameNotAllowed')"
-                  i18n>The path does not exist.</span>
+                  i18n>The path does not exist in the selected volume.</span>
           </div>
         </div>
 
             <span class="invalid-feedback"
                   *ngIf="nfsForm.showError('pseudo', formDir, 'required')"
                   i18n>This field is required.</span>
+            <span class="invalid-feedback"
+                  *ngIf="nfsForm.showError('pseudo', formDir, 'pseudoAlreadyExists')"
+                  i18n>The pseudo is already in use by another export.</span>
             <span class="invalid-feedback"
                   *ngIf="nfsForm.showError('pseudo', formDir, 'pattern')"
                   i18n>Pseudo needs to start with a '/' and can't contain any of the following: &gt;, &lt;, |, &, ( or ).</span>
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/nfs-form/nfs-form.component.spec.ts b/src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/nfs-form/nfs-form.component.spec.ts
index 4bf34e2c7a0d1fafa1a769fcb4f837a9854b9adf..7cf3d61387a436a138b1b3d92630c1f980478065 100644 (file)
@@ -74,14 +74,14 @@ describe('NfsFormComponent', () => {
     expect(component.nfsForm.value).toEqual({
       access_type: 'RW',
       clients: [],
-      cluster_id: '',
-      fsal: { fs_name: 'a', name: '' },
+      cluster_id: 'mynfs',
+      fsal: { fs_name: 'a', name: 'CEPH' },
       path: '/',
       protocolNfsv4: true,
       pseudo: '',
       sec_label_xattr: 'security.selinux',
       security_label: false,
-      squash: '',
+      squash: 'no_root_squash',
       transportTCP: true,
       transportUDP: true
     });
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/nfs-form/nfs-form.component.ts b/src/pybind/mgr/dashboard/frontend/src/app/ceph/nfs/nfs-form/nfs-form.component.ts
index 46eeeec52cb22f839ca3afe6dc0e39864bbde455..a56c1105e08e49c2b89b9ac41fb5c7b9faec0d61 100644 (file)
@@ -24,6 +24,7 @@ import { CdFormGroup } from '~/app/shared/forms/cd-form-group';
 import { CdValidators } from '~/app/shared/forms/cd-validators';
 import { FinishedTask } from '~/app/shared/models/finished-task';
 import { Permission } from '~/app/shared/models/permissions';
+import { CdHttpErrorResponse } from '~/app/shared/services/api-interceptor.service';
 import { AuthStorageService } from '~/app/shared/services/auth-storage.service';
 import { TaskWrapperService } from '~/app/shared/services/task-wrapper.service';
 import { NfsFormClientComponent } from '../nfs-form-client/nfs-form-client.component';
@@ -163,7 +164,7 @@ export class NfsFormComponent extends CdForm implements OnInit {
       access_type: new FormControl('RW', {
         validators: [Validators.required]
       }),
-      squash: new FormControl('', {
+      squash: new FormControl(this.nfsSquash[0], {
         validators: [Validators.required]
       }),
       transportUDP: new FormControl(true, {
@@ -222,6 +223,9 @@ export class NfsFormComponent extends CdForm implements OnInit {
     for (const cluster of clusters) {
       this.allClusters.push({ cluster_id: cluster });
     }
+    if (!this.isEdit && this.allClusters.length > 0) {
+      this.nfsForm.get('cluster_id').setValue(this.allClusters[0].cluster_id);
+    }
   }
 
   resolveFsals(res: string[]) {
@@ -234,17 +238,18 @@ export class NfsFormComponent extends CdForm implements OnInit {
         this.allFsals.push(fsalItem);
       }
     });
-
-    if (this.allFsals.length === 1 && _.isUndefined(this.nfsForm.getValue('fsal'))) {
+    if (!this.isEdit && this.allFsals.length > 0) {
       this.nfsForm.patchValue({
-        fsal: this.allFsals[0]
+        fsal: {
+          name: this.allFsals[0].value
+        }
       });
     }
   }
 
   resolveFilesystems(filesystems: any[]) {
     this.allFsNames = filesystems;
-    if (filesystems.length === 1) {
+    if (!this.isEdit && filesystems.length > 0) {
       this.nfsForm.patchValue({
         fsal: {
           fs_name: filesystems[0].name
@@ -254,6 +259,7 @@ export class NfsFormComponent extends CdForm implements OnInit {
   }
 
   fsalChangeHandler() {
+    this.setPathValidation();
     const fsalValue = this.nfsForm.getValue('name');
     const checkAvailability =
       fsalValue === 'RGW'
@@ -276,13 +282,13 @@ export class NfsFormComponent extends CdForm implements OnInit {
     checkAvailability.subscribe({
       next: () => {
         this.setFsalAvailability(fsalValue, true);
-        this.nfsForm.patchValue({
-          path: fsalValue === 'RGW' ? '' : '/',
-          pseudo: this.generatePseudo(),
-          access_type: this.updateAccessType()
-        });
-
-        this.setPathValidation();
+        if (!this.isEdit) {
+          this.nfsForm.patchValue({
+            path: fsalValue === 'RGW' ? '' : '/',
+            pseudo: this.generatePseudo(),
+            access_type: this.updateAccessType()
+          });
+        }
 
         this.cdRef.detectChanges();
       },
@@ -360,9 +366,11 @@ export class NfsFormComponent extends CdForm implements OnInit {
   }
 
   pathChangeHandler() {
-    this.nfsForm.patchValue({
-      pseudo: this.generatePseudo()
-    });
+    if (!this.isEdit) {
+      this.nfsForm.patchValue({
+        pseudo: this.generatePseudo()
+      });
+    }
   }
 
   private getBucketTypeahead(path: string): Observable<any> {
@@ -430,11 +438,23 @@ export class NfsFormComponent extends CdForm implements OnInit {
     }
 
     action.subscribe({
-      error: () => this.nfsForm.setErrors({ cdSubmitButton: true }),
+      error: (errorResponse: CdHttpErrorResponse) => this.setFormErrors(errorResponse),
       complete: () => this.router.navigate(['/nfs'])
     });
   }
 
+  private setFormErrors(errorResponse: CdHttpErrorResponse) {
+    if (
+      errorResponse.error.detail &&
+      errorResponse.error.detail
+        .toString()
+        .includes(`Pseudo ${this.nfsForm.getValue('pseudo')} is already in use`)
+    ) {
+      this.nfsForm.get('pseudo').setErrors({ pseudoAlreadyExists: true });
+    }
+    this.nfsForm.setErrors({ cdSubmitButton: true });
+  }
+
   private buildRequest() {
     const requestModel: any = _.cloneDeep(this.nfsForm.value);
 
@@ -500,15 +520,14 @@ export class NfsFormComponent extends CdForm implements OnInit {
         return of({ required: true });
       }
       const fsName = this.nfsForm.getValue('fsal').fs_name;
-      return this.nfsService
-        .lsDir(fsName, control.value)
-        .pipe(
-          map((directory: Directory) =>
-            directory.paths.includes(control.value) === requiredExistenceResult
-              ? null
-              : { pathNameNotAllowed: true }
-          )
-        );
+      return this.nfsService.lsDir(fsName, control.value).pipe(
+        map((directory: Directory) =>
+          directory.paths.includes(control.value) === requiredExistenceResult
+            ? null
+            : { pathNameNotAllowed: true }
+        ),
+        catchError(() => of({ pathNameNotAllowed: true }))
+      );
     };
   }
 }
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/shared/api/nfs.service.spec.ts b/src/pybind/mgr/dashboard/frontend/src/app/shared/api/nfs.service.spec.ts
index c977f7ec5b514b3235d5cafae7c8acea36b362f9..139fa490bfd82ef1684ce9fcc5eab6a69503120a 100644 (file)
@@ -1,5 +1,5 @@
 import { HttpClientTestingModule, HttpTestingController } from '@angular/common/http/testing';
-import { TestBed } from '@angular/core/testing';
+import { fakeAsync, TestBed, tick } from '@angular/core/testing';
 
 import { configureTestBed } from '~/testing/unit-test-helper';
 import { NfsService } from './nfs.service';
@@ -63,4 +63,12 @@ describe('NfsService', () => {
     const req = httpTesting.expectOne('ui-api/nfs-ganesha/lsdir/a?root_dir=foo_dir');
     expect(req.request.method).toBe('GET');
   });
+
+  it('should not call lsDir if volume is not provided', fakeAsync(() => {
+    service.lsDir('', 'foo_dir').subscribe({
+      error: (error: string) => expect(error).toEqual('Please specify a filesystem volume.')
+    });
+    tick();
+    httpTesting.expectNone('ui-api/nfs-ganesha/lsdir/?root_dir=foo_dir');
+  }));
 });
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/shared/api/nfs.service.ts b/src/pybind/mgr/dashboard/frontend/src/app/shared/api/nfs.service.ts
index 88af7a68216f0034cd938ceb5a23559844d655b3..636335673978f57619adf54f648830404070182e 100644 (file)
@@ -1,7 +1,7 @@
 import { HttpClient } from '@angular/common/http';
 import { Injectable } from '@angular/core';
 
-import { Observable } from 'rxjs';
+import { Observable, throwError } from 'rxjs';
 
 import { NfsFSAbstractionLayer } from '~/app/ceph/nfs/models/nfs.fsal';
 import { ApiClient } from '~/app/shared/api/api-client';
@@ -87,6 +87,9 @@ export class NfsService extends ApiClient {
   }
 
   lsDir(fs_name: string, root_dir: string): Observable<Directory> {
+    if (!fs_name) {
+      return throwError($localize`Please specify a filesystem volume.`);
+    }
     return this.http.get<Directory>(`${this.uiApiPath}/lsdir/${fs_name}?root_dir=${root_dir}`);
   }
 
diff --git a/src/pybind/mgr/dashboard/plugins/feature_toggles.py b/src/pybind/mgr/dashboard/plugins/feature_toggles.py
index 6253566b71dbded310ea422f92833301f24db437..0cce9244469dd40d0e1062a3d43b3bd7fdbfc678 100644 (file)
@@ -9,7 +9,7 @@ from mgr_module import CLICommand, Option
 
 from ..controllers.cephfs import CephFS
 from ..controllers.iscsi import Iscsi, IscsiTarget
-from ..controllers.nfsganesha import NFSGanesha, NFSGaneshaExports
+from ..controllers.nfs import NFSGanesha, NFSGaneshaExports
 from ..controllers.rbd import Rbd, RbdSnapshot, RbdTrash
 from ..controllers.rbd_mirroring import RbdMirroringPoolMode, \
     RbdMirroringPoolPeer, RbdMirroringSummary
diff --git a/src/pybind/mgr/dashboard/tests/test_auth.py b/src/pybind/mgr/dashboard/tests/test_auth.py
index 698db40e1e5ef5df0f7c8aad2c919ba554775274..d9755de98e45ad1f8af3528fcc0cc9d2c9bbb1ce 100644 (file)
@@ -41,7 +41,7 @@ class AuthTest(ControllerTestCase):
 
     @patch('dashboard.controllers.auth.JwtManager.gen_token', Mock(return_value='my-token'))
     @patch('dashboard.controllers.auth.AuthManager.authenticate', Mock(return_value={
-        'permissions': {'read-only': ['read']},
+        'permissions': {'rgw': ['read']},
         'pwdExpirationDate': 1000000,
         'pwdUpdateRequired': False
     }))
@@ -51,7 +51,7 @@ class AuthTest(ControllerTestCase):
         self.assertJsonBody({
             'token': 'my-token',
             'username': 'my-user',
-            'permissions': {'read-only': ['read']},
+            'permissions': {'rgw': ['read']},
             'pwdExpirationDate': 1000000,
             'sso': False,
             'pwdUpdateRequired': False
diff --git a/src/pybind/mgr/dashboard/tests/test_ganesha.py b/src/pybind/mgr/dashboard/tests/test_ganesha.py
deleted file mode 100644 (file)
index f3338d7..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-# -*- coding: utf-8 -*-
-# pylint: disable=too-many-lines
-from __future__ import absolute_import
-
-from unittest.mock import patch
-from urllib.parse import urlencode
-
-from ..controllers.nfsganesha import NFSGaneshaExports, NFSGaneshaUi
-from . import ControllerTestCase  # pylint: disable=no-name-in-module
-
-
-class NFSGaneshaExportsTest(ControllerTestCase):
-
-    def test_get_schema_export(self):
-        export = {
-            "export_id": 2,
-            "path": "bk1",
-            "cluster_id": "myc",
-            "pseudo": "/bk-ps",
-            "access_type": "RO",
-            "squash": "root_id_squash",
-            "security_label": False,
-            "protocols": [
-                4
-            ],
-            "transports": [
-                "TCP",
-                "UDP"
-            ],
-            "fsal": {
-                "name": "RGW",
-                "user_id": "dashboard",
-                "access_key_id": "UUU5YVVOQ2P5QTOPYNAN",
-                "secret_access_key": "7z87tMUUsHr67ZWx12pCbWkp9UyOldxhDuPY8tVN"
-            },
-            "clients": []
-        }
-        expected_schema_export = export
-        del expected_schema_export['fsal']['access_key_id']
-        del expected_schema_export['fsal']['secret_access_key']
-        self.assertDictEqual(
-            expected_schema_export,
-            NFSGaneshaExports._get_schema_export(export))  # pylint: disable=protected-access
-
-
-class NFSGaneshaUiControllerTest(ControllerTestCase):
-    @classmethod
-    def setup_server(cls):
-        # pylint: disable=protected-access
-        NFSGaneshaUi._cp_config['tools.authenticate.on'] = False
-        cls.setup_controllers([NFSGaneshaUi])
-
-    @classmethod
-    def _create_ls_dir_url(cls, fs_name, query_params):
-        api_url = '/ui-api/nfs-ganesha/lsdir/{}'.format(fs_name)
-        if query_params is not None:
-            return '{}?{}'.format(api_url, urlencode(query_params))
-        return api_url
-
-    @patch('dashboard.controllers.nfsganesha.CephFS')
-    def test_lsdir(self, cephfs_class):
-        cephfs_class.return_value.ls_dir.return_value = [
-            {'path': '/foo'},
-            {'path': '/foo/bar'}
-        ]
-        mocked_ls_dir = cephfs_class.return_value.ls_dir
-
-        reqs = [
-            {
-                'params': None,
-                'cephfs_ls_dir_args': ['/', 1],
-                'path0': '/',
-                'status': 200
-            },
-            {
-                'params': {'root_dir': '/', 'depth': '1'},
-                'cephfs_ls_dir_args': ['/', 1],
-                'path0': '/',
-                'status': 200
-            },
-            {
-                'params': {'root_dir': '', 'depth': '1'},
-                'cephfs_ls_dir_args': ['/', 1],
-                'path0': '/',
-                'status': 200
-            },
-            {
-                'params': {'root_dir': '/foo', 'depth': '3'},
-                'cephfs_ls_dir_args': ['/foo', 3],
-                'path0': '/foo',
-                'status': 200
-            },
-            {
-                'params': {'root_dir': 'foo', 'depth': '6'},
-                'cephfs_ls_dir_args': ['/foo', 5],
-                'path0': '/foo',
-                'status': 200
-            },
-            {
-                'params': {'root_dir': '/', 'depth': '-1'},
-                'status': 400
-            },
-            {
-                'params': {'root_dir': '/', 'depth': 'abc'},
-                'status': 400
-            }
-        ]
-
-        for req in reqs:
-            self._get(self._create_ls_dir_url('a', req['params']))
-            self.assertStatus(req['status'])
-
-            # Returned paths should contain root_dir as first element
-            if req['status'] == 200:
-                paths = self.json_body()['paths']
-                self.assertEqual(paths[0], req['path0'])
-                cephfs_class.assert_called_once_with('a')
-
-            # Check the arguments passed to `CephFS.ls_dir`.
-            if req.get('cephfs_ls_dir_args'):
-                mocked_ls_dir.assert_called_once_with(*req['cephfs_ls_dir_args'])
-            else:
-                mocked_ls_dir.assert_not_called()
-            mocked_ls_dir.reset_mock()
-            cephfs_class.reset_mock()
-
-    @patch('dashboard.controllers.nfsganesha.cephfs')
-    @patch('dashboard.controllers.nfsganesha.CephFS')
-    def test_lsdir_non_existed_dir(self, cephfs_class, cephfs):
-        cephfs.ObjectNotFound = Exception
-        cephfs.PermissionError = Exception
-        cephfs_class.return_value.ls_dir.side_effect = cephfs.ObjectNotFound()
-        self._get(self._create_ls_dir_url('a', {'root_dir': '/foo', 'depth': '3'}))
-        cephfs_class.assert_called_once_with('a')
-        cephfs_class.return_value.ls_dir.assert_called_once_with('/foo', 3)
-        self.assertStatus(200)
-        self.assertJsonBody({'paths': []})
diff --git a/src/pybind/mgr/dashboard/tests/test_nfs.py b/src/pybind/mgr/dashboard/tests/test_nfs.py
new file mode 100644 (file)
index 0000000..087ca18
--- /dev/null
@@ -0,0 +1,229 @@
+# -*- coding: utf-8 -*-
+# pylint: disable=too-many-lines
+from copy import deepcopy
+from unittest.mock import Mock, patch
+from urllib.parse import urlencode
+
+from .. import mgr
+from ..controllers._version import APIVersion
+from ..controllers.nfs import NFSGaneshaExports, NFSGaneshaUi
+from ..tests import ControllerTestCase
+from ..tools import NotificationQueue, TaskManager
+
+
+class NFSGaneshaExportsTest(ControllerTestCase):
+    _nfs_module_export = {
+        "export_id": 1,
+        "path": "bk1",
+        "cluster_id": "myc",
+        "pseudo": "/bk-ps",
+        "access_type": "RO",
+        "squash": "root_id_squash",
+        "security_label": False,
+        "protocols": [
+            4
+        ],
+        "transports": [
+            "TCP",
+            "UDP"
+        ],
+        "fsal": {
+            "name": "RGW",
+            "user_id": "dashboard",
+            "access_key_id": "UUU5YVVOQ2P5QTOPYNAN",
+            "secret_access_key": "7z87tMUUsHr67ZWx12pCbWkp9UyOldxhDuPY8tVN"
+        },
+        "clients": []
+    }
+
+    @classmethod
+    def setUpClass(cls):
+        super().setUpClass()
+        cls._expected_export = deepcopy(cls._nfs_module_export)
+        del cls._expected_export['fsal']['access_key_id']
+        del cls._expected_export['fsal']['secret_access_key']
+
+    @classmethod
+    def tearDownClass(cls):
+        super().tearDownClass()
+        NotificationQueue.stop()
+
+    @classmethod
+    def setup_server(cls):
+        NotificationQueue.start_queue()
+        TaskManager.init()
+        cls.setup_controllers([NFSGaneshaExports])
+
+    def test_list_exports(self):
+        mgr.remote = Mock(return_value=[self._nfs_module_export])
+
+        self._get('/api/nfs-ganesha/export')
+        self.assertStatus(200)
+        self.assertJsonBody([self._expected_export])
+
+    def test_get_export(self):
+        mgr.remote = Mock(return_value=self._nfs_module_export)
+
+        self._get('/api/nfs-ganesha/export/myc/1')
+        self.assertStatus(200)
+        self.assertJsonBody(self._expected_export)
+
+    def test_create_export(self):
+        export_mgr = Mock()
+        created_nfs_export = deepcopy(self._nfs_module_export)
+        created_nfs_export['pseudo'] = 'new-pseudo'
+        created_nfs_export['export_id'] = 2
+        export_mgr.get_export_by_pseudo.side_effect = [None, created_nfs_export]
+        export_mgr.apply_export.return_value = (0, '', '')
+        mgr.remote.return_value = export_mgr
+
+        export_create_body = deepcopy(self._expected_export)
+        del export_create_body['export_id']
+        export_create_body['pseudo'] = created_nfs_export['pseudo']
+
+        self._post('/api/nfs-ganesha/export',
+                   export_create_body,
+                   version=APIVersion(2, 0))
+        self.assertStatus(201)
+        expected_body = export_create_body
+        expected_body['export_id'] = created_nfs_export['export_id']
+        self.assertJsonBody(export_create_body)
+
+    def test_create_export_with_existing_pseudo_fails(self):
+        export_mgr = Mock()
+        export_mgr.get_export_by_pseudo.return_value = self._nfs_module_export
+        mgr.remote.return_value = export_mgr
+
+        export_create_body = deepcopy(self._expected_export)
+        del export_create_body['export_id']
+
+        self._post('/api/nfs-ganesha/export',
+                   export_create_body,
+                   version=APIVersion(2, 0))
+        self.assertStatus(400)
+        response = self.json_body()
+        self.assertIn(f'Pseudo {export_create_body["pseudo"]} is already in use',
+                      response['detail'])
+
+    def test_set_export(self):
+        export_mgr = Mock()
+        updated_nfs_export = deepcopy(self._nfs_module_export)
+        updated_nfs_export['pseudo'] = 'updated-pseudo'
+        export_mgr.get_export_by_pseudo.return_value = updated_nfs_export
+        export_mgr.apply_export.return_value = (0, '', '')
+        mgr.remote.return_value = export_mgr
+
+        updated_export_body = deepcopy(self._expected_export)
+        updated_export_body['pseudo'] = updated_nfs_export['pseudo']
+
+        self._put('/api/nfs-ganesha/export/myc/2',
+                  updated_export_body,
+                  version=APIVersion(2, 0))
+        self.assertStatus(200)
+        self.assertJsonBody(updated_export_body)
+
+    def test_delete_export(self):
+        mgr.remote = Mock(side_effect=[self._nfs_module_export, None])
+
+        self._delete('/api/nfs-ganesha/export/myc/2',
+                     version=APIVersion(2, 0))
+        self.assertStatus(204)
+
+    def test_delete_export_not_found(self):
+        mgr.remote = Mock(return_value=None)
+
+        self._delete('/api/nfs-ganesha/export/myc/3',
+                     version=APIVersion(2, 0))
+        self.assertStatus(404)
+
+
+class NFSGaneshaUiControllerTest(ControllerTestCase):
+    @classmethod
+    def setup_server(cls):
+        cls.setup_controllers([NFSGaneshaUi])
+
+    @classmethod
+    def _create_ls_dir_url(cls, fs_name, query_params):
+        api_url = '/ui-api/nfs-ganesha/lsdir/{}'.format(fs_name)
+        if query_params is not None:
+            return '{}?{}'.format(api_url, urlencode(query_params))
+        return api_url
+
+    @patch('dashboard.controllers.nfs.CephFS')
+    def test_lsdir(self, cephfs_class):
+        cephfs_class.return_value.ls_dir.return_value = [
+            {'path': '/foo'},
+            {'path': '/foo/bar'}
+        ]
+        mocked_ls_dir = cephfs_class.return_value.ls_dir
+
+        reqs = [
+            {
+                'params': None,
+                'cephfs_ls_dir_args': ['/', 1],
+                'path0': '/',
+                'status': 200
+            },
+            {
+                'params': {'root_dir': '/', 'depth': '1'},
+                'cephfs_ls_dir_args': ['/', 1],
+                'path0': '/',
+                'status': 200
+            },
+            {
+                'params': {'root_dir': '', 'depth': '1'},
+                'cephfs_ls_dir_args': ['/', 1],
+                'path0': '/',
+                'status': 200
+            },
+            {
+                'params': {'root_dir': '/foo', 'depth': '3'},
+                'cephfs_ls_dir_args': ['/foo', 3],
+                'path0': '/foo',
+                'status': 200
+            },
+            {
+                'params': {'root_dir': 'foo', 'depth': '6'},
+                'cephfs_ls_dir_args': ['/foo', 5],
+                'path0': '/foo',
+                'status': 200
+            },
+            {
+                'params': {'root_dir': '/', 'depth': '-1'},
+                'status': 400
+            },
+            {
+                'params': {'root_dir': '/', 'depth': 'abc'},
+                'status': 400
+            }
+        ]
+
+        for req in reqs:
+            self._get(self._create_ls_dir_url('a', req['params']))
+            self.assertStatus(req['status'])
+
+            # Returned paths should contain root_dir as first element
+            if req['status'] == 200:
+                paths = self.json_body()['paths']
+                self.assertEqual(paths[0], req['path0'])
+                cephfs_class.assert_called_once_with('a')
+
+            # Check the arguments passed to `CephFS.ls_dir`.
+            if req.get('cephfs_ls_dir_args'):
+                mocked_ls_dir.assert_called_once_with(*req['cephfs_ls_dir_args'])
+            else:
+                mocked_ls_dir.assert_not_called()
+            mocked_ls_dir.reset_mock()
+            cephfs_class.reset_mock()
+
+    @patch('dashboard.controllers.nfs.cephfs')
+    @patch('dashboard.controllers.nfs.CephFS')
+    def test_lsdir_non_existed_dir(self, cephfs_class, cephfs):
+        cephfs.ObjectNotFound = Exception
+        cephfs.PermissionError = Exception
+        cephfs_class.return_value.ls_dir.side_effect = cephfs.ObjectNotFound()
+        self._get(self._create_ls_dir_url('a', {'root_dir': '/foo', 'depth': '3'}))
+        cephfs_class.assert_called_once_with('a')
+        cephfs_class.return_value.ls_dir.assert_called_once_with('/foo', 3)
+        self.assertStatus(200)
+        self.assertJsonBody({'paths': []})
diff --git a/src/pybind/mgr/nfs/export.py b/src/pybind/mgr/nfs/export.py
index f48561ed22b3bf70e416a0e9f7c086f6379e27bf..2c84c962d10ad1b284666b279c05070f93bb8973 100644 (file)
@@ -432,6 +432,14 @@ class ExportMgr:
         export = self._fetch_export_id(cluster_id, export_id)
         return export.to_dict() if export else None
 
+    def get_export_by_pseudo(
+            self,
+            cluster_id: str,
+            pseudo_path: str
+    ) -> Optional[Dict[str, Any]]:
+        export = self._fetch_export(cluster_id, pseudo_path)
+        return export.to_dict() if export else None
+
     def apply_export(self, cluster_id: str, export_config: str) -> Tuple[int, str, str]:
         try:
             if not export_config:
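
The new ExportMgr.get_export_by_pseudo follows the same pattern as the id-based lookup shown just above it: it returns the export as a plain dict, or None when no export uses that pseudo path, which is what lets the dashboard treat any truthy result as a conflict. A tiny illustrative stub of that contract (ExportStub and ExportMgrStub are invented for the example; the real ExportMgr resolves Export objects from the cluster's NFS configuration):

from typing import Any, Dict, List, Optional


class ExportStub:
    def __init__(self, export_id: int, pseudo: str) -> None:
        self.export_id = export_id
        self.pseudo = pseudo

    def to_dict(self) -> Dict[str, Any]:
        return {'export_id': self.export_id, 'pseudo': self.pseudo}


class ExportMgrStub:
    def __init__(self, exports: Dict[str, List[ExportStub]]) -> None:
        self._exports = exports

    def _fetch_export(self, cluster_id: str, pseudo_path: str) -> Optional[ExportStub]:
        return next((e for e in self._exports.get(cluster_id, [])
                     if e.pseudo == pseudo_path), None)

    # Same shape as the method added in this hunk.
    def get_export_by_pseudo(self, cluster_id: str,
                             pseudo_path: str) -> Optional[Dict[str, Any]]:
        export = self._fetch_export(cluster_id, pseudo_path)
        return export.to_dict() if export else None


mgr_stub = ExportMgrStub({'mynfs': [ExportStub(1, '/bk-ps')]})
assert mgr_stub.get_export_by_pseudo('mynfs', '/bk-ps') == {'export_id': 1, 'pseudo': '/bk-ps'}
assert mgr_stub.get_export_by_pseudo('mynfs', '/missing') is None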