From 64178e1f5b0f26f5c17898e85c3d5b439e5f9dbf Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Tue, 10 Aug 2021 00:30:16 +0530 Subject: [PATCH] mgr/dashboard: directly use ExportMgr and NFSCluster objects Using the objects directly provides access to other methods and helps in avoiding repetition. mgr/dashboard/nfsganesha: remove tag Since NFS v3 is no longer supported. We can remove tag. mgr/nfs: define global constant to list supported FSALs mgr/dashboard: directly list nfs clusters by directly importing available_clusters() method The current dashboard api returns a list of following dictionary { 'pool': 'nfs-ganesha', 'namespace': cluster_id, 'type': 'orchestrator', 'daemon_conf': None } None of these values are required for listing nfs clusters by mgr/nfs module. Instead directly list available cluster names mgr/dashboard: add comment to remove listing of daemons As the configs are per cluster. There is no need to list daemons per cluster. mgr/dashboard/controllers/nfsganesha: Add comments to update/remove status endpoint This endpoint can be updated in suggested way or even removed. As it was initially[1] introduced to check if dashboard pool and namespace configuration was set. [1] https://github.com/ceph/ceph/commit/824726393b185b8e5a8f17e66487dfde9f3c8b5c mgr/nfs: remove fetch_cluster_obj() There is no need to fetch NFSCluster class object. Directly available_clusters() can be imported to list nfs clusters. mgr/dashboard/controllers/nfsganesha: list exports based on cluster id As mgr/nfs module lists based on cluster id. 
mgr/dashboard/nfs: get and delete export by export id Fixes: https://tracker.ceph.com/issues/46493 Signed-off-by: Varsha Rao --- .../mgr/dashboard/controllers/nfsganesha.py | 102 +++++++++++++----- src/pybind/mgr/nfs/cluster.py | 1 + src/pybind/mgr/nfs/export.py | 42 +++++--- src/pybind/mgr/nfs/export_utils.py | 13 +-- src/pybind/mgr/nfs/module.py | 14 +-- 5 files changed, 109 insertions(+), 63 deletions(-) diff --git a/src/pybind/mgr/dashboard/controllers/nfsganesha.py b/src/pybind/mgr/dashboard/controllers/nfsganesha.py index 5571bbd00b5..ae3c903c056 100644 --- a/src/pybind/mgr/dashboard/controllers/nfsganesha.py +++ b/src/pybind/mgr/dashboard/controllers/nfsganesha.py @@ -2,10 +2,15 @@ import logging import os +import json from functools import partial import cephfs import cherrypy +# Importing from nfs module throws Attribute Error +# https://gist.github.com/varshar16/61ac26426bbe5f5f562ebb14bcd0f548 +#from nfs.export_utils import NFS_GANESHA_SUPPORTED_FSALS +#from nfs.utils import available_clusters from .. 
import mgr from ..security import Scope @@ -23,6 +28,8 @@ class NFSException(DashboardException): def __init__(self, msg): super(NFSException, self).__init__(component="nfs", msg=msg) +# Remove this once attribute error is fixed +NFS_GANESHA_SUPPORTED_FSALS = ['CEPH', 'RGW'] # documentation helpers EXPORT_SCHEMA = { @@ -31,7 +38,6 @@ EXPORT_SCHEMA = { 'cluster_id': (str, 'Cluster identifier'), 'daemons': ([str], 'List of NFS Ganesha daemons identifiers'), 'pseudo': (str, 'Pseudo FS path'), - 'tag': (str, 'NFSv3 export tag'), 'access_type': (str, 'Export access type'), 'squash': (str, 'Export squash policy'), 'security_label': (str, 'Security label'), @@ -57,7 +63,6 @@ CREATE_EXPORT_SCHEMA = { 'cluster_id': (str, 'Cluster identifier'), 'daemons': ([str], 'List of NFS Ganesha daemons identifiers'), 'pseudo': (str, 'Pseudo FS path'), - 'tag': (str, 'NFSv3 export tag'), 'access_type': (str, 'Export access type'), 'squash': (str, 'Export squash policy'), 'security_label': (str, 'Security label'), @@ -102,14 +107,19 @@ class NFSGanesha(RESTController): @Endpoint() @ReadPermission def status(self): - status = {'available': True, 'message': None} + ''' + FIXME: update this to check if any nfs cluster is available. Otherwise this endpoint can be safely removed too. + As it was introduced to check dashboard pool and namespace configuration. try: - mgr.remote('nfs', 'is_active') + cluster_ls = available_clusters(mgr) + if not cluster_ls: + raise NFSException('Please deploy a cluster using `nfs cluster create ... or orch apply nfs ..') except (NameError, ImportError) as e: status['message'] = str(e) # type: ignore status['available'] = False - return status + ''' + return {'available': True, 'message': None} @APIRouter('/nfs-ganesha/export', Scope.NFS_GANESHA) @@ -120,6 +130,14 @@ class NFSGaneshaExports(RESTController): @EndpointDoc("List all NFS-Ganesha exports", responses={200: [EXPORT_SCHEMA]}) def list(self): + ''' + list exports based on cluster_id ? 
+ export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj') + ret, out, err = export_mgr.list_exports(cluster_id=cluster_id, detailed=True) + if ret == 0: + return json.loads(out) + raise NFSException(f"Failed to list exports: {err}") + ''' return mgr.remote('nfs', 'export_ls') @NfsTask('create', {'path': '{path}', 'fsal': '{fsal.name}', @@ -127,16 +145,10 @@ class NFSGaneshaExports(RESTController): @EndpointDoc("Creates a new NFS-Ganesha export", parameters=CREATE_EXPORT_SCHEMA, responses={201: EXPORT_SCHEMA}) - def create(self, path, cluster_id, daemons, pseudo, tag, access_type, + def create(self, path, cluster_id, daemons, pseudo, access_type, squash, security_label, protocols, transports, fsal, clients, reload_daemons=True): - if fsal['name'] not in mgr.remote('nfs', 'cluster_fsals'): - raise NFSException("Cannot create this export. " - "FSAL '{}' cannot be managed by the dashboard." - .format(fsal['name'])) - fsal.pop('user_id') # mgr/nfs does not let you customize user_id - # FIXME: what was this? 'tag': tag, raw_ex = { 'path': path, 'pseudo': pseudo, @@ -150,8 +162,11 @@ class NFSGaneshaExports(RESTController): 'fsal': fsal, 'clients': clients } - export = mgr.remote('nfs', 'export_apply', cluster_id, raw_ex) - return export + export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj') + ret, out, err = export_mgr.apply_export(cluster_id, json.dumps(raw_ex)) + if ret == 0: + return export_mgr._get_export_dict(cluster_id, pseudo) + raise NFSException(f"Export creation failed {err}") @EndpointDoc("Get an NFS-Ganesha export", parameters={ @@ -160,6 +175,15 @@ class NFSGaneshaExports(RESTController): }, responses={200: EXPORT_SCHEMA}) def get(self, cluster_id, export_id): + ''' + Get export by pseudo path? 
+ export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj') + return export_mgr._get_export_dict(cluster_id, pseudo) + + Get export by id + export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj') + return export_mgr.get_export_by_id(cluster_id, export_id) + ''' return mgr.remote('nfs', 'export_get', cluster_id, export_id) @NfsTask('edit', {'cluster_id': '{cluster_id}', 'export_id': '{export_id}'}, @@ -168,21 +192,11 @@ class NFSGaneshaExports(RESTController): parameters=dict(export_id=(int, "Export ID"), **CREATE_EXPORT_SCHEMA), responses={200: EXPORT_SCHEMA}) - def set(self, cluster_id, export_id, path, daemons, pseudo, tag, access_type, + def set(self, cluster_id, export_id, path, daemons, pseudo, access_type, squash, security_label, protocols, transports, fsal, clients, reload_daemons=True): - export_id = int(export_id) - - if not mgr.remote('nfs', 'export_get', export_id): - raise cherrypy.HTTPError(404) # pragma: no cover - the handling is too obvious - - if fsal['name'] not in mgr.remote('nfs', 'cluster_fsals'): - raise NFSException("Cannot make modifications to this export. " - "FSAL '{}' cannot be managed by the dashboard." - .format(fsal['name'])) fsal.pop('user_id') # mgr/nfs does not let you customize user_id - # FIXME: what was this? 
'tag': tag, raw_ex = { 'path': path, 'pseudo': pseudo, @@ -196,8 +210,12 @@ class NFSGaneshaExports(RESTController): 'fsal': fsal, 'clients': clients } - export = mgr.remote('nfs', 'export_apply', cluster_id, raw_ex) - return export + + export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj') + ret, out, err = export_mgr.apply_export(cluster_id, json.dumps(raw_ex)) + if ret == 0: + return export_mgr._get_export_dict(cluster_id, pseudo) + raise NFSException(f"Failed to update export: {err}") @NfsTask('delete', {'cluster_id': '{cluster_id}', 'export_id': '{export_id}'}, 2.0) @@ -211,6 +229,18 @@ class NFSGaneshaExports(RESTController): True) }) def delete(self, cluster_id, export_id, reload_daemons=True): + ''' + Delete by pseudo path + export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj') + export_mgr.delete_export(cluster_id, pseudo) + + if deleting by export id + export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj') + export = export_mgr.get_export_by_id(cluster_id, export_id) + ret, out, err = export_mgr.delete_export(cluster_id=cluster_id, pseudo_path=export['pseudo']) + if ret != 0: + raise NFSException(err) + ''' export_id = int(export_id) export = mgr.remote('nfs', 'export_get', cluster_id, export_id) @@ -219,6 +249,7 @@ class NFSGaneshaExports(RESTController): mgr.remote('nfs', 'export_rm', cluster_id, export['pseudo']) +# FIXME: remove this; dashboard should only care about clusters. @APIRouter('/nfs-ganesha/daemon', Scope.NFS_GANESHA) @APIDoc(group="NFS-Ganesha") class NFSGaneshaService(RESTController): @@ -232,7 +263,6 @@ class NFSGaneshaService(RESTController): 'desc': (str, 'Status description', True) }]}) def list(self): - # FIXME: remove this; dashboard should only care about clusters. 
return mgr.remote('nfs', 'daemon_ls') @@ -247,7 +277,7 @@ class NFSGaneshaUi(BaseController): @Endpoint('GET', '/fsals') @ReadPermission def fsals(self): - return mgr.remote('nfs', 'cluster_fsals') + return NFS_GANESHA_SUPPORTED_FSALS @Endpoint('GET', '/lsdir') @ReadPermission @@ -301,4 +331,18 @@ class NFSGaneshaUi(BaseController): @Endpoint('GET', '/clusters') @ReadPermission def clusters(self): + ''' + Remove this remote call instead directly use available_cluster() method. It returns list of cluster names: ['vstart'] + The current dashboard api needs to changed from following to simply list of strings + [ + { + 'pool': 'nfs-ganesha', + 'namespace': cluster_id, + 'type': 'orchestrator', + 'daemon_conf': None + } for cluster_id in available_clusters() + ] + As pool, namespace, cluster type and daemon_conf are not required for listing cluster by mgr/nfs module + return available_cluster(mgr) + ''' return mgr.remote('nfs', 'cluster_ls') diff --git a/src/pybind/mgr/nfs/cluster.py b/src/pybind/mgr/nfs/cluster.py index 9989d4bc878..9902ee308a9 100644 --- a/src/pybind/mgr/nfs/cluster.py +++ b/src/pybind/mgr/nfs/cluster.py @@ -149,6 +149,7 @@ class NFSCluster: except Exception as e: return exception_handler(e, "Failed to list NFS Cluster") + # FIXME: Remove this method. It was added for dashboard integration with mgr/nfs module. 
def list_daemons(self): completion = self.mgr.list_daemons(daemon_type='nfs') # Here completion.result is a list DaemonDescription objects diff --git a/src/pybind/mgr/nfs/export.py b/src/pybind/mgr/nfs/export.py index ec79274c2bc..9e24203f8dd 100644 --- a/src/pybind/mgr/nfs/export.py +++ b/src/pybind/mgr/nfs/export.py @@ -9,7 +9,8 @@ from rados import TimedOut, ObjectNotFound from mgr_module import NFS_POOL_NAME as POOL_NAME -from .export_utils import GaneshaConfParser, Export, RawBlock, CephFSFSAL, RGWFSAL +from .export_utils import GaneshaConfParser, Export, RawBlock, CephFSFSAL, RGWFSAL, \ + NFS_GANESHA_SUPPORTED_FSALS from .exception import NFSException, NFSInvalidOperation, FSNotFound, \ ClusterNotFound from .utils import available_clusters, check_fs, restart_nfs_service @@ -402,6 +403,12 @@ class ExportMgr: except Exception as e: return exception_handler(e, f"Failed to list exports for {cluster_id}") + def _get_export_dict(self, cluster_id: str, pseudo_path: str) -> Optional[Dict[str, Any]]: + export = self._fetch_export(cluster_id, pseudo_path) + if export: + return export.to_dict() + log.warning(f"No {pseudo_path} export to show for {cluster_id}") + @export_cluster_checker def get_export( self, @@ -409,9 +416,9 @@ class ExportMgr: pseudo_path: str, ) -> Tuple[int, str, str]: try: - export = self._fetch_export(cluster_id, pseudo_path) - if export: - return 0, json.dumps(export.to_dict(), indent=2), '' + export_dict = self._get_export_dict(cluster_id, pseudo_path) + if export_dict: + return 0, json.dumps(export_dict, indent=2), '' log.warning("No %s export to show for %s", pseudo_path, cluster_id) return 0, '', '' except Exception as e: @@ -448,9 +455,9 @@ class ExportMgr: ret, out, err = (0, '', '') for export in j: try: - r, o, e, ex = self._apply_export(cluster_id, export) + r, o, e = self._apply_export(cluster_id, export) except Exception as ex: - r, o, e, ex = exception_handler(ex, f'Failed to apply export: {ex}') + r, o, e = exception_handler(ex, 
f'Failed to apply export: {ex}') if r: ret = r if o: @@ -459,7 +466,7 @@ class ExportMgr: err += e + '\n' return ret, out, err else: - r, o, e, ex = self._apply_export(cluster_id, j) + r, o, e = self._apply_export(cluster_id, j) return r, o, e except NotImplementedError: return 0, " Manual Restart of NFS PODS required for successful update of exports", "" @@ -546,13 +553,13 @@ class ExportMgr: fsal = ex_dict.get("fsal", {}) fsal_type = fsal.get("name") - if fsal_type == 'RGW': + if fsal_type == NFS_GANESHA_SUPPORTED_FSALS[1]: if '/' in path: raise NFSInvalidOperation('"/" is not allowed in path (bucket name)') uid = f'nfs.{cluster_id}.{path}' if "user_id" in fsal and fsal["user_id"] != uid: raise NFSInvalidOperation(f"export FSAL user_id must be '{uid}'") - elif fsal_type == 'CEPH': + elif fsal_type == NFS_GANESHA_SUPPORTED_FSALS[0]: fs_name = fsal.get("fs_name") if not fs_name: raise NFSInvalidOperation("export FSAL must specify fs_name") @@ -563,7 +570,8 @@ class ExportMgr: if "user_id" in fsal and fsal["user_id"] != user_id: raise NFSInvalidOperation(f"export FSAL user_id must be '{user_id}'") else: - raise NFSInvalidOperation("export must specify FSAL name of 'CEPH' or 'RGW'") + raise NFSInvalidOperation(f"NFS Ganesha supported FSALs are {NFS_GANESHA_SUPPORTED_FSALS}." 
+ "Export must specify any one of it.") ex_dict["fsal"] = fsal ex_dict["cluster_id"] = cluster_id @@ -593,7 +601,7 @@ class ExportMgr: "access_type": access_type, "squash": squash, "fsal": { - "name": "CEPH", + "name": NFS_GANESHA_SUPPORTED_FSALS[0], "fs_name": fs_name, }, "clients": clients, @@ -631,7 +639,7 @@ class ExportMgr: "path": bucket, "access_type": access_type, "squash": squash, - "fsal": {"name": "RGW"}, + "fsal": {"name": NFS_GANESHA_SUPPORTED_FSALS[1]}, "clients": clients, } ) @@ -652,7 +660,7 @@ class ExportMgr: self, cluster_id: str, new_export_dict: Dict, - ) -> Tuple[int, str, str, Export]: + ) -> Tuple[int, str, str]: for k in ['path', 'pseudo']: if k not in new_export_dict: raise NFSInvalidOperation(f'Export missing required field {k}') @@ -690,7 +698,7 @@ class ExportMgr: if not old_export: self._create_export_user(new_export) self._save_export(cluster_id, new_export) - return 0, f'Added export {new_export.pseudo}', '', new_export + return 0, f'Added export {new_export.pseudo}', '' if old_export.fsal.name != new_export.fsal.name: raise NFSInvalidOperation('FSAL change not allowed') @@ -698,7 +706,7 @@ class ExportMgr: log.debug('export %s pseudo %s -> %s', new_export.export_id, old_export.pseudo, new_export.pseudo) - if old_export.fsal.name == 'CEPH': + if old_export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[0]: old_fsal = cast(CephFSFSAL, old_export.fsal) new_fsal = cast(CephFSFSAL, new_export.fsal) if old_fsal.user_id != new_fsal.user_id: @@ -718,7 +726,7 @@ class ExportMgr: new_fsal.cephx_key = old_fsal.cephx_key else: new_fsal.cephx_key = old_fsal.cephx_key - if old_export.fsal.name == 'RGW': + if old_export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[1]: old_rgw_fsal = cast(RGWFSAL, old_export.fsal) new_rgw_fsal = cast(RGWFSAL, new_export.fsal) if old_rgw_fsal.user_id != new_rgw_fsal.user_id: @@ -735,4 +743,4 @@ class ExportMgr: # TODO: detect whether the update is such that a reload is sufficient restart_nfs_service(self.mgr, 
new_export.cluster_id) - return 0, f"Updated export {new_export.pseudo}", "", new_export + return 0, f"Updated export {new_export.pseudo}", "" diff --git a/src/pybind/mgr/nfs/export_utils.py b/src/pybind/mgr/nfs/export_utils.py index b035b242bef..6967108fae4 100644 --- a/src/pybind/mgr/nfs/export_utils.py +++ b/src/pybind/mgr/nfs/export_utils.py @@ -7,6 +7,7 @@ from .utils import check_fs if TYPE_CHECKING: from nfs.module import Module +NFS_GANESHA_SUPPORTED_FSALS = ['CEPH', 'RGW'] class RawBlock(): def __init__(self, block_name: str, blocks: List['RawBlock'] = [], values: Dict[str, Any] = {}): @@ -182,17 +183,17 @@ class FSAL(object): @classmethod def from_dict(cls, fsal_dict: Dict[str, Any]) -> 'FSAL': - if fsal_dict.get('name') == 'CEPH': + if fsal_dict.get('name') == NFS_GANESHA_SUPPORTED_FSALS[0]: return CephFSFSAL.from_dict(fsal_dict) - if fsal_dict.get('name') == 'RGW': + if fsal_dict.get('name') == NFS_GANESHA_SUPPORTED_FSALS[1]: return RGWFSAL.from_dict(fsal_dict) raise NFSInvalidOperation(f'Unknown FSAL {fsal_dict.get("name")}') @classmethod def from_fsal_block(cls, fsal_block: RawBlock) -> 'FSAL': - if fsal_block.values.get('name') == 'CEPH': + if fsal_block.values.get('name') == NFS_GANESHA_SUPPORTED_FSALS[0]: return CephFSFSAL.from_fsal_block(fsal_block) - if fsal_block.values.get('name') == 'RGW': + if fsal_block.values.get('name') == NFS_GANESHA_SUPPORTED_FSALS[1]: return RGWFSAL.from_fsal_block(fsal_block) raise NFSInvalidOperation(f'Unknown FSAL {fsal_block.values.get("name")}') @@ -503,11 +504,11 @@ class Export: if client.access_type: self.validate_access_type(client.access_type) - if self.fsal.name == 'CEPH': + if self.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[0]: fs = cast(CephFSFSAL, self.fsal) if not fs.fs_name or not check_fs(mgr, fs.fs_name): raise FSNotFound(fs.fs_name) - elif self.fsal.name == 'RGW': + elif self.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[1]: rgw = cast(RGWFSAL, self.fsal) # noqa pass else: diff --git 
a/src/pybind/mgr/nfs/module.py b/src/pybind/mgr/nfs/module.py index b6fff9c37d7..8fc8558d098 100644 --- a/src/pybind/mgr/nfs/module.py +++ b/src/pybind/mgr/nfs/module.py @@ -131,8 +131,8 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule): """Reset NFS-Ganesha Config to default""" return self.nfs.reset_nfs_cluster_config(cluster_id=cluster_id) - def is_active(self) -> bool: - return True + def fetch_nfs_export_obj(self): + return self.export_mgr def export_ls(self) -> List[Dict[Any, Any]]: return self.export_mgr.list_all_exports() @@ -146,6 +146,7 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule): def daemon_ls(self) -> List[Dict[Any, Any]]: return self.nfs.list_daemons() + # Remove this method after fixing attribute error def cluster_ls(self) -> List[str]: return [ { @@ -155,12 +156,3 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule): 'daemon_conf': None, } for cluster_id in available_clusters() ] - - def cluster_fsals(self) -> List[str]: - return ['CEPH', 'RGW'] - - def export_apply(self, cluster_id: str, export: Dict[Any, Any]) -> Dict[Any, Any]: - ret, out, err, export = self.export_mgr._apply_export(cluster_id, export) - if ret: - return None - return export.to_dict() -- 2.39.5