import logging
import os
+import json
from functools import partial
import cephfs
import cherrypy
+# Importing from the nfs module raises an AttributeError:
+# https://gist.github.com/varshar16/61ac26426bbe5f5f562ebb14bcd0f548
+#from nfs.export_utils import NFS_GANESHA_SUPPORTED_FSALS
+#from nfs.utils import available_clusters
from .. import mgr
from ..security import Scope
def __init__(self, msg):
super(NFSException, self).__init__(component="nfs", msg=msg)
+# Remove this once the AttributeError above is fixed
+NFS_GANESHA_SUPPORTED_FSALS = ['CEPH', 'RGW']
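+# Until the import is fixed, the dashboard reaches into mgr/nfs via mgr.remote().
+# A minimal sketch of the pattern used throughout this controller (assuming the
+# nfs module exposes fetch_nfs_export_obj, as the handlers below rely on):
+#   export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj')
+#   ret, out, err = export_mgr.list_exports(cluster_id='vstart', detailed=True)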
# documentation helpers
EXPORT_SCHEMA = {
'cluster_id': (str, 'Cluster identifier'),
'daemons': ([str], 'List of NFS Ganesha daemons identifiers'),
'pseudo': (str, 'Pseudo FS path'),
- 'tag': (str, 'NFSv3 export tag'),
'access_type': (str, 'Export access type'),
'squash': (str, 'Export squash policy'),
'security_label': (str, 'Security label'),
'cluster_id': (str, 'Cluster identifier'),
'daemons': ([str], 'List of NFS Ganesha daemons identifiers'),
'pseudo': (str, 'Pseudo FS path'),
- 'tag': (str, 'NFSv3 export tag'),
'access_type': (str, 'Export access type'),
'squash': (str, 'Export squash policy'),
'security_label': (str, 'Security label'),
@Endpoint()
@ReadPermission
def status(self):
- status = {'available': True, 'message': None}
+ '''
+ FIXME: update this to check whether any NFS cluster is available; otherwise this
+ endpoint can be removed entirely, as it was only introduced to check the
+ dashboard's pool and namespace configuration. A draft:
+ status = {'available': True, 'message': None}
try:
- mgr.remote('nfs', 'is_active')
+ cluster_ls = available_clusters(mgr)
+ if not cluster_ls:
+ raise NFSException('Please deploy a cluster using `nfs cluster create ...` or `orch apply nfs ...`')
except (NameError, ImportError) as e:
status['message'] = str(e) # type: ignore
status['available'] = False
-
return status
+ '''
+ return {'available': True, 'message': None}
@APIRouter('/nfs-ganesha/export', Scope.NFS_GANESHA)
@EndpointDoc("List all NFS-Ganesha exports",
responses={200: [EXPORT_SCHEMA]})
def list(self):
+ '''
+ TODO: list exports per cluster_id (where should cluster_id come from?), e.g.:
+ export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj')
+ ret, out, err = export_mgr.list_exports(cluster_id=cluster_id, detailed=True)
+ if ret == 0:
+ return json.loads(out)
+ raise NFSException(f"Failed to list exports: {err}")
+ '''
return mgr.remote('nfs', 'export_ls')
@NfsTask('create', {'path': '{path}', 'fsal': '{fsal.name}',
@EndpointDoc("Creates a new NFS-Ganesha export",
parameters=CREATE_EXPORT_SCHEMA,
responses={201: EXPORT_SCHEMA})
- def create(self, path, cluster_id, daemons, pseudo, tag, access_type,
+ def create(self, path, cluster_id, daemons, pseudo, access_type,
squash, security_label, protocols, transports, fsal, clients,
reload_daemons=True):
- if fsal['name'] not in mgr.remote('nfs', 'cluster_fsals'):
- raise NFSException("Cannot create this export. "
- "FSAL '{}' cannot be managed by the dashboard."
- .format(fsal['name']))
-
- fsal.pop('user_id') # mgr/nfs does not let you customize user_id
+ fsal.pop('user_id', None) # mgr/nfs does not let you customize user_id
- # FIXME: what was this? 'tag': tag,
raw_ex = {
'path': path,
'pseudo': pseudo,
+ 'access_type': access_type,
+ 'squash': squash,
+ 'security_label': security_label,
+ 'protocols': protocols,
+ 'transports': transports,
'fsal': fsal,
'clients': clients
}
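+ # For illustration, json.dumps(raw_ex) for a CephFS export could produce
+ # something like (values are examples only):
+ # {"path": "/", "pseudo": "/cephfs", "access_type": "RW",
+ #  "squash": "no_root_squash", "security_label": true, "protocols": [4],
+ #  "transports": ["TCP"], "fsal": {"name": "CEPH", "fs_name": "myfs"},
+ #  "clients": []}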
- export = mgr.remote('nfs', 'export_apply', cluster_id, raw_ex)
- return export
+ export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj')
+ ret, out, err = export_mgr.apply_export(cluster_id, json.dumps(raw_ex))
+ if ret == 0:
+ return export_mgr._get_export_dict(cluster_id, pseudo)
+ raise NFSException(f"Export creation failed {err}")
@EndpointDoc("Get an NFS-Ganesha export",
parameters={
},
responses={200: EXPORT_SCHEMA})
def get(self, cluster_id, export_id):
+ '''
+ TODO: either get the export by pseudo path:
+ export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj')
+ return export_mgr._get_export_dict(cluster_id, pseudo)
+
+ or get the export by id:
+ export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj')
+ return export_mgr.get_export_by_id(cluster_id, export_id)
+ '''
return mgr.remote('nfs', 'export_get', cluster_id, export_id)
@NfsTask('edit', {'cluster_id': '{cluster_id}', 'export_id': '{export_id}'},
parameters=dict(export_id=(int, "Export ID"),
**CREATE_EXPORT_SCHEMA),
responses={200: EXPORT_SCHEMA})
- def set(self, cluster_id, export_id, path, daemons, pseudo, tag, access_type,
+ def set(self, cluster_id, export_id, path, daemons, pseudo, access_type,
squash, security_label, protocols, transports, fsal, clients,
reload_daemons=True):
- export_id = int(export_id)
-
- if not mgr.remote('nfs', 'export_get', export_id):
- raise cherrypy.HTTPError(404) # pragma: no cover - the handling is too obvious
-
- if fsal['name'] not in mgr.remote('nfs', 'cluster_fsals'):
- raise NFSException("Cannot make modifications to this export. "
- "FSAL '{}' cannot be managed by the dashboard."
- .format(fsal['name']))
- fsal.pop('user_id') # mgr/nfs does not let you customize user_id
+ fsal.pop('user_id', None) # mgr/nfs does not let you customize user_id
- # FIXME: what was this? 'tag': tag,
raw_ex = {
'path': path,
'pseudo': pseudo,
+ 'access_type': access_type,
+ 'squash': squash,
+ 'security_label': security_label,
+ 'protocols': protocols,
+ 'transports': transports,
'fsal': fsal,
'clients': clients
}
- export = mgr.remote('nfs', 'export_apply', cluster_id, raw_ex)
- return export
+
+ export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj')
+ ret, out, err = export_mgr.apply_export(cluster_id, json.dumps(raw_ex))
+ if ret == 0:
+ return export_mgr._get_export_dict(cluster_id, pseudo)
+ raise NFSException(f"Failed to update export: {err}")
@NfsTask('delete', {'cluster_id': '{cluster_id}',
'export_id': '{export_id}'}, 2.0)
True)
})
def delete(self, cluster_id, export_id, reload_daemons=True):
+ '''
+ TODO: delete by pseudo path:
+ export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj')
+ export_mgr.delete_export(cluster_id, pseudo)
+
+ or, when deleting by export id, resolve the pseudo path first:
+ export_mgr = mgr.remote('nfs', 'fetch_nfs_export_obj')
+ export = export_mgr.get_export_by_id(cluster_id, export_id)
+ ret, out, err = export_mgr.delete_export(cluster_id=cluster_id, pseudo_path=export['pseudo'])
+ if ret != 0:
+ raise NFSException(err)
+ '''
export_id = int(export_id)
export = mgr.remote('nfs', 'export_get', cluster_id, export_id)
mgr.remote('nfs', 'export_rm', cluster_id, export['pseudo'])
+# FIXME: remove this; dashboard should only care about clusters.
@APIRouter('/nfs-ganesha/daemon', Scope.NFS_GANESHA)
@APIDoc(group="NFS-Ganesha")
class NFSGaneshaService(RESTController):
'desc': (str, 'Status description', True)
}]})
def list(self):
- # FIXME: remove this; dashboard should only care about clusters.
return mgr.remote('nfs', 'daemon_ls')
@Endpoint('GET', '/fsals')
@ReadPermission
def fsals(self):
- return mgr.remote('nfs', 'cluster_fsals')
+ return NFS_GANESHA_SUPPORTED_FSALS
@Endpoint('GET', '/lsdir')
@ReadPermission
@Endpoint('GET', '/clusters')
@ReadPermission
def clusters(self):
+ '''
+ TODO: drop this remote call and use available_clusters() directly; it returns a
+ list of cluster names, e.g. ['vstart']. The dashboard API would then need to
+ change its response from the following structure to a plain list of strings:
+ [
+ {
+ 'pool': 'nfs-ganesha',
+ 'namespace': cluster_id,
+ 'type': 'orchestrator',
+ 'daemon_conf': None
+ } for cluster_id in available_clusters()
+ ]
+ since pool, namespace, cluster type and daemon_conf are not required by the
+ mgr/nfs module for listing clusters:
+ return available_clusters(mgr)
+ '''
return mgr.remote('nfs', 'cluster_ls')
from mgr_module import NFS_POOL_NAME as POOL_NAME
-from .export_utils import GaneshaConfParser, Export, RawBlock, CephFSFSAL, RGWFSAL
+from .export_utils import GaneshaConfParser, Export, RawBlock, CephFSFSAL, RGWFSAL, \
+ NFS_GANESHA_SUPPORTED_FSALS
from .exception import NFSException, NFSInvalidOperation, FSNotFound, \
ClusterNotFound
from .utils import available_clusters, check_fs, restart_nfs_service
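+# NFS_GANESHA_SUPPORTED_FSALS is ['CEPH', 'RGW']; the code below indexes into
+# it, so index 0 denotes the CephFS FSAL and index 1 the RGW FSAL.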
except Exception as e:
return exception_handler(e, f"Failed to list exports for {cluster_id}")
+ def _get_export_dict(self, cluster_id: str, pseudo_path: str) -> Optional[Dict[str, Any]]:
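+ """Return the export identified by pseudo_path as a dict, or None if it does not exist."""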
+ export = self._fetch_export(cluster_id, pseudo_path)
+ if export:
+ return export.to_dict()
+ log.warning(f"No {pseudo_path} export to show for {cluster_id}")
+
@export_cluster_checker
def get_export(
self,
pseudo_path: str,
) -> Tuple[int, str, str]:
try:
- export = self._fetch_export(cluster_id, pseudo_path)
- if export:
- return 0, json.dumps(export.to_dict(), indent=2), ''
+ export_dict = self._get_export_dict(cluster_id, pseudo_path)
+ if export_dict:
+ return 0, json.dumps(export_dict, indent=2), ''
log.warning("No %s export to show for %s", pseudo_path, cluster_id)
return 0, '', ''
except Exception as e:
ret, out, err = (0, '', '')
for export in j:
try:
- r, o, e, ex = self._apply_export(cluster_id, export)
+ r, o, e = self._apply_export(cluster_id, export)
except Exception as ex:
- r, o, e, ex = exception_handler(ex, f'Failed to apply export: {ex}')
+ r, o, e = exception_handler(ex, f'Failed to apply export: {ex}')
if r:
ret = r
if o:
out += o + '\n'
if e:
err += e + '\n'
return ret, out, err
else:
- r, o, e, ex = self._apply_export(cluster_id, j)
+ r, o, e = self._apply_export(cluster_id, j)
return r, o, e
except NotImplementedError:
return 0, " Manual Restart of NFS PODS required for successful update of exports", ""
fsal = ex_dict.get("fsal", {})
fsal_type = fsal.get("name")
- if fsal_type == 'RGW':
+ if fsal_type == NFS_GANESHA_SUPPORTED_FSALS[1]:  # i.e. 'RGW'
if '/' in path:
raise NFSInvalidOperation('"/" is not allowed in path (bucket name)')
uid = f'nfs.{cluster_id}.{path}'
if "user_id" in fsal and fsal["user_id"] != uid:
raise NFSInvalidOperation(f"export FSAL user_id must be '{uid}'")
- elif fsal_type == 'CEPH':
+ elif fsal_type == NFS_GANESHA_SUPPORTED_FSALS[0]:  # i.e. 'CEPH'
fs_name = fsal.get("fs_name")
if not fs_name:
raise NFSInvalidOperation("export FSAL must specify fs_name")
if "user_id" in fsal and fsal["user_id"] != user_id:
raise NFSInvalidOperation(f"export FSAL user_id must be '{user_id}'")
else:
- raise NFSInvalidOperation("export must specify FSAL name of 'CEPH' or 'RGW'")
+ raise NFSInvalidOperation(f"NFS Ganesha supported FSALs are {NFS_GANESHA_SUPPORTED_FSALS}."
+ "Export must specify any one of it.")
ex_dict["fsal"] = fsal
ex_dict["cluster_id"] = cluster_id
"access_type": access_type,
"squash": squash,
"fsal": {
- "name": "CEPH",
+ "name": NFS_GANESHA_SUPPORTED_FSALS[0],
"fs_name": fs_name,
},
"clients": clients,
"path": bucket,
"access_type": access_type,
"squash": squash,
- "fsal": {"name": "RGW"},
+ "fsal": {"name": NFS_GANESHA_SUPPORTED_FSALS[1]},
"clients": clients,
}
)
self,
cluster_id: str,
new_export_dict: Dict,
- ) -> Tuple[int, str, str, Export]:
+ ) -> Tuple[int, str, str]:
for k in ['path', 'pseudo']:
if k not in new_export_dict:
raise NFSInvalidOperation(f'Export missing required field {k}')
if not old_export:
self._create_export_user(new_export)
self._save_export(cluster_id, new_export)
- return 0, f'Added export {new_export.pseudo}', '', new_export
+ return 0, f'Added export {new_export.pseudo}', ''
if old_export.fsal.name != new_export.fsal.name:
raise NFSInvalidOperation('FSAL change not allowed')
log.debug('export %s pseudo %s -> %s',
new_export.export_id, old_export.pseudo, new_export.pseudo)
- if old_export.fsal.name == 'CEPH':
+ if old_export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[0]:
old_fsal = cast(CephFSFSAL, old_export.fsal)
new_fsal = cast(CephFSFSAL, new_export.fsal)
if old_fsal.user_id != new_fsal.user_id:
new_fsal.cephx_key = old_fsal.cephx_key
else:
new_fsal.cephx_key = old_fsal.cephx_key
- if old_export.fsal.name == 'RGW':
+ if old_export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[1]:
old_rgw_fsal = cast(RGWFSAL, old_export.fsal)
new_rgw_fsal = cast(RGWFSAL, new_export.fsal)
if old_rgw_fsal.user_id != new_rgw_fsal.user_id:
# TODO: detect whether the update is such that a reload is sufficient
restart_nfs_service(self.mgr, new_export.cluster_id)
- return 0, f"Updated export {new_export.pseudo}", "", new_export
+ return 0, f"Updated export {new_export.pseudo}", ""