from rados import TimedOut, ObjectNotFound
from .export_utils import GaneshaConfParser, Export, RawBlock, CephFSFSAL, RGWFSAL
-from .exception import NFSException, NFSInvalidOperation, NFSObjectNotFound, FSNotFound, \
+from .exception import NFSException, NFSInvalidOperation, FSNotFound, \
ClusterNotFound
from .utils import POOL_NAME, available_clusters, check_fs, restart_nfs_service
except Exception as e:
return exception_handler(e, f"Failed to get {pseudo_path} export for {cluster_id}")
- def update_export(self, cluster_id: str, export_config: str) -> Tuple[int, str, str]:
- try:
- new_export = json.loads(export_config)
- # check export type
- return FSExport(self).update_export_1(cluster_id, new_export, False)
- except NotImplementedError:
- return 0, " Manual Restart of NFS PODS required for successful update of exports", ""
- except Exception as e:
- return exception_handler(e, f'Failed to update export: {e}')
-
- def import_export(self, cluster_id: str, export_config: str) -> Tuple[int, str, str]:
+ def apply_export(self, cluster_id: str, export_config: str) -> Tuple[int, str, str]:
try:
if not export_config:
raise NFSInvalidOperation("Empty Config!!")
new_export = json.loads(export_config)
# check export type
- return FSExport(self).update_export_1(cluster_id, new_export, True)
+ return FSExport(self).update_export_1(cluster_id, new_export)
except NotImplementedError:
return 0, " Manual Restart of NFS PODS required for successful update of exports", ""
except Exception as e:
- return exception_handler(e, f'Failed to import export: {e}')
+ return exception_handler(e, f'Failed to update export: {e}')
class FSExport(ExportMgr):
ret, out, err = self._exec(['radosgw-admin', 'user', 'create', '--uid', uid,
'--display-name', uid])
if ret:
- raise NFSException(f'Failed to create user {uid}')
+ raise NFSException(-1, f'Failed to create user {uid}')
j = json.loads(out)
# FIXME: make this more tolerant of unexpected output?
access_key = j['keys'][0]['access_key']
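The FIXME above flags that the key extraction assumes radosgw-admin always returns a 'keys' list with the expected fields. A minimal sketch of a more tolerant lookup (hypothetical helper, not part of this change):

    # Hypothetical helper: fail with a clear NFSException instead of a
    # bare KeyError/IndexError when the radosgw-admin output does not
    # have the expected shape.
    def _extract_key(j: Dict, field: str) -> str:
        keys = j.get('keys') or []
        if not keys or field not in keys[0]:
            raise NFSException(-1, f'unexpected radosgw-admin output: no {field}')
        return keys[0][field]

    access_key = _extract_key(j, 'access_key')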
self,
cluster_id: str,
new_export: Dict,
- can_create: bool
) -> Tuple[int, str, str]:
for k in ['cluster_id', 'path', 'pseudo']:
if k not in new_export:
new_export['pseudo'])
if old_export:
# Check if export id matches
- if old_export.export_id != new_export.get('export_id'):
- raise NFSInvalidOperation('Export ID changed, Cannot update export')
+ if new_export.get('export_id'):
+ if old_export.export_id != new_export.get('export_id'):
+ raise NFSInvalidOperation('Export ID changed, cannot update export')
+ else:
+ new_export['export_id'] = old_export.export_id
elif new_export.get('export_id'):
old_export = self._fetch_export_obj(cluster_id, new_export['export_id'])
if old_export:
old_export = self._fetch_export(cluster_id, old_export.pseudo)
self.mgr.log.debug(f"export {old_export.export_id} pseudo {old_export.pseudo} -> {new_export['pseudo']}")
- if not old_export and not can_create:
- raise NFSObjectNotFound('Export does not exist')
-
new_export = Export.from_dict(
new_export.get('export_id', self._gen_export_id(cluster_id)),
new_export
read_only=readonly, squash='none',
addr=addr)
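Taken together, the branches above give update_export_1 create-or-update semantics: match on the pseudo path first, fall back to an explicit export_id (an update that renames the pseudo path), otherwise create. A condensed sketch of that resolution order (simplified; validation and error handling omitted, not the verbatim implementation):

    # Condensed sketch of the existing-export lookup order.
    def _resolve_existing(self, cluster_id: str, new_export: Dict):
        # 1. Prefer a match on the pseudo path.
        old_export = self._fetch_export(cluster_id, new_export['pseudo'])
        if old_export:
            # Reuse the stored export_id unless the caller supplied a
            # conflicting one (which is rejected above).
            new_export.setdefault('export_id', old_export.export_id)
            return old_export
        # 2. Fall back to an explicit export_id: the caller is updating
        #    an existing export and changing its pseudo path.
        if new_export.get('export_id'):
            return self._fetch_export_obj(cluster_id, new_export['export_id'])
        # 3. No match: a new export will be created with a fresh id.
        return None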
- @CLICommand('nfs export import', perm='rw')
- def _cmd_nfs_export_import(self,
- cluster_id: str,
- inbuf: str) -> Tuple[int, str, str]:
- """Create one or more exports from JSON specification"""
- return self.export_mgr.import_export(cluster_id, inbuf)
-
@CLICommand('nfs export rm', perm='rw')
def _cmd_nfs_export_rm(self, cluster_id: str, binding: str) -> Tuple[int, str, str]:
"""Remove a cephfs export"""
"""Fetch a export of a NFS cluster given the pseudo path/binding"""
return self.export_mgr.get_export(cluster_id=cluster_id, pseudo_path=binding)
- @CLICommand('nfs export update', perm='rw')
- @CLICheckNonemptyFileInput(desc='CephFS Export configuration')
- def _cmd_nfs_export_update(self, cluster_id: str, inbuf: str) -> Tuple[int, str, str]:
- """Update an export of a NFS cluster by `-i <json_file>`"""
- # The export <json_file> is passed to -i and it's processing is handled by the Ceph CLI.
- return self.export_mgr.update_export(cluster_id, export_config=inbuf)
+ @CLICommand('nfs export apply', perm='rw')
+ @CLICheckNonemptyFileInput(desc='Export JSON specification')
+ def _cmd_nfs_export_apply(self, cluster_id: str, inbuf: str) -> Tuple[int, str, str]:
+ """Create or update an export by `-i <json_file>`"""
+ # The export <json_file> is passed to -i and its processing
+ # is handled by the Ceph CLI.
+ return self.export_mgr.apply_export(cluster_id, export_config=inbuf)
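With the two commands merged, a caller passes one JSON spec via -i and lets apply_export decide between create and update. Illustrative caller-side usage (cluster name and spec values are examples, a trimmed version of the test payload below):

    import json

    spec = {
        'cluster_id': 'mynfs',        # example cluster name
        'path': 'bucket',
        'pseudo': '/rgw/bucket',
        'access_type': 'RO',
        'fsal': {'name': 'RGW', 'user_id': 'testuser'},
    }
    # Same payload that `ceph nfs export apply <cluster_id> -i <json_file>`
    # delivers through inbuf.
    ret, out, err = export_mgr.apply_export('mynfs', json.dumps(spec))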
@CLICommand('nfs cluster create', perm='rw')
def _cmd_nfs_cluster_create(self,
def _do_test_update_export(self, cluster_id, expected_exports):
nfs_mod = Module('nfs', '', '')
conf = ExportMgr(nfs_mod)
- r = conf.update_export(cluster_id, json.dumps({
+ r = conf.apply_export(cluster_id, json.dumps({
'export_id': 2,
'daemons': expected_exports[2],
'path': 'bucket',
assert export.clients[0].access_type is None
assert export.cluster_id == cluster_id
+ # do it again, with changes
+ r = conf.apply_export(cluster_id, json.dumps({
+ 'export_id': 2,
+ 'daemons': expected_exports[2],
+ 'path': 'newbucket',
+ 'pseudo': '/rgw/bucket',
+ 'cluster_id': cluster_id,
+ 'tag': 'bucket_tag',
+ 'access_type': 'RO',
+ 'squash': 'root',
+ 'security_label': False,
+ 'protocols': [4],
+ 'transports': ['TCP'],
+ 'clients': [{
+ 'addresses': ["192.168.10.0/16"],
+ 'access_type': None,
+ 'squash': None
+ }],
+ 'fsal': {
+ 'name': 'RGW',
+ 'user_id': 'testuser',
+ 'access_key_id': 'access_key',
+ 'secret_access_key': 'secret_key',
+ }
+ }))
+ assert r[0] == 0
+
+ export = conf._fetch_export('foo', '/rgw/bucket')
+ assert export.export_id == 2
+ assert export.path == "newbucket"
+ assert export.pseudo == "/rgw/bucket"
+ assert export.access_type == "RO"
+ assert export.squash == "root"
+ assert export.protocols == [4]
+ assert export.transports == ["TCP"]
+ assert export.fsal.name == "RGW"
+ assert export.fsal.user_id == "testuser"
+ assert export.fsal.access_key_id == "access_key"
+ assert export.fsal.secret_access_key == "secret_key"
+ assert len(export.clients) == 1
+ assert export.clients[0].squash is None
+ assert export.clients[0].access_type is None
+ assert export.cluster_id == cluster_id
+
+ # again, but without export_id
+ r = conf.apply_export(cluster_id, json.dumps({
+ 'daemons': expected_exports[2],
+ 'path': 'newestbucket',
+ 'pseudo': '/rgw/bucket',
+ 'cluster_id': cluster_id,
+ 'tag': 'bucket_tag',
+ 'access_type': 'RW',
+ 'squash': 'root',
+ 'security_label': False,
+ 'protocols': [4],
+ 'transports': ['TCP'],
+ 'clients': [{
+ 'addresses': ["192.168.10.0/16"],
+ 'access_type': None,
+ 'squash': None
+ }],
+ 'fsal': {
+ 'name': 'RGW',
+ 'user_id': 'testuser',
+ 'access_key_id': 'access_key',
+ 'secret_access_key': 'secret_key',
+ }
+ }))
+ assert r[0] == 0
+
+ export = conf._fetch_export('foo', '/rgw/bucket')
+ assert export.export_id == 2
+ assert export.path == "newestbucket"
+ assert export.pseudo == "/rgw/bucket"
+ assert export.access_type == "RW"
+ assert export.squash == "root"
+ assert export.protocols == [4]
+ assert export.transports == ["TCP"]
+ assert export.fsal.name == "RGW"
+ assert export.fsal.user_id == "testuser"
+ assert export.fsal.access_key_id == "access_key"
+ assert export.fsal.secret_access_key == "secret_key"
+ assert len(export.clients) == 1
+ assert export.clients[0].squash is None
+ assert export.clients[0].access_type is None
+ assert export.cluster_id == cluster_id
+
def test_remove_export(self) -> None:
with self._mock_orchestrator(True), mock.patch('nfs.module.ExportMgr._exec'):
for cluster_id, info in self.clusters.items():