return ns
+class AppliedExportResults:
+ """Accumulates the per-export results of applying multiple export changes.
+ Returned by ExportMgr.apply_export.
+ """
+
+ def __init__(self) -> None:
+ self.changes: List[Dict[str, str]] = []
+ self.has_error = False
+
+ def append(self, value: Dict[str, str]) -> None:
+ if value.get("state", "") == "error":
+ self.has_error = True
+ self.changes.append(value)
+
+ def to_simplified(self) -> List[Dict[str, str]]:
+ return self.changes
+
+ def mgr_return_value(self) -> int:
+ return -errno.EIO if self.has_error else 0
+
+
class ExportMgr:
def __init__(
self,
export = self._fetch_export(cluster_id, pseudo_path)
return export.to_dict() if export else None
- def apply_export(self, cluster_id: str, export_config: str) -> Tuple[int, str, str]:
+ def apply_export(self, cluster_id: str, export_config: str) -> AppliedExportResults:
try:
- if not export_config:
- raise NFSInvalidOperation("Empty Config!!")
+ exports = self._read_export_config(cluster_id, export_config)
+ except Exception as e:
+ log.exception(f'Failed to update export: {e}')
+ raise ErrorResponse.wrap(e)
+
+ aeresults = AppliedExportResults()
+ for export in exports:
+ aeresults.append(self._change_export(cluster_id, export))
+ return aeresults
+
+ def _read_export_config(self, cluster_id: str, export_config: str) -> List[Dict]:
+ if not export_config:
+ raise NFSInvalidOperation("Empty Config!!")
+ try:
+ j = json.loads(export_config)
+ except ValueError:
+ # okay, not JSON. is it an EXPORT block?
try:
- j = json.loads(export_config)
- except ValueError:
- # okay, not JSON. is it an EXPORT block?
- try:
- blocks = GaneshaConfParser(export_config).parse()
- exports = [
- Export.from_export_block(block, cluster_id)
- for block in blocks
- ]
- j = [export.to_dict() for export in exports]
- except Exception as ex:
- raise NFSInvalidOperation(f"Input must be JSON or a ganesha EXPORT block: {ex}")
-
- # check export type
- if isinstance(j, list):
- ret, out, err = (0, '', '')
- for export in j:
- try:
- r, o, e = self._apply_export(cluster_id, export)
- except Exception as ex:
- r, o, e = exception_handler(ex, f'Failed to apply export: {ex}')
- if r:
- ret = r
- if o:
- out += o + '\n'
- if e:
- err += e + '\n'
- return ret, out, err
- else:
- r, o, e = self._apply_export(cluster_id, j)
- return r, o, e
+ blocks = GaneshaConfParser(export_config).parse()
+ exports = [
+ Export.from_export_block(block, cluster_id)
+ for block in blocks
+ ]
+ j = [export.to_dict() for export in exports]
+ except Exception as ex:
+ raise NFSInvalidOperation(f"Input must be JSON or a ganesha EXPORT block: {ex}")
+ # check export type - always return a list
+ if isinstance(j, list):
+ return j # j is already a list object
+ return [j] # return a single object list, with j as the only item
+
+ def _change_export(self, cluster_id: str, export: Dict) -> Dict[str, str]:
+ try:
+ return self._apply_export(cluster_id, export)
except NotImplementedError:
- return 0, " Manual Restart of NFS PODS required for successful update of exports", ""
- except Exception as e:
- return exception_handler(e, f'Failed to update export: {e}')
+ # In theory, the NotImplementedError here may be raised by a hook back to
+ # an orchestration module. If the orchestration module supports it, the NFS
+ # servers may be restarted. If not supported, the expectation is that an
+ # (unfortunately generic) NotImplementedError will be raised. We then
+ # indicate to the user that manual intervention may be needed now that the
+ # configuration changes have been applied.
+ return {
+ "pseudo": export['pseudo'],
+ "state": "warning",
+ "msg": "changes applied (Manual restart of NFS Pods required)",
+ }
+ except Exception as ex:
+ msg = f'Failed to apply export: {ex}'
+ log.exception(msg)
+ return {"state": "error", "msg": msg}
def _update_user_id(
self,
self,
cluster_id: str,
new_export_dict: Dict,
- ) -> Tuple[int, str, str]:
+ ) -> Dict[str, str]:
for k in ['path', 'pseudo']:
if k not in new_export_dict:
raise NFSInvalidOperation(f'Export missing required field {k}')
if not old_export:
self._create_export_user(new_export)
self._save_export(cluster_id, new_export)
- return 0, f'Added export {new_export.pseudo}', ''
+ return {"pseudo": new_export.pseudo, "state": "added"}
need_nfs_service_restart = True
if old_export.fsal.name != new_export.fsal.name:
self._update_export(cluster_id, new_export, need_nfs_service_restart)
- return 0, f"Updated export {new_export.pseudo}", ""
+ return {"pseudo": new_export.pseudo, "state": "updated"}
def _rados(self, cluster_id: str) -> NFSRados:
"""Return a new NFSRados object for the given cluster id."""
'secret_access_key': 'the_secret_key',
}
}))
- assert r[0] == 0
+ assert len(r.changes) == 1
export = conf._fetch_export('foo', '/rgw/bucket')
assert export.export_id == 2
'secret_access_key': 'the_secret_key',
}
}))
- assert r[0] == 0
+ assert len(r.changes) == 1
export = conf._fetch_export('foo', '/rgw/bucket')
assert export.export_id == 2
'secret_access_key': 'the_secret_key',
}
}))
- assert r[0] == 0
+ assert len(r.changes) == 1
export = conf._fetch_export(self.cluster_id, '/rgw/bucket')
assert export.export_id == 2
'secret_access_key': 'the_secret_key',
}
}))
- assert r[0] == 0
+ assert len(r.changes) == 1
# no sectype was given, key not present
info = conf._get_export_dict(self.cluster_id, "/rgw/bucket")
'secret_access_key': 'the_secret_key',
}
}))
- assert r[0] == 0
+ assert len(r.changes) == 1
# assert sectype matches new value(s)
info = conf._get_export_dict(self.cluster_id, "/rgw/bucket")
nfs_mod = Module('nfs', '', '')
conf = ExportMgr(nfs_mod)
r = conf.apply_export(self.cluster_id, self.export_3)
- assert r[0] == 0
+ assert len(r.changes) == 1
def test_update_export_with_ganesha_conf_sectype(self):
self._do_mock_test(
nfs_mod = Module('nfs', '', '')
conf = ExportMgr(nfs_mod)
r = conf.apply_export(self.cluster_id, export_conf)
- assert r[0] == 0
+ assert len(r.changes) == 1
# assert sectype matches new value(s)
info = conf._get_export_dict(self.cluster_id, "/secure1")
}
},
]))
- assert r[0] == 0
+ # The input object above contains TWO items (two different pseudo paths);
+ # therefore we expect the result to report that two changes have been
+ # applied, rather than the typical single change.
+ assert len(r.changes) == 2
export = conf._fetch_export('foo', '/rgw/bucket')
assert export.export_id == 3