GaneshaConfParser,
RGWFSAL,
RawBlock,
+ CephBlock,
+ NFSV4Block,
format_block)
from .exception import NFSException, NFSInvalidOperation, FSNotFound, NFSObjectNotFound
from .utils import (
" to be created/updated"
return self.status
# what this module understands and is able to parse
# NOTE: only non-nested blocks here.
GANESHA_CONF_OPTIONAL_BLOCKS = ["ceph", "nfsv4"]
# BUG FIX: list.append() mutates in place and returns None, which made
# GANESHA_CONF_VALID_BLOCKS = None and polluted the optional list.
# Build a new list by concatenation instead.
GANESHA_CONF_VALID_BLOCKS = GANESHA_CONF_OPTIONAL_BLOCKS + ["export"]
+
class GaneshaExport:
    """Pairs the mandatory EXPORT block with the optional top-level blocks
    (CEPH, NFSV4) parsed from the same ganesha config object.
    """

    def __init__(self,
                 export: Export,
                 **optional_blocks) -> None:
        self.export = export
        # keyed by lower-case block name, e.g. {"ceph": CephBlock(...)};
        # keys are expected to come from GANESHA_CONF_OPTIONAL_BLOCKS
        self.optional_blocks = optional_blocks

    # frequently used properties so that much of the code that now
    # has moved to using this class can still continue to access via
    # export.{path,pseudo,...}.
    @property
    def path(self):
        return self.export.path

    @property
    def pseudo(self):
        return self.export.pseudo

    @property
    def export_id(self):
        return self.export.export_id

    @property
    def cluster_id(self):
        return self.export.cluster_id

    @property
    def fsal(self):
        return self.export.fsal

    @property
    def delegations(self):
        return self.export.delegations

    def to_dict(self, full: bool = False) -> Dict[str, Any]:
        """Return the dict form of this export.

        With full=False (default, backward compatible) only the EXPORT
        dict is returned; with full=True the export is nested under an
        'export' key with the optional blocks alongside it.
        """
        export_dict = self.export.to_dict()
        if not full or not self.optional_blocks:
            return export_dict
        ge_dict = {'export': export_dict}
        for key, block in self.optional_blocks.items():
            assert key in GANESHA_CONF_OPTIONAL_BLOCKS
            ge_dict[key] = block.to_dict()
        return ge_dict

    def to_export_block(self) -> str:
        """Render the EXPORT block plus any optional blocks as conf text."""
        block_str = format_block(self.export.to_export_block())
        for key, block in self.optional_blocks.items():
            assert key in GANESHA_CONF_OPTIONAL_BLOCKS
            if key == "ceph":
                block_str += format_block(block.to_ceph_block())
            if key == "nfsv4":
                block_str += format_block(block.to_nfsv4_block())
        return block_str

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, GaneshaExport):
            return False
        # BUG FIX: was lowercase `true`, which raises NameError in Python.
        return self.to_dict(full=True) == other.to_dict(full=True)
class ExportMgr:
def __init__(
self,
mgr: 'Module',
- export_ls: Optional[Dict[str, List[Export]]] = None
+ export_ls: Optional[Dict[str, List[GaneshaExport]]] = None
) -> None:
self.mgr = mgr
self.rados_pool = POOL_NAME
- self._exports: Optional[Dict[str, List[Export]]] = export_ls
+ self._exports: Optional[Dict[str, List[GaneshaExport]]] = export_ls
@property
- def exports(self) -> Dict[str, List[Export]]:
+ def exports(self) -> Dict[str, List[GaneshaExport]]:
if self._exports is None:
self._exports = {}
log.info("Begin export parsing")
for cluster_id in known_cluster_ids(self.mgr):
- self.export_conf_objs = [] # type: List[Export]
+ self.export_conf_objs = [] # type: List[GaneshaExport]
self._read_raw_config(cluster_id)
self._exports[cluster_id] = self.export_conf_objs
log.info("Exports parsed successfully %s", self.exports.items())
self,
cluster_id: str,
pseudo_path: str
- ) -> Optional[Export]:
+ ) -> Optional[GaneshaExport]:
try:
for ex in self.exports[cluster_id]:
if ex.pseudo == pseudo_path:
self,
cluster_id: str,
export_id: int
- ) -> Optional[Export]:
+ ) -> Optional[GaneshaExport]:
try:
for ex in self.exports[cluster_id]:
if ex.export_id == export_id:
log.info(f'no exports for cluster {cluster_id}')
return None
- def _delete_export_user(self, export: Export) -> None:
- if isinstance(export.fsal, CephFSFSAL):
- assert export.fsal.user_id
+ def _delete_export_user(self, ganesha_export: GaneshaExport) -> None:
+ if isinstance(ganesha_export.fsal, CephFSFSAL):
+ assert ganesha_export.fsal.user_id
self.mgr.check_mon_command({
'prefix': 'auth rm',
- 'entity': 'client.{}'.format(export.fsal.user_id),
+ 'entity': 'client.{}'.format(ganesha_export.fsal.user_id),
})
log.info("Deleted export user %s", export.fsal.user_id)
- elif isinstance(export.fsal, RGWFSAL):
+ elif isinstance(ganesha_export.fsal, RGWFSAL):
# do nothing; we're using the bucket owner creds.
pass
- def _create_rgw_export_user(self, export: Export) -> None:
- rgwfsal = cast(RGWFSAL, export.fsal)
+ def _create_rgw_export_user(self, ganesha_export: GaneshaExport) -> None:
+ rgwfsal = cast(RGWFSAL, ganesha_export.fsal)
if not rgwfsal.user_id:
- assert export.path
+ assert ganesha_export.path
ret, out, err = self.mgr.tool_exec(
- ['radosgw-admin', 'bucket', 'stats', '--bucket', export.path]
+ ['radosgw-admin', 'bucket', 'stats', '--bucket', ganesha_export.path]
)
if ret:
- raise NFSException(f'Failed to fetch owner for bucket {export.path}')
+ raise NFSException(f'Failed to fetch owner for bucket {ganesha_export.path}')
j = json.loads(out)
owner = j.get('owner', '')
rgwfsal.user_id = owner
])
if ret:
raise NFSException(
- f'Failed to fetch key for bucket {export.path} owner {rgwfsal.user_id}'
+ f'Failed to fetch key for bucket {ganesha_export.path} owner {rgwfsal.user_id}'
)
j = json.loads(out)
# FIXME: make this more tolerate of unexpected output?
rgwfsal.access_key_id = j['keys'][0]['access_key']
rgwfsal.secret_access_key = j['keys'][0]['secret_key']
- log.debug("Successfully fetched user %s for RGW path %s", rgwfsal.user_id, export.path)
+ log.debug("Successfully fetched user %s for RGW path %s", rgwfsal.user_id, ganesha_export.path)
- def _ensure_cephfs_export_user(self, export: Export) -> None:
- fsal = cast(CephFSFSAL, export.fsal)
+ def _ensure_cephfs_export_user(self, ganesha_export: GaneshaExport) -> None:
+ fsal = cast(CephFSFSAL, ganesha_export.fsal)
assert fsal.fs_name
assert fsal.cmount_path
- fsal.user_id = f"nfs.{get_user_id(export.cluster_id, fsal.fs_name, fsal.cmount_path)}"
+ fsal.user_id = f"nfs.{get_user_id(ganesha_export.cluster_id, fsal.fs_name, fsal.cmount_path)}"
fsal.cephx_key = self._create_user_key(
- export.cluster_id, fsal.user_id, fsal.cmount_path, fsal.fs_name
+ ganesha_export.cluster_id, fsal.user_id, fsal.cmount_path, fsal.fs_name
)
log.debug(f"Established user {fsal.user_id} for cephfs {fsal.fs_name}")
break
return nid
+ def prepare_opt_block_from_raw_config(self, raw_config_parsed: Dict) -> Dict[str,Any]:
+ opt_blocks = {}
+ for block_name in GANESHA_CONF_OPTIONAL_BLOCKS:
+ _block = raw_config_parsed.get(block_name.upper(), None)
+ if _block:
+ if block_name == "ceph":
+ opt_blocks[block_name] = CephBlock.from_ceph_block(block)
+ elif block_name == "nfsv4":
+ opt_blocks[block_name] = NFSV4Block.from_nfsv4_block(block)
+ return opt_blocks
+
+ def prepare_opt_block_from_dict(self, opt_dict: Dict) -> Dict[str,Any]:
+ opt_blocks = {}
+ for block_name in GANESHA_CONF_OPTIONAL_BLOCKS:
+ dct = opt_dict.get(block_name, None)
+ if dct:
+ if block_name == "ceph":
+ opt_blocks[block_name] = CephBlock.from_dict(dct)
+ elif block_name == "nfsv4":
+ opt_blocks[block_name] = NFSV4Block.from_dict(dct)
+ return opt_blocks
+
def _read_raw_config(self, rados_namespace: str) -> None:
with self.mgr.rados.open_ioctx(self.rados_pool) as ioctx:
ioctx.set_namespace(rados_namespace)
log.debug("read export configuration from rados "
"object %s/%s/%s", self.rados_pool,
rados_namespace, obj.key)
- self.export_conf_objs.append(Export.from_export_block(
- GaneshaConfParser(raw_config).parse()[0], rados_namespace))
-
- def _save_export(self, cluster_id: str, export: Export) -> None:
- self.exports[cluster_id].append(export)
+ log.debug(f'raw_config: {raw_config}')
+ raw_config_parsed = GaneshaConfParser(raw_config).parse()
+ log.debug(f'raw_config_parsed: {raw_config_parsed}')
+ # mandatory export block
+ export_block = raw_config_parsed['EXPORT']
+ # optional blocks
+ opt_blocks = self.prepare_opt_block_from_raw_config(raw_config_parsed)
+ self.export_conf_objs.append(
+ GaneshaExport(Export.from_export_block(export_block, rados_namespace),
+ **opt_blocks))
+
+ def _save_export(self, cluster_id: str, ganesha_export: GaneshaExport) -> None:
+ log.debug('in _save_export')
+ self.exports[cluster_id].append(ganesha_export)
+ block_str = ganesha_export.to_export_block()
+ log.debug(f'_save_export block_str: {block_str}')
self._rados(cluster_id).write_obj(
- format_block(export.to_export_block()),
- export_obj_name(export.export_id),
- conf_obj_name(export.cluster_id)
+ block_str,
+ export_obj_name(ganesha_export.export_id),
+ conf_obj_name(ganesha_export.cluster_id)
)
def _delete_export(
self,
cluster_id: str,
pseudo_path: Optional[str],
- export_obj: Optional[Export] = None
+ ganesha_export_obj: Optional[GaneshaExport] = None
) -> None:
try:
- if export_obj:
- export: Optional[Export] = export_obj
+ if ganesha_export_obj:
+ ganesha_export: Optional[GaneshaExport] = ganesha_export_obj
else:
assert pseudo_path
- export = self._fetch_export(cluster_id, pseudo_path)
+ ganesha_export = self._fetch_export(cluster_id, pseudo_path)
- if export:
+ if ganesha_export:
exports_count = 0
- if export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[0]:
- exports_count = self.get_export_count_with_same_fsal(export.fsal.cmount_path, # type: ignore
- cluster_id, export.fsal.fs_name) # type: ignore
+ if ganesha_export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[0]:
+ exports_count = self.get_export_count_with_same_fsal(
+ ganesha_export.fsal.cmount_path, # type: ignore
+ cluster_id, ganesha_export.fsal.fs_name) # type: ignore
if exports_count == 1:
- self._delete_export_user(export)
+ self._delete_export_user(ganesha_export)
if pseudo_path:
self._rados(cluster_id).remove_obj(
- export_obj_name(export.export_id), conf_obj_name(cluster_id))
- self.exports[cluster_id].remove(export)
- if export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[1]:
- self._delete_export_user(export)
+ export_obj_name(ganesha_export.export_id), conf_obj_name(cluster_id))
+ self.exports[cluster_id].remove(ganesha_export)
+ if ganesha_export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[1]:
+ self._delete_export_user(ganehsa_export)
if not self.exports[cluster_id]:
del self.exports[cluster_id]
log.debug("Deleted all exports for cluster %s", cluster_id)
try:
with self.mgr.rados.open_ioctx(self.rados_pool) as ioctx:
ioctx.set_namespace(cluster_id)
- export = Export.from_export_block(
- GaneshaConfParser(
- ioctx.read(export_obj_name(ex_id)).decode("utf-8")
- ).parse()[0],
- cluster_id
- )
- return export
+ raw_config = ioctx.read(export_obj_name(ex_id)).decode("utf-8")
+ log.debug(f'raw_config: {raw_config}')
+ raw_config_parsed = GaneshaConfParser(raw_config).parse()
+ log.debug(f'raw_config_parsed: {raw_config_parsed}')
+ # mandatory export block
+ export_block = raw_config_parsed['EXPORT']
+ # optional blocks
+ opt_blocks = self.prepare_opt_block_from_raw_config(raw_config_parsed)
+ ganesha_export = GaneshaExport(Export.from_export_block(export_block, rados_namespace),
+ **opt_block)
+ log.debug(f'export: {ganesha_export}')
+ return ganesha_export
except ObjectNotFound:
log.exception("Export ID: %s not found", ex_id)
return None
- def _update_export(self, cluster_id: str, export: Export,
+ def _update_export(self, cluster_id: str, ganesha_export: GaneshaExport,
need_nfs_service_restart: bool) -> None:
- self.exports[cluster_id].append(export)
+ log.debug(f'in _update_export: service restart: {need_nfs_service_restart}')
+ self.exports[cluster_id].append(ganesha_export)
+ block_str = ganesha_export.to_export_block()
+ log.debug(f'_update_export block_str: {block_str}')
self._rados(cluster_id).update_obj(
- format_block(export.to_export_block()),
- export_obj_name(export.export_id), conf_obj_name(export.cluster_id),
+ block_str,
+ export_obj_name(ganesha_export.export_id), conf_obj_name(ganesha_export.cluster_id),
should_notify=not need_nfs_service_restart)
if need_nfs_service_restart:
- restart_nfs_service(self.mgr, export.cluster_id)
+ restart_nfs_service(self.mgr, ganesha_export.cluster_id)
def _validate_cluster_id(self, cluster_id: str) -> None:
"""Raise an exception if cluster_id is not valid."""
def delete_all_exports(self, cluster_id: str) -> None:
try:
- export_list = list(self.exports[cluster_id])
+ ganesha_export_list = list(self.exports[cluster_id])
except KeyError:
log.info("No exports to delete")
return
- for export in export_list:
+ for ganesha_export in ganesha_export_list:
try:
self._delete_export(cluster_id=cluster_id, pseudo_path=None,
- export_obj=export)
+ ganesha_export_obj=ganesha_export)
except Exception as e:
- raise NFSException(f"Failed to delete export {export.export_id}: {e}")
+ raise NFSException(f"Failed to delete export {ganesha_export.export_id}: {e}")
log.info("All exports successfully deleted for cluster id: %s", cluster_id)
def list_all_exports(self) -> List[Dict[str, Any]]:
r = []
for cluster_id, ls in self.exports.items():
- r.extend([e.to_dict() for e in ls])
+ r.extend([ge.to_dict() for ge in ls])
return r
def list_exports(self,
self._validate_cluster_id(cluster_id)
try:
if detailed:
- result_d = [export.to_dict() for export in self.exports[cluster_id]]
+ result_d = [ganesha_export.to_dict() for ganesha_export in self.exports[cluster_id]]
return result_d
else:
- result_ps = [export.pseudo for export in self.exports[cluster_id]]
+ result_ps = [ganesha_export.pseudo for ganesha_export in self.exports[cluster_id]]
return result_ps
except KeyError:
raise ErrorResponse.wrap(e)
def _get_export_dict(self, cluster_id: str, pseudo_path: str) -> Optional[Dict[str, Any]]:
- export = self._fetch_export(cluster_id, pseudo_path)
- if export:
- return export.to_dict()
+ ganesha_export = self._fetch_export(cluster_id, pseudo_path)
+ if ganesha_export:
+ return ganesha_export.to_dict(full=True)
log.warning(f"No {pseudo_path} export to show for {cluster_id}")
return None
cluster_id: str,
export_id: int
) -> Optional[Dict[str, Any]]:
- export = self._fetch_export_id(cluster_id, export_id)
- return export.to_dict() if export else None
+ ganesha_export = self._fetch_export_id(cluster_id, export_id)
+ return ganesha_export.to_dict() if ganesha_export else None
def get_export_by_pseudo(
self,
cluster_id: str,
pseudo_path: str
) -> Optional[Dict[str, Any]]:
- export = self._fetch_export(cluster_id, pseudo_path)
- return export.to_dict() if export else None
+ ganesha_export = self._fetch_export(cluster_id, pseudo_path)
+ return ganesha_export.to_dict() if ganesha_export else None
# This method is used by the dashboard module (../dashboard/controllers/nfs.py)
# Do not change interface without updating the Dashboard code
j = json.loads(export_config)
except ValueError:
# okay, not JSON. is it an EXPORT block?
+ # including CEPH block when passing an EXPORT block
+ # is not currently supported (use export json for that).
+ # TODO: add this support.
try:
blocks = GaneshaConfParser(export_config).parse()
exports = [
def _change_export(self, cluster_id: str, export: Dict,
earmark_resolver: Optional[CephFSEarmarkResolver] = None) -> Dict[str, Any]:
+ # if the export json has a "export" key, extract it and treat the rest of the
+ # dict as optional (blocks), otherwise, the dict is a export without optional
+ # blocks (backward compat).
+ msg = f'export: {export}'
+ log.debug(msg)
+
+ opt_dict = {}
+ if "export" in export.keys():
+ export_dict = export.pop("export")
+ opt_dict = export
+ else:
+ export_dict = export
+
+ msg = f'export_dict: {export_dict}'
+ log.debug(msg)
+ msg = f'opt_dict: {opt_dict}'
+ log.debug(msg)
try:
- return self._apply_export(cluster_id, export, earmark_resolver)
- except NotImplementedError:
+ return self._apply_export(cluster_id, export_dict, earmark_resolver, opt_dict)
+ except NotImplementedError as e:
# in theory, the NotImplementedError here may be raised by a hook back to
# an orchestration module. If the orchestration module supports it the NFS
# servers may be restarted. If not supported the expectation is that an
# indicate to the user that manual intervention may be needed now that the
# configuration changes have been applied.
return {
- "pseudo": export['pseudo'],
+ "pseudo": export_dict['pseudo'],
"state": "warning",
"msg": "changes applied (Manual restart of NFS Pods required)",
}
msg = f'Failed to apply export: {ex}'
log.exception(msg)
return {"state": "error", "msg": msg, "exception": ex,
- "pseudo": export['pseudo']}
+ "pseudo": export_dict['pseudo']}
def _update_user_id(
self,
cluster_id: str,
ex_id: int,
ex_dict: Dict[str, Any],
- earmark_resolver: Optional[CephFSEarmarkResolver] = None
- ) -> Export:
+ earmark_resolver: Optional[CephFSEarmarkResolver] = None) -> Export:
pseudo_path = ex_dict.get("pseudo")
if not pseudo_path:
raise NFSInvalidOperation("export must specify pseudo path")
clients: list = [],
sectype: Optional[List[str]] = None,
cmount_path: Optional[str] = "/",
- earmark_resolver: Optional[CephFSEarmarkResolver] = None
+ earmark_resolver: Optional[CephFSEarmarkResolver] = None,
+ delegations: Optional[str] = "none"
) -> Dict[str, Any]:
validate_cephfs_path(self.mgr, fs_name, path)
},
"clients": clients,
"sectype": sectype,
+ "delegations": delegations
},
earmark_resolver
)
sectype: Optional[List[str]] = None) -> Dict[str, Any]:
pseudo_path = normalize_path(pseudo_path)
- if not bucket and not user_id:
- raise ErrorResponse("Must specify either bucket or user_id")
+
if not self._fetch_export(cluster_id, pseudo_path):
export = self.create_export_from_dict(
self,
cluster_id: str,
new_export_dict: Dict,
- earmark_resolver: Optional[CephFSEarmarkResolver] = None
- ) -> Dict[str, str]:
+ earmark_resolver: Optional[CephFSEarmarkResolver] = None,
+ opt_dict: Optional[Dict] = {}) -> Dict[str, str]:
for k in ['path', 'pseudo']:
if k not in new_export_dict:
raise NFSInvalidOperation(f'Export missing required field {k}')
new_export_dict,
earmark_resolver
)
+ log.debug(f'opt_dict: {opt_dict}')
+ opt_blocks = self.prepare_opt_block_from_dict(opt_dict)
+ log.debug(f'opt_blocks: {opt_blocks}')
+ # use @ganesha_export in place of @new_export here onwards
+ ganesha_export = GaneshaExport(new_export, **opt_blocks)
if not old_export:
if new_export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[1]: # only for RGW
self._create_rgw_export_user(new_export)
- self._save_export(cluster_id, new_export)
+ self._save_export(cluster_id, ganesha_export)
return {"pseudo": new_export.pseudo, "state": "added"}
need_nfs_service_restart = True
- if old_export.fsal.name != new_export.fsal.name:
+ if old_export.fsal.name != ganesha_export.fsal.name:
raise NFSInvalidOperation('FSAL change not allowed')
- if old_export.pseudo != new_export.pseudo:
+ if old_export.pseudo != ganesha_export.pseudo:
log.debug('export %s pseudo %s -> %s',
- new_export.export_id, old_export.pseudo, new_export.pseudo)
+ ganesha_export.export_id, old_export.pseudo, ganesha_export.pseudo)
if old_export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[0]:
old_fsal = cast(CephFSFSAL, old_export.fsal)
- new_fsal = cast(CephFSFSAL, new_export.fsal)
- self._ensure_cephfs_export_user(new_export)
+ new_fsal = cast(CephFSFSAL, ganesha_export.fsal)
+ self._ensure_cephfs_export_user(ganesha_export)
need_nfs_service_restart = not (old_fsal.user_id == new_fsal.user_id
and old_fsal.fs_name == new_fsal.fs_name
and old_export.path == new_export.path
- and old_export.pseudo == new_export.pseudo)
+ and old_export.pseudo == new_export.pseudo
+ and old_export.optional_blocks == ganesha_export.optional_blocks
+ and old_export.delegations == ganesha_export.delegations)
if old_export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[1]:
old_rgw_fsal = cast(RGWFSAL, old_export.fsal)
elif old_rgw_fsal.secret_access_key != new_rgw_fsal.secret_access_key:
raise NFSInvalidOperation('secret_access_key change is not allowed')
+
self.exports[cluster_id].remove(old_export)
- self._update_export(cluster_id, new_export, need_nfs_service_restart)
+ self._update_export(cluster_id, ganesha_export, need_nfs_service_restart)
return {"pseudo": new_export.pseudo, "state": "updated"}
value = self.stream()[:idx]
self.pos += idx + 1
block_dict = RawBlock('%url', values={'value': value})
- return block_dict
+ return ('%url', block_dict)
- block_dict = RawBlock(self.parse_block_name().upper())
+ block_name = self.parse_block_name().upper()
+ block_dict = RawBlock(block_name)
self.parse_block_body(block_dict)
if self.stream()[0] != '}':
raise Exception("No closing bracket '}' found at the end of block")
self.pos += 1
- return block_dict
+ return (block_name, block_dict)
def parse_parameter_value(self, raw_value: str) -> Any:
if raw_value.find(',') != -1:
self.parse_stanza(block_dict)
elif is_lbracket and ((is_semicolon and not is_semicolon_lt_lbracket)
or (not is_semicolon)):
- block_dict.blocks.append(self.parse_block_or_section())
+ block_dict.blocks.append(self.parse_block_or_section()[1])
else:
raise Exception("Malformed stanza: no semicolon found.")
raise Exception("Infinite loop while parsing block content")
def parse(self) -> List[RawBlock]:
- blocks = []
+ blocks = {}
while self.stream():
- blocks.append(self.parse_block_or_section())
+ (block_name, block) = self.parse_block_or_section()
+ blocks[block_name] = block
return blocks
def __init__(self,
addresses: List[str],
access_type: str,
- squash: str):
+ squash: str,
+ delegations: str):
self.addresses = addresses
self.access_type = access_type
self.squash = squash
+ self.delegations: delegations
@classmethod
def from_client_block(cls, client_block: RawBlock) -> 'Client':
addresses = [addresses]
return cls(addresses,
client_block.values.get('access_type', None),
- client_block.values.get('squash', None))
+ client_block.values.get('squash', None),
+ client_block.values.get('delegations', None))
def to_client_block(self) -> RawBlock:
result = RawBlock('CLIENT', values={'clients': self.addresses})
result.values['access_type'] = self.access_type
if self.squash:
result.values['squash'] = self.squash
+ if self.delegations:
+ result.values['delegations'] = self.delegations
return result
@classmethod
def from_dict(cls, client_dict: Dict[str, Any]) -> 'Client':
return cls(client_dict['addresses'], client_dict['access_type'],
- client_dict['squash'])
+ client_dict['squash'], client_dict['delegations'])
def to_dict(self) -> Dict[str, Any]:
return {
'addresses': self.addresses,
'access_type': self.access_type,
- 'squash': self.squash
+ 'squash': self.squash,
+ 'delegations': self.delegations
+ }
+
class CephBlock:
    """Optional top-level CEPH {} config block (async / zerocopy flags)."""

    def __init__(self,
                 is_async: bool,
                 is_zerocopy: bool) -> None:
        self.is_async = is_async
        self.is_zerocopy = is_zerocopy

    @classmethod
    def from_ceph_block(cls, ceph_block: 'RawBlock') -> 'CephBlock':
        """Build from a parsed CEPH RawBlock; missing flags default to False."""
        return cls(ceph_block.values.get('async', False),
                   ceph_block.values.get('zerocopy', False))

    def to_ceph_block(self) -> 'RawBlock':
        """Render back into a RawBlock suitable for conf formatting."""
        return RawBlock("CEPH", values={'async': self.is_async,
                                        'zerocopy': self.is_zerocopy})

    @classmethod
    def from_dict(cls, ex_dict: Dict[str, Any]) -> 'CephBlock':
        """Build from the dict form; missing keys default to False."""
        return cls(ex_dict.get('async', False),
                   ex_dict.get('zerocopy', False))

    def to_dict(self) -> Dict[str, Any]:
        return {'async': self.is_async, 'zerocopy': self.is_zerocopy}

    def __eq__(self, other: Any) -> bool:
        return isinstance(other, CephBlock) and self.to_dict() == other.to_dict()
+
class NFSV4Block:
    """Optional top-level NFSV4 {} config block (delegations toggle)."""

    def __init__(self,
                 delegations: bool) -> None:
        self.delegations = delegations

    @classmethod
    def from_nfsv4_block(cls, nfsv4_block: 'RawBlock') -> 'NFSV4Block':
        """Build from a parsed NFSV4 RawBlock; defaults to False."""
        return cls(nfsv4_block.values.get('delegations', False))

    def to_nfsv4_block(self) -> 'RawBlock':
        """Render back into a RawBlock suitable for conf formatting."""
        return RawBlock("NFSV4", values={'delegations': self.delegations})

    @classmethod
    def from_dict(cls, ex_dict: Dict[str, Any]) -> 'NFSV4Block':
        # NOTE: raises KeyError when 'delegations' is absent, unlike
        # from_nfsv4_block which defaults to False.
        return cls(ex_dict['delegations'])

    def to_dict(self) -> Dict[str, Any]:
        return {'delegations': self.delegations}

    def __eq__(self, other: Any) -> bool:
        return isinstance(other, NFSV4Block) and self.to_dict() == other.to_dict()
class Export:
def __init__(
transports: List[str],
fsal: FSAL,
clients: Optional[List[Client]] = None,
- sectype: Optional[List[str]] = None) -> None:
+ sectype: Optional[List[str]] = None,
+ delegations: Optional[str] = "none"):
self.export_id = export_id
self.path = path
self.fsal = fsal
self.transports = transports
self.clients: List[Client] = clients or []
self.sectype = sectype
+ self.delegations = delegations
@classmethod
def from_export_block(cls, export_block: RawBlock, cluster_id: str) -> 'Export':
FSAL.from_fsal_block(fsal_blocks[0]),
[Client.from_client_block(client)
for client in client_blocks],
- sectype=sectype)
+ sectype=sectype,
+ delegations=export_block.values.get("delegations", "none"))
def to_export_block(self) -> RawBlock:
values = {
'security_label': self.security_label,
'protocols': self.protocols,
'transports': self.transports,
+ 'delegations': self.delegations
}
if self.sectype:
values['SecType'] = self.sectype
ex_dict.get('transports', ['TCP']),
FSAL.from_dict(ex_dict.get('fsal', {})),
[Client.from_dict(client) for client in ex_dict.get('clients', [])],
- sectype=ex_dict.get("sectype"))
+ sectype=ex_dict.get("sectype"),
+ delegations=ex_dict.get("delegations", "none"))
def to_dict(self) -> Dict[str, Any]:
values = {
'protocols': sorted([p for p in self.protocols]),
'transports': sorted([t for t in self.transports]),
'fsal': self.fsal.to_dict(),
- 'clients': [client.to_dict() for client in self.clients]
+ 'clients': [client.to_dict() for client in self.clients],
+ "delegations": self.delegations
}
if self.sectype:
values['sectype'] = self.sectype
for st in (self.sectype or []):
_validate_sec_type(st)
+ valid_delegations = ["R", "RW", "NONE"]
+ if not self.delegations.upper() in valid_delegations:
+ raise NFSInvalidOperation(f'invalid delegations in export block: {self.delegations}')
+
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Export):
return False