GaneshaConfParser,
RGWFSAL,
RawBlock,
+ CephBlock,
format_block)
from .exception import NFSException, NFSInvalidOperation, FSNotFound, NFSObjectNotFound
from .utils import (
log.exception("Export ID: %s not found", ex_id)
return None
- def _update_export(self, cluster_id: str, export: Export,
+ def _update_export(self, cluster_id: str, export: Export, ceph_section: Optional[CephBlock],
need_nfs_service_restart: bool) -> None:
self.exports[cluster_id].append(export)
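+ # the export is stored as a ganesha EXPORT block in the RADOS config object,
+ # followed by an optional CEPH block carrying the async/zerocopy settings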
+ block_str = format_block(export.to_export_block())
+ if ceph_section:
+     block_str += format_block(ceph_section.to_export_block())
+ log.debug(f'block_str: {block_str}')
self._rados(cluster_id).update_obj(
- format_block(export.to_export_block()),
+ block_str,
export_obj_name(export.export_id), conf_obj_name(export.cluster_id),
should_notify=not need_nfs_service_restart)
if need_nfs_service_restart:
def _change_export(self, cluster_id: str, export: Dict,
earmark_resolver: Optional[CephFSEarmarkResolver] = None) -> Dict[str, Any]:
+ # if the export json has a ceph section (key), extract it from the export
+ # json to preserve backward compatibility.
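+ # e.g. (illustrative shape of the wrapped format; the "export" value follows
+ # the existing export schema):
+ #   {"export": {"path": ..., "pseudo": ..., ...},
+ #    "ceph": {"is_async": true, "is_zerocopy": false}}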
+ ceph_dict = {}
+ if "ceph" in export.keys():
+ ceph_dict = export.pop("ceph")
+ export = export.pop("export")
+ msg = f'export_dict: {export}'
+ log.exception(msg)
+ msg = f'ceph_dict: {ceph_dict}'
+ log.exception(msg)
try:
- return self._apply_export(cluster_id, export, earmark_resolver)
- except NotImplementedError:
+ return self._apply_export(cluster_id, export, earmark_resolver, ceph_dict)
+ except NotImplementedError as e:
# in theory, the NotImplementedError here may be raised by a hook back to
# an orchestration module. If the orchestration module supports it the NFS
# servers may be restarted. If not supported the expectation is that an
cluster_id: str,
ex_id: int,
ex_dict: Dict[str, Any],
- earmark_resolver: Optional[CephFSEarmarkResolver] = None
- ) -> Export:
+ earmark_resolver: Optional[CephFSEarmarkResolver] = None) -> Export:
pseudo_path = ex_dict.get("pseudo")
if not pseudo_path:
raise NFSInvalidOperation("export must specify pseudo path")
self,
cluster_id: str,
new_export_dict: Dict,
- earmark_resolver: Optional[CephFSEarmarkResolver] = None
- ) -> Dict[str, str]:
+ earmark_resolver: Optional[CephFSEarmarkResolver] = None,
+ ceph_dict: Optional[Dict[str, Any]] = None) -> Dict[str, str]:
for k in ['path', 'pseudo']:
if k not in new_export_dict:
raise NFSInvalidOperation(f'Export missing required field {k}')
new_export_dict,
earmark_resolver
)
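+ # an optional CEPH block (async/zerocopy tunables) may accompany the export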
+ ceph_section = None
+ log.debug(f'ceph_dict: {ceph_dict}')
+ if ceph_dict:
+ ceph_section = CephBlock.from_dict(ceph_dict)
if not old_export:
if new_export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[1]: # only for RGW
self.exports[cluster_id].remove(old_export)
- self._update_export(cluster_id, new_export, need_nfs_service_restart)
+ self._update_export(cluster_id, new_export, ceph_section, need_nfs_service_restart)
return {"pseudo": new_export.pseudo, "state": "updated"}
'squash': self.squash
}
+class CephBlock:
+ def __init__(self,
+ is_async: bool,
+ is_zerocopy: bool):
+ self.is_async = is_async
+ self.is_zerocopy = is_zerocopy
+
+ @classmethod
+ def from_export_block(cls, ceph_block: RawBlock) -> 'Export':
+ pass
+
+ def to_export_block(self) -> RawBlock:
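+ # rendered by format_block() roughly as (assumed output format):
+ #   CEPH {
+ #       async = <bool>;
+ #       zerocopy = <bool>;
+ #   }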
+ values = {
+ 'async': self.is_async,
+ 'zerocopy': self.is_zerocopy
+ }
+ result = RawBlock("CEPH", values=values)
+ return result
+
+ @classmethod
+ def from_dict(cls, ex_dict: Dict[str, Any]) -> 'CephBlock':
+ return cls(ex_dict.get('is_async', False),
+ ex_dict.get('is_zerocopy', False))
+
+ def to_dict(self) -> Dict[str, Any]:
+ values = {
+ 'async': self.is_async,
+ 'zerocopy': self.is_zerocopy
+ }
+ return values
+
+ def __eq__(self, other: Any) -> bool:
+ if not isinstance(other, CephBlock):
+ return False
+ return self.to_dict() == other.to_dict()
class Export:
def __init__(
transports: List[str],
fsal: FSAL,
clients: Optional[List[Client]] = None,
sectype: Optional[List[str]] = None) -> None:
self.export_id = export_id
self.path = path
self.fsal = fsal