--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+.qa/cephfs/begin/0-install.yaml
\ No newline at end of file
--- /dev/null
+.qa/cephfs/begin/2-logrotate.yaml
\ No newline at end of file
--- /dev/null
+.qa/distros/podman/centos_9.stream.yaml
\ No newline at end of file
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+roles:
+- [mon.a, mgr.y, mds.a, mds.c, osd.0, osd.1, osd.2, osd.3, client.0]
+- [mon.b, mon.c, mgr.x, mds.b, osd.4, osd.5, osd.6, osd.7]
+openstack:
+- volumes: # attached to each instance
+ count: 4
+ size: 20 # GB
+- machine:
+ disk: 200 # GB
--- /dev/null
+.qa/cephfs/conf
\ No newline at end of file
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+.qa/cephfs/overrides/ignorelist_health.yaml
\ No newline at end of file
--- /dev/null
+.qa/cephfs/overrides/pg_health.yaml
\ No newline at end of file
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd shutdown pgref assert: true
+tasks:
+- cephadm:
+ roleless: false
+- cephadm.shell:
+ mon.a:
+ - ceph orch status
+ - ceph orch ps
+ - ceph orch ls
+ - ceph orch host ls
+ - ceph orch device ls
+- cephadm.shell:
+ mon.a:
+ - cmd: ceph nfs cluster create nfs-ganesha-test
+ - cmd: ceph nfs export apply nfs-ganesha-test -i /dev/stdin
+ stdin: |
+ {
+ "export": {
+ "export_id": 1,
+ "path": "/",
+ "cluster_id": "nfs-ganesha-test",
+ "pseudo": "/nfsganesha",
+ "access_type": "RW",
+ "squash": "none",
+ "security_label": true,
+ "protocols": [
+ 4
+ ],
+ "transports": [
+ "TCP"
+ ],
+ "fsal": {
+ "name": "CEPH",
+ "user_id": "nfs.nfs-ganesha-test.cephfs.a4cd9f65",
+ "fs_name": "cephfs",
+ "cmount_path": "/"
+ },
+ "clients": []
+ },
+ "log": {
+ "default_log_level": "WARN",
+ "components": {
+ "fsal": "debug",
+ "nfs4": "debug"
+ },
+ "facility": {
+ "name": "file",
+ "destination": "/var/log/ceph/ganesha.log",
+ "enable": "active"
+ }
+ }
+ }
+ # for debug
+ - cmd: ceph nfs export info nfs-ganesha-test --pseudo_path=/nfsganesha
+ # for debug
+ - cmd: ceph orch ls --service-name nfs.nfs-ganesha-test --export
+ # sleep a bit
+ - cmd: sleep 60
+ # more debug
+ - cmd: ceph orch ps
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+overrides:
+ ganesha-reconf:
+ async: no
\ No newline at end of file
--- /dev/null
+overrides:
+ ganesha-reconf:
+ async: true
\ No newline at end of file
--- /dev/null
+overrides:
+ ganesha-reconf:
+ delegations: none
\ No newline at end of file
--- /dev/null
+overrides:
+ ganesha-reconf:
+ delegations: rw
\ No newline at end of file
--- /dev/null
+tasks:
+- ganesha-reconf:
+ cluster_id: 'nfs-ganesha-test'
+ pseudo_path: '/nfsganesha'
+
+- cephadm.shell:
+ mon.a:
+ # sleep a bit
+ - cmd: sleep 30
+ # more debug
+ - cmd: ceph orch ps
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+overrides:
+ ganesha-reconf:
+ zerocopy: no
\ No newline at end of file
--- /dev/null
+overrides:
+ ganesha-reconf:
+ zerocopy: true
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+tasks:
+- ganesha-client:
+ client.0:
+ cluster_id: nfs-ganesha-test
+ pseudo_path: /nfsganesha
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+overrides:
+ ganesha-client:
+ version: 4.1
\ No newline at end of file
--- /dev/null
+overrides:
+ ganesha-client:
+ version: 4.2
\ No newline at end of file
--- /dev/null
+overrides:
+ ganesha-client:
+ version: latest
\ No newline at end of file
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - suites/iogen.sh
\ No newline at end of file
--- /dev/null
+"""
+mount a ganesha client
+"""
+
+import os
+import json
+import logging
+from io import StringIO
+
+from teuthology.misc import deep_merge
+from teuthology.task import Task
+from teuthology import misc
+
+log = logging.getLogger(__name__)
+
+class GaneshaClient(Task):
+ def __init__(self, ctx, config):
+ super(GaneshaClient, self).__init__(ctx, config)
+ self.log = log
+ self.mounts = {}
+
+ def setup(self):
+ super(GaneshaClient, self).setup()
+
+ def begin(self):
+ super(GaneshaClient, self).begin()
+ log.info('mounting ganesha client(s)')
+
+ if self.config is None:
+ ids = misc.all_roles_of_type(self.ctx.cluster, 'client')
+ client_roles = [f'client.{id_}' for id_ in ids]
+ self.config = dict([r, dict()] for r in client_rols)
+ elif isinstance(self.config, list):
+ client_roles = self.config
+ self.config = dict([r, dict()] for r in client_roles)
+ elif isinstance(self.config, dict):
+ client_roles = filter(lambda x: 'client.' in x, self.config.keys())
+ else:
+ raise ValueError(f"Invalid config object: {self.config} ({self.config.__class__})")
+ log.info(f"config is {self.config}")
+
+ mounts = {}
+ overrides = self.ctx.config.get('overrides', {}).get('ganesha-client', {})
+ top_overrides = dict(filter(lambda x: 'client.' not in x[0], overrides.items()))
+
+ clients = list(misc.get_clients(ctx=self.ctx, roles=client_roles))
+ test_dir = misc.get_testdir(self.ctx)
+
+ for id_, remote in clients:
+ entity = f'client.{id_}'
+ client_config = self.config.get(entity)
+ if client_config is None:
+ client_config = {}
+ # top level overrides
+ deep_merge(client_config, top_overrides)
+ # mount specific overrides
+ client_config_overrides = overrides.get(entity)
+ deep_merge(client_config, client_config_overrides)
+ log.info(f"{entity} config is {client_config}")
+
+ cluster_id = client_config['cluster_id']
+ pseudo_path = client_config['pseudo_path']
+ nfs_version = client_config.get('version', 'latest')
+
+ try:
+ first_mon = misc.get_first_mon(self.ctx, None)
+ (mon0_remote,) = self.ctx.cluster.only(first_mon).remotes.keys()
+
+ proc = mon0_remote.run(args=['ceph', 'nfs', 'export', 'info', cluster_id, pseudo_path],
+ stdout=StringIO(), wait=True)
+ res = proc.stdout.getvalue()
+ export_json = json.loads(res)
+ log.debug(f'export_json: {export_json}')
+
+ proc = mon0_remote.run(args=['ceph', 'nfs', 'cluster', 'info', cluster_id],
+ stdout=StringIO(), wait=True)
+ res = proc.stdout.getvalue()
+ cluster_info = json.loads(res)
+ log.debug(f'cluster_info: {cluster_info}')
+
+ info_output = cluster_info[cluster_id]['backend'][0]
+ port = info_output['port']
+ ip = info_output['ip']
+
+ mntpt = os.path.join(test_dir, f'mnt.{id_}')
+ remote.run(args=['mkdir', '-p', mntpt], timeout=60)
+ if nfs_version == 'latest':
+ remote.run(args=['sudo', 'mount', '-t', 'nfs', '-o',
+ f'port={port}', f'{ip}:{pseudo_path}', mntpt])
+ else:
+ remote.run(args=['sudo', 'mount', '-t', 'nfs', '-o',
+ f'port={port},vers={nfs_version}', f'{ip}:{pseudo_path}', mntpt])
+ remote.run(args=['sudo', 'chmod', '1777', mntpt], timeout=60)
+ remote.run(args=['stat', mntpt])
+ mounts[id_] = (remote, mntpt)
+ except Exception as e:
+ log.error(f'failed: {e}')
+ self.mounts = mounts
+
+ def end(self):
+ super(GaneshaClient, self).end()
+ log.debug('unmounting ganesha client(s)')
+ for (remote, mntpt) in self.mounts.values():
+ log.debug(f'unmounting {mntpt}')
+ remote.run(args=['sudo', 'umount', mntpt])
+ self.mounts = {}
+
+task = GaneshaClient
--- /dev/null
+"""
+reconfigure a ganesha server
+"""
+
+import json
+import logging
+from io import StringIO
+
+from teuthology.misc import deep_merge
+from teuthology.task import Task
+from teuthology import misc
+
+log = logging.getLogger(__name__)
+
+class GaneshaReconf(Task):
+ def __init__(self, ctx, config):
+ super(GaneshaReconf, self).__init__(ctx, config)
+ self.log = log
+
+ def setup(self):
+ super(GaneshaReconf, self).setup()
+
+ def begin(self):
+ super(GaneshaReconf, self).begin()
+ log.info('reconfiguring ganesha server')
+
+ ganesha_config = self.config
+ log.info(f'ganesha_config is {ganesha_config}')
+ overrides = self.ctx.config.get('overrides', {}).get('ganesha-reconf', {})
+ log.info(f'overrides is {overrides}')
+
+ deep_merge(ganesha_config, overrides)
+ log.info(f'ganesha_config is {ganesha_config}')
+
+ try:
+ first_mon = misc.get_first_mon(self.ctx, None)
+ (mon0_remote,) = self.ctx.cluster.only(first_mon).remotes.keys()
+
+ cluster_id = ganesha_config['cluster_id']
+ pseudo_path = ganesha_config['pseudo_path']
+
+ proc = mon0_remote.run(args=['ceph', 'nfs', 'export', 'info', cluster_id, pseudo_path],
+ stdout=StringIO(), wait=True)
+ res = proc.stdout.getvalue()
+ export_json = json.loads(res)
+ log.debug(f'export_json: {export_json}')
+
+ ceph_section = {'async': False, 'zerocopy': False}
+ is_async = ganesha_config.get('async', False)
+ if is_async:
+ ceph_section["async"] = True
+ is_zerocopy = ganesha_config.get('zerocopy', False)
+ if is_zerocopy:
+ ceph_section["zerocopy"] = True
+
+ nfsv4_block = {}
+ delegations = ganesha_config.get('delegations', 'none')
+ export_json['delegations'] = delegations
+ nfsv4_block['delegations'] = False if delegations == 'none' else True
+
+ new_export = {}
+ if "export" in export_json.keys():
+ new_export = export_json
+ else:
+ new_export["export"] = export_json
+ new_export["ceph"] = ceph_section
+
+ log.debug(f'new_export is {json.dumps(new_export)}')
+ mon0_remote.run(args=['ceph', 'nfs', 'export', 'apply', cluster_id, "-i", "-"],
+ stdin=json.dumps(new_export))
+ except Exception as e:
+ log.error(f'failed: {e}')
+
+ def end(self):
+ super(GaneshaReconf, self).end()
+
+task = GaneshaReconf
RGWFSAL,
RawBlock,
CephBlock,
+ LogBlock,
+ NFSV4Block,
format_block)
from .exception import NFSException, NFSInvalidOperation, FSNotFound, NFSObjectNotFound
from .utils import (
return self.status
class GaneshaExport:
- # currently, EXPORT and CEPH block.
+ # EXPORT, CEPH and LOG block.
def __init__(self,
export: Export,
- ceph_block: Optional[CephBlock] = None) -> None:
+ ceph_block: Optional[CephBlock] = None,
+ log_block: Optional[LogBlock] = None,
+ nfsv4_block: Optional[NFSV4Block] = None) -> None:
self.export = export
self.ceph_block = ceph_block
+ self.log_block = log_block
+ self.nfsv4_block = nfsv4_block
+    # frequently used properties so that much of the code that now
+    # has moved to using this class can still continue to access via
def fsal(self):
return self.export.fsal
+ @property
+ def delegations(self):
+ return self.export.delegations
+
def to_dict(self, full=False) -> Dict[str, Any]:
export_dict = self.export.to_dict()
- if not full or not self.ceph_block:
+ if not full or (not self.ceph_block and not self.log_block
+ and not self.nfsv4_block):
return export_dict
- ge_dict = {
- 'export': export_dict,
- 'ceph': self.ceph_block.to_dict()
- }
+ ge_dict = {'export': export_dict}
+ if self.ceph_block:
+ ge_dict['ceph'] = self.ceph_block.to_dict()
+ if self.log_block:
+ ge_dict['log'] = self.log_block.to_dict()
+ if self.nfsv4_block:
+ ge_dict['nfsv4'] = self.nfsv4_block.to_dict()
return ge_dict
def to_export_block(self):
block_str = format_block(self.export.to_export_block())
if self.ceph_block:
block_str += format_block(self.ceph_block.to_ceph_block())
+ if self.log_block:
+ block_str += format_block(self.log_block.to_log_block())
+ if self.nfsv4_block:
+ block_str += format_block(self.nfsv4_block.to_nfsv4_block())
return block_str
def __eq__(self, other: Any) -> bool:
break
return nid
- def _has_ceph_block(raw_config_parsed: List) -> bool:
- return len(raw_config_parsed) > 1
+ def _has_ceph_block(raw_config_parsed: Dict) -> bool:
+ return 'CEPH' in raw_config_parsed.keys()
+ def _has_log_block(raw_config_parsed: Dict) -> bool:
+ return 'LOG' in raw_config_parsed.keys()
def _read_raw_config(self, rados_namespace: str) -> None:
with self.mgr.rados.open_ioctx(self.rados_pool) as ioctx:
log.debug(f'raw_config: {raw_config}')
raw_config_parsed = GaneshaConfParser(raw_config).parse()
log.debug(f'raw_config_parsed: {raw_config_parsed}')
- export_block = raw_config_parsed[0]
- # do we have a ceph block?
+ # mandatory export block
+ export_block = raw_config_parsed['EXPORT']
+ # do we have a ceph/log block? (optional)
+ ceph_block = None
+ log_block = None
if _has_ceph_block(raw_config_parsed):
- ceph_block = raw_config_parsed[1]
- self.export_conf_objs.append(
+ ceph_block = raw_config_parsed['CEPH']
+ if _has_log_block(raw_config_parsed):
+ log_block = raw_config_parsed['LOG']
+ self.export_conf_objs.append(
GaneshaExport(Export.from_export_block(export_block, rados_namespace),
- CephBlock.from_ceph_block(ceph_block)))
- else:
- self.export_conf_objs.append(
- GaneshaExport(Export.from_export_block(export_block, rados_namespace)))
+ CephBlock.from_ceph_block(ceph_block),
+ LogBlock.from_log_block(log_block)))
def _save_export(self, cluster_id: str, ganesha_export: GaneshaExport) -> None:
log.debug('in _save_export')
log.debug(f'raw_config: {raw_config}')
raw_config_parsed = GaneshaConfParser(raw_config).parse()
log.debug(f'raw_config_parsed: {raw_config_parsed}')
- export_block = raw_config_parsed[0]
- # do we have a ceph block?
+ export_block = raw_config_parsed['EXPORT']
+ # do we have a ceph/log block? (optional)
+ ceph_block = None
+ log_block = None
if _has_ceph_block(raw_config_parsed):
- ceph_block = raw_config_parsed[1]
- export = GaneshaExport(Export.from_export_block(export_block, cluster_id),
- CephBlock.from_ceph_block(ceph_block))
- else:
- export = GaneshaExport(Export.from_export_block(export_block, cluster_id))
+ ceph_block = raw_config_parsed['CEPH']
+ if _has_log_block(raw_config_parsed):
+ log_block = raw_config_parsed['LOG']
+                export = GaneshaExport(
+                    Export.from_export_block(export_block, cluster_id),
+                    CephBlock.from_ceph_block(ceph_block) if ceph_block else None,
+                    LogBlock.from_log_block(log_block) if log_block else None)
log.debug(f'export: {export}')
return export
except ObjectNotFound:
def _change_export(self, cluster_id: str, export: Dict,
earmark_resolver: Optional[CephFSEarmarkResolver] = None) -> Dict[str, Any]:
- # if the export json has a ceph section (key), extract it from the export
+ # if the export json has a ceph/log section (key), extract it from the export
+        # json to preserve backward compatibility.
ceph_dict = {}
+ log_dict = {}
+ nfsv4_dict = {}
if "ceph" in export.keys():
ceph_dict = export.pop("ceph")
- if not "export" in export.keys():
- raise Exception('\'export\' key missing in export json')
+ if "log" in export.keys():
+ log_dict = export.pop("log")
+ if "nfsv4" in export.keys():
+ nfsv4_dict = export.pop("nfsv4")
+ if "export" in export.keys():
export = export.pop("export")
msg = f'export_dict: {export}'
- log.exception(msg)
+ log.debug(msg)
msg = f'ceph_dict: {ceph_dict}'
- log.exception(msg)
+ log.debug(msg)
+ msg = f'nfsv4_dict: {nfsv4_dict}'
+ log.debug(msg)
+ msg = f'log_dict: {log_dict}'
+ log.debug(msg)
try:
- return self._apply_export(cluster_id, export, earmark_resolver, ceph_dict)
+ return self._apply_export(cluster_id, export, earmark_resolver,
+ ceph_dict, log_dict, nfsv4_dict)
except NotImplementedError as e:
# in theory, the NotImplementedError here may be raised by a hook back to
# an orchestration module. If the orchestration module supports it the NFS
clients: list = [],
sectype: Optional[List[str]] = None,
cmount_path: Optional[str] = "/",
- earmark_resolver: Optional[CephFSEarmarkResolver] = None
+ earmark_resolver: Optional[CephFSEarmarkResolver] = None,
+ delegations: Optional[str] = "none"
) -> Dict[str, Any]:
validate_cephfs_path(self.mgr, fs_name, path)
},
"clients": clients,
"sectype": sectype,
+ "delegations": delegations
},
earmark_resolver
)
cluster_id: str,
new_export_dict: Dict,
earmark_resolver: Optional[CephFSEarmarkResolver] = None,
- ceph_dict: Optional[Dict] = {}) -> Dict[str, str]:
+ ceph_dict: Optional[Dict] = {},
+ log_dict: Optional[Dict] = {},
+ nfsv4_dict: Optional[Dict] = {}) -> Dict[str, str]:
for k in ['path', 'pseudo']:
if k not in new_export_dict:
raise NFSInvalidOperation(f'Export missing required field {k}')
log.debug(f'ceph_dict: {ceph_dict}')
if ceph_dict:
ceph_block = CephBlock.from_dict(ceph_dict)
+ log_block = None
+ log.debug(f'log_dict: {log_dict}')
+ if log_dict:
+ log_block = LogBlock.from_dict(log_dict)
+ nfsv4_block = None
+ log.debug(f'nfsv4_dict: {nfsv4_dict}')
+ if nfsv4_dict:
+ nfsv4_block = NFSV4Block.from_dict(nfsv4_dict)
# use @ganesha_export in place of @new_export here onwards
- ganesha_export = GaneshaExport(new_export, ceph_block)
+ ganesha_export = GaneshaExport(new_export, ceph_block, log_block, nfsv4_block)
if not old_export:
if new_export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[1]: # only for RGW
and old_fsal.fs_name == new_fsal.fs_name
and old_export.path == new_export.path
and old_export.pseudo == new_export.pseudo
- and old_export.ceph_block == ganesha_export.ceph_block)
+ and old_export.ceph_block == ganesha_export.ceph_block
+ and old_export.log_block == ganesha_export.log_block
+ and old_export.nfsv4_block == ganesha_export.nfsv4_block
+ and old_export.delegations == ganesha_export.delegations)
if old_export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[1]:
old_rgw_fsal = cast(RGWFSAL, old_export.fsal)
value = self.stream()[:idx]
self.pos += idx + 1
block_dict = RawBlock('%url', values={'value': value})
- return block_dict
+ return ('%url', block_dict)
- block_dict = RawBlock(self.parse_block_name().upper())
+ block_name = self.parse_block_name().upper()
+ block_dict = RawBlock(block_name)
self.parse_block_body(block_dict)
if self.stream()[0] != '}':
raise Exception("No closing bracket '}' found at the end of block")
self.pos += 1
- return block_dict
+ return (block_name, block_dict)
def parse_parameter_value(self, raw_value: str) -> Any:
if raw_value.find(',') != -1:
self.parse_stanza(block_dict)
elif is_lbracket and ((is_semicolon and not is_semicolon_lt_lbracket)
or (not is_semicolon)):
- block_dict.blocks.append(self.parse_block_or_section())
+ block_dict.blocks.append(self.parse_block_or_section()[1])
else:
raise Exception("Malformed stanza: no semicolon found.")
raise Exception("Infinite loop while parsing block content")
def parse(self) -> List[RawBlock]:
- blocks = []
+ blocks = {}
while self.stream():
- blocks.append(self.parse_block_or_section())
+ (block_name, block) = self.parse_block_or_section()
+ blocks[block_name] = block
return blocks
return result
@classmethod
- def from_dict(cls, ex_dict: Dict[str, Any]) -> 'Export':
+ def from_dict(cls, ex_dict: Dict[str, Any]) -> 'CephBlock':
return cls(ex_dict.get('async', False),
ex_dict.get('zerocopy', False))
return False
return self.to_dict() == other.to_dict()
+class Facility:
+ def __init__(self,
+ name: str,
+ destination: str,
+ enable: str):
+ self.name = name
+ self.destination = destination
+ self.enable = enable
+
+ @classmethod
+ def from_facility_block(cls, facility: RawBlock) -> 'Facility':
+ return cls(facility.values['name'],
+ facility.values['destination'], facility.values['enable'])
+
+ def to_facility_block(self) -> RawBlock:
+ result = RawBlock("FACILITY", values={'name': self.name,
+ 'destination': self.destination,
+ 'enable': self.enable})
+ return result
+
+ @classmethod
+ def from_dict(cls, ex_dict: Dict[str, Any]) -> 'Facility':
+ return cls(ex_dict['name'], ex_dict['destination'], ex_dict['enable'])
+
+ def to_dict(self) -> Dict[str, Any]:
+ values = {
+ 'name': self.name,
+ 'destination': self.destination,
+ 'enable': self.enable
+ }
+ return values
+
+ def __eq__(self, other: Any) -> bool:
+ if not isinstance(other, Facility):
+ return False
+ return self.to_dict() == other.to_dict()
+
+class Components:
+ def __init__(self,
+ fsal: str,
+ nfs4: str):
+ self.fsal = fsal
+ self.nfs4 = nfs4
+
+ @classmethod
+ def from_components_block(cls, components: RawBlock) -> 'Components':
+ return cls(components.values['fsal'], components.values['nfs4'])
+
+ def to_components_block(self) -> RawBlock:
+ result = RawBlock("COMPONENTS", values={'fsal': self.fsal, 'nfs4': self.nfs4})
+ return result
+
+ @classmethod
+ def from_dict(cls, ex_dict: Dict[str, Any]) -> 'Components':
+ return cls(ex_dict['fsal'], ex_dict['nfs4'])
+
+ def to_dict(self) -> Dict[str, Any]:
+ values = {
+ 'fsal': self.fsal,
+ 'nfs4': self.nfs4
+ }
+ return values
+
+ def __eq__(self, other: Any) -> bool:
+ if not isinstance(other, Components):
+ return False
+ return self.to_dict() == other.to_dict()
+
+class LogBlock:
+ def __init__(self,
+ default_log_level: str,
+ components: Components,
+ facility: Facility):
+ self.default_log_level = default_log_level
+ self.components = components
+ self.facility = facility
+
+ @classmethod
+ def from_log_block(cls, log_block: RawBlock) -> 'LogBlock':
+ return cls(log_block.values.get('default_log_level', None),
+ Components.from_components_block(self.components),
+ Facility.from_facility_block(self.facility))
+
+ def to_log_block(self) -> RawBlock:
+ result = RawBlock("LOG", values={'default_log_level': self.default_log_level})
+ result.blocks = [
+ self.components.to_components_block()
+ ] + [
+ self.facility.to_facility_block()
+ ]
+ return result
+
+ @classmethod
+ def from_dict(cls, ex_dict: Dict[str, Any]) -> 'LogBlock':
+ return cls(ex_dict['default_log_level'],
+ Components.from_dict(ex_dict['components']),
+ Facility.from_dict(ex_dict['facility']))
+
+ def to_dict(self) -> Dict[str, Any]:
+ values = {
+ 'default_log_level': self.default_log_level,
+ 'components': self.components.to_dict(),
+ 'facility': self.facility.to_dict()
+ }
+ return values
+
+ def __eq__(self, other: Any) -> bool:
+ if not isinstance(other, LogBlock):
+ return False
+ return self.to_dict() == other.to_dict()
+
+class NFSV4Block:
+ def __init__(self,
+ delegations: bool):
+ self.delegations = delegations
+
+ @classmethod
+ def from_nfsv4_block(cls, nfsv4_block: RawBlock) -> 'NFSV4Block':
+ return cls(nfsv4_block.values.get('delegations', False))
+
+ def to_nfsv4_block(self) -> RawBlock:
+ result = RawBlock("NFSV4", values={'delegations': self.delegations})
+ return result
+
+ @classmethod
+ def from_dict(cls, ex_dict: Dict[str, Any]) -> 'NFSV4Block':
+ return cls(ex_dict['delegations'])
+
+ def to_dict(self) -> Dict[str, Any]:
+ values = {
+ 'delegations': self.delegations
+ }
+ return values
+
+ def __eq__(self, other: Any) -> bool:
+ if not isinstance(other, NFSV4Block):
+ return False
+ return self.to_dict() == other.to_dict()
+
class Export:
def __init__(
self,
transports: List[str],
fsal: FSAL,
clients: Optional[List[Client]] = None,
- sectype: Optional[List[str]] = None):
+ sectype: Optional[List[str]] = None,
+ delegations: Optional[str] = "none"):
self.export_id = export_id
self.path = path
self.fsal = fsal
self.transports = transports
self.clients: List[Client] = clients or []
self.sectype = sectype
+ self.delegations = delegations
@classmethod
def from_export_block(cls, export_block: RawBlock, cluster_id: str) -> 'Export':
FSAL.from_fsal_block(fsal_blocks[0]),
[Client.from_client_block(client)
for client in client_blocks],
- sectype=sectype)
+ sectype=sectype,
+ delegations=export_block.values.get("delegations", "none"))
def to_export_block(self) -> RawBlock:
values = {
'security_label': self.security_label,
'protocols': self.protocols,
'transports': self.transports,
+ 'delegations': self.delegations
}
if self.sectype:
values['SecType'] = self.sectype
ex_dict.get('transports', ['TCP']),
FSAL.from_dict(ex_dict.get('fsal', {})),
[Client.from_dict(client) for client in ex_dict.get('clients', [])],
- sectype=ex_dict.get("sectype"))
+ sectype=ex_dict.get("sectype"),
+ delegations=ex_dict.get("delegations", "none"))
def to_dict(self) -> Dict[str, Any]:
values = {
'protocols': sorted([p for p in self.protocols]),
'transports': sorted([t for t in self.transports]),
'fsal': self.fsal.to_dict(),
- 'clients': [client.to_dict() for client in self.clients]
+ 'clients': [client.to_dict() for client in self.clients],
+ "delegations": self.delegations
}
if self.sectype:
values['sectype'] = self.sectype
for st in (self.sectype or []):
_validate_sec_type(st)
+ valid_delegations = ["R", "RW", "NONE"]
+ if not self.delegations.upper() in valid_delegations:
+ raise NFSInvalidOperation(f'invalid delegations in export block: {self.delegations}')
+
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Export):
return False
client_addr: Optional[List[str]] = None,
squash: str = 'none',
sectype: Optional[List[str]] = None,
- cmount_path: Optional[str] = "/"
+ cmount_path: Optional[str] = "/",
+ delegations: Optional[str] = "none"
) -> Dict[str, Any]:
"""Create a CephFS export"""
earmark_resolver = CephFSEarmarkResolver(self)
addr=client_addr,
sectype=sectype,
cmount_path=cmount_path,
- earmark_resolver=earmark_resolver
+ earmark_resolver=earmark_resolver,
+ delegations=delegations
)
@CLICommand('nfs export create rgw', perm='rw')