if isinstance(export.fsal, CephFSFSAL):
fsal = cast(CephFSFSAL, export.fsal)
assert fsal.fs_name
-
- # is top-level or any client rw?
- rw = export.access_type.lower() == 'rw'
- for c in export.clients:
- if c.access_type.lower() == 'rw':
- rw = True
- break
-
fsal.user_id = f"nfs.{export.cluster_id}.{export.export_id}"
fsal.cephx_key = self._create_user_key(
- export.cluster_id, fsal.user_id, export.path, fsal.fs_name, not rw
+ export.cluster_id, fsal.user_id, export.path, fsal.fs_name
)
log.debug("Successfully created user %s for cephfs path %s", fsal.user_id, export.path)
self,
cluster_id: str,
path: str,
- access_type: str,
fs_name: str,
user_id: str
) -> None:
osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
self.rados_pool, cluster_id, fs_name)
- access_type = 'r' if access_type == 'RO' else 'rw'
-
+ # NFS-Ganesha can dynamically enforce changes to an export's access type, but Ceph server
+ # daemons cannot dynamically enforce changes to the Ceph user caps of Ceph clients. To
+ # allow dynamic updates of CephFS NFS exports, always set the FSAL Ceph user's MDS caps
+ # with path-restricted read-write access, and rely on the ganesha servers to enforce the
+ # export access type requested for the NFS clients.
self.mgr.check_mon_command({
'prefix': 'auth caps',
'entity': f'client.{user_id}',
- 'caps': ['mon', 'allow r', 'osd', osd_cap, 'mds', 'allow {} path={}'.format(
- access_type, path)],
+ 'caps': ['mon', 'allow r', 'osd', osd_cap, 'mds', 'allow rw path={}'.format(path)],
})
log.info("Export user updated %s", user_id)
entity: str,
path: str,
fs_name: str,
- fs_ro: bool
) -> str:
osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
self.rados_pool, cluster_id, fs_name)
- access_type = 'r' if fs_ro else 'rw'
nfs_caps = [
'mon', 'allow r',
'osd', osd_cap,
- 'mds', 'allow {} path={}'.format(access_type, path)
+ 'mds', 'allow rw path={}'.format(path)
]
ret, out, err = self.mgr.mon_command({
self._save_export(cluster_id, new_export)
return 0, f'Added export {new_export.pseudo}', ''
+ need_nfs_service_restart = True
if old_export.fsal.name != new_export.fsal.name:
raise NFSInvalidOperation('FSAL change not allowed')
if old_export.pseudo != new_export.pseudo:
self._update_user_id(
cluster_id,
new_export.path,
- new_export.access_type,
cast(str, new_fsal.fs_name),
cast(str, new_fsal.user_id)
)
new_fsal.cephx_key = old_fsal.cephx_key
else:
+ expected_mds_caps = 'allow rw path={}'.format(new_export.path)
+ entity = new_fsal.user_id
+ ret, out, err = self.mgr.mon_command({
+ 'prefix': 'auth get',
+ 'entity': 'client.{}'.format(entity),
+ 'format': 'json',
+ })
+ if ret:
+ raise NFSException(f'Failed to fetch caps for {entity}: {err}')
+ actual_mds_caps = json.loads(out)[0]['caps'].get('mds')
+ if actual_mds_caps != expected_mds_caps:
+ self._update_user_id(
+ cluster_id,
+ new_export.path,
+ cast(str, new_fsal.fs_name),
+ cast(str, new_fsal.user_id)
+ )
+ elif old_export.pseudo == new_export.pseudo:
+ need_nfs_service_restart = False
new_fsal.cephx_key = old_fsal.cephx_key
+
if old_export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[1]:
old_rgw_fsal = cast(RGWFSAL, old_export.fsal)
new_rgw_fsal = cast(RGWFSAL, new_export.fsal)
self.exports[cluster_id].remove(old_export)
self._update_export(cluster_id, new_export)
- # TODO: detect whether the update is such that a reload is sufficient
- restart_nfs_service(self.mgr, new_export.cluster_id)
+ # TODO: detect whether the RGW export update is such that a reload is sufficient
+ if need_nfs_service_restart:
+ restart_nfs_service(self.mgr, new_export.cluster_id)
return 0, f"Updated export {new_export.pseudo}", ""