From: Ramana Raja
Date: Tue, 25 Jan 2022 01:06:11 +0000 (-0500)
Subject: mgr/nfs: allow dynamic update of cephfs nfs export
X-Git-Tag: v17.2.1~48^2~55
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=4569336be67f1dd171550509bdfadd3f51caea94;p=ceph.git

mgr/nfs: allow dynamic update of cephfs nfs export

The mgr/nfs module's apply_export() method is used to update an existing
CephFS NFS export. The method always restarted the ganesha service
(the ganesha server cluster) after updating the export object and
notifying the ganesha servers to reload their exports. The restart
temporarily affected the client connections of all the exports served by
the ganesha servers. It is not always necessary to restart the ganesha
servers: only updating the export ID, path, or FSAL block of a CephFS NFS
export requires a restart. So modify apply_export() to restart the
ganesha servers only for such export updates.

The mgr/nfs module creates a FSAL Ceph user with read-only or read-write
path-restricted MDS caps for each export. To change the access type of a
CephFS NFS export, the MDS caps of the export's FSAL Ceph user must also
be changed. Ganesha can dynamically enforce changes to an export's access
type, but Ceph server daemons can't dynamically enforce changes to the
caps of Ceph clients. To allow dynamic updates of CephFS NFS exports,
always create a FSAL Ceph user with read-write path-restricted MDS caps
per export, and rely on the ganesha servers to enforce the export access
type changes for the NFS clients.

Fixes: https://tracker.ceph.com/issues/54025
Signed-off-by: Ramana Raja
(cherry picked from commit 2415e03217b4afe9e430863da0b4503be254c425)
---

diff --git a/src/pybind/mgr/nfs/export.py b/src/pybind/mgr/nfs/export.py
index 6abaf3f2744de..d7f554194e99f 100644
--- a/src/pybind/mgr/nfs/export.py
+++ b/src/pybind/mgr/nfs/export.py
@@ -243,17 +243,9 @@ class ExportMgr:
         if isinstance(export.fsal, CephFSFSAL):
             fsal = cast(CephFSFSAL, export.fsal)
             assert fsal.fs_name
-
-            # is top-level or any client rw?
-            rw = export.access_type.lower() == 'rw'
-            for c in export.clients:
-                if c.access_type.lower() == 'rw':
-                    rw = True
-                    break
-
             fsal.user_id = f"nfs.{export.cluster_id}.{export.export_id}"
             fsal.cephx_key = self._create_user_key(
-                export.cluster_id, fsal.user_id, export.path, fsal.fs_name, not rw
+                export.cluster_id, fsal.user_id, export.path, fsal.fs_name
             )
             log.debug("Successfully created user %s for cephfs path %s",
                       fsal.user_id, export.path)
@@ -523,19 +515,20 @@ class ExportMgr:
             self,
             cluster_id: str,
             path: str,
-            access_type: str,
             fs_name: str,
             user_id: str
     ) -> None:
         osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
             self.rados_pool, cluster_id, fs_name)
-        access_type = 'r' if access_type == 'RO' else 'rw'
-
+        # NFS-Ganesha can dynamically enforce an export's access type changes, but Ceph server
+        # daemons can't dynamically enforce changes in Ceph user caps of the Ceph clients. To
+        # allow dynamic updates of CephFS NFS exports, always set FSAL Ceph user's MDS caps with
+        # path restricted read-write access. Rely on the ganesha servers to enforce the export
+        # access type requested for the NFS clients.
         self.mgr.check_mon_command({
             'prefix': 'auth caps',
             'entity': f'client.{user_id}',
-            'caps': ['mon', 'allow r', 'osd', osd_cap, 'mds', 'allow {} path={}'.format(
-                access_type, path)],
+            'caps': ['mon', 'allow r', 'osd', osd_cap, 'mds', 'allow rw path={}'.format(path)],
         })

         log.info("Export user updated %s", user_id)
@@ -546,15 +539,13 @@ class ExportMgr:
             entity: str,
             path: str,
             fs_name: str,
-            fs_ro: bool
     ) -> str:
         osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data={}'.format(
             self.rados_pool, cluster_id, fs_name)
-        access_type = 'r' if fs_ro else 'rw'
         nfs_caps = [
             'mon', 'allow r',
             'osd', osd_cap,
-            'mds', 'allow {} path={}'.format(access_type, path)
+            'mds', 'allow rw path={}'.format(path)
         ]

         ret, out, err = self.mgr.mon_command({
@@ -749,6 +740,7 @@ class ExportMgr:
             self._save_export(cluster_id, new_export)
             return 0, f'Added export {new_export.pseudo}', ''

+        need_nfs_service_restart = True
         if old_export.fsal.name != new_export.fsal.name:
             raise NFSInvalidOperation('FSAL change not allowed')
         if old_export.pseudo != new_export.pseudo:
@@ -768,13 +760,32 @@ class ExportMgr:
                 self._update_user_id(
                     cluster_id,
                     new_export.path,
-                    new_export.access_type,
                     cast(str, new_fsal.fs_name),
                     cast(str, new_fsal.user_id)
                 )
                 new_fsal.cephx_key = old_fsal.cephx_key
             else:
+                expected_mds_caps = 'allow rw path={}'.format(new_export.path)
+                entity = new_fsal.user_id
+                ret, out, err = self.mgr.mon_command({
+                    'prefix': 'auth get',
+                    'entity': 'client.{}'.format(entity),
+                    'format': 'json',
+                })
+                if ret:
+                    raise NFSException(f'Failed to fetch caps for {entity}: {err}')
+                actual_mds_caps = json.loads(out)[0]['caps'].get('mds')
+                if actual_mds_caps != expected_mds_caps:
+                    self._update_user_id(
+                        cluster_id,
+                        new_export.path,
+                        cast(str, new_fsal.fs_name),
+                        cast(str, new_fsal.user_id)
+                    )
+                elif old_export.pseudo == new_export.pseudo:
+                    need_nfs_service_restart = False
                 new_fsal.cephx_key = old_fsal.cephx_key
+
         if old_export.fsal.name == NFS_GANESHA_SUPPORTED_FSALS[1]:
             old_rgw_fsal = cast(RGWFSAL, old_export.fsal)
             new_rgw_fsal = cast(RGWFSAL, new_export.fsal)
@@ -789,8 +800,9 @@ class ExportMgr:
         self.exports[cluster_id].remove(old_export)
         self._update_export(cluster_id, new_export)

-        # TODO: detect whether the update is such that a reload is sufficient
-        restart_nfs_service(self.mgr, new_export.cluster_id)
+        # TODO: detect whether the RGW export update is such that a reload is sufficient
+        if need_nfs_service_restart:
+            restart_nfs_service(self.mgr, new_export.cluster_id)

        return 0, f"Updated export {new_export.pseudo}", ""
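
For illustration only (not part of this patch), below is a minimal standalone
sketch of the caps check that the updated apply_export() path performs: fetch
the export's FSAL user caps in JSON form and compare the MDS cap against the
expected path-restricted read-write cap, updating the user only on a mismatch.
The user id and path are hypothetical, and the sketch shells out to the ceph
CLI rather than using the mgr's mon_command() as the module does.

import json
import subprocess

def mds_caps_need_update(user_id: str, path: str) -> bool:
    """Return True if the export's FSAL user still carries old MDS caps."""
    # Expected MDS cap: always read-write and restricted to the export path,
    # mirroring what _create_user_key()/_update_user_id() now set.
    expected_mds_caps = 'allow rw path={}'.format(path)
    # 'ceph auth get ... --format json' returns a JSON list of auth entries,
    # each carrying an 'entity', a 'key' and a 'caps' dict.
    out = subprocess.check_output(
        ['ceph', 'auth', 'get', 'client.{}'.format(user_id), '--format', 'json'])
    actual_mds_caps = json.loads(out)[0]['caps'].get('mds')
    return actual_mds_caps != expected_mds_caps

# Hypothetical FSAL user created by mgr/nfs for export 1 in cluster 'mynfs':
if mds_caps_need_update('nfs.mynfs.1', '/volumes/_nogroup/vol1'):
    print('caps out of date; mgr/nfs would update them and restart ganesha')

In the patch itself the equivalent check runs inside the mgr via
mon_command('auth get'), and the ganesha restart is skipped only when the caps
already match and the pseudo path is unchanged.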