From cc50d4c154d1f2acee238b95484d429180eeec7b Mon Sep 17 00:00:00 2001 From: Varsha Rao Date: Mon, 27 Jan 2020 15:36:48 +0530 Subject: [PATCH] mgr/volumes: Add command to create nfs-ganesha clusters ceph fs nfs cluster create [--size=1] cluster_id: Name of the nfs cluster This command creates a common recovery pool for all Ganesha daemons, creates a new user and an NFS cluster. Orchestrator needs to be enabled. Fixes: https://tracker.ceph.com/issues/44193 Signed-off-by: Varsha Rao --- src/pybind/mgr/volumes/fs/nfs.py | 50 ++++++++++++++++++++++++++++++++ src/pybind/mgr/volumes/module.py | 10 ++++++- 2 files changed, 59 insertions(+), 1 deletion(-) diff --git a/src/pybind/mgr/volumes/fs/nfs.py b/src/pybind/mgr/volumes/fs/nfs.py index 23ddfee5b2f..bc3f3c381eb 100644 --- a/src/pybind/mgr/volumes/fs/nfs.py +++ b/src/pybind/mgr/volumes/fs/nfs.py @@ -1,5 +1,7 @@ +import errno import json import logging + import cephfs import orchestrator from dashboard.services.cephx import CephX @@ -236,3 +238,51 @@ def create_rados_pool(vc_mgr, pool_name): #return r, outb, outs log.info("pool enable done r: {}".format(r)) """ +def create_nfs_cluster(fs_mgr, size, cluster_id): + mgr = fs_mgr.vc.mgr + pool_list = [p['pool_name'] for p in mgr.get_osdmap().dump().get('pools', [])] + pool_name = 'nfs-ganesha' + pool_ns = 'nfsgw' + client = 'client.ganesha-%s' % cluster_id + + if pool_name not in pool_list: + r, out, err = create_pool(mgr, pool_name) + if r != 0: + return r, out, err + log.info("{}".format(out)) + + ret, out, err = mgr.mon_command({ + 'prefix': 'auth get-or-create', + 'entity': client, + 'caps' : ['mon', 'allow r', 'osd', 'allow rw pool=%s namespace=%s' % (pool_name, pool_ns)], + }) + + if ret!= 0: + return ret, out, err + + ret, keyring, err = mgr.mon_command({ + 'prefix': 'auth print-key', 'entity': client,}) + + if ret!= 0: + return ret, out, err + + ps = orchestrator.PlacementSpec(count=size) + spec = orchestrator.NFSServiceSpec(cluster_id, pool_name, pool_ns, ps) + try: + 
completion = mgr.add_nfs(spec) + mgr._orchestrator_wait([completion]) + orchestrator.raise_if_exception(completion) + except Exception as e: + log.exception("Failed to create NFS Cluster") + return -errno.EINVAL, "", str(e) + + if size > 1: + try: + completion = mgr.update_nfs(spec) + mgr._orchestrator_wait([completion]) + orchestrator.raise_if_exception(completion) + except Exception as e: + log.exception("Failed to scale NFS Cluster") + return -errno.EINVAL, "", str(e) + + return 0,"","NFS Cluster Created Successfully" diff --git a/src/pybind/mgr/volumes/module.py b/src/pybind/mgr/volumes/module.py index 902ed981f1b..54640415253 100644 --- a/src/pybind/mgr/volumes/module.py +++ b/src/pybind/mgr/volumes/module.py @@ -225,7 +225,13 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule): 'desc': "Delete nfs exports", 'perm': 'rw' }, - + { + 'cmd': 'fs nfs cluster create ' + 'name=size,type=CephInt,req=false ' + 'name=cluster_id,type=CephString ', + 'desc': "Creates NFS Cluster", + 'perm': 'rw' + }, # volume ls [recursive] # subvolume ls # volume authorize/deauthorize @@ -400,3 +406,5 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule): def _cmd_fs_nfs_delete(self, inbuf, cmd): instance = create_instance(self, "nfs-ganesha") return delete_export(instance, cmd['export_id']) + def _cmd_fs_nfs_cluster_create(self, inbuf, cmd): + return create_nfs_cluster(self, size=cmd.get('size', 1), cluster_id=cmd['cluster_id']) -- 2.39.5