mgr/volumes: Add command to create nfs-ganesha clusters
author     Varsha Rao <varao@redhat.com>
           Mon, 27 Jan 2020 10:06:48 +0000 (15:36 +0530)
committer  Varsha Rao <varao@redhat.com>
           Wed, 8 Apr 2020 09:19:43 +0000 (14:49 +0530)
ceph fs nfs cluster create <cluster_id> [--size=1]

cluster_id: Name of the NFS cluster

This command creates a common recovery pool for all Ganesha daemons, a new
cephx user, and an NFS cluster. The orchestrator module needs to be enabled.
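
A hypothetical invocation, assuming an orchestrator backend is configured
(the cluster name 'mycluster' is illustrative), deploying two daemons:

    $ ceph fs nfs cluster create mycluster --size=2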

Fixes: https://tracker.ceph.com/issues/44193
Signed-off-by: Varsha Rao <varao@redhat.com>
src/pybind/mgr/volumes/fs/nfs.py
src/pybind/mgr/volumes/module.py

index 23ddfee5b2ffe885375de63d3dea6ae184d84984..bc3f3c381eb7680c684280ab40fe72906dde9973 100644 (file)
@@ -1,5 +1,7 @@
+import errno
 import json
 import logging
+
 import cephfs
 import orchestrator
 from dashboard.services.cephx import CephX
@@ -236,3 +238,58 @@ def create_rados_pool(vc_mgr, pool_name):
         #return r, outb, outs
     log.info("pool enable done r: {}".format(r))
     """
+
+
+def create_nfs_cluster(fs_mgr, size, cluster_id):
+    mgr = fs_mgr.vc.mgr
+    pool_list = [p['pool_name'] for p in mgr.get_osdmap().dump().get('pools', [])]
+    pool_name = 'nfs-ganesha'
+    pool_ns = 'nfsgw'
+    client = 'client.ganesha-%s' % cluster_id
+
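+    # Create the common recovery pool for the Ganesha daemons if it
+    # does not already exist.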
+    if pool_name not in pool_list:
+        r, out, err = create_pool(mgr, pool_name)
+        if r != 0:
+            return r, out, err
+        log.info(out)
+
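+    # Create a cephx user whose access is limited to the Ganesha pool
+    # and namespace.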
+    ret, out, err = mgr.mon_command({
+        'prefix': 'auth get-or-create',
+        'entity': client,
+        'caps': ['mon', 'allow r', 'osd', 'allow rw pool=%s namespace=%s' % (pool_name, pool_ns)],
+        })
+
+    if ret != 0:
+        return ret, out, err
+
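+    # Fetch the new user's secret key.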
+    ret, keyring, err = mgr.mon_command({
+        'prefix': 'auth print-key', 'entity': client})
+
+    if ret != 0:
+        return ret, keyring, err
+
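+    # Ask the orchestrator to deploy the NFS service with the requested
+    # placement.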
+    ps = orchestrator.PlacementSpec(count=size)
+    spec = orchestrator.NFSServiceSpec(cluster_id, pool_name, pool_ns, ps)
+    try:
+        completion = mgr.add_nfs(spec)
+        mgr._orchestrator_wait([completion])
+        orchestrator.raise_if_exception(completion)
+    except Exception as e:
+        log.exception("Failed to create NFS Cluster")
+        return -errno.EINVAL, "", str(e)
+
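+    # If more than one daemon was requested, update the service to scale
+    # it out.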
+    if size > 1:
+        try:
+            completion = mgr.update_nfs(spec)
+            mgr._orchestrator_wait([completion])
+            orchestrator.raise_if_exception(completion)
+        except Exception as e:
+            log.exception("Failed to scale NFS Cluster")
+            return -errno.EINVAL, "", str(e)
+
+    return 0, "", "NFS Cluster Created Successfully"
index 902ed981f1b92dd3825342eb84a83aae9d2f95c7..546404152530db661fd73ee2b6ee9a14051791c8 100644 (file)
@@ -225,7 +225,13 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
             'desc': "Delete nfs exports",
             'perm': 'rw'
         },
-
+        {
+            'cmd': 'fs nfs cluster create '
+                   'name=size,type=CephInt,req=false '
+                   'name=cluster_id,type=CephString ',
            'desc': "Create an NFS cluster",
+            'perm': 'rw'
+        },
         # volume ls [recursive]
         # subvolume ls <volume>
         # volume authorize/deauthorize
@@ -400,3 +406,6 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
     def _cmd_fs_nfs_delete(self, inbuf, cmd):
             instance = create_instance(self, "nfs-ganesha")
             return delete_export(instance, cmd['export_id'])
+
+    def _cmd_fs_nfs_cluster_create(self, inbuf, cmd):
+            return create_nfs_cluster(self, size=cmd.get('size', 1), cluster_id=cmd['cluster_id'])