git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
mgr/volumes: Create NFSConfig class
author Varsha Rao <varao@redhat.com>
Thu, 30 Jan 2020 04:37:44 +0000 (10:07 +0530)
committer Varsha Rao <varao@redhat.com>
Wed, 8 Jul 2020 05:36:34 +0000 (07:36 +0200)
Move the ganesha config functions into this class.

Fixes: https://tracker.ceph.com/issues/44193
Signed-off-by: Varsha Rao <varao@redhat.com>
(cherry picked from commit a4debfdc906d0410b45cf291caeb7378fd375507)

src/pybind/mgr/volumes/fs/nfs.py
src/pybind/mgr/volumes/module.py
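
For reference, a minimal usage sketch (editorial note, not part of the commit) of how module.py drives the new NFSConfig class after this change, following the second diff below. The function name create_cluster and the mgr_module/cmd parameter names are illustrative stand-ins for the mgr Module instance and its parsed command dictionary.

# Sketch only: mirrors _cmd_fs_nfs_cluster_create in the module.py hunk below.
from .fs.nfs import NFSConfig  # same import module.py uses after this commit

def create_cluster(mgr_module, cmd):
    # One NFSConfig per cluster id; __init__ pins pool_name to 'nfs-ganesha',
    # pool_ns to 'nfsgw', and keeps a reference to the mgr module.
    nfs_obj = NFSConfig(mgr_module, cmd['cluster_id'])
    # create_nfs_cluster() creates the RADOS pool if it is missing, sets up the
    # client.ganesha-<cluster_id> auth entity, and returns (retval, out, err).
    return nfs_obj.create_nfs_cluster(size=cmd.get('size', 1))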

index bc3f3c381eb7680c684280ab40fe72906dde9973..b718343343f6e99758059eafb1f4e1acb192f911 100644 (file)
@@ -10,8 +10,6 @@ from .fs_util import create_pool
 
 log = logging.getLogger(__name__)
 
-exp_num = 0
-
 class GaneshaConf(object):
     # pylint: disable=R0902
 
@@ -184,105 +182,86 @@ class GaneshaConf(object):
             for daemon_id in daemons:
                 ioctx.notify("conf-{}".format(daemon_id))
 
-def create_instance(orch, pool_name):
-    return GaneshaConf("a", pool_name, "ganesha", orch)
-
-def create_export(ganesha_conf):
-    ex_id = ganesha_conf.create_export({
-        'path': "/",
-        'pseudo': "/cephfs",
-        'cluster_id': "cluster1",
-        'daemons': ["ganesha.a"],
-        'tag': "",
-        'access_type': "RW",
-        'squash': "no_root_squash",
-        'security_label': True,
-        'protocols': [4],
-        'transports': ["TCP"],
-        'fsal': {"name": "CEPH", "user_id":"admin", "fs_name": "a", "sec_label_xattr": ""},
-        'clients': []
-        })
-
-    log.info("Export ID is {}".format(ex_id))
-    global exp_num
-    exp_num += 1
-    return 0, "", ""
-
-def delete_export(ganesha_conf, ex_id):
-    if not ganesha_conf.has_export(ex_id):
-        return 0, "No exports available",""
-    log.info("Export detected for id:{}".format(ex_id))
-    export = ganesha_conf.remove_export(ex_id)
-    ganesha_conf.reload_daemons(export.daemons)
-    return 0, "", ""
-
-def check_fsal_valid(fs_map):
-    fsmap_res = [{'id': fs['id'], 'name': fs['mdsmap']['fs_name']}
-            for fs in fs_map['filesystems']]
-
-    #return 0, json.dumps(fsmap_res, indent=2), ""
-    return fsmap_res
-
-def create_rados_pool(vc_mgr, pool_name):
-    global exp_num
-    if not exp_num:
-        r, outb, outs = create_pool(vc_mgr, pool_name)
-    """
-    if r != 0:
-        #return r, outb, outs
+class NFSConfig(object):
+    exp_num = 0
+
+    def __init__(self, mgr, cluster_id):
+        self.cluster_id = cluster_id
+        self.pool_name = 'nfs-ganesha'
+        self.pool_ns = 'nfsgw'
+        self.mgr = mgr
 
-    command = {'prefix': 'osd pool application enable', 'pool': pool_name, 'app': 'nfs'}
-    r, outb, outs = vc_mgr.mgr.mon_command(command)
+    def create_instance(self, orch, pool_name):
+        return GaneshaConf("a", pool_name, "ganesha", orch)
+
+    def create_export(self, ganesha_conf):
+        ex_id = ganesha_conf.create_export({
+            'path': "/",
+            'pseudo': "/cephfs",
+            'cluster_id': "cluster1",
+            'daemons': ["ganesha.a"],
+            'tag': "",
+            'access_type': "RW",
+            'squash': "no_root_squash",
+            'security_label': True,
+            'protocols': [4],
+            'transports': ["TCP"],
+            'fsal': {"name": "CEPH", "user_id":"admin", "fs_name": "a", "sec_label_xattr": ""},
+            'clients': []
+            })
+
+        log.info("Export ID is {}".format(ex_id))
+        NFSConfig.exp_num += 1
+        return 0, "", ""
+
+    def delete_export(self, ganesha_conf, ex_id):
+        if not ganesha_conf.has_export(ex_id):
+            return 0, "No exports available",""
+        log.info("Export detected for id:{}".format(ex_id))
+        export = ganesha_conf.remove_export(ex_id)
+        ganesha_conf.reload_daemons(export.daemons)
+        return 0, "", ""
+
+    def check_fsal_valid(self, fs_map):
+        fsmap_res = [{'id': fs['id'], 'name': fs['mdsmap']['fs_name']}
+                for fs in fs_map['filesystems']]
+
+        #return 0, json.dumps(fsmap_res, indent=2), ""
+        return fsmap_res
+
+    def create_rados_pool(self, vc_mgr, pool_name):
+        if not NFSConfig.exp_num:
+            r, outb, outs = create_pool(vc_mgr, pool_name)
 
-    if r != 0:
+        """
+        if r != 0:
         #return r, outb, outs
-    log.info("pool enable done r: {}".format(r))
-    """
-def create_nfs_cluster(fs_mgr, size, cluster_id):
-    mgr = fs_mgr.vc.mgr
-    pool_list = [p['pool_name'] for p in mgr.get_osdmap().dump().get('pools', [])]
-    pool_name = 'nfs-ganesha'
-    pool_ns = 'nfsgw'
-    client = 'client.ganesha-%s' % cluster_id
-
-    if pool_name not in pool_list:
-        r, out, err = create_pool(mgr, pool_name)
+
+        command = {'prefix': 'osd pool application enable', 'pool': pool_name, 'app': 'nfs'}
+        r, outb, outs = vc_mgr.mgr.mon_command(command)
+
         if r != 0:
-            return r, out, err
-        log.info("{}".format(out))
-
-    ret, out, err = mgr.mon_command({
-        'prefix': 'auth get-or-create',
-        'entity': client,
-        'caps' : ['mon', 'allow r', 'osd', 'allow rw pool=%s namespace=%s' % (pool_name, pool_ns)],
-        })
-
-    if ret!= 0:
-        return ret, out, err
-
-    ret, keyring, err = mgr.mon_command({
-        'prefix': 'auth print-key', 'entity': client,})
-
-    if ret!= 0:
-        return ret, out, err
-
-    ps = orchestrator.PlacementSpec(count=size)
-    spec = orchestrator.NFSServiceSpec(cluster_id, pool_name, pool_ns, ps)
-    try:
-        completion = mgr.add_nfs(spec)
-        mgr._orchestrator_wait([completion])
-        orchestrator.raise_if_exception(completion)
-    except Exception as e:
-        log.exception("Failed to create NFS Cluster")
-        return -errno.EINVAL, "", str(e)
-
-    if size > 1:
-        try:
-            completion = mgr.update_nfs(spec)
-            mgr._orchestrator_wait([completion])
-            orchestrator.raise_if_exception(completion)
-        except Exception as e:
-            log.exception("Failed to scale NFS Cluster")
-            return -errno.EINVAL, "", str(e)
-
-    return 0,"","NFS Cluster Created Successfully"
+        #return r, outb, outs
+        log.info("pool enable done r: {}".format(r))
+        """
+
+    def create_nfs_cluster(self, size):
+        pool_list = [p['pool_name'] for p in self.mgr.get_osdmap().dump().get('pools', [])]
+        client = 'client.ganesha-%s' % self.cluster_id
+
+        if self.pool_name not in pool_list:
+            r, out, err = create_pool(self.mgr, self.pool_name)
+            if r != 0:
+                return r, out, err
+            log.info("{}".format(out))
+
+        ret, out, err = self.mgr.mon_command({
+            'prefix': 'auth get-or-create',
+            'entity': client,
+            'caps' : ['mon', 'allow r', 'osd', 'allow rw pool=%s namespace=%s' % (self.pool_name, self.pool_ns)],
+            })
+
+        if ret!= 0:
+            return ret, out, err
+
+        return 0, "", "NFS Cluster Created Successfully"
index c2df25172e717924e81999f8939f1c66a18a7ae2..aae47b439fd2c6fdcf750b84dd64352fcd2dc70a 100644 (file)
@@ -6,7 +6,7 @@ import orchestrator
 
 from .fs.volume import VolumeClient
 #from .fs.nfs import check_fsal_valid, create_instance, create_export, delete_export
-from .fs.nfs import *
+from .fs.nfs import NFSConfig
 
 class Module(orchestrator.OrchestratorClientMixin, MgrModule):
     COMMANDS = [
@@ -460,14 +460,19 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
             vol_name=cmd['vol_name'], clone_name=cmd['clone_name'],  group_name=cmd.get('group_name', None))
 
     def _cmd_fs_nfs_create(self, inbuf, cmd):
-        if check_fsal_valid(self.vc.mgr.get('fs_map')):
+        if NFSConfig.check_fsal_valid(self.vc.mgr.get('fs_map')):
             pool_name = "nfs-ganesha"
-            create_rados_pool(self.vc.mgr, pool_name)
-            instance = create_instance(self, pool_name)
-            return create_export(instance)
+            NFSConfig.create_rados_pool(self.vc.mgr, pool_name)
+            instance = NFSConfig.create_instance(self, pool_name)
+            return NFSConfig.create_export(instance)
+
+            NFSConfig.create_rados_pool(self.vc.mgr, pool_name)
+            instance = NFSConfig.create_instance(self, pool_name)
+            return NFSConfig.create_export(instance)
 
     def _cmd_fs_nfs_delete(self, inbuf, cmd):
-            instance = create_instance(self, "nfs-ganesha")
-            return delete_export(instance, cmd['export_id'])
+            instance = NFSConfig.create_instance(self, "nfs-ganesha")
+            return NFSConfig.delete_export(instance, cmd['export_id'])
     def _cmd_fs_nfs_cluster_create(self, inbuf, cmd):
-            return create_nfs_cluster(self, size=cmd.get('size', 1), cluster_id=cmd['cluster_id'])
+            nfs_obj = NFSConfig(self, cmd['cluster_id'])
+            return nfs_obj.create_nfs_cluster(size=cmd.get('size', 1))