git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
mgr/volumes: Remove GaneshaConf and NFSConfig class
authorVarsha Rao <varao@redhat.com>
Wed, 1 Apr 2020 11:18:30 +0000 (16:48 +0530)
committerVarsha Rao <varao@redhat.com>
Wed, 8 Apr 2020 11:51:17 +0000 (17:21 +0530)
The NFSCluster and FSExport classes replace the GaneshaConf and NFSConfig classes.

Signed-off-by: Varsha Rao <varao@redhat.com>
src/pybind/mgr/volumes/fs/nfs.py
src/pybind/mgr/volumes/module.py

index 6106bedfcc64361ef0fe338f5cc8c3bc8fbee215..a6740b83503c5abe852f2cd651560ec2f97f0c6b 100644 (file)
@@ -112,7 +112,7 @@ class Client(object):
     def __init__(self, addresses, access_type=None, squash=None):
         self.addresses = addresses
         self.access_type = access_type
-        self.squash = GaneshaConf.format_squash(squash)
+        self.squash = squash
 
     @classmethod
     def from_client_block(cls, client_block):
@@ -192,242 +192,6 @@ class Export(object):
                    ex_dict['access_type'],
                    [Client.from_dict(client) for client in ex_dict['clients']])
 
-class GaneshaConf(object):
-    # pylint: disable=R0902
-
-    def __init__(self):
-        self.mgr = nfs_conf.mgr
-        self.cephx_key = nfs_conf.key
-        self.cluster_id = nfs_conf.cluster_id
-        self.rados_pool = nfs_conf.pool_name
-        self.rados_namespace = nfs_conf.pool_ns
-        self.export_conf_blocks = []
-        self.daemons_conf_blocks = {}
-        self.exports = {}
-
-    def check_fs_valid(self):
-        fs_map = self.mgr.get('fs_map')
-        return [{'id': fs['id'], 'name': fs['mdsmap']['fs_name']}
-                for fs in fs_map['filesystems']]
-
-    def _create_user_key(self):
-        ret, out, err = self.mgr.mon_command({
-            'prefix': 'auth get-or-create',
-            'entity': self.cluster_id,
-            'caps' : ['mon', 'allow r', 'osd', 'allow rw pool=%s namespace=%s, \
-                      allow rw tag cephfs data=a' % (self.rados_pool,
-                      self.rados_namespace), 'mds', 'allow rw path=/'],
-            'format': 'json',
-            })
-
-        if ret!= 0:
-            return ret, out, err
-
-        json_res = json.loads(out)
-        log.info("Export user is {}".format(json_res[0]['entity']))
-
-        return json_res[0]['entity'], json_res[0]['key']
-
-    def _write_raw_config(self, conf_block, obj):
-        raw_config = GaneshaConfParser.write_conf(conf_block)
-        with self.mgr.rados.open_ioctx(self.rados_pool) as ioctx:
-            if self.rados_namespace:
-                ioctx.set_namespace(self.rados_namespace)
-            ioctx.write_full(obj, raw_config.encode('utf-8'))
-            log.debug(
-                    "write configuration into rados object %s/%s/%s:\n%s",
-                    self.rados_pool, self.rados_namespace, obj, raw_config)
-
-    @classmethod
-    def format_path(cls, path):
-        if path is not None:
-            path = path.strip()
-            if len(path) > 1 and path[-1] == '/':
-                path = path[:-1]
-        return path
-
-    @classmethod
-    def format_squash(cls, squash):
-        if squash is None:
-            return None
-        if squash.lower() in ["no_root_squash", "noidsquash", "none"]:
-            return "no_root_squash"
-        if squash.lower() in ["rootid", "root_id_squash", "rootidsquash"]:
-            return "root_id_squash"
-        if squash.lower() in ["root", "root_squash", "rootsquash"]:
-            return "root_squash"
-        if squash.lower() in ["all", "all_squash", "allsquash",
-                              "all_anonymous", "allanonymous"]:
-            return "all_squash"
-        log.error("could not parse squash value: %s", squash)
-
-    def _gen_export_id(self):
-        exports = sorted(self.exports)
-        nid = 1
-        for e_id in exports:
-            if e_id == nid:
-                nid += 1
-            else:
-                break
-        return nid
-
-    def _persist_daemon_configuration(self):
-        daemon_map = {} # type: Dict[str, List[Dict[str, str]]]
-        """
-        for daemon_id in self.list_daemons():
-            daemon_map[daemon_id] = []
-        """
-        daemon_map["ganesha.a"] = []
-
-        for _, ex in self.exports.items():
-            for daemon in ex.daemons:
-                daemon_map[daemon].append({
-                    'block_name': "%url",
-                    'value': self.make_rados_url(
-                        "export-{}".format(ex.export_id))
-                })
-        for daemon_id, conf_blocks in daemon_map.items():
-            self._write_raw_config(conf_blocks, "conf-nfs")
-
-    def _delete_export(self, export_id):
-        self._persist_daemon_configuration()
-        with self.mgr.rados.open_ioctx(self.rados_pool) as ioctx:
-            if self.rados_namespace:
-                ioctx.set_namespace(self.rados_namespace)
-            ioctx.remove_object("export-{}".format(export_id))
-
-    def _save_export(self, export):
-        export.fsal.cephx_key = self.cephx_key
-        self.exports[export.export_id] = export
-        conf_block = export.to_export_block()
-        self._write_raw_config(conf_block, "export-{}".format(export.export_id))
-        self._persist_daemon_configuration()
-
-    def create_export(self, ex_dict):
-        ex_id = self._gen_export_id()
-        export = Export.from_dict(ex_id, ex_dict)
-        self._save_export(export)
-        return ex_id
-
-    def remove_export(self, export_id):
-        if export_id not in self.exports:
-            return None
-        export = self.exports[export_id]
-        del self.exports[export_id]
-        self._delete_export(export_id)
-        return export
-
-    def has_export(self, export_id):
-        return export_id in self.exports
-
-    def list_daemons(self):
-        return [daemon_id for daemon_id in self.daemons_conf_blocks]
-
-    def reload_daemons(self, daemons):
-        with self.mgr.rados.open_ioctx(self.rados_pool) as ioctx:
-            if self.rados_namespace:
-                ioctx.set_namespace(self.rados_namespace)
-            for daemon_id in daemons:
-                ioctx.notify("conf-{}".format(daemon_id))
-
-    def make_rados_url(self, obj):
-        if self.rados_namespace:
-            return "rados://{}/{}/{}".format(self.rados_pool, self.rados_namespace, obj)
-        return "rados://{}/{}".format(self.rados_pool, obj)
-
-class NFSConfig(object):
-    exp_num = 0
-
-    def __init__(self, mgr, cluster_id):
-        self.cluster_id = "ganesha-%s" % cluster_id
-        self.pool_name = 'nfs-ganesha'
-        self.pool_ns = cluster_id
-        self.mgr = mgr
-        self.ganeshaconf = None # type: Optional[GaneshaConf]
-        self.key = ''
-
-    def update_user_caps(self):
-        if NFSConfig.exp_num > 0:
-            ret, out, err = self.mgr.mon_command({
-                'prefix': 'auth caps',
-                'entity': "client.%s" % (self.cluster_id),
-                'caps' : ['mon', 'allow *', 'osd', 'allow * pool=%s namespace=%s, allow rw tag cephfs data=a' % (self.pool_name, self.pool_ns), 'mds', 'allow * path=/'],
-                })
-
-            if ret!= 0:
-                return ret, out, err
-
-    def create_instance(self):
-        self.ganeshaconf = GaneshaConf(self)
-        ret, out, err = self.mgr.mon_command({'prefix': 'auth get','entity': "client.%s" % (self.cluster_id), 'format': 'json',})
-
-        if not out:
-            json_res = json.loads(out)
-            self.key = json_res[0]['key']
-
-    def create_export(self):
-        assert self.ganeshaconf is not None
-        ex_id = self.ganeshaconf.create_export({
-            'path': "/",
-            'pseudo': "/cephfs",
-            'cluster_id': self.cluster_id,
-            'daemons': ["ganesha.a"],
-            'access_type': "RW",
-            'fsal': {"name": "CEPH", "user_id":self.cluster_id, "fs_name": "a", "sec_label_xattr": ""},
-            'clients': []
-            })
-        log.info("Export ID is {}".format(ex_id))
-        NFSConfig.exp_num += 1
-        #self.update_user_caps()
-        return 0, "", "Export Created Successfully"
-
-    def delete_export(self, ex_id):
-        assert self.ganeshaconf is not None
-        if not self.ganeshaconf.has_export(ex_id):
-            return 0, "No exports available",""
-        log.info("Export detected for id:{}".format(ex_id))
-        export = self.ganeshaconf.remove_export(ex_id)
-        self.ganeshaconf.reload_daemons(export.daemons)
-        return 0, "", "Export Deleted Successfully"
-
-    def check_fs_valid(self):
-        fs_map = self.mgr.get('fs_map')
-        return [{'id': fs['id'], 'name': fs['mdsmap']['fs_name']}
-                for fs in fs_map['filesystems']]
-
-    def create_empty_rados_obj(self):
-        common_conf = 'conf-nfs'
-        result = ''
-        with self.mgr.rados.open_ioctx(self.pool_name) as ioctx:
-            if self.pool_ns:
-                ioctx.set_namespace(self.pool_ns)
-            ioctx.write_full(common_conf, result.encode('utf-8'))
-            log.debug(
-                    "write configuration into rados object %s/%s/%s\n",
-                    self.pool_name, self.pool_ns, common_conf)
-
-    def create_nfs_cluster(self, size):
-        pool_list = [p['pool_name'] for p in self.mgr.get_osdmap().dump().get('pools', [])]
-        client = 'client.%s' % self.cluster_id
-
-        if self.pool_name not in pool_list:
-            r, out, err = create_pool(self.mgr, self.pool_name)
-            if r != 0:
-                return r, out, err
-            log.info("{}".format(out))
-
-            command = {'prefix': 'osd pool application enable', 'pool': self.pool_name, 'app': 'nfs'}
-            r, out, err = self.mgr.mon_command(command)
-
-            if r != 0:
-                return r, out, err
-
-        self.create_empty_rados_obj()
-        #TODO Check if cluster exists
-        #TODO Call Orchestrator to deploy cluster
-
-        return 0, "", "NFS Cluster Created Successfully"
-
 class FSExport(object):
     def __init__(self, mgr, namespace=None):
         self.mgr = mgr
@@ -549,6 +313,9 @@ class FSExport(object):
 
         return (0, json.dumps(result, indent=4), '')
 
+    def delete_export(self, ex_id):
+        raise NotImplementedError()
+
     def make_rados_url(self, obj):
         if self.rados_namespace:
             return "rados://{}/{}/{}".format(self.rados_pool, self.rados_namespace, obj)
index 8a0405f7ee8b83f55c648141e5669a4f7bb5e2f6..75cbaf44ceecc734a25b324936e363bc785316ef 100644 (file)
@@ -7,7 +7,7 @@ from mgr_module import MgrModule
 import orchestrator
 
 from .fs.volume import VolumeClient
-from .fs.nfs import NFSConfig, NFSCluster, FSExport
+from .fs.nfs import NFSCluster, FSExport
 
 class Module(orchestrator.OrchestratorClientMixin, MgrModule):
     COMMANDS = [
@@ -258,7 +258,6 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
     def __init__(self, *args, **kwargs):
         super(Module, self).__init__(*args, **kwargs)
         self.vc = VolumeClient(self)
-        self.nfs_obj = None # type: Optional[NFSConfig]
         self.fs_export = FSExport(self)
 
     def __del__(self):
@@ -405,22 +404,13 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
             vol_name=cmd['vol_name'], clone_name=cmd['clone_name'],  group_name=cmd.get('group_name', None))
 
     def _cmd_fs_nfs_export_create(self, inbuf, cmd):
-        """
-        if self.nfs_obj and self.nfs_obj.check_fsal_valid():
-            self.nfs_obj.create_instance()
-            return self.nfs_obj.create_export()
-        """
-
         return self.fs_export.create_export(fs_name=cmd['fsname'],
                 pseudo_path=cmd['binding'], read_only=cmd.get('readonly', False),
                 path=cmd.get('path', '/'), cluster_id=cmd.get('attach','None'))
 
     def _cmd_fs_nfs_export_delete(self, inbuf, cmd):
-        if self.nfs_obj:
-            return self.nfs_obj.delete_export(cmd['export_id'])
+            return self.fs_export.delete_export(cmd['export_id'])
 
     def _cmd_fs_nfs_cluster_create(self, inbuf, cmd):
-            #self.nfs_obj = NFSConfig(self, cmd['cluster_id'])
-            #return self.nfs_obj.create_nfs_cluster(size=cmd.get('size', 1))
             nfs_cluster_obj = NFSCluster(self, cmd['cluster_id'])
             return nfs_cluster_obj.create_nfs_cluster(size=cmd.get('size', 1))