import json
+import errno
import logging
try:
from typing import Dict, List, Optional
class Export(object):
# pylint: disable=R0902
- def __init__(self, export_id, path, fsal, cluster_id, daemons, pseudo=None,
+ def __init__(self, export_id, path, fsal, cluster_id, pseudo,
access_type='R', clients=None):
self.export_id = export_id
- self.path = GaneshaConf.format_path(path)
+ self.path = path
self.fsal = fsal
self.cluster_id = cluster_id
- self.daemons = set(daemons)
- self.pseudo = GaneshaConf.format_path(pseudo)
+ self.pseudo = pseudo
self.access_type = access_type
self.squash = 'no_root_squash'
self.attr_expiration_time = 0
result = {
'block_name': 'EXPORT',
'export_id': self.export_id,
- 'path': self.path
+ 'path': self.path,
+ 'pseudo': self.pseudo,
+ 'access_type': self.access_type,
+ 'squash': self.squash,
+ 'attr_expiration_time': self.attr_expiration_time,
+ 'security_label': self.security_label,
+ 'protocols': self.protocols,
+ 'transports': [self.transports],
}
- if self.pseudo:
- result['pseudo'] = self.pseudo
- result['access_type'] = self.access_type
- result['squash'] = self.squash
- result['attr_expiration_time'] = self.attr_expiration_time
- result['security_label'] = self.security_label
- result['protocols'] = self.protocols
- result['transports'] = [self.transports]
-
result['_blocks_'] = [self.fsal.to_fsal_block()]
result['_blocks_'].extend([client.to_client_block()
for client in self.clients])
ex_dict['path'],
CephFSFSal.from_dict(ex_dict['fsal']),
ex_dict['cluster_id'],
- ex_dict['daemons'],
ex_dict['pseudo'],
ex_dict['access_type'],
[Client.from_dict(client) for client in ex_dict['clients']])
class GaneshaConf(object):
# pylint: disable=R0902
- def __init__(self, nfs_conf):
    def __init__(self):
        # NOTE(review): the nfs_conf parameter was removed from this
        # signature, but the body still reads nfs_conf -- calling this
        # now raises NameError.  Either restore the parameter or obtain
        # mgr/key/cluster_id some other way.
        self.mgr = nfs_conf.mgr
        self.cephx_key = nfs_conf.key
        self.cluster_id = nfs_conf.cluster_id
        self.daemons_conf_blocks = {}
        self.exports = {}
    def check_fs_valid(self):
        # Return [{'id': ..., 'name': ...}] for every CephFS filesystem
        # present in the cluster's FSMap.
        # NOTE(review): a method with this same name also appears later in
        # this class (renamed from check_fsal_valid); the later definition
        # wins, so one of the two should be removed.
        fs_map = self.mgr.get('fs_map')
        return [{'id': fs['id'], 'name': fs['mdsmap']['fs_name']}
                for fs in fs_map['filesystems']]
+
    def _create_user_key(self):
        # Create (or fetch) a cephx user for this cluster, capped to the
        # export pool/namespace and to CephFS, and return (entity, key).
        # NOTE(review): unlike FSExport._create_user_key, the entity here
        # has no 'client.' prefix -- confirm which form the monitor expects.
        # NOTE(review): the error path returns a 3-tuple (ret, out, err)
        # while the success path returns a 2-tuple; callers must handle
        # both shapes.
        ret, out, err = self.mgr.mon_command({
            'prefix': 'auth get-or-create',
            'entity': self.cluster_id,
            'caps' : ['mon', 'allow r', 'osd', 'allow rw pool=%s namespace=%s, \
                allow rw tag cephfs data=a' % (self.rados_pool,
                self.rados_namespace), 'mds', 'allow rw path=/'],
            'format': 'json',
        })

        if ret!= 0:
            return ret, out, err

        json_res = json.loads(out)
        log.info("Export user is {}".format(json_res[0]['entity']))

        return json_res[0]['entity'], json_res[0]['key']
+
def _write_raw_config(self, conf_block, obj):
raw_config = GaneshaConfParser.write_conf(conf_block)
with self.mgr.rados.open_ioctx(self.rados_pool) as ioctx:
self.ganeshaconf.reload_daemons(export.daemons)
return 0, "", "Export Deleted Successfully"
- def check_fsal_valid(self):
+ def check_fs_valid(self):
fs_map = self.mgr.get('fs_map')
return [{'id': fs['id'], 'name': fs['mdsmap']['fs_name']}
for fs in fs_map['filesystems']]
#TODO Call Orchestrator to deploy cluster
return 0, "", "NFS Cluster Created Successfully"
+
class FSExport(object):
    """Create and persist CephFS exports as Ganesha config objects stored
    in the 'nfs-ganesha' RADOS pool, one namespace per NFS cluster."""

    def __init__(self, mgr, namespace=None):
        self.mgr = mgr
        self.rados_pool = 'nfs-ganesha'
        self.rados_namespace = namespace  # TODO check if cluster exists
        self.export_conf_blocks = []
        # cluster_id (rados namespace) -> list of Export objects
        self.exports = {}

    def check_fs(self, fs_name):
        """Return True if a CephFS filesystem named *fs_name* exists."""
        fs_map = self.mgr.get('fs_map')
        return fs_name in [fs['mdsmap']['fs_name'] for fs in fs_map['filesystems']]

    def check_pseudo_path(self, pseudo_path):
        """Return True if *pseudo_path* is already used by an export in the
        current namespace (False when the namespace has no exports yet)."""
        # .get() guards against a namespace with no recorded exports; the
        # previous plain indexing raised KeyError in that case.
        return any(ex.pseudo == pseudo_path
                   for ex in self.exports.get(self.rados_namespace, []))

    def _create_user_key(self, entity):
        """Create (or fetch) cephx user 'client.<entity>' capped to this
        pool/namespace and to CephFS, returning (entity_name, secret_key).

        On a monitor error returns (ret, err) instead; callers detect the
        failure by checking for an int first element.
        """
        osd_cap = 'allow rw pool={} namespace={}, allow rw tag cephfs data=a'.format(
            self.rados_pool, self.rados_namespace)
        ret, out, err = self.mgr.mon_command({
            'prefix': 'auth get-or-create',
            'entity': 'client.{}'.format(entity),
            'caps': ['mon', 'allow r', 'osd', osd_cap, 'mds', 'allow rw path=/'],
            'format': 'json',
            })

        if ret != 0:
            return ret, err

        json_res = json.loads(out)
        # Lazy %-style args: formatting is skipped if INFO is disabled.
        log.info("Export user is %s", json_res[0]['entity'])

        return json_res[0]['entity'], json_res[0]['key']

    def format_path(self, path):
        """Strip surrounding whitespace and a single trailing slash from
        *path* (the root path '/' is preserved).  None passes through."""
        if path is not None:
            path = path.strip()
            if len(path) > 1 and path[-1] == '/':
                path = path[:-1]
        return path

    def _gen_export_id(self):
        """Return the smallest positive export id not already in use in
        the current namespace."""
        used = sorted(ex.export_id
                      for ex in self.exports.get(self.rados_namespace, []))
        nid = 1
        for e_id in used:
            if e_id != nid:
                break
            nid += 1
        return nid

    def _write_raw_config(self, conf_block, obj, append=False):
        """Serialize *conf_block* and write (or append, when *append*) it
        to rados object *obj* in this cluster's namespace."""
        raw_config = GaneshaConfParser.write_conf(conf_block)
        with self.mgr.rados.open_ioctx(self.rados_pool) as ioctx:
            if self.rados_namespace:
                ioctx.set_namespace(self.rados_namespace)
            if append:
                ioctx.append(obj, raw_config.encode('utf-8'))
            else:
                ioctx.write_full(obj, raw_config.encode('utf-8'))
            log.debug(
                "write configuration into rados object %s/%s/%s:\n%s",
                self.rados_pool, self.rados_namespace, obj, raw_config)

    def _update_common_conf(self, ex_id):
        """Append a %url reference for export *ex_id* to the shared
        'conf-nfs' object so the ganesha daemons pick it up."""
        common_conf = 'conf-nfs'
        conf_blocks = {
            'block_name': '%url',
            'value': self.make_rados_url(
                'export-{}'.format(ex_id))
        }
        self._write_raw_config(conf_blocks, common_conf, True)

    def _save_export(self, export):
        """Record *export* in memory, persist its own config object and
        link it from the common config."""
        self.exports[self.rados_namespace].append(export)
        conf_block = export.to_export_block()
        self._write_raw_config(conf_block, "export-{}".format(export.export_id))
        self._update_common_conf(export.export_id)

    def create_export(self, fs_name, pseudo_path, read_only, path, cluster_id):
        """Create a CephFS export of *path* bound to *pseudo_path* on the
        cluster *cluster_id*.

        Returns a mgr-style (retval, stdout, stderr) tuple; on success
        stdout is a JSON summary of the export.
        """
        # TODO Check if valid cluster
        if cluster_id not in self.exports:
            self.exports[cluster_id] = []

        self.rados_namespace = cluster_id
        if not self.check_fs(fs_name) or self.check_pseudo_path(pseudo_path):
            return -errno.EINVAL, "", "Invalid CephFS name or export already exists"

        user_id, key = self._create_user_key(cluster_id)
        # _create_user_key returns (ret, err) on failure; propagate it.
        if isinstance(user_id, int):
            return user_id, "", key

        access_type = "R" if read_only else "RW"

        ex_dict = {
            'path': self.format_path(path),
            'pseudo': self.format_path(pseudo_path),
            'cluster_id': cluster_id,
            'access_type': access_type,
            'fsal': {"name": "CEPH", "user_id": cluster_id,
                     "fs_name": fs_name, "sec_label_xattr": ""},
            'clients': []
        }

        ex_id = self._gen_export_id()
        export = Export.from_dict(ex_id, ex_dict)
        export.fsal.cephx_key = key
        self._save_export(export)

        result = {
            "bind": pseudo_path,
            "fs": fs_name,
            "path": path,
            "cluster": cluster_id,
            "mode": access_type,
        }

        return (0, json.dumps(result, indent=4), '')

    def make_rados_url(self, obj):
        """Return the rados:// URL of *obj* in this cluster's pool,
        namespace-qualified when a namespace is set."""
        if self.rados_namespace:
            return "rados://{}/{}/{}".format(self.rados_pool, self.rados_namespace, obj)
        return "rados://{}/{}".format(self.rados_pool, obj)
+
class NFSCluster:
    """Provision the RADOS-side resources backing an NFS Ganesha cluster:
    the 'nfs-ganesha' pool, a per-cluster namespace and the shared empty
    config object."""

    def __init__(self, mgr, cluster_id):
        self.cluster_id = "ganesha-%s" % cluster_id
        self.pool_name = 'nfs-ganesha'
        self.pool_ns = cluster_id
        self.mgr = mgr

    def create_empty_rados_obj(self):
        """Create the empty shared config object ('conf-nfs') that
        per-export %url blocks will later be appended to."""
        common_conf = 'conf-nfs'
        result = ''
        with self.mgr.rados.open_ioctx(self.pool_name) as ioctx:
            if self.pool_ns:
                ioctx.set_namespace(self.pool_ns)
            ioctx.write_full(common_conf, result.encode('utf-8'))
            log.debug(
                "write configuration into rados object %s/%s/nfs-conf\n",
                self.pool_name, self.pool_ns)

    def create_nfs_cluster(self, size):
        """Create the backing pool if missing, enable the 'nfs' pool
        application on it and write the empty common config object.

        *size* is currently unused (TODO: forward to pool creation /
        orchestrator).  Returns a mgr-style (retval, stdout, stderr) tuple.
        """
        pool_list = [p['pool_name'] for p in self.mgr.get_osdmap().dump().get('pools', [])]

        if self.pool_name not in pool_list:
            r, out, err = create_pool(self.mgr, self.pool_name)
            if r != 0:
                return r, out, err
            log.info("%s", out)

        command = {'prefix': 'osd pool application enable', 'pool': self.pool_name, 'app': 'nfs'}
        r, out, err = self.mgr.mon_command(command)

        if r != 0:
            return r, out, err

        self.create_empty_rados_obj()
        # TODO Check if cluster exists
        # TODO Call Orchestrator to deploy cluster

        return 0, "", "NFS Cluster Created Successfully"

    def update_nfs_cluster(self, size):
        """Not yet implemented."""
        raise NotImplementedError()

    def delete_nfs_cluster(self):
        """Not yet implemented."""
        raise NotImplementedError()
import orchestrator
from .fs.volume import VolumeClient
-from .fs.nfs import NFSConfig
+from .fs.nfs import NFSConfig, NFSCluster, FSExport
class Module(orchestrator.OrchestratorClientMixin, MgrModule):
COMMANDS = [
},
{
'cmd': 'fs nfs export create '
- 'name=fs-name,type=CephString '
- 'name=read-only,type=CephBool,req=false '
- 'name=path,type=CephString,req=false '
- 'name=attach,type=CephString,req=false '
- 'name=binding,type=CephString,req=false ',
- 'desc': "Create cephfs export",
+ 'name=fsname,type=CephString '
+ 'name=binding,type=CephString '
+ 'name=readonly,type=CephBool,req=false '
+ 'name=path,type=CephString,req=false '
+ 'name=attach,type=CephString,req=false ',
+ 'desc': "Create a cephfs export",
'perm': 'rw'
},
{
'cmd': 'fs nfs export delete '
'name=export_id,type=CephInt,req=true ',
- 'desc': "Delete cephfs exports",
+ 'desc': "Delete a cephfs export",
'perm': 'rw'
},
{
'cmd': 'fs nfs cluster create '
'name=size,type=CephInt,req=false '
'name=cluster_id,type=CephString ',
- 'desc': "Creates NFS Cluster",
+ 'desc': "Create an NFS Cluster",
'perm': 'rw'
},
# volume ls [recursive]
super(Module, self).__init__(*args, **kwargs)
self.vc = VolumeClient(self)
self.nfs_obj = None # type: Optional[NFSConfig]
+ self.fs_export = FSExport(self)
    def __del__(self):
        # Tear down the VolumeClient's resources when the module object
        # is garbage-collected.
        self.vc.shutdown()
vol_name=cmd['vol_name'], clone_name=cmd['clone_name'], group_name=cmd.get('group_name', None))
def _cmd_fs_nfs_export_create(self, inbuf, cmd):
+ """
if self.nfs_obj and self.nfs_obj.check_fsal_valid():
self.nfs_obj.create_instance()
return self.nfs_obj.create_export()
+ """
+
+ return self.fs_export.create_export(fs_name=cmd['fsname'],
+ pseudo_path=cmd['binding'], read_only=cmd.get('readonly', False),
+ path=cmd.get('path', '/'), cluster_id=cmd.get('attach','None'))
def _cmd_fs_nfs_export_delete(self, inbuf, cmd):
if self.nfs_obj:
return self.nfs_obj.delete_export(cmd['export_id'])
def _cmd_fs_nfs_cluster_create(self, inbuf, cmd):
- self.nfs_obj = NFSConfig(self, cmd['cluster_id'])
- return self.nfs_obj.create_nfs_cluster(size=cmd.get('size', 1))
+ #self.nfs_obj = NFSConfig(self, cmd['cluster_id'])
+ #return self.nfs_obj.create_nfs_cluster(size=cmd.get('size', 1))
+ nfs_cluster_obj = NFSCluster(self, cmd['cluster_id'])
+ return nfs_cluster_obj.create_nfs_cluster(size=cmd.get('size', 1))