Handle the setup, starting, and clean-up of a Ceph cluster.
"""
+from copy import deepcopy
from io import BytesIO
from io import StringIO
    # If there are any MDSs, then create a filesystem for them to use
    # Do this last because requires mon cluster to be up and running
    if mdss.remotes:
-        log.info('Setting up CephFS filesystem...')
-
-        Filesystem(ctx, fs_config=config.get('cephfs', None), name='cephfs',
-                   create=True, ec_profile=config.get('cephfs_ec_profile', None))
+        log.info('Setting up CephFS filesystem(s)...')
+        cephfs_config = config.get('cephfs', {})
+        fs_configs = cephfs_config.pop('fs', [{'name': 'cephfs'}])
+        set_allow_multifs = len(fs_configs) > 1
+
+        for fs_config in fs_configs:
+            assert isinstance(fs_config, dict)
+            name = fs_config.pop('name')
+            temp = deepcopy(cephfs_config)
+            teuthology.deep_merge(temp, fs_config)
+            fs = Filesystem(ctx, fs_config=temp, name=name, create=True)
+            if set_allow_multifs:
+                fs.set_allow_multifs()
+                set_allow_multifs = False

    yield
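
Note: each per-filesystem dict is layered over a copy of the shared 'cephfs'
settings with teuthology.misc.deep_merge. A minimal sketch of the merge
behavior relied on above, assuming plain nested dicts (the real helper also
handles other types; the name below is illustrative)::

    def deep_merge_sketch(base, overrides):
        # Recursively copy 'overrides' entries into 'base', descending
        # into nested dicts so sibling keys in 'base' survive.
        for key, value in overrides.items():
            if isinstance(value, dict) and isinstance(base.get(key), dict):
                deep_merge_sketch(base[key], value)
            else:
                base[key] = value
        return base

    # Layering {'max_mds': 3} over a copy of {'max_mds': 2} gives that one
    # filesystem max_mds 3 while the others keep the shared value.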
            cephfs:
              max_mds: 2

+    To change the max_mds of a specific filesystem, use::
+
+        tasks:
+        - ceph:
+            cephfs:
+              max_mds: 2
+              fs:
+                - name: a
+                  max_mds: 3
+                - name: b
+
+    In the above example, filesystem 'a' will have 'max_mds' 3,
+    and filesystem 'b' will have 'max_mds' 2.
+
    To change the mdsmap's default session_timeout (60 seconds), use::

        tasks:
    This object is for driving a CephFS filesystem. The MDS daemons driven by
    MDSCluster may be shared with other Filesystems.
    """
-    def __init__(self, ctx, fs_config=None, fscid=None, name=None, create=False,
-                 ec_profile=None):
+    def __init__(self, ctx, fs_config=None, fscid=None, name=None, create=False):
        super(Filesystem, self).__init__(ctx)

        self.name = name
-        self.ec_profile = ec_profile
        self.id = None
        self.metadata_pool_name = None
        self.metadata_overlay = False
        self.data_pool_name = None
        self.data_pools = None
-        self.fs_config = fs_config
+        self.fs_config = fs_config if fs_config is not None else {}
+        self.ec_profile = self.fs_config.get('cephfs_ec_profile')

        client_list = list(misc.all_roles_of_type(self._ctx.cluster, 'client'))
        self.client_id = client_list[0]
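
Note: with ec_profile folded into fs_config, an erasure-code profile now
rides along with the other per-filesystem options. A hypothetical call site
(the profile values shown are illustrative, not required)::

    fs = Filesystem(
        ctx,
        fs_config={
            'max_mds': 2,
            # illustrative erasure-code profile entries
            'cephfs_ec_profile': ['m=2', 'k=2', 'crush-failure-domain=osd'],
        },
        name='cephfs',
        create=True,
    )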
    config['cluster'] = 'ceph'
    for fs in status.get_filesystems():
-        thrasher = MDSThrasher(ctx, manager, config, Filesystem(ctx, fs['id']), fs['mdsmap']['max_mds'])
+        thrasher = MDSThrasher(ctx, manager, config, Filesystem(ctx, fscid=fs['id']), fs['mdsmap']['max_mds'])
        thrasher.start()
        ctx.ceph[config['cluster']].thrashers.append(thrasher)
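
Note: since ec_profile was removed, fs_config is now the second positional
parameter of Filesystem.__init__, so the id must be passed by keyword::

    Filesystem(ctx, fs['id'])         # would now bind fs['id'] to fs_config
    Filesystem(ctx, fscid=fs['id'])   # binds the filesystem id as intended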
class LocalFilesystem(Filesystem, LocalMDSCluster):
-    def __init__(self, ctx, fscid=None, name=None, create=False, ec_profile=None):
+    def __init__(self, ctx, fs_config=None, fscid=None, name=None, create=False):
        # Deliberately skip calling parent constructor
        self._ctx = ctx

        self.id = None
        self.name = name
-        self.ec_profile = ec_profile
        self.metadata_pool_name = None
        self.metadata_overlay = False
        self.data_pool_name = None
        self.data_pools = None
-        self.fs_config = None
+        self.fs_config = fs_config if fs_config is not None else {}
+        self.ec_profile = self.fs_config.get('cephfs_ec_profile')

        # Hack: cheeky inspection of ceph.conf to see what MDSs exist
        self.mds_ids = set()
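
Note: both constructors guard the fs_config default with an is-None check
rather than using a mutable default argument, which Python evaluates once
and shares across every call. A minimal sketch of that pitfall::

    def bad(cfg={}):
        cfg['seen'] = True     # mutates the single shared default dict
        return cfg

    assert bad() is bad()      # every default call returns the same object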