import uuid
import yaml
+from copy import deepcopy
from io import BytesIO, StringIO
from tarfile import ReadError
from tasks.ceph_manager import CephManager
from teuthology.orchestra.daemon import DaemonGroup
from teuthology.config import config as teuth_config
from textwrap import dedent
+from tasks.cephfs.filesystem import MDSCluster, Filesystem
# these items we use from ceph.py should probably eventually move elsewhere
from tasks.ceph import get_mons, healthy
    yield
+@contextlib.contextmanager
+def cephfs_setup(ctx, config):
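+    """
+    Create CephFS filesystem(s) for any MDS roles in the job.
+
+    A rough sketch of the 'cephfs' sub-config this parses ('fs' defaults
+    to a single filesystem named 'cephfs'; any sibling keys are
+    deep-merged into each entry's fs_config, with per-entry values
+    taking precedence)::
+
+        cephfs:
+          fs:
+            - name: a
+            - name: b
+    """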
+    mdss = list(teuthology.all_roles_of_type(ctx.cluster, 'mds'))
+
+    # If there are any MDSs, then create a filesystem for them to use.
+    # Do this last because it requires the mon cluster to be up and running
+    if len(mdss) > 0:
+        log.info('Setting up CephFS filesystem(s)...')
+        cephfs_config = config.get('cephfs', {})
+        fs_configs = cephfs_config.pop('fs', [{'name': 'cephfs'}])
+        set_allow_multifs = len(fs_configs) > 1
+
+        # wait for standbys to become available (can be slow, e.g. under valgrind)
+        mdsc = MDSCluster(ctx)
+        with contextutil.safe_while(sleep=2, tries=150) as proceed:
+            while proceed():
+                if len(mdsc.get_standby_daemons()) >= len(mdss):
+                    break
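+        # safe_while raises MaxWhileTries if the standbys never all
+        # appear, so a broken MDS bring-up fails here instead of hanging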
+
+        fss = []
+        for fs_config in fs_configs:
+            assert isinstance(fs_config, dict)
+            name = fs_config.pop('name')
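+            # overlay this filesystem's own settings on a copy of the
+            # shared 'cephfs' config; per-fs values take precedence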
+            temp = deepcopy(cephfs_config)
+            teuthology.deep_merge(temp, fs_config)
+            fs = Filesystem(ctx, fs_config=temp, name=name, create=True)
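+            # multiple filesystems require the cluster-wide multi-fs
+            # flag; setting it once is enough, so clear the marker after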
+            if set_allow_multifs:
+                fs.set_allow_multifs()
+                set_allow_multifs = False
+            fss.append(fs)
+
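+        # hand control to the nested tasks; tear the filesystems down
+        # once they have finished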
+        yield
+
+        for fs in fss:
+            fs.destroy()
+    else:
+        yield
@contextlib.contextmanager
def ceph_monitoring(daemon_type, ctx, config):
                lambda: ceph_mgrs(ctx=ctx, config=config),
                lambda: ceph_osds(ctx=ctx, config=config),
                lambda: ceph_mdss(ctx=ctx, config=config),
+                lambda: cephfs_setup(ctx=ctx, config=config),
                lambda: ceph_rgw(ctx=ctx, config=config),
                lambda: ceph_iscsi(ctx=ctx, config=config),
                lambda: ceph_monitoring('prometheus', ctx=ctx, config=config),