continue
if id_ not in all_mounts:
- fuse_mount = FuseMount(ctx, client_config, testdir, auth_id, remote, brxnet)
+ fuse_mount = FuseMount(ctx=ctx, client_config=client_config,
+ test_dir=testdir, client_id=auth_id,
+ client_remote=remote, brxnet=brxnet)
all_mounts[id_] = fuse_mount
else:
# Catch bad configs where someone has e.g. tried to use ceph-fuse and kcephfs for the same client
log.info('Mounting ceph-fuse clients...')
for info in mounted_by_me.values():
config = info["config"]
- mount_path = config.get("mount_path")
- mountpoint = config.get("mountpoint")
- info["mount"].mount(mountpoint=mountpoint, mount_path=mount_path)
+ mount_x = info['mount']
+ if config.get("mount_path"):
+ mount_x.cephfs_mntpt = config.get("mount_path")
+ if config.get("mountpoint"):
+ mount_x.hostfs_mntpt = config.get("mountpoint")
+ mount_x.mount()
for info in mounted_by_me.values():
info["mount"].wait_until_mounted()
self.getinfo(refresh = True)
-
def check_pool_application(self, pool_name):
osd_map = self.mon_manager.get_osd_dump_json()
for pool in osd_map['pools']:
-from io import StringIO
import json
import time
import logging
+from io import StringIO
from textwrap import dedent
from teuthology import misc
log = logging.getLogger(__name__)
+# Refer mount.py for docstrings.
class FuseMount(CephFSMount):
def __init__(self, ctx, client_config, test_dir, client_id,
             client_remote, client_keyring_path=None, cephfs_name=None,
             cephfs_mntpt=None, hostfs_mntpt=None, brxnet=None):
    """
    Initialize a ceph-fuse mount object.

    All arguments besides client_config are forwarded to
    CephFSMount.__init__(); client_config is kept locally and defaults
    to an empty dict.
    """
    super(FuseMount, self).__init__(ctx=ctx, test_dir=test_dir,
        client_id=client_id, client_remote=client_remote,
        client_keyring_path=client_keyring_path,
        hostfs_mntpt=hostfs_mntpt, cephfs_name=cephfs_name,
        cephfs_mntpt=cephfs_mntpt, brxnet=brxnet)

    self.client_config = client_config or {}
    # Populated once the ceph-fuse process is launched.
    self.fuse_daemon = None
    self.inst = None
    self.addr = None
def mount(self, mntopts=[], createfs=True, **kwargs):
    """
    Mount CephFS with ceph-fuse.

    kwargs expects its members to be same as the arguments accepted by
    self.update_attrs().
    """
    self.update_attrs(**kwargs)
    self.assert_and_log_minimum_mount_details()

    self.setup_netns()

    if createfs:
        # TODO: don't call setupfs() from within mount(), since it's
        # absurd. The proper order should be: create FS first and then
        # call mount().
        self.setupfs(name=self.cephfs_name)

    try:
        return self._mount(mntopts)
    except RuntimeError:
        # Catch exceptions by the mount() logic (i.e. not remote command
        # failures) and ensure the mount is not left half-up.
        self.umount_wait(force=True)
        raise
- def _mount(self, mount_path, mount_fs_name, mount_options):
- log.info("Client client.%s config is %s" % (self.client_id, self.client_config))
+ def _mount(self, mntopts):
+ log.info("Client client.%s config is %s" % (self.client_id,
+ self.client_config))
daemon_signal = 'kill'
- if self.client_config.get('coverage') or self.client_config.get('valgrind') is not None:
+ if self.client_config.get('coverage') or \
+ self.client_config.get('valgrind') is not None:
daemon_signal = 'term'
- log.info('Mounting ceph-fuse client.{id} at {remote} {mnt}...'.format(
- id=self.client_id, remote=self.client_remote, mnt=self.mountpoint))
-
- self.client_remote.run(args=['mkdir', '-p', self.mountpoint],
- timeout=(15*60), cwd=self.test_dir)
# Create the mountpoint directory, tolerating only an already-existing
# directory; any other mkdir failure is re-raised.
stderr = StringIO()
try:
    self.client_remote.run(args=['mkdir', '-p', self.hostfs_mntpt],
                           timeout=(15*60), cwd=self.test_dir,
                           # BUG FIX: capture stderr into the same buffer
                           # the handler below inspects. Previously a
                           # fresh StringIO() was passed here, so
                           # stderr.getvalue() was always empty and the
                           # "file exists" check could never match.
                           stderr=stderr)
except CommandFailedError:
    if 'file exists' not in stderr.getvalue().lower():
        raise
run_cmd = [
'sudo',
]
fuse_cmd = ['ceph-fuse', "-f"]
-
- if mount_path is not None:
- fuse_cmd += ["--client_mountpoint={0}".format(mount_path)]
-
- if mount_fs_name is not None:
- fuse_cmd += ["--client_fs={0}".format(mount_fs_name)]
-
- fuse_cmd += mount_options
-
- fuse_cmd += [
- '--name', 'client.{id}'.format(id=self.client_id),
- # TODO ceph-fuse doesn't understand dash dash '--',
- self.mountpoint,
- ]
+ if self.client_id is not None:
+ fuse_cmd += ['--id', self.client_id]
+ if self.client_keyring_path and self.client_id is not None:
+ fuse_cmd += ['-k', self.client_keyring_path]
+ if self.cephfs_mntpt is not None:
+ fuse_cmd += ["--client_mountpoint=" + self.cephfs_mntpt]
+ if self.cephfs_name is not None:
+ fuse_cmd += ["--client_fs=" + self.cephfs_name]
+ if mntopts:
+ fuse_cmd += mntopts
+ fuse_cmd.append(self.hostfs_mntpt)
cwd = self.test_dir
if self.client_config.get('valgrind') is not None:
'--file-system',
'--printf=%T\n',
'--',
- self.mountpoint,
+ self.hostfs_mntpt,
],
cwd=self.test_dir,
stdout=StringIO(),
if ("endpoint is not connected" in error
or "Software caused connection abort" in error):
# This happens if fuse is killed without unmount
- log.warning("Found stale moutn point at {0}".format(self.mountpoint))
+ log.warning("Found stale mount point at {0}".format(self.hostfs_mntpt))
return True
else:
# This happens if the mount directory doesn't exist
- log.info('mount point does not exist: %s', self.mountpoint)
+ log.info('mount point does not exist: %s', self.hostfs_mntpt)
return False
fstype = proc.stdout.getvalue().rstrip('\n')
if fstype == 'fuseblk':
- log.info('ceph-fuse is mounted on %s', self.mountpoint)
+ log.info('ceph-fuse is mounted on %s', self.hostfs_mntpt)
return True
else:
log.debug('ceph-fuse not mounted, got fs type {fstype!r}'.format(
try:
stderr = StringIO()
self.client_remote.run(args=['sudo', 'chmod', '1777',
- self.mountpoint],
+ self.hostfs_mntpt],
timeout=(15*60), cwd=self.test_dir,
stderr=stderr, omit_sudo=False)
break
raise
def _mountpoint_exists(self):
- return self.client_remote.run(args=["ls", "-d", self.mountpoint], check_status=False, cwd=self.test_dir, timeout=(15*60)).exitstatus == 0
+ return self.client_remote.run(args=["ls", "-d", self.hostfs_mntpt], check_status=False, cwd=self.test_dir, timeout=(15*60)).exitstatus == 0
def umount(self, cleanup=True):
"""
log.info('Running fusermount -u on {name}...'.format(name=self.client_remote.name))
stderr = StringIO()
self.client_remote.run(args=['sudo', 'fusermount', '-u',
- self.mountpoint],
+ self.hostfs_mntpt],
cwd=self.test_dir, stderr=stderr,
timeout=(30*60), omit_sudo=False)
except run.CommandFailedError:
# make sure its unmounted
try:
self.client_remote.run(args=['sudo', 'umount', '-l', '-f',
- self.mountpoint],
+ self.hostfs_mntpt],
stderr=stderr, timeout=(60*15), omit_sudo=False)
- except CommandFailedError:
- if self.is_mounted():
+ except CommandFailedError:
+ if self.is_mounted():
raise
self.mounted = False
:param force: Complete cleanly even if the MDS is offline
"""
if not (self.is_mounted() and self.fuse_daemon):
- log.debug('ceph-fuse client.{id} is not mounted at {remote} {mnt}'.format(id=self.client_id,
- remote=self.client_remote,
- mnt=self.mountpoint))
+ log.debug('ceph-fuse client.{id} is not mounted at {remote} '
+ '{mnt}'.format(id=self.client_id,
+ remote=self.client_remote,
+ mnt=self.hostfs_mntpt))
self.cleanup()
return
args=[
'rm',
'-rf',
- self.mountpoint,
+ self.hostfs_mntpt,
],
cwd=self.test_dir,
timeout=(60*5)
import json
import logging
+
+from io import StringIO
from textwrap import dedent
+
from teuthology.orchestra.run import CommandFailedError
from teuthology.orchestra import run
from teuthology.contextutil import MaxWhileTries
+
from tasks.cephfs.mount import CephFSMount
log = logging.getLogger(__name__)
class KernelMount(CephFSMount):
def __init__(self, ctx, test_dir, client_id, client_remote,
             client_keyring_path=None, hostfs_mntpt=None,
             cephfs_name=None, cephfs_mntpt=None, brxnet=None):
    """
    Initialize a kernel-client mount object; all arguments are passed
    straight through to CephFSMount.__init__().
    """
    super(KernelMount, self).__init__(ctx=ctx, test_dir=test_dir,
        client_id=client_id, client_remote=client_remote,
        client_keyring_path=client_keyring_path,
        hostfs_mntpt=hostfs_mntpt, cephfs_name=cephfs_name,
        cephfs_mntpt=cephfs_mntpt, brxnet=brxnet)
+ def mount(self, mntopts=[], createfs=True, **kwargs):
+ self.update_attrs(**kwargs)
+ self.assert_and_log_minimum_mount_details()
- def mount(self, mount_path=None, mount_fs_name=None, mountpoint=None, mount_options=[]):
- if mountpoint is not None:
- self.mountpoint = mountpoint
- self.setupfs(name=mount_fs_name)
self.setup_netns()
- log.info('Mounting kclient client.{id} at {remote} {mnt}...'.format(
- id=self.client_id, remote=self.client_remote, mnt=self.mountpoint))
+ # TODO: don't call setupfs() from within mount(), since it's
+ # absurd. The proper order should be: create FS first and then
+ # call mount().
+ if createfs:
+ self.setupfs(name=self.cephfs_name)
+ if not self.cephfs_mntpt:
+ self.cephfs_mntpt = '/'
- self.client_remote.run(args=['mkdir', '-p', self.mountpoint],
- timeout=(5*60))
-
- if mount_path is None:
- mount_path = "/"
+ stderr = StringIO()
+ try:
+ self.client_remote.run(args=['mkdir', '-p', self.hostfs_mntpt],
+ timeout=(5*60), stderr=stderr)
+ except CommandFailedError:
+ if 'file exists' not in stderr.getvalue().lower():
+ raise
# Build the comma-separated option string for mount.ceph.
opts = 'name=' + self.client_id
if self.client_keyring_path and self.client_id is not None:
    # BUG FIX: options are comma separated; without the leading comma
    # this concatenated into "name=<id>secret=<key>", an invalid
    # option string that the kernel client rejects.
    opts += ',secret=' + self.get_key_from_keyfile()
opts += ',norequire_active_mds,conf=' + self.config_path
if self.cephfs_name is not None:
    opts += ",mds_namespace={0}".format(self.cephfs_name)
if mntopts:
    opts += ',' + ','.join(mntopts)
self.client_remote.run(
args=[
'/bin/mount',
'-t',
'ceph',
- ':{mount_path}'.format(mount_path=mount_path),
- self.mountpoint,
+ ':' + self.cephfs_mntpt,
+ self.hostfs_mntpt,
'-v',
'-o',
opts
)
self.client_remote.run(
- args=['sudo', 'chmod', '1777', self.mountpoint], timeout=(5*60))
+ args=['sudo', 'chmod', '1777', self.hostfs_mntpt], timeout=(5*60))
self.mounted = True
log.debug('Unmounting client client.{id}...'.format(id=self.client_id))
try:
- cmd=['sudo', 'umount', self.mountpoint]
+ cmd=['sudo', 'umount', self.hostfs_mntpt]
if force:
cmd.append('-f')
self.client_remote.run(args=cmd, timeout=(15*60), omit_sudo=False)
-from contextlib import contextmanager
import json
import logging
import datetime
+import os
+import re
import time
+
from io import StringIO
+from contextlib import contextmanager
from textwrap import dedent
-import os
-import re
from IPy import IP
+
+from teuthology.misc import get_file
from teuthology.orchestra import run
from teuthology.orchestra.run import CommandFailedError, ConnectionLostError, Raw
+
from tasks.cephfs.filesystem import Filesystem
log = logging.getLogger(__name__)
class CephFSMount(object):
def __init__(self, ctx, test_dir, client_id, client_remote,
             client_keyring_path=None, hostfs_mntpt=None,
             cephfs_name=None, cephfs_mntpt=None, brxnet=None):
    """
    :param test_dir: Global teuthology test dir
    :param client_id: Client ID, the 'foo' in client.foo
    :param client_keyring_path: path to keyring for given client_id
    :param client_remote: Remote instance for the host where client will
                          run
    :param hostfs_mntpt: Path to directory on the FS on which Ceph FS will
                         be mounted
    :param cephfs_name: Name of Ceph FS to be mounted
    :param cephfs_mntpt: Path to directory inside Ceph FS that will be
                         mounted as root
    """
    self.mounted = False
    self.ctx = ctx
    self.test_dir = test_dir

    self._verify_attrs(client_id=client_id,
                       client_keyring_path=client_keyring_path,
                       hostfs_mntpt=hostfs_mntpt, cephfs_name=cephfs_name,
                       cephfs_mntpt=cephfs_mntpt)

    self.client_id = client_id
    self.client_keyring_path = client_keyring_path
    self.client_remote = client_remote
    if hostfs_mntpt:
        self.hostfs_mntpt = hostfs_mntpt
    else:
        self.hostfs_mntpt = os.path.join(self.test_dir,
                                         f'mnt.{self.client_id}')
    # BUG FIX: always set hostfs_mntpt_dirname. It was previously set
    # only when hostfs_mntpt was passed in, so the mountpoint property
    # (which reads it when hostfs_mntpt is None) could raise
    # AttributeError.
    self.hostfs_mntpt_dirname = os.path.basename(self.hostfs_mntpt)
    self.cephfs_name = cephfs_name
    self.cephfs_mntpt = cephfs_mntpt

    self.fs = None

    self._netns_name = None
    self.nsid = -1
if brxnet is None:
@property
def mountpoint(self):
    """Legacy alias for hostfs_mntpt; lazily derives it if unset."""
    if self.hostfs_mntpt is None:
        self.hostfs_mntpt = os.path.join(self.test_dir,
                                         self.hostfs_mntpt_dirname)
    return self.hostfs_mntpt

@mountpoint.setter
def mountpoint(self, path):
    """Set the host-side mountpoint, keeping both aliases in sync."""
    if not isinstance(path, str):
        raise RuntimeError('path should be of str type.')
    self._mountpoint = self.hostfs_mntpt = path
@property
def netns_name(self):
def netns_name(self, name):
self._netns_name = name
def assert_and_log_minimum_mount_details(self):
    """
    Make sure we have minimum details required for mounting. Ideally, this
    method should be called at the beginning of the mount method.
    """
    have_minimum = (self.client_id and self.client_remote and
                    self.hostfs_mntpt)
    if not have_minimum:
        errmsg = ('Mounting CephFS requires that at least following '
                  'details to be provided -\n'
                  '1. the client ID,\n2. the mountpoint and\n'
                  '3. the remote machine where CephFS will be mounted.\n')
        raise RuntimeError(errmsg)

    log.info('Mounting Ceph FS. Following are details of mount; remember '
             '"None" represents Python type None -')
    log.info(f'self.client_remote.hostname = {self.client_remote.hostname}')
    log.info(f'self.client.name = client.{self.client_id}')
    log.info(f'self.hostfs_mntpt = {self.hostfs_mntpt}')
    log.info(f'self.cephfs_name = {self.cephfs_name}')
    log.info(f'self.cephfs_mntpt = {self.cephfs_mntpt}')
    log.info(f'self.client_keyring_path = {self.client_keyring_path}')
    if self.client_keyring_path:
        log.info('keyring content -\n' +
                 get_file(self.client_remote, self.client_keyring_path,
                          sudo=True).decode())
+
def is_mounted(self):
    """Report the cached mount state flag."""
    return self.mounted
args = ['sudo', 'ip', 'link', 'set', 'brx.{0}'.format(self.nsid), 'up']
self.client_remote.run(args=args, timeout=(5*60), omit_sudo=False)
def mount(self, mntopts=[], createfs=True, **kwargs):
    """
    Abstract mount entry point implemented by subclasses.

    kwargs expects its members to be same as the arguments accepted by
    self.update_attrs().
    """
    raise NotImplementedError()
def mount_wait(self, **kwargs):
    """
    Accepts arguments same as self.mount(); mounts and then blocks
    until the mount is reported complete.
    """
    self.mount(**kwargs)
    self.wait_until_mounted()
def umount(self):
    """Abstract unmount entry point implemented by subclasses."""
    raise NotImplementedError()
def umount_wait(self, force=False, require_clean=False, timeout=None):
    """
    Abstract; implemented by subclasses.

    :param force: Expect that the mount will not shutdown cleanly: kill
    :param require_clean: Wait for the Ceph client associated with the
                          mount (e.g. ceph-fuse) to terminate, and
                          raise if it doesn't do so cleanly.
    :param timeout: amount of time to be waited for umount command to
                    finish
    :return:
    """
    raise NotImplementedError()
+ def _verify_attrs(self, **kwargs):
+ """
+ Verify that client_id, client_keyring_path, client_remote, hostfs_mntpt,
+ cephfs_name, cephfs_mntpt are either type str or None.
+ """
+ for k, v in kwargs.items():
+ if v is not None and not isinstance(v, str):
+ raise RuntimeError('value of attributes should be either str '
+ f'or None. {k} - {v}')
+
def update_attrs(self, client_id=None, client_keyring_path=None,
                 client_remote=None, hostfs_mntpt=None, cephfs_name=None,
                 cephfs_mntpt=None):
    """Update mount attributes in place; None/falsy values are ignored."""
    str_args = {'client_id': client_id,
                'client_keyring_path': client_keyring_path,
                'hostfs_mntpt': hostfs_mntpt,
                'cephfs_name': cephfs_name,
                'cephfs_mntpt': cephfs_mntpt}
    # Nothing to do when no new value was supplied.
    if not client_remote and not any(str_args.values()):
        return

    self._verify_attrs(client_id=client_id,
                       client_keyring_path=client_keyring_path,
                       hostfs_mntpt=hostfs_mntpt, cephfs_name=cephfs_name,
                       cephfs_mntpt=cephfs_mntpt)

    for attr, val in str_args.items():
        if val:
            setattr(self, attr, val)
    if client_remote:
        self.client_remote = client_remote
+
def remount(self, **kwargs):
    """
    Update mount object's attributes and attempt remount with these
    new values for these attributes.

    1. Run umount_wait().
    2. Run update_attrs().
    3. Run mount().

    Accepts arguments of self.mount() and self.update_attrs() with 2
    exceptions -
    1. Accepts wait too which can be True or False.
    2. The default value of createfs is False.
    """
    self.umount_wait()
    assert not self.mounted

    mntopts = kwargs.pop('mntopts', [])
    createfs = kwargs.pop('createfs', False)
    wait = kwargs.pop('wait', True)

    self.update_attrs(**kwargs)

    retval = self.mount(mntopts=mntopts, createfs=createfs)
    # The mount command may have failed with the failure silenced by
    # check_status; wait only when mount() reported success (None).
    if wait and retval is None:
        self.wait_until_mounted()

    return retval
+
def kill(self):
"""
Suspend the netns veth interface to make the client disconnected
"""
stderr = StringIO()
try:
- self.client_remote.run(
- args=[
- 'rmdir',
- '--',
- self.mountpoint,
- ],
- cwd=self.test_dir,
- stderr=stderr,
- timeout=(60*5),
- check_status=False,
- )
+ self.client_remote.run(args=['rmdir', '--', self.mountpoint],
+ cwd=self.test_dir, stderr=stderr,
+ timeout=(60*5), check_status=False)
except CommandFailedError:
- if "No such file or directory" in stderr.getvalue():
- pass
- else:
+ if "no such file or directory" not in stderr.getvalue().lower():
raise
self.cleanup_netns()
def get_keyring_path(self):
    """Return the conventional path of this client's keyring file."""
    return '/etc/ceph/ceph.client.{id}.keyring'.format(id=self.client_id)
def get_key_from_keyfile(self):
    """
    Return the secret from this client's keyring file on the remote.

    :raises RuntimeError: when the keyring file contains no "key" entry.
    """
    # XXX: don't call run_shell(), since CephFS might be unmounted.
    keyring = self.client_remote.run(
        args=['sudo', 'cat', self.client_keyring_path], stdout=StringIO(),
        omit_sudo=False).stdout.getvalue()
    for line in keyring.split('\n'):
        if line.find('key') != -1:
            return line[line.find('=') + 1:].strip()
    # BUG FIX: previously fell through and implicitly returned None,
    # which callers concatenate into mount options ("secret=None") or
    # crash on; fail loudly instead.
    raise RuntimeError('no key found in keyring file '
                       f'{self.client_keyring_path}')
+
@property
def config_path(self):
"""
if os.path.isabs(dirname):
path = os.path.join(dirname, filename)
else:
- path = os.path.join(self.mountpoint, dirname, filename)
+ path = os.path.join(self.hostfs_mntpt, dirname, filename)
else:
- path = os.path.join(self.mountpoint, filename)
+ path = os.path.join(self.hostfs_mntpt, filename)
else:
path = filename
for suffix in self.test_files:
log.info("Creating file {0}".format(suffix))
self.client_remote.run(args=[
- 'sudo', 'touch', os.path.join(self.mountpoint, suffix)
+ 'sudo', 'touch', os.path.join(self.hostfs_mntpt, suffix)
])
def test_create_file(self, filename='testfile', dirname=None, user=None,
for suffix in self.test_files:
log.info("Checking file {0}".format(suffix))
r = self.client_remote.run(args=[
- 'sudo', 'ls', os.path.join(self.mountpoint, suffix)
+ 'sudo', 'ls', os.path.join(self.hostfs_mntpt, suffix)
], check_status=False)
if r.exitstatus != 0:
raise RuntimeError("Expected file {0} not found".format(suffix))
filename = "{0} {1}".format(datetime.datetime.now(), self.client_id)
log.debug("Creating test file {0}".format(filename))
self.client_remote.run(args=[
- 'sudo', 'touch', os.path.join(self.mountpoint, filename)
+ 'sudo', 'touch', os.path.join(self.hostfs_mntpt, filename)
])
log.debug("Deleting test file {0}".format(filename))
self.client_remote.run(args=[
- 'sudo', 'rm', '-f', os.path.join(self.mountpoint, filename)
+ 'sudo', 'rm', '-f', os.path.join(self.hostfs_mntpt, filename)
])
def _run_python(self, pyscript, py_version='python3'):
"""
assert(self.is_mounted())
- path = os.path.join(self.mountpoint, basename)
+ path = os.path.join(self.hostfs_mntpt, basename)
p = self._run_python(dedent(
"""
"""
assert(self.is_mounted())
- path = os.path.join(self.mountpoint, basename)
+ path = os.path.join(self.hostfs_mntpt, basename)
if write:
pyscript = dedent("""
def wait_for_dir_empty(self, dirname, timeout=30):
i = 0
- dirpath = os.path.join(self.mountpoint, dirname)
+ dirpath = os.path.join(self.hostfs_mntpt, dirname)
while i < timeout:
nr_entries = int(self.getfattr(dirpath, "ceph.dir.entries"))
if nr_entries == 0:
i = 0
while i < timeout:
r = self.client_remote.run(args=[
- 'sudo', 'ls', os.path.join(self.mountpoint, basename)
+ 'sudo', 'ls', os.path.join(self.hostfs_mntpt, basename)
], check_status=False)
if r.exitstatus == 0:
log.debug("File {0} became visible from {1} after {2}s".format(
"""
assert(self.is_mounted())
- path = os.path.join(self.mountpoint, basename)
+ path = os.path.join(self.hostfs_mntpt, basename)
script_builder = """
import time
def lock_and_release(self, basename="background_file"):
assert(self.is_mounted())
- path = os.path.join(self.mountpoint, basename)
+ path = os.path.join(self.hostfs_mntpt, basename)
script = """
import time
def check_filelock(self, basename="background_file", do_flock=True):
assert(self.is_mounted())
- path = os.path.join(self.mountpoint, basename)
+ path = os.path.join(self.hostfs_mntpt, basename)
script_builder = """
import fcntl
"""
assert(self.is_mounted())
- path = os.path.join(self.mountpoint, basename)
+ path = os.path.join(self.hostfs_mntpt, basename)
pyscript = dedent("""
import os
val = zlib.crc32(str(i).encode('utf-8')) & 7
f.write(chr(val))
""".format(
- path=os.path.join(self.mountpoint, filename),
+ path=os.path.join(self.hostfs_mntpt, filename),
size=size
)))
if b != chr(val):
raise RuntimeError("Bad data at offset {{0}}".format(i))
""".format(
- path=os.path.join(self.mountpoint, filename),
+ path=os.path.join(self.hostfs_mntpt, filename),
size=size
)))
"""
assert(self.is_mounted())
- abs_path = os.path.join(self.mountpoint, fs_path)
+ abs_path = os.path.join(self.hostfs_mntpt, fs_path)
pyscript = dedent("""
import sys
def create_n_files(self, fs_path, count, sync=False):
assert(self.is_mounted())
- abs_path = os.path.join(self.mountpoint, fs_path)
+ abs_path = os.path.join(self.hostfs_mntpt, fs_path)
pyscript = dedent("""
import sys
Raises exception on absent file.
"""
- abs_path = os.path.join(self.mountpoint, fs_path)
+ abs_path = os.path.join(self.hostfs_mntpt, fs_path)
if follow_symlinks:
stat_call = "os.stat('" + abs_path + "')"
else:
:param fs_path:
:return:
"""
- abs_path = os.path.join(self.mountpoint, fs_path)
+ abs_path = os.path.join(self.hostfs_mntpt, fs_path)
pyscript = dedent("""
import sys
import errno
proc.wait()
def path_to_ino(self, fs_path, follow_symlinks=True):
- abs_path = os.path.join(self.mountpoint, fs_path)
+ abs_path = os.path.join(self.hostfs_mntpt, fs_path)
if follow_symlinks:
pyscript = dedent("""
return int(proc.stdout.getvalue().strip())
def path_to_nlink(self, fs_path):
- abs_path = os.path.join(self.mountpoint, fs_path)
+ abs_path = os.path.join(self.hostfs_mntpt, fs_path)
pyscript = dedent("""
import os
self.mount_a.umount_wait()
if isinstance(self.mount_a, FuseMount):
- self.mount_a.mount(mount_options=['--client_reconnect_stale=1', '--fuse_disable_pagecache=1'])
+ self.mount_a.mount(mntopts=['--client_reconnect_stale=1', '--fuse_disable_pagecache=1'])
else:
try:
- self.mount_a.mount(mount_options=['recover_session=clean'])
+ self.mount_a.mount(mntopts=['recover_session=clean'])
except CommandFailedError:
self.mount_a.kill_cleanup()
self.skipTest("Not implemented in current kernel")
fs_a, fs_b = self._setup_two()
# Mount a client on fs_a
- self.mount_a.mount(mount_fs_name=fs_a.name)
+ self.mount_a.mount(cephfs_name=fs_a.name)
self.mount_a.write_n_mb("pad.bin", 1)
self.mount_a.write_n_mb("test.bin", 2)
a_created_ino = self.mount_a.path_to_ino("test.bin")
self.mount_a.create_files()
# Mount a client on fs_b
- self.mount_b.mount(mount_fs_name=fs_b.name)
+ self.mount_b.mount(cephfs_name=fs_b.name)
self.mount_b.write_n_mb("test.bin", 1)
b_created_ino = self.mount_b.path_to_ino("test.bin")
self.mount_b.create_files()
self.mount_a.setfattr("./subdir", "ceph.quota.max_bytes",
"%s" % size_before)
- self.mount_b.mount_wait(mount_path="/subdir")
+ self.mount_b.mount_wait(cephfs_mntpt="/subdir")
self.assertDictEqual(
self.mount_b.df(),
# Mount a client
self.mount_a.mount_wait()
- self.mount_b.mount_wait(mount_fs_name=recovery_fs)
+ self.mount_b.mount_wait(cephfs_name=recovery_fs)
# See that the files are present and correct
errors = workload.validate()
# Configure a client that is limited to /foo/bar
self._configure_auth(self.mount_b, "badguy", "allow rw path=/foo/bar")
# Check he can mount that dir and do IO
- self.mount_b.mount_wait(mount_path="/foo/bar")
+ self.mount_b.mount_wait(cephfs_mntpt="/foo/bar")
self.mount_b.create_destroy()
self.mount_b.umount_wait()
# Try to mount the client, see that it fails
with self.assert_cluster_log("client session with non-allowable root '/baz' denied"):
with self.assertRaises(CommandFailedError):
- self.mount_b.mount_wait(mount_path="/foo/bar")
+ self.mount_b.mount_wait(cephfs_mntpt="/foo/bar")
def test_session_evict_blocklisted(self):
"""
create_script = dedent("""
import os
- mount_path = "{mount_path}"
+ mountpoint = "{mountpoint}"
subdir = "delete_me"
size = {size}
file_count = {file_count}
- os.mkdir(os.path.join(mount_path, subdir))
+ os.mkdir(os.path.join(mountpoint, subdir))
for i in range(0, file_count):
filename = "{{0}}_{{1}}.bin".format(i, size)
- with open(os.path.join(mount_path, subdir, filename), 'w') as f:
+ with open(os.path.join(mountpoint, subdir, filename), 'w') as f:
f.write(size * 'x')
""".format(
- mount_path=self.mount_a.mountpoint,
+ mountpoint=self.mount_a.mountpoint,
size=1024,
file_count=file_count
))
create_script = dedent("""
import os
- mount_path = "{mount_path}"
+ mountpoint = "{mountpoint}"
subdir = "delete_me"
size_unit = {size_unit}
file_multiplier = {file_multiplier}
- os.mkdir(os.path.join(mount_path, subdir))
+ os.mkdir(os.path.join(mountpoint, subdir))
for i in range(0, file_multiplier):
for size in range(0, {size_range}*size_unit, size_unit):
filename = "{{0}}_{{1}}.bin".format(i, size // size_unit)
- with open(os.path.join(mount_path, subdir, filename), 'w') as f:
+ with open(os.path.join(mountpoint, subdir, filename), 'w') as f:
f.write(size * 'x')
""".format(
- mount_path=self.mount_a.mountpoint,
+ mountpoint=self.mount_a.mountpoint,
size_unit=size_unit,
file_multiplier=file_multiplier,
size_range=self.throttle_workload_size_range
self.set_conf("client.{name}".format(name=id_name), "keyring", mount.get_keyring_path())
def _configure_guest_auth(self, volumeclient_mount, guest_mount,
- guest_entity, mount_path,
+ guest_entity, cephfs_mntpt,
namespace_prefix=None, readonly=False,
tenant_id=None):
"""
volumeclient.
:param guest_mount: mount used by the guest client.
:param guest_entity: auth ID used by the guest client.
- :param mount_path: path of the volume.
+ :param cephfs_mntpt: path of the volume.
:param namespace_prefix: name prefix of the RADOS namespace, which
is used for the volume's layout.
:param readonly: defaults to False. If set to 'True' only read-only
:param tenant_id: (OpenStack) tenant ID of the guest client.
"""
- head, volume_id = os.path.split(mount_path)
+ head, volume_id = os.path.split(cephfs_mntpt)
head, group_id = os.path.split(head)
head, volume_prefix = os.path.split(head)
volume_prefix = "/" + volume_prefix
# Create a 100MB volume
volume_size = 100
- mount_path = self._volume_client_python(self.mount_b, dedent("""
+ cephfs_mntpt = self._volume_client_python(self.mount_b, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
create_result = vc.create_volume(vp, 1024*1024*{volume_size})
print(create_result['mount_path'])
# Authorize and configure credentials for the guest to mount the
# the volume.
self._configure_guest_auth(self.mount_b, self.mounts[2], guest_entity,
- mount_path, namespace_prefix)
- self.mounts[2].mount_wait(mount_path=mount_path)
+ cephfs_mntpt, namespace_prefix)
+ self.mounts[2].mount_wait(cephfs_mntpt=cephfs_mntpt)
# The kernel client doesn't have the quota-based df behaviour,
# or quotas at all, so only exercise the client behaviour when
guest_entity = "guest"
group_id = "grpid"
- mount_paths = []
+ cephfs_mntpts = []
volume_ids = []
# Create two volumes. Authorize 'guest' auth ID to mount the two
for i in range(2):
# Create volume.
volume_ids.append("volid_{0}".format(str(i)))
- mount_paths.append(
+ cephfs_mntpts.append(
self._volume_client_python(volumeclient_mount, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
create_result = vc.create_volume(vp, 10 * 1024 * 1024)
# Authorize 'guest' auth ID to mount the volume.
self._configure_guest_auth(volumeclient_mount, guest_mounts[i],
- guest_entity, mount_paths[i])
+ guest_entity, cephfs_mntpts[i])
# Mount the volume.
guest_mounts[i].mountpoint_dir_name = 'mnt.{id}.{suffix}'.format(
id=guest_entity, suffix=str(i))
- guest_mounts[i].mount_wait(mount_path=mount_paths[i])
+ guest_mounts[i].mount_wait(cephfs_mntpt=cephfs_mntpts[i])
guest_mounts[i].write_n_mb("data.bin", 1)
volume_id = u"volid"
# Create
- mount_path = self._volume_client_python(self.mount_b, dedent("""
+ cephfs_mntpt = self._volume_client_python(self.mount_b, dedent("""
vp = VolumePath("{group_id}", u"{volume_id}")
create_result = vc.create_volume(vp, 10)
print(create_result['mount_path'])
)))
# Strip leading "/"
- mount_path = mount_path[1:]
+ cephfs_mntpt = cephfs_mntpt[1:]
# A file with non-ascii characters
- self.mount_a.run_shell(["touch", os.path.join(mount_path, u"b\u00F6b")])
+ self.mount_a.run_shell(["touch", os.path.join(cephfs_mntpt, u"b\u00F6b")])
# A file with no permissions to do anything
- self.mount_a.run_shell(["touch", os.path.join(mount_path, "noperms")])
- self.mount_a.run_shell(["chmod", "0000", os.path.join(mount_path, "noperms")])
+ self.mount_a.run_shell(["touch", os.path.join(cephfs_mntpt, "noperms")])
+ self.mount_a.run_shell(["chmod", "0000", os.path.join(cephfs_mntpt, "noperms")])
self._volume_client_python(self.mount_b, dedent("""
vp = VolumePath("{group_id}", u"{volume_id}")
volume_id = "volid"
# Create a volume.
- mount_path = self._volume_client_python(volumeclient_mount, dedent("""
+ cephfs_mntpt = self._volume_client_python(volumeclient_mount, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
create_result = vc.create_volume(vp, 1024*1024*10)
print(create_result['mount_path'])
# Authorize and configure credentials for the guest to mount the
# the volume with read-write access.
- self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
- mount_path, readonly=False)
+ self._configure_guest_auth(volumeclient_mount, guest_mount,
+ guest_entity, cephfs_mntpt, readonly=False)
# Mount the volume, and write to it.
- guest_mount.mount_wait(mount_path=mount_path)
+ guest_mount.mount_wait(cephfs_mntpt=cephfs_mntpt)
guest_mount.write_n_mb("data.bin", 1)
# Change the guest auth ID's authorization to read-only mount access.
guest_entity=guest_entity
)))
self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity,
- mount_path, readonly=True)
+ cephfs_mntpt, readonly=True)
# The effect of the change in access level to read-only is not
# immediate. The guest sees the change only after a remount of
# the volume.
guest_mount.umount_wait()
- guest_mount.mount_wait(mount_path=mount_path)
+ guest_mount.mount_wait(cephfs_mntpt=cephfs_mntpt)
# Read existing content of the volume.
self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"])
# Create a volume
group_id = "grpid"
volume_id = "volid"
- mount_path = self._volume_client_python(vc_mount, dedent("""
+ cephfs_mntpt = self._volume_client_python(vc_mount, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
create_result = vc.create_volume(vp, 1024*1024*10)
print(create_result['mount_path'])
guest_mount.umount_wait()
# Set auth caps for the auth ID using the volumeclient
- self._configure_guest_auth(vc_mount, guest_mount, guest_id, mount_path)
+ self._configure_guest_auth(vc_mount, guest_mount, guest_id, cephfs_mntpt)
# Mount the volume in the guest using the auth ID to assert that the
# auth caps are valid
- guest_mount.mount_wait(mount_path=mount_path)
+ guest_mount.mount_wait(cephfs_mntpt=cephfs_mntpt)
def test_volume_without_namespace_isolation(self):
"""
continue
kernel_mount = KernelMount(
- ctx,
- test_dir,
- id_,
- remote,
- ctx.teuthology_config.get('brxnet', None),
- )
+ ctx=ctx,
+ test_dir=test_dir,
+ client_id=id_,
+ client_remote=remote,
+ brxnet=ctx.teuthology_config.get('brxnet', None))
mounts[id_] = kernel_mount
from teuthology.orchestra.remote import Remote
from teuthology.config import config as teuth_config
from teuthology.contextutil import safe_while
+from teuthology.contextutil import MaxWhileTries
import logging
def init_log():
from tasks.cephfs.filesystem import Filesystem, MDSCluster, CephCluster
from tasks.cephfs.mount import CephFSMount
from tasks.mgr.mgr_test_case import MgrCluster
- from teuthology.contextutil import MaxWhileTries
from teuthology.task import interactive
except ImportError:
sys.stderr.write("***\nError importing packages, have you activated your teuthology virtualenv "
else:
raise
+
class LocalKernelMount(KernelMount):
- def __init__(self, ctx, test_dir, client_id, brxnet):
- super(LocalKernelMount, self).__init__(ctx, test_dir, client_id, LocalRemote(), brxnet)
+ def __init__(self, ctx, test_dir, client_id=None,
+ client_keyring_path=None, client_remote=None,
+ hostfs_mntpt=None, cephfs_name=None, cephfs_mntpt=None,
+ brxnet=None):
+ super(LocalKernelMount, self).__init__(ctx=ctx, test_dir=test_dir,
+ client_id=client_id, client_keyring_path=client_keyring_path,
+ client_remote=LocalRemote(), hostfs_mntpt=hostfs_mntpt,
+ cephfs_name=cephfs_name, cephfs_mntpt=cephfs_mntpt, brxnet=brxnet)
@property
def config_path(self):
path = "{0}/client.{1}.*.asok".format(d, self.client_id)
return path
- def mount(self, mount_path=None, mount_fs_name=None, mount_options=[], **kwargs):
- self.setupfs(name=mount_fs_name)
+ def mount(self, mntopts=[], createfs=True, **kwargs):
+ self.update_attrs(**kwargs)
+ self.assert_and_log_minimum_mount_details()
+
if opt_use_ns:
self.using_namespace = True
self.setup_netns()
else:
self.using_namespace = False
- log.info('Mounting kclient client.{id} at {remote} {mnt}...'.format(
- id=self.client_id, remote=self.client_remote, mnt=self.mountpoint))
-
- self.client_remote.run(
- args=[
- 'mkdir',
- '--',
- self.mountpoint,
- ],
- timeout=(5*60),
- )
-
- if mount_path is None:
- mount_path = "/"
-
- opts = 'name={id},norequire_active_mds,conf={conf}'.format(id=self.client_id,
- conf=self.config_path)
-
- if mount_fs_name is not None:
- opts += ",mds_namespace={0}".format(mount_fs_name)
-
- for mount_opt in mount_options:
- opts += ",{0}".format(mount_opt)
+ if not self.cephfs_mntpt:
+ self.cephfs_mntpt = "/"
+ # TODO: don't call setupfs() from within mount()
+ if createfs:
+ self.setupfs(name=self.cephfs_name)
+
+ opts = 'name=' + self.client_id
+ if self.client_keyring_path:
+ opts += ",secret=" + self.get_key_from_keyfile()
+ opts += ',norequire_active_mds,conf=' + self.config_path
+ if self.cephfs_name is not None:
+ opts += ",mds_namespace={0}".format(self.cephfs_name)
+ if mntopts:
+ opts += ',' + ','.join(mntopts)
+
+ stderr = StringIO()
+ try:
+ self.client_remote.run(args=['mkdir', '--', self.hostfs_mntpt],
+ timeout=(5*60), stderr=stderr)
+ except CommandFailedError:
+ if 'file exists' not in stderr.getvalue().lower():
+ raise
- mount_cmd_args = ['sudo']
+ cmdargs = ['sudo']
if self.using_namespace:
- mount_cmd_args += ['nsenter',
- '--net=/var/run/netns/{0}'.format(self.netns_name)]
- mount_cmd_args += ['./bin/mount.ceph',
- ':{mount_path}'.format(mount_path=mount_path),
- self.mountpoint, '-v', '-o', opts]
- self.client_remote.run(args=mount_cmd_args, timeout=(30*60),
- omit_sudo=False)
+ cmdargs += ['nsenter',
+ '--net=/var/run/netns/{0}'.format(self.netns_name)]
+ cmdargs += ['./bin/mount.ceph', ':' + self.cephfs_mntpt,
+ self.hostfs_mntpt, '-v', '-o', opts]
+ self.client_remote.run(args=cmdargs, timeout=(30*60), omit_sudo=False)
- self.client_remote.run(
- args=['sudo', 'chmod', '1777', self.mountpoint], timeout=(5*60))
+ self.client_remote.run(args=['sudo', 'chmod', '1777',
+ self.hostfs_mntpt], timeout=(5*60))
self.mounted = True
wait=False, stdout=StringIO())
class LocalFuseMount(FuseMount):
- def __init__(self, ctx, test_dir, client_id, brxnet):
- super(LocalFuseMount, self).__init__(ctx, None, test_dir, client_id, LocalRemote(), brxnet)
+ def __init__(self, ctx, test_dir, client_id, client_keyring_path=None,
+ client_remote=None, hostfs_mntpt=None, cephfs_name=None,
+ cephfs_mntpt=None, brxnet=None):
+ super(LocalFuseMount, self).__init__(ctx=ctx, client_config=None,
+ test_dir=test_dir, client_id=client_id,
+ client_keyring_path=client_keyring_path,
+ client_remote=LocalRemote(), hostfs_mntpt=hostfs_mntpt,
+ cephfs_name=cephfs_name, cephfs_mntpt=cephfs_mntpt, brxnet=brxnet)
@property
def config_path(self):
path = "{0}/client.{1}.*.asok".format(d, self.client_id)
return path
- def mount(self, mount_path=None, mount_fs_name=None, mountpoint=None, mount_options=[]):
- if mountpoint is not None:
- self.mountpoint = mountpoint
- self.setupfs(name=mount_fs_name)
+ def mount(self, mntopts=[], createfs=True, **kwargs):
+ self.update_attrs(**kwargs)
+ self.assert_and_log_minimum_mount_details()
+
if opt_use_ns:
self.using_namespace = True
self.setup_netns()
else:
self.using_namespace = False
- self.client_remote.run(args=['mkdir', '-p', self.mountpoint])
+ # TODO: don't call setupfs() from within mount()
+ if createfs:
+ self.setupfs(name=self.cephfs_name)
+
+ stderr = StringIO()
+ try:
+ self.client_remote.run(args=['mkdir', '-p', self.hostfs_mntpt],
+ stderr=stderr)
+ except CommandFailedError:
+ if 'file exists' not in stderr.getvalue().lower():
+ raise
def list_connections():
self.client_remote.run(
pre_mount_conns = list_connections()
log.info("Pre-mount connections: {0}".format(pre_mount_conns))
- prefix = []
+ cmdargs = []
if self.using_namespace:
- prefix += ['sudo', 'nsenter',
+ cmdargs = ['sudo', 'nsenter',
'--net=/var/run/netns/{0}'.format(self.netns_name),
'--setuid', str(os.getuid())]
- prefix += [os.path.join(BIN_PREFIX, "ceph-fuse")]
+ cmdargs += [os.path.join(BIN_PREFIX, 'ceph-fuse'), self.hostfs_mntpt,
+ '-f']
+ if self.client_id is not None:
+ cmdargs += ["--id", self.client_id]
+ if self.client_keyring_path and self.client_id is not None:
+ cmdargs.extend(['-k', self.client_keyring_path])
+ if self.cephfs_name:
+ cmdargs += ["--client_mds_namespace=" + self.cephfs_name]
+        if self.cephfs_mntpt:
+            cmdargs += ["--client_mountpoint=" + self.cephfs_mntpt]
if os.getuid() != 0:
- prefix += ["--client_die_on_failed_dentry_invalidate=false"]
- if mount_path is not None:
- prefix += ["--client_mountpoint={0}".format(mount_path)]
- if mount_fs_name is not None:
- prefix += ["--client_fs={0}".format(mount_fs_name)]
- prefix += mount_options;
- fuse_cmd_args = prefix + ["-f", "--name",
- "client.{0}".format(self.client_id),
- self.mountpoint]
-
- self.fuse_daemon = self.client_remote.run(args=fuse_cmd_args,
- wait=False, omit_sudo=False)
+ cmdargs += ["--client_die_on_failed_dentry_invalidate=false"]
+ if mntopts:
+ cmdargs += mntopts
+
+ self.fuse_daemon = self.client_remote.run(args=cmdargs, wait=False,
+ omit_sudo=False)
self._set_fuse_daemon_pid()
log.info("Mounting client.{0} with pid "
"{1}".format(self.client_id, self.fuse_daemon.subproc.pid))
self.gather_mount_info()
self.mounted = True
+
def _set_fuse_daemon_pid(self):
# NOTE: When a command <args> is launched with sudo, two processes are
# launched, one with sudo in <args> and other without. Make sure we
open("./keyring", "at").write(p.stdout.getvalue())
if use_kernel_client:
- mount = LocalKernelMount(ctx, test_dir, client_id, opt_brxnet)
+ mount = LocalKernelMount(ctx=ctx, test_dir=test_dir,
+ client_id=client_id, brxnet=opt_brxnet)
else:
- mount = LocalFuseMount(ctx, test_dir, client_id, opt_brxnet)
+ mount = LocalFuseMount(ctx=ctx, test_dir=test_dir,
+ client_id=client_id, brxnet=opt_brxnet)
mounts.append(mount)
- if os.path.exists(mount.mountpoint):
+ if os.path.exists(mount.hostfs_mntpt):
if mount.is_mounted():
- log.warning("unmounting {0}".format(mount.mountpoint))
+ log.warning("unmounting {0}".format(mount.hostfs_mntpt))
mount.umount_wait()
else:
- os.rmdir(mount.mountpoint)
+ os.rmdir(mount.hostfs_mntpt)
from tasks.cephfs_test_runner import DecoratingLoader