From 07e493ffb581cac1ae85deac0f8b502f35a75e39 Mon Sep 17 00:00:00 2001 From: Rishabh Dave Date: Mon, 27 Jan 2020 11:37:44 +0530 Subject: [PATCH] qa/cephfs: allow reusing mount objects and add remount method This commit introduces the following two sets of changes - First, make client keyring path, mountpoint on host FS and CephFS and CephFS's name attributes of the object representing the mount and update all the mount object creation calls accordingly. Also, rewrite all the mount object creation to use keyword arguments instead of positional arguments to avoid mistakes, especially since a new argument was added in this commit. Second, add remount method to mount.py so that it's possible to unmount safely, modify the attributes of the object representing the mount and mount again based on new state of the object *in a single call*. The method is placed in mount.py to avoid duplication. This change leads to two more changes: upgrading the interface of mount() and mount_wait() and upgrading testsuites to adapt to these changes. 
Signed-off-by: Rishabh Dave --- qa/tasks/ceph_fuse.py | 13 +- qa/tasks/cephfs/filesystem.py | 1 - qa/tasks/cephfs/fuse_mount.py | 103 ++++++----- qa/tasks/cephfs/kernel_mount.py | 64 ++++--- qa/tasks/cephfs/mount.py | 234 ++++++++++++++++++------ qa/tasks/cephfs/test_client_recovery.py | 4 +- qa/tasks/cephfs/test_failover.py | 4 +- qa/tasks/cephfs/test_quota.py | 2 +- qa/tasks/cephfs/test_recovery_pool.py | 2 +- qa/tasks/cephfs/test_sessionmap.py | 4 +- qa/tasks/cephfs/test_strays.py | 16 +- qa/tasks/cephfs/test_volume_client.py | 48 ++--- qa/tasks/kclient.py | 11 +- qa/tasks/vstart_runner.py | 158 +++++++++------- 14 files changed, 426 insertions(+), 238 deletions(-) diff --git a/qa/tasks/ceph_fuse.py b/qa/tasks/ceph_fuse.py index f55441a17a6aa..0e178d3d449da 100644 --- a/qa/tasks/ceph_fuse.py +++ b/qa/tasks/ceph_fuse.py @@ -128,7 +128,9 @@ def task(ctx, config): continue if id_ not in all_mounts: - fuse_mount = FuseMount(ctx, client_config, testdir, auth_id, remote, brxnet) + fuse_mount = FuseMount(ctx=ctx, client_config=client_config, + test_dir=testdir, client_id=auth_id, + client_remote=remote, brxnet=brxnet) all_mounts[id_] = fuse_mount else: # Catch bad configs where someone has e.g. 
tried to use ceph-fuse and kcephfs for the same client @@ -152,9 +154,12 @@ def task(ctx, config): log.info('Mounting ceph-fuse clients...') for info in mounted_by_me.values(): config = info["config"] - mount_path = config.get("mount_path") - mountpoint = config.get("mountpoint") - info["mount"].mount(mountpoint=mountpoint, mount_path=mount_path) + mount_x = info['mount'] + if config.get("mount_path"): + mount_x.cephfs_mntpt = config.get("mount_path") + if config.get("mountpoint"): + mount_x.hostfs_mntpt = config.get("mountpoint") + mount_x.mount() for info in mounted_by_me.values(): info["mount"].wait_until_mounted() diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py index 3ec21125490ca..b32a73bdc5c34 100644 --- a/qa/tasks/cephfs/filesystem.py +++ b/qa/tasks/cephfs/filesystem.py @@ -611,7 +611,6 @@ class Filesystem(MDSCluster): self.getinfo(refresh = True) - def check_pool_application(self, pool_name): osd_map = self.mon_manager.get_osd_dump_json() for pool in osd_map['pools']: diff --git a/qa/tasks/cephfs/fuse_mount.py b/qa/tasks/cephfs/fuse_mount.py index 6a8b3d30bbd9f..7f55060f7f4d1 100644 --- a/qa/tasks/cephfs/fuse_mount.py +++ b/qa/tasks/cephfs/fuse_mount.py @@ -1,8 +1,8 @@ -from io import StringIO import json import time import logging +from io import StringIO from textwrap import dedent from teuthology import misc @@ -15,9 +15,15 @@ from tasks.cephfs.mount import CephFSMount log = logging.getLogger(__name__) +# Refer mount.py for docstrings. 
class FuseMount(CephFSMount): - def __init__(self, ctx, client_config, test_dir, client_id, client_remote, brxnet): - super(FuseMount, self).__init__(ctx, test_dir, client_id, client_remote, brxnet) + def __init__(self, ctx, client_config, test_dir, client_id, + client_remote, client_keyring_path=None, cephfs_name=None, + cephfs_mntpt=None, hostfs_mntpt=None, brxnet=None): + super(FuseMount, self).__init__(ctx=ctx, test_dir=test_dir, + client_id=client_id, client_remote=client_remote, + client_keyring_path=client_keyring_path, hostfs_mntpt=hostfs_mntpt, + cephfs_name=cephfs_name, cephfs_mntpt=cephfs_mntpt, brxnet=brxnet) self.client_config = client_config if client_config else {} self.fuse_daemon = None @@ -26,14 +32,20 @@ class FuseMount(CephFSMount): self.inst = None self.addr = None - def mount(self, mount_path=None, mount_fs_name=None, mountpoint=None, mount_options=[]): - if mountpoint is not None: - self.mountpoint = mountpoint - self.setupfs(name=mount_fs_name) + def mount(self, mntopts=[], createfs=True, **kwargs): + self.update_attrs(**kwargs) + self.assert_and_log_minimum_mount_details() + self.setup_netns() + if createfs: + # TODO: don't call setupfs() from within mount(), since it's + # absurd. The proper order should be: create FS first and then + # call mount(). + self.setupfs(name=self.cephfs_name) + try: - return self._mount(mount_path, mount_fs_name, mount_options) + return self._mount(mntopts) except RuntimeError: # Catch exceptions by the mount() logic (i.e. not remote command # failures) and ensure the mount is not left half-up. 
@@ -43,18 +55,23 @@ class FuseMount(CephFSMount): self.umount_wait(force=True) raise - def _mount(self, mount_path, mount_fs_name, mount_options): - log.info("Client client.%s config is %s" % (self.client_id, self.client_config)) + def _mount(self, mntopts): + log.info("Client client.%s config is %s" % (self.client_id, + self.client_config)) daemon_signal = 'kill' - if self.client_config.get('coverage') or self.client_config.get('valgrind') is not None: + if self.client_config.get('coverage') or \ + self.client_config.get('valgrind') is not None: daemon_signal = 'term' - log.info('Mounting ceph-fuse client.{id} at {remote} {mnt}...'.format( - id=self.client_id, remote=self.client_remote, mnt=self.mountpoint)) - - self.client_remote.run(args=['mkdir', '-p', self.mountpoint], - timeout=(15*60), cwd=self.test_dir) + stderr = StringIO() + try: + self.client_remote.run(args=['mkdir', '-p', self.hostfs_mntpt], + timeout=(15*60), cwd=self.test_dir, + stderr=StringIO()) + except CommandFailedError: + if 'file exists' not in stderr.getvalue().lower(): + raise run_cmd = [ 'sudo', @@ -66,20 +83,17 @@ class FuseMount(CephFSMount): ] fuse_cmd = ['ceph-fuse', "-f"] - - if mount_path is not None: - fuse_cmd += ["--client_mountpoint={0}".format(mount_path)] - - if mount_fs_name is not None: - fuse_cmd += ["--client_fs={0}".format(mount_fs_name)] - - fuse_cmd += mount_options - - fuse_cmd += [ - '--name', 'client.{id}'.format(id=self.client_id), - # TODO ceph-fuse doesn't understand dash dash '--', - self.mountpoint, - ] + if self.client_id is not None: + fuse_cmd += ['--id', self.client_id] + if self.client_keyring_path and self.client_id is not None: + fuse_cmd += ['-k', self.client_keyring_path] + if self.cephfs_mntpt is not None: + fuse_cmd += ["--client_mountpoint=" + self.cephfs_mntpt] + if self.cephfs_name is not None: + fuse_cmd += ["--client_fs=" + self.cephfs_name] + if mntopts: + fuse_cmd += mntopts + fuse_cmd.append(self.hostfs_mntpt) cwd = self.test_dir if 
self.client_config.get('valgrind') is not None: @@ -194,7 +208,7 @@ class FuseMount(CephFSMount): '--file-system', '--printf=%T\n', '--', - self.mountpoint, + self.hostfs_mntpt, ], cwd=self.test_dir, stdout=StringIO(), @@ -209,16 +223,16 @@ class FuseMount(CephFSMount): if ("endpoint is not connected" in error or "Software caused connection abort" in error): # This happens is fuse is killed without unmount - log.warning("Found stale moutn point at {0}".format(self.mountpoint)) + log.warning("Found stale mount point at {0}".format(self.hostfs_mntpt)) return True else: # This happens if the mount directory doesn't exist - log.info('mount point does not exist: %s', self.mountpoint) + log.info('mount point does not exist: %s', self.hostfs_mntpt) return False fstype = proc.stdout.getvalue().rstrip('\n') if fstype == 'fuseblk': - log.info('ceph-fuse is mounted on %s', self.mountpoint) + log.info('ceph-fuse is mounted on %s', self.hostfs_mntpt) return True else: log.debug('ceph-fuse not mounted, got fs type {fstype!r}'.format( @@ -246,7 +260,7 @@ class FuseMount(CephFSMount): try: stderr = StringIO() self.client_remote.run(args=['sudo', 'chmod', '1777', - self.mountpoint], + self.hostfs_mntpt], timeout=(15*60), cwd=self.test_dir, stderr=stderr, omit_sudo=False) break @@ -260,7 +274,7 @@ class FuseMount(CephFSMount): raise def _mountpoint_exists(self): - return self.client_remote.run(args=["ls", "-d", self.mountpoint], check_status=False, cwd=self.test_dir, timeout=(15*60)).exitstatus == 0 + return self.client_remote.run(args=["ls", "-d", self.hostfs_mntpt], check_status=False, cwd=self.test_dir, timeout=(15*60)).exitstatus == 0 def umount(self, cleanup=True): """ @@ -276,7 +290,7 @@ class FuseMount(CephFSMount): log.info('Running fusermount -u on {name}...'.format(name=self.client_remote.name)) stderr = StringIO() self.client_remote.run(args=['sudo', 'fusermount', '-u', - self.mountpoint], + self.hostfs_mntpt], cwd=self.test_dir, stderr=stderr, timeout=(30*60), 
omit_sudo=False) except run.CommandFailedError: @@ -308,10 +322,10 @@ class FuseMount(CephFSMount): # make sure its unmounted try: self.client_remote.run(args=['sudo', 'umount', '-l', '-f', - self.mountpoint], + self.hostfs_mntpt], stderr=stderr, timeout=(60*15), omit_sudo=False) - except CommandFailedError: - if self.is_mounted(): + except CommandFailedError: + if self.is_mounted(): raise self.mounted = False @@ -327,9 +341,10 @@ class FuseMount(CephFSMount): :param force: Complete cleanly even if the MDS is offline """ if not (self.is_mounted() and self.fuse_daemon): - log.debug('ceph-fuse client.{id} is not mounted at {remote} {mnt}'.format(id=self.client_id, - remote=self.client_remote, - mnt=self.mountpoint)) + log.debug('ceph-fuse client.{id} is not mounted at {remote} ' + '{mnt}'.format(id=self.client_id, + remote=self.client_remote, + mnt=self.hostfs_mntpt)) self.cleanup() return @@ -386,7 +401,7 @@ class FuseMount(CephFSMount): args=[ 'rm', '-rf', - self.mountpoint, + self.hostfs_mntpt, ], cwd=self.test_dir, timeout=(60*5) diff --git a/qa/tasks/cephfs/kernel_mount.py b/qa/tasks/cephfs/kernel_mount.py index fa36031fa3807..33ca5654a1de2 100644 --- a/qa/tasks/cephfs/kernel_mount.py +++ b/qa/tasks/cephfs/kernel_mount.py @@ -1,9 +1,13 @@ import json import logging + +from io import StringIO from textwrap import dedent + from teuthology.orchestra.run import CommandFailedError from teuthology.orchestra import run from teuthology.contextutil import MaxWhileTries + from tasks.cephfs.mount import CephFSMount log = logging.getLogger(__name__) @@ -13,32 +17,46 @@ UMOUNT_TIMEOUT = 300 class KernelMount(CephFSMount): - def __init__(self, ctx, test_dir, client_id, client_remote, brxnet): - super(KernelMount, self).__init__(ctx, test_dir, client_id, client_remote, brxnet) + def __init__(self, ctx, test_dir, client_id, client_remote, + client_keyring_path=None, hostfs_mntpt=None, + cephfs_name=None, cephfs_mntpt=None, brxnet=None): + super(KernelMount, 
self).__init__(ctx=ctx, test_dir=test_dir, + client_id=client_id, client_remote=client_remote, + client_keyring_path=client_keyring_path, hostfs_mntpt=hostfs_mntpt, + cephfs_name=cephfs_name, cephfs_mntpt=cephfs_mntpt, brxnet=brxnet) + + def mount(self, mntopts=[], createfs=True, **kwargs): + self.update_attrs(**kwargs) + self.assert_and_log_minimum_mount_details() - def mount(self, mount_path=None, mount_fs_name=None, mountpoint=None, mount_options=[]): - if mountpoint is not None: - self.mountpoint = mountpoint - self.setupfs(name=mount_fs_name) self.setup_netns() - log.info('Mounting kclient client.{id} at {remote} {mnt}...'.format( - id=self.client_id, remote=self.client_remote, mnt=self.mountpoint)) + # TODO: don't call setupfs() from within mount(), since it's + # absurd. The proper order should be: create FS first and then + # call mount(). + if createfs: + self.setupfs(name=self.cephfs_name) + if not self.cephfs_mntpt: + self.cephfs_mntpt = '/' - self.client_remote.run(args=['mkdir', '-p', self.mountpoint], - timeout=(5*60)) - - if mount_path is None: - mount_path = "/" + stderr = StringIO() + try: + self.client_remote.run(args=['mkdir', '-p', self.hostfs_mntpt], + timeout=(5*60), stderr=stderr) + except CommandFailedError: + if 'file exists' not in stderr.getvalue().lower(): + raise - opts = 'name={id},norequire_active_mds,conf={conf}'.format(id=self.client_id, - conf=self.config_path) + opts = 'name=' + self.client_id + if self.client_keyring_path and self.client_id is not None: + opts += 'secret=' + self.get_key_from_keyfile() + opts += ',norequire_active_mds,conf=' + self.config_path - if mount_fs_name is not None: - opts += ",mds_namespace={0}".format(mount_fs_name) + if self.cephfs_name is not None: + opts += ",mds_namespace={0}".format(self.cephfs_name) - for mount_opt in mount_options : - opts += ",{0}".format(mount_opt) + if mntopts: + opts += ',' + ','.join(mntopts) self.client_remote.run( args=[ @@ -51,8 +69,8 @@ class KernelMount(CephFSMount): 
'/bin/mount', '-t', 'ceph', - ':{mount_path}'.format(mount_path=mount_path), - self.mountpoint, + ':' + self.cephfs_mntpt, + self.hostfs_mntpt, '-v', '-o', opts @@ -61,7 +79,7 @@ class KernelMount(CephFSMount): ) self.client_remote.run( - args=['sudo', 'chmod', '1777', self.mountpoint], timeout=(5*60)) + args=['sudo', 'chmod', '1777', self.hostfs_mntpt], timeout=(5*60)) self.mounted = True @@ -73,7 +91,7 @@ class KernelMount(CephFSMount): log.debug('Unmounting client client.{id}...'.format(id=self.client_id)) try: - cmd=['sudo', 'umount', self.mountpoint] + cmd=['sudo', 'umount', self.hostfs_mntpt] if force: cmd.append('-f') self.client_remote.run(args=cmd, timeout=(15*60), omit_sudo=False) diff --git a/qa/tasks/cephfs/mount.py b/qa/tasks/cephfs/mount.py index a62209e661fcd..54daf72ce15dc 100644 --- a/qa/tasks/cephfs/mount.py +++ b/qa/tasks/cephfs/mount.py @@ -1,35 +1,61 @@ -from contextlib import contextmanager import json import logging import datetime +import os +import re import time + from io import StringIO +from contextlib import contextmanager from textwrap import dedent -import os -import re from IPy import IP + +from teuthology.misc import get_file from teuthology.orchestra import run from teuthology.orchestra.run import CommandFailedError, ConnectionLostError, Raw + from tasks.cephfs.filesystem import Filesystem log = logging.getLogger(__name__) class CephFSMount(object): - def __init__(self, ctx, test_dir, client_id, client_remote, brxnet): + def __init__(self, ctx, test_dir, client_id, client_remote, + client_keyring_path=None, hostfs_mntpt=None, + cephfs_name=None, cephfs_mntpt=None, brxnet=None): """ :param test_dir: Global teuthology test dir :param client_id: Client ID, the 'foo' in client.foo - :param client_remote: Remote instance for the host where client will run + :param client_keyring_path: path to keyring for given client_id + :param client_remote: Remote instance for the host where client will + run + :param hostfs_mntpt: Path to directory 
on the FS on which Ceph FS will + be mounted + :param cephfs_name: Name of Ceph FS to be mounted + :param cephfs_mntpt: Path to directory inside Ceph FS that will be + mounted as root """ - + self.mounted = False self.ctx = ctx self.test_dir = test_dir + + self._verify_attrs(client_id=client_id, + client_keyring_path=client_keyring_path, + hostfs_mntpt=hostfs_mntpt, cephfs_name=cephfs_name, + cephfs_mntpt=cephfs_mntpt) + self.client_id = client_id + self.client_keyring_path = client_keyring_path self.client_remote = client_remote - self.mountpoint_dir_name = 'mnt.{id}'.format(id=self.client_id) - self._mountpoint = None + if hostfs_mntpt: + self.hostfs_mntpt = hostfs_mntpt + self.hostfs_mntpt_dirname = os.path.basename(self.hostfs_mntpt) + else: + self.hostfs_mntpt = os.path.join(self.test_dir, f'mnt.{self.client_id}') + self.cephfs_name = cephfs_name + self.cephfs_mntpt = cephfs_mntpt + self.fs = None - self.mounted = False + self._netns_name = None self.nsid = -1 if brxnet is None: @@ -74,16 +100,16 @@ class CephFSMount(object): @property def mountpoint(self): - if self._mountpoint == None: - self._mountpoint= os.path.join( - self.test_dir, '{dir_name}'.format(dir_name=self.mountpoint_dir_name)) - return self._mountpoint + if self.hostfs_mntpt == None: + self.hostfs_mntpt = os.path.join(self.test_dir, + self.hostfs_mntpt_dirname) + return self.hostfs_mntpt @mountpoint.setter def mountpoint(self, path): if not isinstance(path, str): raise RuntimeError('path should be of str type.') - self._mountpoint = path + self._mountpoint = self.hostfs_mntpt = path @property def netns_name(self): @@ -95,6 +121,32 @@ class CephFSMount(object): def netns_name(self, name): self._netns_name = name + def assert_and_log_minimum_mount_details(self): + """ + Make sure we have minimum details required for mounting. Ideally, this + method should be called at the beginning of the mount method. 
+ """ + if not self.client_id or not self.client_remote or \ + not self.hostfs_mntpt: + errmsg = ('Mounting CephFS requires that at least following ' + 'details to be provided -\n' + '1. the client ID,\n2. the mountpoint and\n' + '3. the remote machine where CephFS will be mounted.\n') + raise RuntimeError(errmsg) + + log.info('Mounting Ceph FS. Following are details of mount; remember ' + '"None" represents Python type None -') + log.info(f'self.client_remote.hostname = {self.client_remote.hostname}') + log.info(f'self.client.name = client.{self.client_id}') + log.info(f'self.hostfs_mntpt = {self.hostfs_mntpt}') + log.info(f'self.cephfs_name = {self.cephfs_name}') + log.info(f'self.cephfs_mntpt = {self.cephfs_mntpt}') + log.info(f'self.client_keyring_path = {self.client_keyring_path}') + if self.client_keyring_path: + log.info('keyring content -\n' + + get_file(self.client_remote, self.client_keyring_path, + sudo=True).decode()) + def is_mounted(self): return self.mounted @@ -341,18 +393,24 @@ class CephFSMount(object): args = ['sudo', 'ip', 'link', 'set', 'brx.{0}'.format(self.nsid), 'up'] self.client_remote.run(args=args, timeout=(5*60), omit_sudo=False) - def mount(self, mount_path=None, mount_fs_name=None, mountpoint=None, mount_options=[]): + def mount(self, mntopts=[], createfs=True, **kwargs): + """ + kwargs expects its members to be same as the arguments accepted by + self.update_attrs(). + """ raise NotImplementedError() - def mount_wait(self, mount_path=None, mount_fs_name=None, mountpoint=None, mount_options=[]): - self.mount(mount_path=mount_path, mount_fs_name=mount_fs_name, mountpoint=mountpoint, - mount_options=mount_options) + def mount_wait(self, **kwargs): + """ + Accepts arguments same as self.mount(). 
+ """ + self.mount(**kwargs) self.wait_until_mounted() def umount(self): raise NotImplementedError() - def umount_wait(self, force=False, require_clean=False): + def umount_wait(self, force=False, require_clean=False, timeout=None): """ :param force: Expect that the mount will not shutdown cleanly: kill @@ -360,10 +418,77 @@ class CephFSMount(object): :param require_clean: Wait for the Ceph client associated with the mount (e.g. ceph-fuse) to terminate, and raise if it doesn't do so cleanly. + :param timeout: amount of time to be waited for umount command to finish :return: """ raise NotImplementedError() + def _verify_attrs(self, **kwargs): + """ + Verify that client_id, client_keyring_path, client_remote, hostfs_mntpt, + cephfs_name, cephfs_mntpt are either type str or None. + """ + for k, v in kwargs.items(): + if v is not None and not isinstance(v, str): + raise RuntimeError('value of attributes should be either str ' + f'or None. {k} - {v}') + + def update_attrs(self, client_id=None, client_keyring_path=None, + client_remote=None, hostfs_mntpt=None, cephfs_name=None, + cephfs_mntpt=None): + if not (client_id or client_keyring_path or client_remote or + cephfs_name or cephfs_mntpt or hostfs_mntpt): + return + + self._verify_attrs(client_id=client_id, + client_keyring_path=client_keyring_path, + hostfs_mntpt=hostfs_mntpt, cephfs_name=cephfs_name, + cephfs_mntpt=cephfs_mntpt) + + if client_id: + self.client_id = client_id + if client_keyring_path: + self.client_keyring_path = client_keyring_path + if client_remote: + self.client_remote = client_remote + if hostfs_mntpt: + self.hostfs_mntpt = hostfs_mntpt + if cephfs_name: + self.cephfs_name = cephfs_name + if cephfs_mntpt: + self.cephfs_mntpt = cephfs_mntpt + + def remount(self, **kwargs): + """ + Update mount object's attributes and attempt remount with these + new values for these attrbiutes. + + 1. Run umount_wait(). + 2. Run update_attrs(). + 3. Run mount(). 
+ + Accepts arguments of self.mount() and self.update_attrs() with 2 exceptions - + 1. Accepts wait too which can be True or False. + 2. The default value of createfs is False. + """ + self.umount_wait() + assert not self.mounted + + mntopts = kwargs.pop('mntopts', []) + createfs = kwargs.pop('createfs', False) + wait = kwargs.pop('wait', True) + + self.update_attrs(**kwargs) + + retval = self.mount(mntopts=mntopts, createfs=createfs) + # avoid this scenario (again): mount command might've failed and + # check_status might have silenced the exception, yet we attempt to + # wait which might lead to an error. + if retval is None and wait: + self.wait_until_mounted() + + return retval + def kill(self): """ Suspend the netns veth interface to make the client disconnected @@ -387,21 +512,11 @@ class CephFSMount(object): """ stderr = StringIO() try: - self.client_remote.run( - args=[ - 'rmdir', - '--', - self.mountpoint, - ], - cwd=self.test_dir, - stderr=stderr, - timeout=(60*5), - check_status=False, - ) + self.client_remote.run(args=['rmdir', '--', self.mountpoint], + cwd=self.test_dir, stderr=stderr, + timeout=(60*5), check_status=False) except CommandFailedError: - if "No such file or directory" in stderr.getvalue(): - pass - else: + if "no such file or directory" not in stderr.getvalue().lower(): raise self.cleanup_netns() @@ -412,6 +527,15 @@ class CephFSMount(object): def get_keyring_path(self): return '/etc/ceph/ceph.client.{id}.keyring'.format(id=self.client_id) + def get_key_from_keyfile(self): + # XXX: don't call run_shell(), since CephFS might be unmounted. 
+ keyring = self.client_remote.run( + args=['sudo', 'cat', self.client_keyring_path], stdout=StringIO(), + omit_sudo=False).stdout.getvalue() + for line in keyring.split('\n'): + if line.find('key') != -1: + return line[line.find('=') + 1 : ].strip() + @property def config_path(self): """ @@ -450,9 +574,9 @@ class CephFSMount(object): if os.path.isabs(dirname): path = os.path.join(dirname, filename) else: - path = os.path.join(self.mountpoint, dirname, filename) + path = os.path.join(self.hostfs_mntpt, dirname, filename) else: - path = os.path.join(self.mountpoint, filename) + path = os.path.join(self.hostfs_mntpt, filename) else: path = filename @@ -469,7 +593,7 @@ class CephFSMount(object): for suffix in self.test_files: log.info("Creating file {0}".format(suffix)) self.client_remote.run(args=[ - 'sudo', 'touch', os.path.join(self.mountpoint, suffix) + 'sudo', 'touch', os.path.join(self.hostfs_mntpt, suffix) ]) def test_create_file(self, filename='testfile', dirname=None, user=None, @@ -483,7 +607,7 @@ class CephFSMount(object): for suffix in self.test_files: log.info("Checking file {0}".format(suffix)) r = self.client_remote.run(args=[ - 'sudo', 'ls', os.path.join(self.mountpoint, suffix) + 'sudo', 'ls', os.path.join(self.hostfs_mntpt, suffix) ], check_status=False) if r.exitstatus != 0: raise RuntimeError("Expected file {0} not found".format(suffix)) @@ -494,11 +618,11 @@ class CephFSMount(object): filename = "{0} {1}".format(datetime.datetime.now(), self.client_id) log.debug("Creating test file {0}".format(filename)) self.client_remote.run(args=[ - 'sudo', 'touch', os.path.join(self.mountpoint, filename) + 'sudo', 'touch', os.path.join(self.hostfs_mntpt, filename) ]) log.debug("Deleting test file {0}".format(filename)) self.client_remote.run(args=[ - 'sudo', 'rm', '-f', os.path.join(self.mountpoint, filename) + 'sudo', 'rm', '-f', os.path.join(self.hostfs_mntpt, filename) ]) def _run_python(self, pyscript, py_version='python3'): @@ -602,7 +726,7 @@ class 
CephFSMount(object): """ assert(self.is_mounted()) - path = os.path.join(self.mountpoint, basename) + path = os.path.join(self.hostfs_mntpt, basename) p = self._run_python(dedent( """ @@ -621,7 +745,7 @@ class CephFSMount(object): """ assert(self.is_mounted()) - path = os.path.join(self.mountpoint, basename) + path = os.path.join(self.hostfs_mntpt, basename) if write: pyscript = dedent(""" @@ -655,7 +779,7 @@ class CephFSMount(object): def wait_for_dir_empty(self, dirname, timeout=30): i = 0 - dirpath = os.path.join(self.mountpoint, dirname) + dirpath = os.path.join(self.hostfs_mntpt, dirname) while i < timeout: nr_entries = int(self.getfattr(dirpath, "ceph.dir.entries")) if nr_entries == 0: @@ -673,7 +797,7 @@ class CephFSMount(object): i = 0 while i < timeout: r = self.client_remote.run(args=[ - 'sudo', 'ls', os.path.join(self.mountpoint, basename) + 'sudo', 'ls', os.path.join(self.hostfs_mntpt, basename) ], check_status=False) if r.exitstatus == 0: log.debug("File {0} became visible from {1} after {2}s".format( @@ -692,7 +816,7 @@ class CephFSMount(object): """ assert(self.is_mounted()) - path = os.path.join(self.mountpoint, basename) + path = os.path.join(self.hostfs_mntpt, basename) script_builder = """ import time @@ -720,7 +844,7 @@ class CephFSMount(object): def lock_and_release(self, basename="background_file"): assert(self.is_mounted()) - path = os.path.join(self.mountpoint, basename) + path = os.path.join(self.hostfs_mntpt, basename) script = """ import time @@ -740,7 +864,7 @@ class CephFSMount(object): def check_filelock(self, basename="background_file", do_flock=True): assert(self.is_mounted()) - path = os.path.join(self.mountpoint, basename) + path = os.path.join(self.hostfs_mntpt, basename) script_builder = """ import fcntl @@ -782,7 +906,7 @@ class CephFSMount(object): """ assert(self.is_mounted()) - path = os.path.join(self.mountpoint, basename) + path = os.path.join(self.hostfs_mntpt, basename) pyscript = dedent(""" import os @@ -826,7 +950,7 @@ 
class CephFSMount(object): val = zlib.crc32(str(i).encode('utf-8')) & 7 f.write(chr(val)) """.format( - path=os.path.join(self.mountpoint, filename), + path=os.path.join(self.hostfs_mntpt, filename), size=size ))) @@ -846,7 +970,7 @@ class CephFSMount(object): if b != chr(val): raise RuntimeError("Bad data at offset {{0}}".format(i)) """.format( - path=os.path.join(self.mountpoint, filename), + path=os.path.join(self.hostfs_mntpt, filename), size=size ))) @@ -859,7 +983,7 @@ class CephFSMount(object): """ assert(self.is_mounted()) - abs_path = os.path.join(self.mountpoint, fs_path) + abs_path = os.path.join(self.hostfs_mntpt, fs_path) pyscript = dedent(""" import sys @@ -889,7 +1013,7 @@ class CephFSMount(object): def create_n_files(self, fs_path, count, sync=False): assert(self.is_mounted()) - abs_path = os.path.join(self.mountpoint, fs_path) + abs_path = os.path.join(self.hostfs_mntpt, fs_path) pyscript = dedent(""" import sys @@ -977,7 +1101,7 @@ class CephFSMount(object): Raises exception on absent file. 
""" - abs_path = os.path.join(self.mountpoint, fs_path) + abs_path = os.path.join(self.hostfs_mntpt, fs_path) if follow_symlinks: stat_call = "os.stat('" + abs_path + "')" else: @@ -1015,7 +1139,7 @@ class CephFSMount(object): :param fs_path: :return: """ - abs_path = os.path.join(self.mountpoint, fs_path) + abs_path = os.path.join(self.hostfs_mntpt, fs_path) pyscript = dedent(""" import sys import errno @@ -1030,7 +1154,7 @@ class CephFSMount(object): proc.wait() def path_to_ino(self, fs_path, follow_symlinks=True): - abs_path = os.path.join(self.mountpoint, fs_path) + abs_path = os.path.join(self.hostfs_mntpt, fs_path) if follow_symlinks: pyscript = dedent(""" @@ -1052,7 +1176,7 @@ class CephFSMount(object): return int(proc.stdout.getvalue().strip()) def path_to_nlink(self, fs_path): - abs_path = os.path.join(self.mountpoint, fs_path) + abs_path = os.path.join(self.hostfs_mntpt, fs_path) pyscript = dedent(""" import os diff --git a/qa/tasks/cephfs/test_client_recovery.py b/qa/tasks/cephfs/test_client_recovery.py index bec07ec2739ef..0aeb36a07d465 100644 --- a/qa/tasks/cephfs/test_client_recovery.py +++ b/qa/tasks/cephfs/test_client_recovery.py @@ -630,10 +630,10 @@ class TestClientRecovery(CephFSTestCase): self.mount_a.umount_wait() if isinstance(self.mount_a, FuseMount): - self.mount_a.mount(mount_options=['--client_reconnect_stale=1', '--fuse_disable_pagecache=1']) + self.mount_a.mount(mntopts=['--client_reconnect_stale=1', '--fuse_disable_pagecache=1']) else: try: - self.mount_a.mount(mount_options=['recover_session=clean']) + self.mount_a.mount(mntopts=['recover_session=clean']) except CommandFailedError: self.mount_a.kill_cleanup() self.skipTest("Not implemented in current kernel") diff --git a/qa/tasks/cephfs/test_failover.py b/qa/tasks/cephfs/test_failover.py index 1bb1bf2046cd8..dc637883f844c 100644 --- a/qa/tasks/cephfs/test_failover.py +++ b/qa/tasks/cephfs/test_failover.py @@ -688,14 +688,14 @@ class TestMultiFilesystems(CephFSTestCase): fs_a, fs_b = 
self._setup_two() # Mount a client on fs_a - self.mount_a.mount(mount_fs_name=fs_a.name) + self.mount_a.mount(cephfs_name=fs_a.name) self.mount_a.write_n_mb("pad.bin", 1) self.mount_a.write_n_mb("test.bin", 2) a_created_ino = self.mount_a.path_to_ino("test.bin") self.mount_a.create_files() # Mount a client on fs_b - self.mount_b.mount(mount_fs_name=fs_b.name) + self.mount_b.mount(cephfs_name=fs_b.name) self.mount_b.write_n_mb("test.bin", 1) b_created_ino = self.mount_b.path_to_ino("test.bin") self.mount_b.create_files() diff --git a/qa/tasks/cephfs/test_quota.py b/qa/tasks/cephfs/test_quota.py index cad5baca7a6bc..0386672bd8316 100644 --- a/qa/tasks/cephfs/test_quota.py +++ b/qa/tasks/cephfs/test_quota.py @@ -47,7 +47,7 @@ class TestQuota(CephFSTestCase): self.mount_a.setfattr("./subdir", "ceph.quota.max_bytes", "%s" % size_before) - self.mount_b.mount_wait(mount_path="/subdir") + self.mount_b.mount_wait(cephfs_mntpt="/subdir") self.assertDictEqual( self.mount_b.df(), diff --git a/qa/tasks/cephfs/test_recovery_pool.py b/qa/tasks/cephfs/test_recovery_pool.py index 0b3dc56bff933..f050638bbc98a 100644 --- a/qa/tasks/cephfs/test_recovery_pool.py +++ b/qa/tasks/cephfs/test_recovery_pool.py @@ -187,7 +187,7 @@ class TestRecoveryPool(CephFSTestCase): # Mount a client self.mount_a.mount_wait() - self.mount_b.mount_wait(mount_fs_name=recovery_fs) + self.mount_b.mount_wait(cephfs_name=recovery_fs) # See that the files are present and correct errors = workload.validate() diff --git a/qa/tasks/cephfs/test_sessionmap.py b/qa/tasks/cephfs/test_sessionmap.py index 13dc9e8cf520a..abe241eafed72 100644 --- a/qa/tasks/cephfs/test_sessionmap.py +++ b/qa/tasks/cephfs/test_sessionmap.py @@ -177,7 +177,7 @@ class TestSessionMap(CephFSTestCase): # Configure a client that is limited to /foo/bar self._configure_auth(self.mount_b, "badguy", "allow rw path=/foo/bar") # Check he can mount that dir and do IO - self.mount_b.mount_wait(mount_path="/foo/bar") + 
self.mount_b.mount_wait(cephfs_mntpt="/foo/bar") self.mount_b.create_destroy() self.mount_b.umount_wait() @@ -186,7 +186,7 @@ class TestSessionMap(CephFSTestCase): # Try to mount the client, see that it fails with self.assert_cluster_log("client session with non-allowable root '/baz' denied"): with self.assertRaises(CommandFailedError): - self.mount_b.mount_wait(mount_path="/foo/bar") + self.mount_b.mount_wait(cephfs_mntpt="/foo/bar") def test_session_evict_blocklisted(self): """ diff --git a/qa/tasks/cephfs/test_strays.py b/qa/tasks/cephfs/test_strays.py index 4dd70d3ee05d8..757d1d1492c9d 100644 --- a/qa/tasks/cephfs/test_strays.py +++ b/qa/tasks/cephfs/test_strays.py @@ -39,17 +39,17 @@ class TestStrays(CephFSTestCase): create_script = dedent(""" import os - mount_path = "{mount_path}" + mountpoint = "{mountpoint}" subdir = "delete_me" size = {size} file_count = {file_count} - os.mkdir(os.path.join(mount_path, subdir)) + os.mkdir(os.path.join(mountpoint, subdir)) for i in range(0, file_count): filename = "{{0}}_{{1}}.bin".format(i, size) - with open(os.path.join(mount_path, subdir, filename), 'w') as f: + with open(os.path.join(mountpoint, subdir, filename), 'w') as f: f.write(size * 'x') """.format( - mount_path=self.mount_a.mountpoint, + mountpoint=self.mount_a.mountpoint, size=1024, file_count=file_count )) @@ -145,18 +145,18 @@ class TestStrays(CephFSTestCase): create_script = dedent(""" import os - mount_path = "{mount_path}" + mountpoint = "{mountpoint}" subdir = "delete_me" size_unit = {size_unit} file_multiplier = {file_multiplier} - os.mkdir(os.path.join(mount_path, subdir)) + os.mkdir(os.path.join(mountpoint, subdir)) for i in range(0, file_multiplier): for size in range(0, {size_range}*size_unit, size_unit): filename = "{{0}}_{{1}}.bin".format(i, size // size_unit) - with open(os.path.join(mount_path, subdir, filename), 'w') as f: + with open(os.path.join(mountpoint, subdir, filename), 'w') as f: f.write(size * 'x') """.format( - 
mount_path=self.mount_a.mountpoint, + mountpoint=self.mount_a.mountpoint, size_unit=size_unit, file_multiplier=file_multiplier, size_range=self.throttle_workload_size_range diff --git a/qa/tasks/cephfs/test_volume_client.py b/qa/tasks/cephfs/test_volume_client.py index e86f1dfa6fa1b..7f93b6d757ca1 100644 --- a/qa/tasks/cephfs/test_volume_client.py +++ b/qa/tasks/cephfs/test_volume_client.py @@ -56,7 +56,7 @@ vc.disconnect() self.set_conf("client.{name}".format(name=id_name), "keyring", mount.get_keyring_path()) def _configure_guest_auth(self, volumeclient_mount, guest_mount, - guest_entity, mount_path, + guest_entity, cephfs_mntpt, namespace_prefix=None, readonly=False, tenant_id=None): """ @@ -66,7 +66,7 @@ vc.disconnect() volumeclient. :param guest_mount: mount used by the guest client. :param guest_entity: auth ID used by the guest client. - :param mount_path: path of the volume. + :param cephfs_mntpt: path of the volume. :param namespace_prefix: name prefix of the RADOS namespace, which is used for the volume's layout. :param readonly: defaults to False. If set to 'True' only read-only @@ -74,7 +74,7 @@ vc.disconnect() :param tenant_id: (OpenStack) tenant ID of the guest client. """ - head, volume_id = os.path.split(mount_path) + head, volume_id = os.path.split(cephfs_mntpt) head, group_id = os.path.split(head) head, volume_prefix = os.path.split(head) volume_prefix = "/" + volume_prefix @@ -177,7 +177,7 @@ vc.disconnect() # Create a 100MB volume volume_size = 100 - mount_path = self._volume_client_python(self.mount_b, dedent(""" + cephfs_mntpt = self._volume_client_python(self.mount_b, dedent(""" vp = VolumePath("{group_id}", "{volume_id}") create_result = vc.create_volume(vp, 1024*1024*{volume_size}) print(create_result['mount_path']) @@ -193,8 +193,8 @@ vc.disconnect() # Authorize and configure credentials for the guest to mount the # the volume. 
self._configure_guest_auth(self.mount_b, self.mounts[2], guest_entity, - mount_path, namespace_prefix) - self.mounts[2].mount_wait(mount_path=mount_path) + cephfs_mntpt, namespace_prefix) + self.mounts[2].mount_wait(cephfs_mntpt=cephfs_mntpt) # The kernel client doesn't have the quota-based df behaviour, # or quotas at all, so only exercise the client behaviour when @@ -418,7 +418,7 @@ vc.disconnect() guest_entity = "guest" group_id = "grpid" - mount_paths = [] + cephfs_mntpts = [] volume_ids = [] # Create two volumes. Authorize 'guest' auth ID to mount the two @@ -426,7 +426,7 @@ vc.disconnect() for i in range(2): # Create volume. volume_ids.append("volid_{0}".format(str(i))) - mount_paths.append( + cephfs_mntpts.append( self._volume_client_python(volumeclient_mount, dedent(""" vp = VolumePath("{group_id}", "{volume_id}") create_result = vc.create_volume(vp, 10 * 1024 * 1024) @@ -438,12 +438,12 @@ vc.disconnect() # Authorize 'guest' auth ID to mount the volume. self._configure_guest_auth(volumeclient_mount, guest_mounts[i], - guest_entity, mount_paths[i]) + guest_entity, cephfs_mntpts[i]) # Mount the volume. 
guest_mounts[i].mountpoint_dir_name = 'mnt.{id}.{suffix}'.format( id=guest_entity, suffix=str(i)) - guest_mounts[i].mount_wait(mount_path=mount_paths[i]) + guest_mounts[i].mount_wait(cephfs_mntpt=cephfs_mntpts[i]) guest_mounts[i].write_n_mb("data.bin", 1) @@ -510,7 +510,7 @@ vc.disconnect() volume_id = u"volid" # Create - mount_path = self._volume_client_python(self.mount_b, dedent(""" + cephfs_mntpt = self._volume_client_python(self.mount_b, dedent(""" vp = VolumePath("{group_id}", u"{volume_id}") create_result = vc.create_volume(vp, 10) print(create_result['mount_path']) @@ -520,14 +520,14 @@ vc.disconnect() ))) # Strip leading "/" - mount_path = mount_path[1:] + cephfs_mntpt = cephfs_mntpt[1:] # A file with non-ascii characters - self.mount_a.run_shell(["touch", os.path.join(mount_path, u"b\u00F6b")]) + self.mount_a.run_shell(["touch", os.path.join(cephfs_mntpt, u"b\u00F6b")]) # A file with no permissions to do anything - self.mount_a.run_shell(["touch", os.path.join(mount_path, "noperms")]) - self.mount_a.run_shell(["chmod", "0000", os.path.join(mount_path, "noperms")]) + self.mount_a.run_shell(["touch", os.path.join(cephfs_mntpt, "noperms")]) + self.mount_a.run_shell(["chmod", "0000", os.path.join(cephfs_mntpt, "noperms")]) self._volume_client_python(self.mount_b, dedent(""" vp = VolumePath("{group_id}", u"{volume_id}") @@ -560,7 +560,7 @@ vc.disconnect() volume_id = "volid" # Create a volume. - mount_path = self._volume_client_python(volumeclient_mount, dedent(""" + cephfs_mntpt = self._volume_client_python(volumeclient_mount, dedent(""" vp = VolumePath("{group_id}", "{volume_id}") create_result = vc.create_volume(vp, 1024*1024*10) print(create_result['mount_path']) @@ -571,11 +571,11 @@ vc.disconnect() # Authorize and configure credentials for the guest to mount the # the volume with read-write access. 
- self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity, - mount_path, readonly=False) + self._configure_guest_auth(volumeclient_mount, guest_mount, + guest_entity, cephfs_mntpt, readonly=False) # Mount the volume, and write to it. - guest_mount.mount_wait(mount_path=mount_path) + guest_mount.mount_wait(cephfs_mntpt=cephfs_mntpt) guest_mount.write_n_mb("data.bin", 1) # Change the guest auth ID's authorization to read-only mount access. @@ -588,13 +588,13 @@ vc.disconnect() guest_entity=guest_entity ))) self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity, - mount_path, readonly=True) + cephfs_mntpt, readonly=True) # The effect of the change in access level to read-only is not # immediate. The guest sees the change only after a remount of # the volume. guest_mount.umount_wait() - guest_mount.mount_wait(mount_path=mount_path) + guest_mount.mount_wait(cephfs_mntpt=cephfs_mntpt) # Read existing content of the volume. self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"]) @@ -1029,7 +1029,7 @@ vc.disconnect() # Create a volume group_id = "grpid" volume_id = "volid" - mount_path = self._volume_client_python(vc_mount, dedent(""" + cephfs_mntpt = self._volume_client_python(vc_mount, dedent(""" vp = VolumePath("{group_id}", "{volume_id}") create_result = vc.create_volume(vp, 1024*1024*10) print(create_result['mount_path']) @@ -1047,11 +1047,11 @@ vc.disconnect() guest_mount.umount_wait() # Set auth caps for the auth ID using the volumeclient - self._configure_guest_auth(vc_mount, guest_mount, guest_id, mount_path) + self._configure_guest_auth(vc_mount, guest_mount, guest_id, cephfs_mntpt) # Mount the volume in the guest using the auth ID to assert that the # auth caps are valid - guest_mount.mount_wait(mount_path=mount_path) + guest_mount.mount_wait(cephfs_mntpt=cephfs_mntpt) def test_volume_without_namespace_isolation(self): """ diff --git a/qa/tasks/kclient.py b/qa/tasks/kclient.py index 
3917606955af3..330376bb997f8 100644 --- a/qa/tasks/kclient.py +++ b/qa/tasks/kclient.py @@ -89,12 +89,11 @@ def task(ctx, config): continue kernel_mount = KernelMount( - ctx, - test_dir, - id_, - remote, - ctx.teuthology_config.get('brxnet', None), - ) + ctx=ctx, + test_dir=test_dir, + client_id=id_, + client_remote=remote, + brxnet=ctx.teuthology_config.get('brxnet', None)) mounts[id_] = kernel_mount diff --git a/qa/tasks/vstart_runner.py b/qa/tasks/vstart_runner.py index 0af5b28482148..3702658275121 100644 --- a/qa/tasks/vstart_runner.py +++ b/qa/tasks/vstart_runner.py @@ -52,6 +52,7 @@ from teuthology.orchestra.daemon import DaemonGroup from teuthology.orchestra.remote import Remote from teuthology.config import config as teuth_config from teuthology.contextutil import safe_while +from teuthology.contextutil import MaxWhileTries import logging def init_log(): @@ -132,7 +133,6 @@ try: from tasks.cephfs.filesystem import Filesystem, MDSCluster, CephCluster from tasks.cephfs.mount import CephFSMount from tasks.mgr.mgr_test_case import MgrCluster - from teuthology.contextutil import MaxWhileTries from teuthology.task import interactive except ImportError: sys.stderr.write("***\nError importing packages, have you activated your teuthology virtualenv " @@ -553,9 +553,16 @@ def safe_kill(pid): else: raise + class LocalKernelMount(KernelMount): - def __init__(self, ctx, test_dir, client_id, brxnet): - super(LocalKernelMount, self).__init__(ctx, test_dir, client_id, LocalRemote(), brxnet) + def __init__(self, ctx, test_dir, client_id=None, + client_keyring_path=None, client_remote=None, + hostfs_mntpt=None, cephfs_name=None, cephfs_mntpt=None, + brxnet=None): + super(LocalKernelMount, self).__init__(ctx=ctx, test_dir=test_dir, + client_id=client_id, client_keyring_path=client_keyring_path, + client_remote=LocalRemote(), hostfs_mntpt=hostfs_mntpt, + cephfs_name=cephfs_name, cephfs_mntpt=cephfs_mntpt, brxnet=brxnet) @property def config_path(self): @@ -605,50 +612,49 @@ 
class LocalKernelMount(KernelMount): path = "{0}/client.{1}.*.asok".format(d, self.client_id) return path - def mount(self, mount_path=None, mount_fs_name=None, mount_options=[], **kwargs): - self.setupfs(name=mount_fs_name) + def mount(self, mntopts=[], createfs=True, **kwargs): + self.update_attrs(**kwargs) + self.assert_and_log_minimum_mount_details() + if opt_use_ns: self.using_namespace = True self.setup_netns() else: self.using_namespace = False - log.info('Mounting kclient client.{id} at {remote} {mnt}...'.format( - id=self.client_id, remote=self.client_remote, mnt=self.mountpoint)) - - self.client_remote.run( - args=[ - 'mkdir', - '--', - self.mountpoint, - ], - timeout=(5*60), - ) - - if mount_path is None: - mount_path = "/" - - opts = 'name={id},norequire_active_mds,conf={conf}'.format(id=self.client_id, - conf=self.config_path) - - if mount_fs_name is not None: - opts += ",mds_namespace={0}".format(mount_fs_name) - - for mount_opt in mount_options: - opts += ",{0}".format(mount_opt) + if not self.cephfs_mntpt: + self.cephfs_mntpt = "/" + # TODO: don't call setupfs() from within mount() + if createfs: + self.setupfs(name=self.cephfs_name) + + opts = 'name=' + self.client_id + if self.client_keyring_path: + opts += ",secret=" + self.get_key_from_keyfile() + opts += ',norequire_active_mds,conf=' + self.config_path + if self.cephfs_name is not None: + opts += ",mds_namespace={0}".format(self.cephfs_name) + if mntopts: + opts += ',' + ','.join(mntopts) + + stderr = StringIO() + try: + self.client_remote.run(args=['mkdir', '--', self.hostfs_mntpt], + timeout=(5*60), stderr=stderr) + except CommandFailedError: + if 'file exists' not in stderr.getvalue().lower(): + raise - mount_cmd_args = ['sudo'] + cmdargs = ['sudo'] if self.using_namespace: - mount_cmd_args += ['nsenter', - '--net=/var/run/netns/{0}'.format(self.netns_name)] - mount_cmd_args += ['./bin/mount.ceph', - ':{mount_path}'.format(mount_path=mount_path), - self.mountpoint, '-v', '-o', opts] - 
self.client_remote.run(args=mount_cmd_args, timeout=(30*60), - omit_sudo=False) + cmdargs += ['nsenter', + '--net=/var/run/netns/{0}'.format(self.netns_name)] + cmdargs += ['./bin/mount.ceph', ':' + self.cephfs_mntpt, + self.hostfs_mntpt, '-v', '-o', opts] + self.client_remote.run(args=cmdargs, timeout=(30*60), omit_sudo=False) - self.client_remote.run( - args=['sudo', 'chmod', '1777', self.mountpoint], timeout=(5*60)) + self.client_remote.run(args=['sudo', 'chmod', '1777', + self.hostfs_mntpt], timeout=(5*60)) self.mounted = True @@ -665,8 +671,14 @@ class LocalKernelMount(KernelMount): wait=False, stdout=StringIO()) class LocalFuseMount(FuseMount): - def __init__(self, ctx, test_dir, client_id, brxnet): - super(LocalFuseMount, self).__init__(ctx, None, test_dir, client_id, LocalRemote(), brxnet) + def __init__(self, ctx, test_dir, client_id, client_keyring_path=None, + client_remote=None, hostfs_mntpt=None, cephfs_name=None, + cephfs_mntpt=None, brxnet=None): + super(LocalFuseMount, self).__init__(ctx=ctx, client_config=None, + test_dir=test_dir, client_id=client_id, + client_keyring_path=client_keyring_path, + client_remote=LocalRemote(), hostfs_mntpt=hostfs_mntpt, + cephfs_name=cephfs_name, cephfs_mntpt=cephfs_mntpt, brxnet=brxnet) @property def config_path(self): @@ -710,17 +722,27 @@ class LocalFuseMount(FuseMount): path = "{0}/client.{1}.*.asok".format(d, self.client_id) return path - def mount(self, mount_path=None, mount_fs_name=None, mountpoint=None, mount_options=[]): - if mountpoint is not None: - self.mountpoint = mountpoint - self.setupfs(name=mount_fs_name) + def mount(self, mntopts=[], createfs=True, **kwargs): + self.update_attrs(**kwargs) + self.assert_and_log_minimum_mount_details() + if opt_use_ns: self.using_namespace = True self.setup_netns() else: self.using_namespace = False - self.client_remote.run(args=['mkdir', '-p', self.mountpoint]) + # TODO: don't call setupfs() from within mount() + if createfs: + self.setupfs(name=self.cephfs_name) + 
+ stderr = StringIO() + try: + self.client_remote.run(args=['mkdir', '-p', self.hostfs_mntpt], + stderr=stderr) + except CommandFailedError: + if 'file exists' not in stderr.getvalue().lower(): + raise def list_connections(): self.client_remote.run( @@ -745,25 +767,28 @@ class LocalFuseMount(FuseMount): pre_mount_conns = list_connections() log.info("Pre-mount connections: {0}".format(pre_mount_conns)) - prefix = [] + cmdargs = [] if self.using_namespace: - prefix += ['sudo', 'nsenter', + cmdargs = ['sudo', 'nsenter', '--net=/var/run/netns/{0}'.format(self.netns_name), '--setuid', str(os.getuid())] - prefix += [os.path.join(BIN_PREFIX, "ceph-fuse")] + cmdargs += [os.path.join(BIN_PREFIX, 'ceph-fuse'), self.hostfs_mntpt, + '-f'] + if self.client_id is not None: + cmdargs += ["--id", self.client_id] + if self.client_keyring_path and self.client_id is not None: + cmdargs.extend(['-k', self.client_keyring_path]) + if self.cephfs_name: + cmdargs += ["--client_mds_namespace=" + self.cephfs_name] + if self.cephfs_mntpt: + cmdargs += ["--client_fs=" + self.cephfs_mntpt] if os.getuid() != 0: - prefix += ["--client_die_on_failed_dentry_invalidate=false"] - if mount_path is not None: - prefix += ["--client_mountpoint={0}".format(mount_path)] - if mount_fs_name is not None: - prefix += ["--client_fs={0}".format(mount_fs_name)] - prefix += mount_options; - fuse_cmd_args = prefix + ["-f", "--name", - "client.{0}".format(self.client_id), - self.mountpoint] - - self.fuse_daemon = self.client_remote.run(args=fuse_cmd_args, - wait=False, omit_sudo=False) + cmdargs += ["--client_die_on_failed_dentry_invalidate=false"] + if mntopts: + cmdargs += mntopts + + self.fuse_daemon = self.client_remote.run(args=cmdargs, wait=False, + omit_sudo=False) self._set_fuse_daemon_pid() log.info("Mounting client.{0} with pid " "{1}".format(self.client_id, self.fuse_daemon.subproc.pid)) @@ -799,6 +824,7 @@ class LocalFuseMount(FuseMount): self.gather_mount_info() self.mounted = True + def 
_set_fuse_daemon_pid(self): # NOTE: When a command is launched with sudo, two processes are # launched, one with sudo in and other without. Make sure we @@ -1373,17 +1399,19 @@ def exec_test(): open("./keyring", "at").write(p.stdout.getvalue()) if use_kernel_client: - mount = LocalKernelMount(ctx, test_dir, client_id, opt_brxnet) + mount = LocalKernelMount(ctx=ctx, test_dir=test_dir, + client_id=client_id, brxnet=opt_brxnet) else: - mount = LocalFuseMount(ctx, test_dir, client_id, opt_brxnet) + mount = LocalFuseMount(ctx=ctx, test_dir=test_dir, + client_id=client_id, brxnet=opt_brxnet) mounts.append(mount) - if os.path.exists(mount.mountpoint): + if os.path.exists(mount.hostfs_mntpt): if mount.is_mounted(): - log.warning("unmounting {0}".format(mount.mountpoint)) + log.warning("unmounting {0}".format(mount.hostfs_mntpt)) mount.umount_wait() else: - os.rmdir(mount.mountpoint) + os.rmdir(mount.hostfs_mntpt) from tasks.cephfs_test_runner import DecoratingLoader -- 2.39.5