From ace4cb07b2de99644c63f3ab90c21a663a384e69 Mon Sep 17 00:00:00 2001
From: Sam Lang
Date: Wed, 23 Jan 2013 14:37:39 -0600
Subject: [PATCH] Replace /tmp/cephtest/ with configurable path

Teuthology uses /tmp/cephtest/ as the scratch test directory for a run.
This patch replaces /tmp/cephtest/ everywhere with a per-run directory:
{basedir}/{rundir} where {basedir} is a directory configured in
.teuthology.yaml (/tmp/cephtest if not specified), and {rundir} is the
name of the run, as given in --name. If no name is specified,
{user}-{timestamp} is used. To get the old behavior (/tmp/cephtest),
set test_path: /tmp/cephtest in .teuthology.yaml.

This change was motivated by #3782, which requires a test dir that
survives across reboots, but also resolves #3767.

Signed-off-by: Sam Lang
Reviewed-by: Josh Durgin
---
 README.rst                                 |  20 ++
 teuthology/ceph.conf                       |  47 ----
 teuthology/ceph.conf.template              |  47 ++++
 teuthology/misc.py                         |  98 ++++++---
 teuthology/nuke.py                         |  10 +-
 teuthology/task/admin_socket.py            |  24 +-
 .../{apache.conf => apache.conf.template}  |  16 +-
 teuthology/task/autotest.py                |  33 +--
 teuthology/task/blktrace.py                |   8 +-
 teuthology/task/ceph-fuse.py               |  24 +-
 teuthology/task/ceph.py                    | 207 ++++++++++--------
 teuthology/task/ceph_manager.py            |  15 +-
 teuthology/task/chdir-coredump             |   3 +-
 teuthology/task/cram.py                    |  31 +--
 teuthology/task/die_on_err.py              |   9 +-
 teuthology/task/divergent_priors.py        |  25 ++-
 teuthology/task/filestore_idempotent.py    |   8 +-
 teuthology/task/hadoop.py                  |  71 +++---
 teuthology/task/internal.py                |  86 ++++++--
 teuthology/task/kclient.py                 |  18 +-
 teuthology/task/kcon_most.py               |   6 +-
 teuthology/task/knfsd.py                   |   4 +-
 teuthology/task/lockfile.py                |  35 +--
 teuthology/task/locktest.py                |  36 +--
 teuthology/task/lost_unfound.py            |  35 +--
 teuthology/task/manypools.py               |  18 +-
 teuthology/task/mpi.py                     |  16 +-
 teuthology/task/nfs.py                     |   7 +-
 teuthology/task/object_source_down.py      |  20 +-
 teuthology/task/omapbench.py               |  18 +-
 teuthology/task/osd_backfill.py            |  19 +-
 teuthology/task/osd_recovery.py            |  22 +-
 teuthology/task/peer.py                    |  17 +-
 teuthology/task/pexec.py                   |   6 +-
 teuthology/task/qemu.py                    |  52 +++--
 teuthology/task/rados.py                   |  14 +-
 teuthology/task/radosbench.py              |  51 +++--
 teuthology/task/radosgw-admin.py           |  13 +-
 teuthology/task/rbd.py                     |  79 ++++---
 teuthology/task/rbd_fsx.py                 |  16 +-
 teuthology/task/recovery_bench.py          |  38 ++--
 teuthology/task/rgw-logsocket.py           |   9 +-
 teuthology/task/rgw.py                     |  74 ++++---
 teuthology/task/s3readwrite.py             |  26 ++-
 teuthology/task/s3roundtrip.py             |  28 ++-
 teuthology/task/s3tests.py                 |  32 +--
 teuthology/task/scrub_test.py              |   2 +-
 teuthology/task/swift.py                   |  30 +--
 teuthology/task/watch_notify_stress.py     |  12 +-
 teuthology/task/workunit.py                |  35 +--
 50 files changed, 893 insertions(+), 677 deletions(-)
 delete mode 100644 teuthology/ceph.conf
 create mode 100644 teuthology/ceph.conf.template
 rename teuthology/task/{apache.conf => apache.conf.template} (66%)

diff --git a/README.rst b/README.rst
index d6eab3af3707d..92e767febc920 100644
--- a/README.rst
+++ b/README.rst
@@ -189,3 +189,23 @@ calls multiple subtasks, e.g. with ``contextutil.nested``, those cleanups
 *will* be performed. Later on, we can let tasks communicate the
 subtasks they wish to invoke to the top-level runner, avoiding
 this issue.
+
+Test Sandbox Directory
+======================
+
+Teuthology currently places most test files and mount points in a sandbox
+directory, defaulting to /tmp/cephtest/{rundir}. The {rundir} is the name
+of the run (as given by --name) or, if no name is specified,
+{user}-{timestamp} is used.
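+For example, a run started with ``--name foo`` lands in /tmp/cephtest/foo
+by default, while an unnamed run started by user ``ubuntu`` would get a
+directory such as /tmp/cephtest/ubuntu-2013-01-23_14:37:39 (hypothetical
+user and timestamp, shown here only to illustrate the naming scheme).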
To change the location of the sandbox directory, the following +options can be specified in $HOME/.teuthology.yaml: + + base_test_dir: + +The ``base_test_dir`` option will set the base directory to use for the individual +run directories. If not specified, this defaults to: ``/tmp/cephtest``. + + test_path: + +The ``test_path`` option will set the complete path to use for the test directory. +This allows for the old behavior, where ``/tmp/cephtest`` was used as the sandbox +directory. diff --git a/teuthology/ceph.conf b/teuthology/ceph.conf deleted file mode 100644 index 12f540357e1ad..0000000000000 --- a/teuthology/ceph.conf +++ /dev/null @@ -1,47 +0,0 @@ -[global] - keyring = /tmp/cephtest/ceph.keyring - log file = /tmp/cephtest/archive/log/$name.log - chdir = "" - pid file = $name.pid - auth supported = cephx - admin socket = /tmp/cephtest/asok.$name - - filestore xattr use omap = true - - mon clock drift allowed = .250 - - osd crush chooseleaf type = 0 - -[mon] - mon data = /tmp/cephtest/data/mon.$id - mon cluster log file = /tmp/cephtest/archive/log/cluster.mon.$id.log - -[osd] - osd data = /tmp/cephtest/data/osd.$id.data - osd journal = /tmp/cephtest/data/osd.$id.journal - osd journal size = 100 - keyring = /tmp/cephtest/data/osd.$id.keyring - osd class dir = /tmp/cephtest/binary/usr/local/lib/rados-classes - - osd scrub load threshold = 5.0 - osd scrub max interval = 600 - - osd recover clone overlap = true - osd recovery max chunk = 1048576 - -[mds] - keyring = /tmp/cephtest/data/mds.$id.keyring - lockdep = 1 - mds debug scatterstat = true - mds verify scatter = true - mds debug frag = true - -[client] - keyring = /tmp/cephtest/data/client.$id.keyring - rgw socket path = /tmp/cephtest/apache/tmp/fastcgi_sock/rgw_sock - rgw cache enabled = true - rgw enable ops log = true - rgw enable usage log = true - -[client.admin] - keyring = /tmp/cephtest/ceph.keyring diff --git a/teuthology/ceph.conf.template b/teuthology/ceph.conf.template new file mode 100644 index 0000000000000..81b7985223e9e --- /dev/null +++ b/teuthology/ceph.conf.template @@ -0,0 +1,47 @@ +[global] + keyring = {testdir}/ceph.keyring + log file = {testdir}/archive/log/$name.log + chdir = "" + pid file = $name.pid + auth supported = cephx + admin socket = {testdir}/asok.$name + + filestore xattr use omap = true + + mon clock drift allowed = .250 + + osd crush chooseleaf type = 0 + +[mon] + mon data = {testdir}/data/mon.$id + mon cluster log file = {testdir}/archive/log/cluster.mon.$id.log + +[osd] + osd data = {testdir}/data/osd.$id.data + osd journal = {testdir}/data/osd.$id.journal + osd journal size = 100 + keyring = {testdir}/data/osd.$id.keyring + osd class dir = {testdir}/binary/usr/local/lib/rados-classes + + osd scrub load threshold = 5.0 + osd scrub max interval = 600 + + osd recover clone overlap = true + osd recovery max chunk = 1048576 + +[mds] + keyring = {testdir}/data/mds.$id.keyring + lockdep = 1 + mds debug scatterstat = true + mds verify scatter = true + mds debug frag = true + +[client] + keyring = {testdir}/data/client.$id.keyring + rgw socket path = {testdir}/apache/tmp/fastcgi_sock/rgw_sock + rgw cache enabled = true + rgw enable ops log = true + rgw enable usage log = true + +[client.admin] + keyring = {testdir}/ceph.keyring diff --git a/teuthology/misc.py b/teuthology/misc.py index ab188b7c94edc..fc40c72c0d351 100644 --- a/teuthology/misc.py +++ b/teuthology/misc.py @@ -17,6 +17,32 @@ from .orchestra import run log = logging.getLogger(__name__) +import datetime +stamp = 
datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S") + +def get_testdir(ctx): + if 'test_path' in ctx.teuthology_config: + return ctx.teuthology_config['test_path'] + + basedir = ctx.teuthology_config.get('base_test_dir', '/tmp/cephtest') + + if hasattr(ctx, 'name') and ctx.name: + log.debug('with name basedir: {b}'.format(b=basedir)) + return '{basedir}/{rundir}'.format( + basedir=basedir, + rundir=ctx.name) + else: + log.debug('basedir: {b}'.format(b=basedir)) + return '{basedir}/{user}-{stamp}'.format( + basedir=basedir, + user=get_user(), + stamp=stamp) + +def get_testdir_base(ctx): + if 'test_path' in ctx.teuthology_config: + return ctx.teuthology_config['test_path'] + return ctx.teuthology_config.get('base_test_dir', '/tmp/cephtest') + def get_ceph_binary_url(package=None, branch=None, tag=None, sha1=None, dist=None, flavor=None, format=None, arch=None): @@ -113,7 +139,7 @@ def generate_caps(type_): yield subsystem yield capability -def skeleton_config(roles, ips): +def skeleton_config(ctx, roles, ips): """ Returns a ConfigObj that's prefilled with a skeleton config. @@ -121,8 +147,10 @@ def skeleton_config(roles, ips): Use conf.write to write it out, override .filename first if you want. """ - path = os.path.join(os.path.dirname(__file__), 'ceph.conf') - conf = configobj.ConfigObj(path, file_error=True) + path = os.path.join(os.path.dirname(__file__), 'ceph.conf.template') + t = open(path, 'r') + skconf = t.read().format(testdir=get_testdir(ctx)) + conf = configobj.ConfigObj(StringIO(skconf), file_error=True) mons = get_mons(roles=roles, ips=ips) for role, addr in mons.iteritems(): conf.setdefault(role, {}) @@ -175,7 +203,7 @@ def num_instances_of_type(cluster, type_): num = sum(sum(1 for role in hostroles if role.startswith(prefix)) for hostroles in roles) return num -def create_simple_monmap(remote, conf): +def create_simple_monmap(ctx, remote, conf): """ Writes a simple monmap based on current ceph.conf into /monmap. @@ -196,11 +224,12 @@ def create_simple_monmap(remote, conf): assert addresses, "There are no monitors in config!" 
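     # the monmaptool command assembled below runs under the enable-coredump
     # and ceph-coverage wrappers staged in the per-run test dir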
log.debug('Ceph mon addresses: %s', addresses) + testdir = get_testdir(ctx) args = [ - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/binary/usr/local/bin/monmaptool', + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/monmaptool'.format(tdir=testdir), '--create', '--clobber', ] @@ -208,7 +237,7 @@ def create_simple_monmap(remote, conf): args.extend(('--add', name, addr)) args.extend([ '--print', - '/tmp/cephtest/monmap', + '{tdir}/monmap'.format(tdir=testdir), ]) remote.run( args=args, @@ -379,16 +408,17 @@ def get_scratch_devices(remote): pass return retval -def wait_until_healthy(remote): +def wait_until_healthy(ctx, remote): """Wait until a Ceph cluster is healthy.""" + testdir = get_testdir(ctx) while True: r = remote.run( args=[ - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/binary/usr/local/bin/ceph', - '-c', '/tmp/cephtest/ceph.conf', + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph'.format(tdir=testdir), + '-c', '{tdir}/ceph.conf'.format(tdir=testdir), 'health', '--concise', ], @@ -401,17 +431,18 @@ def wait_until_healthy(remote): break time.sleep(1) -def wait_until_osds_up(cluster, remote): +def wait_until_osds_up(ctx, cluster, remote): """Wait until all Ceph OSDs are booted.""" num_osds = num_instances_of_type(cluster, 'osd') + testdir = get_testdir(ctx) while True: r = remote.run( args=[ - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/binary/usr/local/bin/ceph', - '-c', '/tmp/cephtest/ceph.conf', + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph'.format(tdir=testdir), + '-c', '{tdir}/ceph.conf'.format(tdir=testdir), '--concise', 'osd', 'dump', '--format=json' ], @@ -485,16 +516,17 @@ def reconnect(ctx, timeout): log.debug('waited {elapsed}'.format(elapsed=str(time.time() - starttime))) time.sleep(1) -def write_secret_file(remote, role, filename): +def write_secret_file(ctx, remote, role, filename): + testdir = get_testdir(ctx) remote.run( args=[ - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/binary/usr/local/bin/ceph-authtool', + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-authtool'.format(tdir=testdir), '--name={role}'.format(role=role), '--print-key', - '/tmp/cephtest/data/{role}.keyring'.format(role=role), + '{tdir}/data/{role}.keyring'.format(tdir=testdir, role=role), run.Raw('>'), filename, ], @@ -569,25 +601,25 @@ def deep_merge(a, b): return a return b -def get_valgrind_args(name, v): +def get_valgrind_args(testdir, name, v): if v is None: return [] if not isinstance(v, list): v = [v] - val_path = '/tmp/cephtest/archive/log/valgrind' + val_path = '{tdir}/archive/log/valgrind'.format(tdir=testdir) if 
'--tool=memcheck' in v or '--tool=helgrind' in v: extra_args = [ - '/tmp/cephtest/chdir-coredump', + '{tdir}/chdir-coredump'.format(tdir=testdir), 'valgrind', - '--suppressions=/tmp/cephtest/valgrind.supp', + '--suppressions={tdir}/valgrind.supp'.format(tdir=testdir), '--xml=yes', '--xml-file={vdir}/{n}.log'.format(vdir=val_path, n=name) ] else: extra_args = [ - '/tmp/cephtest/chdir-coredump', + '{tdir}/chdir-coredump'.format(tdir=testdir), 'valgrind', - '--suppressions=/tmp/cephtest/valgrind.supp', + '--suppressions={tdir}/valgrind.supp'.format(tdir=testdir), '--log-file={vdir}/{n}.log'.format(vdir=val_path, n=name) ] extra_args.extend(v) diff --git a/teuthology/nuke.py b/teuthology/nuke.py index e90192ceb82ee..e3721fc97929f 100644 --- a/teuthology/nuke.py +++ b/teuthology/nuke.py @@ -1,6 +1,8 @@ import argparse import yaml +from teuthology import misc as teuthology + def parse_args(): from teuthology.run import config_file from teuthology.run import MergeConfig @@ -150,7 +152,10 @@ def remove_osd_mounts(ctx, log): from .orchestra import run ctx.cluster.run( args=[ - 'grep', '/tmp/cephtest/data/', '/etc/mtab', run.Raw('|'), + 'grep', + '{tdir}/data/'.format(tdir=teuthology.get_testdir(ctx)), + '/etc/mtab', + run.Raw('|'), 'awk', '{print $2}', run.Raw('|'), 'xargs', '-r', 'sudo', 'umount', run.Raw(';'), @@ -222,7 +227,8 @@ def remove_testing_tree(ctx, log): for remote in ctx.cluster.remotes.iterkeys(): proc = remote.run( args=[ - 'sudo', 'rm', '-rf', '/tmp/cephtest', + 'sudo', 'rm', '-rf', + teuthology.get_testdir(ctx), ], wait=False, ) diff --git a/teuthology/task/admin_socket.py b/teuthology/task/admin_socket.py index aaeef114597cc..4f9b51efcb6ac 100644 --- a/teuthology/task/admin_socket.py +++ b/teuthology/task/admin_socket.py @@ -62,20 +62,21 @@ def task(ctx, config): for client, tests in config.iteritems(): p.spawn(_run_tests, ctx, client, tests) -def _socket_command(remote, socket_path, command, args): +def _socket_command(ctx, remote, socket_path, command, args): """ Run an admin socket command and return the result as a string. 
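 
     The ceph CLI runs under the enable-coredump and ceph-coverage wrappers
     from the per-run test dir, and the reply is parsed as JSON before being
     returned; e.g. a hypothetical _socket_command(ctx, remote, socket_path,
     'help', []) returns the decoded reply of the socket's help command.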
""" json_fp = StringIO() + testdir = teuthology.get_testdir(ctx) remote.run( args=[ - 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib', - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/binary/usr/local/bin/ceph', - '-k', '/tmp/cephtest/ceph.keyring', - '-c', '/tmp/cephtest/ceph.conf', + 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir), + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph'.format(tdir=testdir), + '-k', '{tdir}/ceph.keyring'.format(tdir=testdir), + '-c', '{tdir}/ceph.conf'.format(tdir=testdir), '--admin-daemon', socket_path, command, ] + args, @@ -87,13 +88,14 @@ def _socket_command(remote, socket_path, command, args): return json.loads(out) def _run_tests(ctx, client, tests): + testdir = teuthology.get_testdir(ctx) log.debug('Running admin socket tests on %s', client) (remote,) = ctx.cluster.only(client).remotes.iterkeys() - socket_path = '/tmp/cephtest/asok.{name}'.format(name=client) + socket_path = '{tdir}/asok.{name}'.format(tdir=testdir, name=client) try: tmp_dir = os.path.join( - '/tmp/cephtest/', + testdir, 'admin_socket_{client}'.format(client=client), ) remote.run( @@ -135,7 +137,7 @@ def _run_tests(ctx, client, tests): args = config.get('args', []) assert isinstance(args, list), \ 'admin socket command args must be a list' - sock_out = _socket_command(remote, socket_path, command, args) + sock_out = _socket_command(ctx, remote, socket_path, command, args) if test_path is not None: remote.run( args=[ diff --git a/teuthology/task/apache.conf b/teuthology/task/apache.conf.template similarity index 66% rename from teuthology/task/apache.conf rename to teuthology/task/apache.conf.template index fd8dd42cdadf4..3abf1d190b809 100644 --- a/teuthology/task/apache.conf +++ b/teuthology/task/apache.conf.template @@ -5,14 +5,14 @@ LoadModule fastcgi_module /usr/lib/apache2/modules/mod_fastcgi.so Listen 7280 ServerName rgwtest.example.com -ServerRoot /tmp/cephtest/apache -ErrorLog /tmp/cephtest/archive/apache/error.log +ServerRoot {testdir}/apache +ErrorLog {testdir}/archive/apache/error.log LogFormat "%h l %u %t \"%r\" %>s %b \"{Referer}i\" \"%{User-agent}i\"" combined -CustomLog /tmp/cephtest/archive/apache/access.log combined -PidFile /tmp/cephtest/apache/tmp/apache.pid -DocumentRoot /tmp/cephtest/apache/htdocs -FastCgiIPCDir /tmp/cephtest/apache/tmp/fastcgi_sock -FastCgiExternalServer /tmp/cephtest/apache/htdocs/rgw.fcgi -socket rgw_sock +CustomLog {testdir}/archive/apache/access.log combined +PidFile {testdir}/apache/tmp/apache.pid +DocumentRoot {testdir}/apache/htdocs +FastCgiIPCDir {testdir}/apache/tmp/fastcgi_sock +FastCgiExternalServer {testdir}/apache/htdocs/rgw.fcgi -socket rgw_sock RewriteEngine On RewriteRule ^/([a-zA-Z0-9-_.]*)([/]?.*) /rgw.fcgi?page=$1¶ms=$2&%{QUERY_STRING} [E=HTTP_AUTHORIZATION:%{HTTP:Authorization},L] @@ -23,7 +23,7 @@ SetEnv RGW_LOG_LEVEL 20 SetEnv RGW_PRINT_CONTINUE yes SetEnv RGW_SHOULD_LOG yes - + Options +ExecCGI AllowOverride All SetHandler fastcgi-script diff --git a/teuthology/task/autotest.py b/teuthology/task/autotest.py index 73bdb101193d3..0999fc4dd782e 100644 --- a/teuthology/task/autotest.py +++ b/teuthology/task/autotest.py @@ -37,10 +37,11 @@ def task(ctx, config): assert isinstance(config, dict) config = teuthology.replace_all_with_clients(ctx.cluster, config) 
log.info('Setting up autotest...') + testdir = teuthology.get_testdir(ctx) with parallel() as p: for role in config.iterkeys(): (remote,) = ctx.cluster.only(role).remotes.keys() - p.spawn(_download, remote) + p.spawn(_download, testdir, remote) log.info('Making a separate scratch dir for every client...') for role in config.iterkeys(): @@ -49,7 +50,7 @@ def task(ctx, config): assert role.startswith(PREFIX) id_ = role[len(PREFIX):] (remote,) = ctx.cluster.only(role).remotes.iterkeys() - mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_)) + mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) scratch = os.path.join(mnt, 'client.{id}'.format(id=id_)) remote.run( args=[ @@ -66,16 +67,16 @@ def task(ctx, config): with parallel() as p: for role, tests in config.iteritems(): (remote,) = ctx.cluster.only(role).remotes.keys() - p.spawn(_run_tests, remote, role, tests) + p.spawn(_run_tests, testdir, remote, role, tests) -def _download(remote): +def _download(testdir, remote): remote.run( args=[ # explicitly does not support multiple autotest tasks # in a single run; the result archival would conflict - 'mkdir', '/tmp/cephtest/archive/autotest', + 'mkdir', '{tdir}/archive/autotest'.format(tdir=testdir), run.Raw('&&'), - 'mkdir', '/tmp/cephtest/autotest', + 'mkdir', '{tdir}/autotest'.format(tdir=testdir), run.Raw('&&'), 'wget', '-nv', @@ -84,7 +85,7 @@ def _download(remote): '-O-', run.Raw('|'), 'tar', - '-C', '/tmp/cephtest/autotest', + '-C', '{tdir}/autotest'.format(tdir=testdir), '-x', '-z', '-f-', @@ -92,12 +93,12 @@ def _download(remote): ], ) -def _run_tests(remote, role, tests): +def _run_tests(testdir, remote, role, tests): assert isinstance(role, basestring) PREFIX = 'client.' assert role.startswith(PREFIX) id_ = role[len(PREFIX):] - mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_)) + mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) scratch = os.path.join(mnt, 'client.{id}'.format(id=id_)) assert isinstance(tests, list) @@ -109,7 +110,7 @@ def _run_tests(remote, role, tests): testname=testname, id=id_, ) - control = '/tmp/cephtest/control.{tag}'.format(tag=tag) + control = '{tdir}/control.{tag}'.format(tdir=testdir, tag=tag) teuthology.write_file( remote=remote, path=control, @@ -118,14 +119,14 @@ def _run_tests(remote, role, tests): url=testname, dir=scratch, # TODO perhaps tag - # results will be in /tmp/cephtest/autotest/client/results/dbench - # or /tmp/cephtest/autotest/client/results/dbench.{tag} + # results will be in {testdir}/autotest/client/results/dbench + # or {testdir}/autotest/client/results/dbench.{tag} )), ), ) remote.run( args=[ - '/tmp/cephtest/autotest/client/bin/autotest', + '{tdir}/autotest/client/bin/autotest'.format(tdir=testdir), '--verbose', '--harness=simple', '--tag={tag}'.format(tag=tag), @@ -144,13 +145,13 @@ def _run_tests(remote, role, tests): args=[ 'mv', '--', - '/tmp/cephtest/autotest/client/results/{tag}'.format(tag=tag), - '/tmp/cephtest/archive/autotest/{tag}'.format(tag=tag), + '{tdir}/autotest/client/results/{tag}'.format(tdir=testdir, tag=tag), + '{tdir}/archive/autotest/{tag}'.format(tdir=testdir, tag=tag), ], ) remote.run( args=[ - 'rm', '-rf', '--', '/tmp/cephtest/autotest', + 'rm', '-rf', '--', '{tdir}/autotest'.format(tdir=testdir), ], ) diff --git a/teuthology/task/blktrace.py b/teuthology/task/blktrace.py index 342887b34df30..3b9a7acd3cecf 100644 --- a/teuthology/task/blktrace.py +++ b/teuthology/task/blktrace.py @@ -7,12 +7,13 @@ from ..orchestra import run log = logging.getLogger(__name__) blktrace = 
'/usr/sbin/blktrace' -log_dir = '/tmp/cephtest/archive/performance/blktrace' daemon_signal = 'term' @contextlib.contextmanager def setup(ctx, config): osds = ctx.cluster.only(teuthology.is_type('osd')) + log_dir = '{tdir}/archive/performance/blktrace'.format(tdir=teuthology.get_testdir(ctx)) + for remote, roles_for_host in osds.remotes.iteritems(): log.info('Creating %s on %s' % (log_dir,remote.name)) remote.run( @@ -24,6 +25,9 @@ def setup(ctx, config): @contextlib.contextmanager def execute(ctx, config): procs = [] + testdir=teuthology.get_testdir(ctx) + log_dir = '{tdir}/archive/performance/blktrace'.format(tdir=testdir) + osds = ctx.cluster.only(teuthology.is_type('osd')) for remote, roles_for_host in osds.remotes.iteritems(): roles_to_devs = ctx.disk_config.remote_to_roles_to_dev[remote] @@ -37,7 +41,7 @@ def execute(ctx, config): 'cd', log_dir, run.Raw(';'), - '/tmp/cephtest/daemon-helper', + '{tdir}/daemon-helper'.format(tdir=testdir), daemon_signal, 'sudo', blktrace, diff --git a/teuthology/task/ceph-fuse.py b/teuthology/task/ceph-fuse.py index 1eb0d78abefe6..7d0518bb86f37 100644 --- a/teuthology/task/ceph-fuse.py +++ b/teuthology/task/ceph-fuse.py @@ -45,6 +45,8 @@ def task(ctx, config): log.info('Mounting ceph-fuse clients...') fuse_daemons = {} + testdir = teuthology.get_testdir(ctx) + if config is None: config = dict(('client.{id}'.format(id=id_), None) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')) @@ -57,7 +59,7 @@ def task(ctx, config): clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys())) for id_, remote in clients: - mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_)) + mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) log.info('Mounting ceph-fuse client.{id} at {remote} {mnt}...'.format( id=id_, remote=remote,mnt=mnt)) @@ -79,17 +81,17 @@ def task(ctx, config): ) run_cmd=[ - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/daemon-helper', + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + '{tdir}/daemon-helper'.format(tdir=testdir), daemon_signal, ] run_cmd_tail=[ - '/tmp/cephtest/binary/usr/local/bin/ceph-fuse', + '{tdir}/binary/usr/local/bin/ceph-fuse'.format(tdir=testdir), '-f', '--name', 'client.{id}'.format(id=id_), - '-c', '/tmp/cephtest/ceph.conf', + '-c', '{tdir}/ceph.conf'.format(tdir=testdir), # TODO ceph-fuse doesn't understand dash dash '--', mnt, ] @@ -113,20 +115,20 @@ def task(ctx, config): fuse_daemons[id_] = proc for id_, remote in clients: - mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_)) + mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) teuthology.wait_until_fuse_mounted( remote=remote, fuse=fuse_daemons[id_], mountpoint=mnt, ) - remote.run(args=['sudo', 'chmod', '1777', '/tmp/cephtest/mnt.{id}'.format(id=id_)],) + remote.run(args=['sudo', 'chmod', '1777', '{tdir}/mnt.{id}'.format(tdir=testdir, id=id_)],) try: yield finally: log.info('Unmounting ceph-fuse clients...') for id_, remote in clients: - mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_)) + mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) try: remote.run( args=[ @@ -160,7 +162,7 @@ def task(ctx, config): run.wait(fuse_daemons.itervalues()) for id_, remote in clients: - mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_)) + mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) remote.run( 
args=[ 'rmdir', diff --git a/teuthology/task/ceph.py b/teuthology/task/ceph.py index c84513119cd31..1c304f0f7282e 100644 --- a/teuthology/task/ceph.py +++ b/teuthology/task/ceph.py @@ -81,13 +81,14 @@ class CephState(object): @contextlib.contextmanager def ceph_log(ctx, config): log.info('Creating log directories...') + archive_dir = '{tdir}/archive'.format(tdir=teuthology.get_testdir(ctx)) run.wait( ctx.cluster.run( args=[ 'install', '-d', '-m0755', '--', - '/tmp/cephtest/archive/log', - '/tmp/cephtest/archive/log/valgrind', - '/tmp/cephtest/archive/profiling-logger', + '{adir}/log'.format(adir=archive_dir), + '{adir}/log/valgrind'.format(adir=archive_dir), + '{adir}/profiling-logger'.format(adir=archive_dir), ], wait=False, ) @@ -103,7 +104,7 @@ def ceph_log(ctx, config): ctx.cluster.run( args=[ 'find', - '/tmp/cephtest/archive/log', + '{adir}/log'.format(adir=archive_dir), '-name', '*.log', '-print0', @@ -127,10 +128,11 @@ def ship_utilities(ctx, config): assert config is None FILES = ['daemon-helper', 'enable-coredump', 'chdir-coredump', 'valgrind.supp', 'kcon_most'] + testdir = teuthology.get_testdir(ctx) for filename in FILES: log.info('Shipping %r...', filename) src = os.path.join(os.path.dirname(__file__), filename) - dst = os.path.join('/tmp/cephtest', filename) + dst = os.path.join(testdir, filename) with file(src, 'rb') as f: for rem in ctx.cluster.remotes.iterkeys(): teuthology.write_file( @@ -153,7 +155,7 @@ def ship_utilities(ctx, config): finally: log.info('Removing shipped files: %s...', ' '.join(FILES)) filenames = ( - os.path.join('/tmp/cephtest', filename) + os.path.join(testdir, filename) for filename in FILES ) run.wait( @@ -167,10 +169,11 @@ def ship_utilities(ctx, config): ), ) -def _download_binaries(remote, ceph_bindir_url): +def _download_binaries(ctx, remote, ceph_bindir_url): + testdir = teuthology.get_testdir(ctx) remote.run( args=[ - 'install', '-d', '-m0755', '--', '/tmp/cephtest/binary', + 'install', '-d', '-m0755', '--', '{tdir}/binary'.format(tdir=testdir), run.Raw('&&'), 'uname', '-m', run.Raw('|'), @@ -183,7 +186,7 @@ def _download_binaries(remote, ceph_bindir_url): # need to use --input-file to make wget respect --base '--input-file=-', run.Raw('|'), - 'tar', '-xzf', '-', '-C', '/tmp/cephtest/binary', + 'tar', '-xzf', '-', '-C', '{tdir}/binary'.format(tdir=testdir), ], ) @@ -192,6 +195,8 @@ def binaries(ctx, config): path = config.get('path') tmpdir = None + testdir = teuthology.get_testdir(ctx) + if path is None: # fetch from gitbuilder gitbuilder log.info('Fetching and unpacking ceph binaries from gitbuilder...') @@ -212,7 +217,7 @@ def binaries(ctx, config): with parallel() as p: for remote in ctx.cluster.remotes.iterkeys(): - p.spawn(_download_binaries, remote, ceph_bindir_url) + p.spawn(_download_binaries, ctx, remote, ceph_bindir_url) else: with tempfile.TemporaryFile(prefix='teuthology-tarball-', suffix='.tgz') as tar_fp: tmpdir = tempfile.mkdtemp(prefix='teuthology-tarball-') @@ -249,9 +254,9 @@ def binaries(ctx, config): tar_fp.seek(0) writes = ctx.cluster.run( args=[ - 'install', '-d', '-m0755', '--', '/tmp/cephtest/binary', + 'install', '-d', '-m0755', '--', '{tdir}/binary'.format(tdir=testdir), run.Raw('&&'), - 'tar', '-xzf', '-', '-C', '/tmp/cephtest/binary' + 'tar', '-xzf', '-', '-C', '{tdir}/binary'.format(tdir=testdir) ], stdin=run.PIPE, wait=False, @@ -269,7 +274,7 @@ def binaries(ctx, config): 'rm', '-rf', '--', - '/tmp/cephtest/binary', + '{tdir}/binary'.format(tdir=testdir), ], wait=False, ), @@ -281,11 +286,12 @@ def 
assign_devs(roles, devs): @contextlib.contextmanager def valgrind_post(ctx, config): + testdir = teuthology.get_testdir(ctx) try: yield finally: lookup_procs = list() - val_path = '/tmp/cephtest/archive/log/valgrind' + val_path = '{tdir}/archive/log/valgrind'.format(tdir=testdir) log.info('Checking for errors in any valgrind logs...'); for remote in ctx.cluster.remotes.iterkeys(): #look at valgrind logs for each node @@ -322,12 +328,13 @@ def valgrind_post(ctx, config): @contextlib.contextmanager def cluster(ctx, config): + testdir = teuthology.get_testdir(ctx) log.info('Creating ceph cluster...') run.wait( ctx.cluster.run( args=[ 'install', '-d', '-m0755', '--', - '/tmp/cephtest/data', + '{tdir}/data'.format(tdir=testdir), ], wait=False, ) @@ -378,7 +385,7 @@ def cluster(ctx, config): remotes_and_roles = ctx.cluster.remotes.items() roles = [roles for (remote, roles) in remotes_and_roles] ips = [host for (host, port) in (remote.ssh.get_transport().getpeername() for (remote, roles) in remotes_and_roles)] - conf = teuthology.skeleton_config(roles=roles, ips=ips) + conf = teuthology.skeleton_config(ctx, roles=roles, ips=ips) for remote, roles_to_journals in remote_to_roles_to_journals.iteritems(): for role, journal in roles_to_journals.iteritems(): key = "osd." + str(role) @@ -407,7 +414,7 @@ def cluster(ctx, config): 'python', '-c', 'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))', - '/tmp/cephtest/ceph.conf', + '{tdir}/ceph.conf'.format(tdir=testdir), ], stdin=run.PIPE, wait=False, @@ -415,34 +422,35 @@ def cluster(ctx, config): teuthology.feed_many_stdins_and_close(conf_fp, writes) run.wait(writes) - coverage_dir = '/tmp/cephtest/archive/coverage' + coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir) firstmon = teuthology.get_first_mon(ctx, config) log.info('Setting up %s...' % firstmon) ctx.cluster.only(firstmon).run( args=[ - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), coverage_dir, - '/tmp/cephtest/binary/usr/local/bin/ceph-authtool', + '{tdir}/binary/usr/local/bin/ceph-authtool'.format(tdir=testdir), '--create-keyring', - '/tmp/cephtest/ceph.keyring', + '{tdir}/ceph.keyring'.format(tdir=testdir), ], ) ctx.cluster.only(firstmon).run( args=[ - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), coverage_dir, - '/tmp/cephtest/binary/usr/local/bin/ceph-authtool', + '{tdir}/binary/usr/local/bin/ceph-authtool'.format(tdir=testdir), '--gen-key', '--name=mon.', - '/tmp/cephtest/ceph.keyring', + '{tdir}/ceph.keyring'.format(tdir=testdir), ], ) (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys() teuthology.create_simple_monmap( + ctx, remote=mon0_remote, conf=conf, ) @@ -450,28 +458,28 @@ def cluster(ctx, config): log.info('Creating admin key on %s...' 
% firstmon) ctx.cluster.only(firstmon).run( args=[ - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), coverage_dir, - '/tmp/cephtest/binary/usr/local/bin/ceph-authtool', + '{tdir}/binary/usr/local/bin/ceph-authtool'.format(tdir=testdir), '--gen-key', '--name=client.admin', '--set-uid=0', '--cap', 'mon', 'allow *', '--cap', 'osd', 'allow *', '--cap', 'mds', 'allow', - '/tmp/cephtest/ceph.keyring', + '{tdir}/ceph.keyring'.format(tdir=testdir), ], ) log.info('Copying monmap to all nodes...') keyring = teuthology.get_file( remote=mon0_remote, - path='/tmp/cephtest/ceph.keyring', + path='{tdir}/ceph.keyring'.format(tdir=testdir), ) monmap = teuthology.get_file( remote=mon0_remote, - path='/tmp/cephtest/monmap', + path='{tdir}/monmap'.format(tdir=testdir), ) for rem in ctx.cluster.remotes.iterkeys(): @@ -479,12 +487,12 @@ def cluster(ctx, config): log.info('Sending monmap to node {remote}'.format(remote=rem)) teuthology.write_file( remote=rem, - path='/tmp/cephtest/ceph.keyring', + path='{tdir}/ceph.keyring'.format(tdir=testdir), data=keyring, ) teuthology.write_file( remote=rem, - path='/tmp/cephtest/monmap', + path='{tdir}/monmap'.format(tdir=testdir), data=monmap, ) @@ -493,17 +501,17 @@ def cluster(ctx, config): run.wait( mons.run( args=[ - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), coverage_dir, - '/tmp/cephtest/binary/usr/local/bin/osdmaptool', + '{tdir}/binary/usr/local/bin/osdmaptool'.format(tdir=testdir), '-c', - '/tmp/cephtest/ceph.conf', + '{tdir}/ceph.conf'.format(tdir=testdir), '--clobber', '--createsimple', '{num:d}'.format( num=teuthology.num_instances_of_type(ctx.cluster, 'osd'), ), - '/tmp/cephtest/osdmap', + '{tdir}/osdmap'.format(tdir=testdir), '--pg_bits', '2', '--pgp_bits', '4', ], @@ -516,14 +524,14 @@ def cluster(ctx, config): for id_ in teuthology.roles_of_type(roles_for_host, 'osd'): remote.run( args=[ - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), coverage_dir, - '/tmp/cephtest/binary/usr/local/bin/ceph-authtool', + '{tdir}/binary/usr/local/bin/ceph-authtool'.format(tdir=testdir), '--create-keyring', '--gen-key', '--name=osd.{id}'.format(id=id_), - '/tmp/cephtest/data/osd.{id}.keyring'.format(id=id_), + '{tdir}/data/osd.{id}.keyring'.format(tdir=testdir, id=id_), ], ) @@ -533,14 +541,14 @@ def cluster(ctx, config): for id_ in teuthology.roles_of_type(roles_for_host, 'mds'): remote.run( args=[ - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), coverage_dir, - '/tmp/cephtest/binary/usr/local/bin/ceph-authtool', + '{tdir}/binary/usr/local/bin/ceph-authtool'.format(tdir=testdir), '--create-keyring', '--gen-key', '--name=mds.{id}'.format(id=id_), - '/tmp/cephtest/data/mds.{id}.keyring'.format(id=id_), + '{tdir}/data/mds.{id}.keyring'.format(tdir=testdir, id=id_), ], ) @@ -550,15 +558,15 @@ def cluster(ctx, config): for id_ in teuthology.roles_of_type(roles_for_host, 'client'): remote.run( args=[ - '/tmp/cephtest/enable-coredump', - 
'/tmp/cephtest/binary/usr/local/bin/ceph-coverage', + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), coverage_dir, - '/tmp/cephtest/binary/usr/local/bin/ceph-authtool', + '{tdir}/binary/usr/local/bin/ceph-authtool'.format(tdir=testdir), '--create-keyring', '--gen-key', # TODO this --name= is not really obeyed, all unknown "types" are munged to "client" '--name=client.{id}'.format(id=id_), - '/tmp/cephtest/data/client.{id}.keyring'.format(id=id_), + '{tdir}/data/client.{id}.keyring'.format(tdir=testdir, id=id_), ], ) @@ -570,7 +578,8 @@ def cluster(ctx, config): for id_ in teuthology.roles_of_type(roles_for_host, type_): data = teuthology.get_file( remote=remote, - path='/tmp/cephtest/data/{type}.{id}.keyring'.format( + path='{tdir}/data/{type}.{id}.keyring'.format( + tdir=testdir, type=type_, id=id_, ), @@ -583,7 +592,7 @@ def cluster(ctx, config): args=[ 'cat', run.Raw('>>'), - '/tmp/cephtest/ceph.keyring', + '{tdir}/ceph.keyring'.format(tdir=testdir), ], stdin=run.PIPE, wait=False, @@ -595,11 +604,11 @@ def cluster(ctx, config): run.wait( mons.run( args=[ - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), coverage_dir, - '/tmp/cephtest/binary/usr/local/bin/ceph-authtool', - '/tmp/cephtest/ceph.keyring', + '{tdir}/binary/usr/local/bin/ceph-authtool'.format(tdir=testdir), + '{tdir}/ceph.keyring'.format(tdir=testdir), '--name={type}.{id}'.format( type=type_, id=id_, @@ -614,16 +623,16 @@ def cluster(ctx, config): for id_ in teuthology.roles_of_type(roles_for_host, 'mon'): remote.run( args=[ - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), coverage_dir, - '/tmp/cephtest/binary/usr/local/bin/ceph-mon', + '{tdir}/binary/usr/local/bin/ceph-mon'.format(tdir=testdir), '--mkfs', '-i', id_, - '-c', '/tmp/cephtest/ceph.conf', - '--monmap=/tmp/cephtest/monmap', - '--osdmap=/tmp/cephtest/osdmap', - '--keyring=/tmp/cephtest/ceph.keyring', + '-c', '{tdir}/ceph.conf'.format(tdir=testdir), + '--monmap={tdir}/monmap'.format(tdir=testdir), + '--osdmap={tdir}/osdmap'.format(tdir=testdir), + '--keyring={tdir}/ceph.keyring'.format(tdir=testdir), ], ) @@ -641,7 +650,7 @@ def cluster(ctx, config): remote.run( args=[ 'mkdir', - os.path.join('/tmp/cephtest/data', 'osd.{id}.data'.format(id=id_)), + os.path.join('{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_)), ], ) if roles_to_devs.get(id_): @@ -691,24 +700,24 @@ def cluster(ctx, config): '-t', fs, '-o', ','.join(mount_options), dev, - os.path.join('/tmp/cephtest/data', 'osd.{id}.data'.format(id=id_)), + os.path.join('{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_)), ] ) remote.run( args=[ 'sudo', 'chown', '-R', 'ubuntu.ubuntu', - os.path.join('/tmp/cephtest/data', 'osd.{id}.data'.format(id=id_)) + os.path.join('{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_)) ] ) remote.run( args=[ 'sudo', 'chmod', '-R', '755', - os.path.join('/tmp/cephtest/data', 'osd.{id}.data'.format(id=id_)) + os.path.join('{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_)) ] ) devs_to_clean[remote].append( os.path.join( - '/tmp/cephtest/data', 'osd.{id}.data'.format(id=id_) + '{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_) ) ) @@ 
-716,14 +725,14 @@ def cluster(ctx, config): remote.run( args=[ 'MALLOC_CHECK_=3', - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), coverage_dir, - '/tmp/cephtest/binary/usr/local/bin/ceph-osd', + '{tdir}/binary/usr/local/bin/ceph-osd'.format(tdir=testdir), '--mkfs', '-i', id_, - '-c', '/tmp/cephtest/ceph.conf', - '--monmap', '/tmp/cephtest/monmap', + '-c', '{tdir}/ceph.conf'.format(tdir=testdir), + '--monmap', '{tdir}/monmap'.format(tdir=testdir), ], ) run.wait( @@ -731,8 +740,8 @@ def cluster(ctx, config): args=[ 'rm', '--', - '/tmp/cephtest/monmap', - '/tmp/cephtest/osdmap', + '{tdir}/monmap'.format(tdir=testdir), + '{tdir}/osdmap'.format(tdir=testdir), ], wait=False, ), @@ -747,7 +756,7 @@ def cluster(ctx, config): def first_in_ceph_log(pattern, excludes): args = [ 'egrep', pattern, - '/tmp/cephtest/archive/log/cluster.%s.log' % firstmon, + '%s/archive/log/cluster.%s.log' % (testdir, firstmon), ] for exclude in excludes: args.extend([run.Raw('|'), 'egrep', '-v', exclude]) @@ -809,7 +818,7 @@ def cluster(ctx, config): for role in roles: if role.startswith('mon.'): teuthology.pull_directory_tarball(remote, - '/tmp/cephtest/data/%s' % role, + '%s/data/%s' % (testdir, role), path + '/' + role + '.tgz') log.info('Cleaning ceph cluster...') @@ -819,11 +828,11 @@ def cluster(ctx, config): 'rm', '-rf', '--', - '/tmp/cephtest/ceph.conf', - '/tmp/cephtest/ceph.keyring', - '/tmp/cephtest/data', - '/tmp/cephtest/monmap', - run.Raw('/tmp/cephtest/asok.*') + '{tdir}/ceph.conf'.format(tdir=testdir), + '{tdir}/ceph.keyring'.format(tdir=testdir), + '{tdir}/data'.format(tdir=testdir), + '{tdir}/monmap'.format(tdir=testdir), + run.Raw('{tdir}/asok.*'.format(tdir=testdir)) ], wait=False, ), @@ -833,8 +842,9 @@ def cluster(ctx, config): @contextlib.contextmanager def run_daemon(ctx, config, type_): log.info('Starting %s daemons...' 
% type_) + testdir = teuthology.get_testdir(ctx) daemons = ctx.cluster.only(teuthology.is_type(type_)) - coverage_dir = '/tmp/cephtest/archive/coverage' + coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir) daemon_signal = 'kill' if config.get('coverage') or config.get('valgrind') is not None: @@ -849,17 +859,17 @@ def run_daemon(ctx, config, type_): num_active += 1 run_cmd = [ - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), coverage_dir, - '/tmp/cephtest/daemon-helper', + '{tdir}/daemon-helper'.format(tdir=testdir), daemon_signal, ] run_cmd_tail = [ - '/tmp/cephtest/binary/usr/local/bin/ceph-%s' % type_, + '%s/binary/usr/local/bin/ceph-%s' % (testdir, type_), '-f', '-i', id_, - '-c', '/tmp/cephtest/ceph.conf'] + '-c', '{tdir}/ceph.conf'.format(tdir=testdir)] if config.get('valgrind') is not None: valgrind_args = None @@ -867,10 +877,10 @@ def run_daemon(ctx, config, type_): valgrind_args = config['valgrind'][type_] if name in config['valgrind']: valgrind_args = config['valgrind'][name] - run_cmd.extend(teuthology.get_valgrind_args(name, valgrind_args)) + run_cmd.extend(teuthology.get_valgrind_args(testdir, name, valgrind_args)) if type_ in config.get('cpu_profile', []): - profile_path = '/tmp/cephtest/archive/log/%s.%s.prof' % (type_, id_) + profile_path = '%s/archive/log/%s.%s.prof' % (testdir, type_, id_) run_cmd.extend([ 'env', 'CPUPROFILE=%s' % profile_path ]) run_cmd.extend(run_cmd_tail) @@ -886,11 +896,11 @@ def run_daemon(ctx, config, type_): firstmon = teuthology.get_first_mon(ctx, config) (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys() mon0_remote.run(args=[ - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), coverage_dir, - '/tmp/cephtest/binary/usr/local/bin/ceph', - '-c', '/tmp/cephtest/ceph.conf', + '{tdir}/binary/usr/local/bin/ceph'.format(tdir=testdir), + '-c', '{tdir}/ceph.conf'.format(tdir=testdir), 'mds', 'set_max_mds', str(num_active)]) try: @@ -914,10 +924,12 @@ def healthy(ctx, config): firstmon = teuthology.get_first_mon(ctx, config) (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys() teuthology.wait_until_osds_up( + ctx, cluster=ctx.cluster, remote=mon0_remote ) teuthology.wait_until_healthy( + ctx, remote=mon0_remote, ) @@ -1053,9 +1065,10 @@ def task(ctx, config): flavor = 'gcov' ctx.summary['flavor'] = flavor - + + testdir = teuthology.get_testdir(ctx) if config.get('coverage'): - coverage_dir = '/tmp/cephtest/archive/coverage' + coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir) log.info('Creating coverage directory...') run.wait( ctx.cluster.run( diff --git a/teuthology/task/ceph_manager.py b/teuthology/task/ceph_manager.py index 22b84b13ca6d0..94252cde5a514 100644 --- a/teuthology/task/ceph_manager.py +++ b/teuthology/task/ceph_manager.py @@ -226,14 +226,15 @@ class CephManager: self.pools['data'] = self.get_pool_property('data', 'pg_num') def raw_cluster_cmd(self, *args): + testdir = teuthology.get_testdir(self.ctx) ceph_args = [ - 'LD_LIBRARY_PRELOAD=/tmp/cephtest/binary/usr/local/lib', - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/binary/usr/local/bin/ceph', - '-k', '/tmp/cephtest/ceph.keyring', - '-c', 
'/tmp/cephtest/ceph.conf', + 'LD_LIBRARY_PRELOAD={tdir}/binary/usr/local/lib'.format(tdir=testdir), + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph'.format(tdir=testdir), + '-k', '{tdir}/ceph.keyring'.format(tdir=testdir), + '-c', '{tdir}/ceph.conf'.format(tdir=testdir), '--concise', ] ceph_args.extend(args) diff --git a/teuthology/task/chdir-coredump b/teuthology/task/chdir-coredump index cf1acd45496aa..b27904eaf6bc8 100644 --- a/teuthology/task/chdir-coredump +++ b/teuthology/task/chdir-coredump @@ -1,7 +1,8 @@ #!/bin/sh set -e +testdir=$(realpath $(dirname $0)) # valgrind only dumps to cwd, so cwd there... -cd /tmp/cephtest/archive/coredump +cd ${testdir}/archive/coredump exec "$@" diff --git a/teuthology/task/cram.py b/teuthology/task/cram.py index 5289212de7707..a6a6d4507fdb9 100644 --- a/teuthology/task/cram.py +++ b/teuthology/task/cram.py @@ -41,17 +41,19 @@ def task(ctx, config): clients = teuthology.replace_all_with_clients(ctx.cluster, config['clients']) + testdir = teuthology.get_testdir(ctx) + try: for client, tests in clients.iteritems(): (remote,) = ctx.cluster.only(client).remotes.iterkeys() - client_dir = '/tmp/cephtest/archive/cram.{role}'.format(role=client) + client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client) remote.run( args=[ 'mkdir', '--', client_dir, run.Raw('&&'), - 'virtualenv', '/tmp/cephtest/virtualenv', + 'virtualenv', '{tdir}/virtualenv'.format(tdir=testdir), run.Raw('&&'), - '/tmp/cephtest/virtualenv/bin/pip', + '{tdir}/virtualenv/bin/pip'.format(tdir=testdir), 'install', 'cram', ], ) @@ -70,7 +72,7 @@ def task(ctx, config): finally: for client, tests in clients.iteritems(): (remote,) = ctx.cluster.only(client).remotes.iterkeys() - client_dir = '/tmp/cephtest/archive/cram.{role}'.format(role=client) + client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client) test_files = set([test.rsplit('/', 1)[1] for test in tests]) # remove test files unless they failed @@ -90,7 +92,7 @@ def task(ctx, config): remote.run( args=[ 'rm', '-rf', '--', - '/tmp/cephtest/virtualenv', + '{tdir}/virtualenv'.format(tdir=testdir), run.Raw(';'), 'rmdir', '--ignore-fail-on-non-empty', client_dir, ], @@ -104,21 +106,22 @@ def _run_tests(ctx, role): (remote,) = ctx.cluster.only(role).remotes.iterkeys() ceph_ref = ctx.summary.get('ceph-sha1', 'master') + testdir = teuthology.get_testdir(ctx) log.info('Running tests for %s...', role) remote.run( args=[ run.Raw('CEPH_REF={ref}'.format(ref=ceph_ref)), - run.Raw('PATH="$PATH:/tmp/cephtest/binary/usr/local/bin"'), - run.Raw('LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/tmp/cephtest/binary/usr/local/lib"'), - run.Raw('CEPH_CONF="/tmp/cephtest/ceph.conf"'), + run.Raw('PATH="$PATH:{tdir}/binary/usr/local/bin"'.format(tdir=testdir)), + run.Raw('LD_LIBRARY_PATH="$LD_LIBRARY_PATH:{tdir}/binary/usr/local/lib"'.format(tdir=testdir)), + run.Raw('CEPH_CONF="{tdir}/ceph.conf"'.format(tdir=testdir)), run.Raw('CEPH_ID="{id}"'.format(id=id_)), - run.Raw('PYTHONPATH="$PYTHONPATH:/tmp/cephtest/binary/usr/local/lib/python2.7/dist-packages:/tmp/cephtest/binary/usr/local/lib/python2.6/dist-packages"'), - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/virtualenv/bin/cram', + 
run.Raw('PYTHONPATH="$PYTHONPATH:{tdir}/binary/usr/local/lib/python2.7/dist-packages:{tdir}/binary/usr/local/lib/python2.6/dist-packages"'.format(tdir=testdir)), + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + '{tdir}/virtualenv/bin/cram'.format(tdir=testdir), '-v', '--', - run.Raw('/tmp/cephtest/archive/cram.{role}/*.t'.format(role=role)), + run.Raw('{tdir}/archive/cram.{role}/*.t'.format(tdir=testdir, role=role)), ], logger=log.getChild(role), ) diff --git a/teuthology/task/die_on_err.py b/teuthology/task/die_on_err.py index cfbda38cc4613..b7ee269213780 100644 --- a/teuthology/task/die_on_err.py +++ b/teuthology/task/die_on_err.py @@ -11,7 +11,7 @@ log = logging.getLogger(__name__) @contextlib.contextmanager def task(ctx, config): """ - Die if /tmp/cephtest/err exists or if an OSD dumps core + Die if {testdir}/err exists or if an OSD dumps core """ if config is None: config = {} @@ -31,13 +31,14 @@ def task(ctx, config): while len(manager.get_osd_status()['up']) < num_osds: time.sleep(10) - log_path = '/tmp/cephtest/archive/log' + testdir = teuthology.get_testdir(ctx) + log_path = '{tdir}/archive/log'.format(tdir=testdir) while True: for i in range(num_osds): (osd_remote,) = ctx.cluster.only('osd.%d' % i).remotes.iterkeys() p = osd_remote.run( - args = [ 'test', '-e', '/tmp/cephtest/err' ], + args = [ 'test', '-e', '{tdir}/err'.format(tdir=testdir) ], wait=True, check_status=False, ) @@ -47,7 +48,7 @@ def task(ctx, config): log.info("osd %d has an error" % i) raise Exception("osd %d error" % i) - log_path = '/tmp/cephtest/archive/log/osd.%d.log' % i + log_path = '%s/archive/log/osd.%d.log' % (testdir, i) p = osd_remote.run( args = [ diff --git a/teuthology/task/divergent_priors.py b/teuthology/task/divergent_priors.py index 5a34f352d9d2b..86a3e472432bf 100644 --- a/teuthology/task/divergent_priors.py +++ b/teuthology/task/divergent_priors.py @@ -7,15 +7,15 @@ import time log = logging.getLogger(__name__) -def rados(remote, cmd, wait=True): +def rados(testdir, remote, cmd, wait=True): log.info("rados %s" % ' '.join(cmd)) pre = [ - 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib', - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/binary/usr/local/bin/rados', - '-c', '/tmp/cephtest/ceph.conf', + 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir), + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/rados'.format(tdir=testdir), + '-c', '{tdir}/ceph.conf'.format(tdir=testdir), ]; pre.extend(cmd) proc = remote.run( @@ -44,6 +44,7 @@ def task(ctx, config): first_mon = teuthology.get_first_mon(ctx, config) (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() + testdir = teuthology.get_testdir(ctx) manager = ceph_manager.CephManager( mon, ctx=ctx, @@ -82,7 +83,7 @@ def task(ctx, config): log.info('writing initial objects') # write 1000 objects for i in range(1000): - rados(mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile]) + rados(testdir, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile]) manager.wait_for_clean() @@ -94,7 +95,7 @@ def task(ctx, config): # write 1 (divergent) object log.info('writing divergent object existing_0') rados( - mon, ['-p', 'foo', 'put', 'existing_0', dummyfile2], + testdir, mon, 
['-p', 'foo', 'put', 'existing_0', dummyfile2], wait=False) time.sleep(10) mon.run( @@ -124,7 +125,7 @@ def task(ctx, config): # write 1 non-divergent object (ensure that old divergent one is divergent) log.info('writing non-divergent object existing_1') - rados(mon, ['-p', 'foo', 'put', 'existing_1', dummyfile2]) + rados(testdir, mon, ['-p', 'foo', 'put', 'existing_1', dummyfile2]) manager.wait_for_recovery() @@ -146,7 +147,7 @@ def task(ctx, config): manager.mark_in_osd(divergent) log.info('wait for peering') - rados(mon, ['-p', 'foo', 'put', 'foo', dummyfile]) + rados(testdir, mon, ['-p', 'foo', 'put', 'foo', dummyfile]) log.info("killing divergent %d", divergent) manager.kill_osd(divergent) @@ -158,7 +159,7 @@ def task(ctx, config): manager.set_config(i, osd_recovery_delay_start=0) log.info('reading existing_0') - exit_status = rados(mon, + exit_status = rados(testdir, mon, ['-p', 'foo', 'get', 'existing_0', '-o', '/tmp/existing']) assert exit_status is 0 diff --git a/teuthology/task/filestore_idempotent.py b/teuthology/task/filestore_idempotent.py index d63e2b01fd6e3..95039b7bd2819 100644 --- a/teuthology/task/filestore_idempotent.py +++ b/teuthology/task/filestore_idempotent.py @@ -28,7 +28,9 @@ def task(ctx, config): client = clients[0]; (remote,) = ctx.cluster.only(client).remotes.iterkeys() - dir = '/tmp/cephtest/data/test.%s' % client + testdir = teuthology.get_testdir(ctx) + + dir = '%s/data/test.%s' % (testdir, client) seed = str(int(random.uniform(1,100))) @@ -53,7 +55,7 @@ def task(ctx, config): args=[ 'cd', dir, run.Raw('&&'), - run.Raw('PATH="/tmp/cephtest/binary/usr/local/bin:$PATH"'), + run.Raw('PATH="{tdir}/binary/usr/local/bin:$PATH"'.format(tdir=testdir)), './run_seed_to_range.sh', seed, '50', '300', ], wait=False, @@ -63,7 +65,7 @@ def task(ctx, config): if result != 0: remote.run( args=[ - 'cp', '-a', dir, '/tmp/cephtest/archive/idempotent_failure', + 'cp', '-a', dir, '{tdir}/archive/idempotent_failure'.format(tdir=testdir), ]) raise Exception("./run_seed_to_range.sh errored out") diff --git a/teuthology/task/hadoop.py b/teuthology/task/hadoop.py index 034716c8f75ce..5fb745381b161 100644 --- a/teuthology/task/hadoop.py +++ b/teuthology/task/hadoop.py @@ -40,25 +40,26 @@ def validate_config(ctx, config): ## Add required entries to conf/hadoop-env.sh def write_hadoop_env(ctx, config): - hadoopEnvFile = "/tmp/cephtest/hadoop/conf/hadoop-env.sh" + hadoopEnvFile = "{tdir}/hadoop/conf/hadoop-env.sh".format(tdir=teuthology.get_testdir(ctx)) hadoopNodes = ctx.cluster.only(teuthology.is_type('hadoop')) for remote, roles_for_host in hadoopNodes.remotes.iteritems(): teuthology.write_file(remote, hadoopEnvFile, '''export JAVA_HOME=/usr/lib/jvm/default-java -export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp/cephtest/binary/usr/local/lib:/usr/lib -export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/tmp/cephtest/binary/usr/local/lib/libcephfs.jar:/tmp/cephtest/hadoop/build/hadoop-core*.jar +export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:{tdir}/binary/usr/local/lib:/usr/lib +export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:{tdir}/binary/usr/local/lib/libcephfs.jar:{tdir}/hadoop/build/hadoop-core*.jar export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS" export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS" export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS" export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS" export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote 
$HADOOP_JOBTRACKER_OPTS" -''' ) +'''.format(tdir=teuthology.get_testdir(ctx)) ) log.info("wrote file: " + hadoopEnvFile + " to host: " + str(remote)) ## Add required entries to conf/core-site.xml def write_core_site(ctx, config): - coreSiteFile = "/tmp/cephtest/hadoop/conf/core-site.xml" + testdir = teuthology.get_testdir(ctx) + coreSiteFile = "{tdir}/hadoop/conf/core-site.xml".format(tdir=testdir) hadoopNodes = ctx.cluster.only(teuthology.is_type('hadoop')) for remote, roles_for_host in hadoopNodes.remotes.iteritems(): @@ -85,10 +86,10 @@ def write_core_site(ctx, config): ceph.conf.file - /tmp/cephtest/ceph.conf + {tdir}/ceph.conf -'''.format(default_fs=default_fs_string)) +'''.format(tdir=teuthology.get_testdir(ctx), default_fs=default_fs_string)) log.info("wrote file: " + coreSiteFile + " to host: " + str(remote)) @@ -101,7 +102,7 @@ def get_hadoop_master_ip(ctx): ## Add required entries to conf/mapred-site.xml def write_mapred_site(ctx): - mapredSiteFile = "/tmp/cephtest/hadoop/conf/mapred-site.xml" + mapredSiteFile = "{tdir}/hadoop/conf/mapred-site.xml".format(tdir=teuthology.get_testdir(ctx)) master_ip = get_hadoop_master_ip(ctx) log.info('adding host {remote} as jobtracker'.format(remote=master_ip)) @@ -124,7 +125,7 @@ def write_mapred_site(ctx): ## Add required entries to conf/hdfs-site.xml def write_hdfs_site(ctx): - hdfsSiteFile = "/tmp/cephtest/hadoop/conf/hdfs-site.xml" + hdfsSiteFile = "{tdir}/hadoop/conf/hdfs-site.xml".format(tdir=teuthology.get_testdir(ctx)) hadoopNodes = ctx.cluster.only(teuthology.is_type('hadoop')) for remote, roles_for_host in hadoopNodes.remotes.iteritems(): @@ -146,7 +147,7 @@ def write_hdfs_site(ctx): def write_slaves(ctx): log.info('Setting up slave nodes...') - slavesFile = "/tmp/cephtest/hadoop/conf/slaves" + slavesFile = "{tdir}/hadoop/conf/slaves".format(tdir=teuthology.get_testdir(ctx)) tmpFile = StringIO() slaves = ctx.cluster.only(teuthology.is_type('hadoop.slave')) @@ -164,7 +165,7 @@ def write_slaves(ctx): ## Add required entries to conf/masters ## These nodes host JobTrackers and Namenodes def write_master(ctx): - mastersFile = "/tmp/cephtest/hadoop/conf/masters" + mastersFile = "{tdir}/hadoop/conf/masters".format(tdir=teuthology.get_testdir(ctx)) master = _get_master(ctx) remote, _ = master @@ -200,7 +201,9 @@ def configure_hadoop(ctx, config): master = _get_master(ctx) remote, _ = master remote.run( - args=["/tmp/cephtest/hadoop/bin/hadoop","namenode","-format"], + args=["{tdir}/hadoop/bin/hadoop".format(tdir=teuthology.get_testdir(ctx)), + "namenode", + "-format"], wait=True, ) @@ -222,30 +225,32 @@ def configure_hadoop(ctx, config): ), ) -def _start_hadoop(remote, config): +def _start_hadoop(ctx, remote, config): + testdir = teuthology.get_testdir(ctx) if config.get('hdfs'): remote.run( - args=['/tmp/cephtest/hadoop/bin/start-dfs.sh', ], + args=['{tdir}/hadoop/bin/start-dfs.sh'.format(tdir=testdir), ], wait=True, ) log.info('done starting hdfs') remote.run( - args=['/tmp/cephtest/hadoop/bin/start-mapred.sh', ], + args=['{tdir}/hadoop/bin/start-mapred.sh'.format(tdir=testdir), ], wait=True, ) log.info('done starting mapred') -def _stop_hadoop(remote, config): +def _stop_hadoop(ctx, remote, config): + testdir = teuthology.get_testdir(ctx) remote.run( - args=['/tmp/cephtest/hadoop/bin/stop-mapred.sh', ], + args=['{tdir}/hadoop/bin/stop-mapred.sh'.format(tdir=testdir), ], wait=True, ) if config.get('hdfs'): remote.run( - args=['/tmp/cephtest/hadoop/bin/stop-dfs.sh', ], + args=['{tdir}/hadoop/bin/stop-dfs.sh'.format(tdir=testdir), ], 
wait=True, ) @@ -263,22 +268,23 @@ def start_hadoop(ctx, config): remote, _ = master log.info('Starting hadoop on {remote}\n'.format(remote=remote.ssh.get_transport().getpeername()[0])) - _start_hadoop(remote, config) + _start_hadoop(ctx, remote, config) try: yield finally: log.info('Running stop-mapred.sh on {remote}'.format(remote=remote.ssh.get_transport().getpeername()[0])) - _stop_hadoop(remote, config) + _stop_hadoop(ctx, remote, config) -# download and untar the most recent hadoop binaries into /tmp/cephtest/hadoop -def _download_hadoop_binaries(remote, hadoop_url): +# download and untar the most recent hadoop binaries into {testdir}/hadoop +def _download_hadoop_binaries(ctx, remote, hadoop_url): log.info('_download_hadoop_binaries: path %s' % hadoop_url) fileName = 'hadoop.tgz' + testdir = teuthology.get_testdir(ctx) remote.run( args=[ - 'mkdir', '-p', '-m0755', '/tmp/cephtest/hadoop', + 'mkdir', '-p', '-m0755', '{tdir}/hadoop'.format(tdir=testdir), run.Raw('&&'), 'echo', '{fileName}'.format(fileName=fileName), @@ -290,7 +296,7 @@ def _download_hadoop_binaries(remote, hadoop_url): # need to use --input-file to make wget respect --base '--input-file=-', run.Raw('|'), - 'tar', '-xzf', '-', '-C', '/tmp/cephtest/hadoop', + 'tar', '-xzf', '-', '-C', '{tdir}/hadoop'.format(tdir=testdir), ], ) @@ -320,7 +326,7 @@ def binaries(ctx, config): with parallel() as p: hadoopNodes = ctx.cluster.only(teuthology.is_type('hadoop')) for remote in hadoopNodes.remotes.iterkeys(): - p.spawn(_download_hadoop_binaries, remote, hadoop_bindir_url) + p.spawn(_download_hadoop_binaries, ctx, remote, hadoop_bindir_url) try: yield @@ -328,7 +334,7 @@ def binaries(ctx, config): log.info('Removing hadoop binaries...') run.wait( ctx.cluster.run( - args=[ 'rm', '-rf', '--', '/tmp/cephtest/hadoop'], + args=[ 'rm', '-rf', '--', '{tdir}/hadoop'.format(tdir=teuthology.get_testdir(ctx))], wait=False, ), ) @@ -344,7 +350,10 @@ def out_of_safemode(ctx, config): master = _get_master(ctx) remote, _ = master remote.run( - args=["/tmp/cephtest/hadoop/bin/hadoop","dfsadmin","-safemode", "wait"], + args=["{tdir}/hadoop/bin/hadoop".format(tdir=teuthology.get_testdir(ctx)), + "dfsadmin", + "-safemode", + "wait"], wait=True, ) else: @@ -395,11 +404,11 @@ def task(ctx, config): - mkdir -p /tmp/hadoop_input - wget http://ceph.com/qa/hadoop_input_files.tar -O /tmp/hadoop_input/files.tar - cd /tmp/hadoop_input/; tar -xf /tmp/hadoop_input/files.tar - - /tmp/cephtest/hadoop/bin/hadoop fs -mkdir wordcount_input - - /tmp/cephtest/hadoop/bin/hadoop fs -put /tmp/hadoop_input/*txt wordcount_input/ - - /tmp/cephtest/hadoop/bin/hadoop jar /tmp/cephtest/hadoop/build/hadoop-example*jar wordcount wordcount_input wordcount_output + - {tdir}/hadoop/bin/hadoop fs -mkdir wordcount_input + - {tdir}/hadoop/bin/hadoop fs -put /tmp/hadoop_input/*txt wordcount_input/ + - {tdir}/hadoop/bin/hadoop jar {tdir}/hadoop/build/hadoop-example*jar wordcount wordcount_input wordcount_output - rm -rf /tmp/hadoop_input - """ + """.format(tdir=teuthology.get_testdir(ctx)) dist = 'precise' format = 'jar' arch = 'x86_64' diff --git a/teuthology/task/internal.py b/teuthology/task/internal.py index 9b5136f3eefb1..323b8c12bd4c5 100644 --- a/teuthology/task/internal.py +++ b/teuthology/task/internal.py @@ -15,15 +15,29 @@ log = logging.getLogger(__name__) @contextlib.contextmanager def base(ctx, config): log.info('Creating base directory...') + test_basedir = teuthology.get_testdir_base(ctx) + testdir = teuthology.get_testdir(ctx) + # make base dir if it doesn't exist 
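
For reference, a minimal sketch of the two misc.py path helpers used throughout these hunks, assuming ctx.teuthology_config holds the parsed .teuthology.yaml and ctx.name the value of --name (the bodies below are illustrative assumptions, not the actual misc.py implementation):

    import getpass
    import time

    def get_testdir_base(ctx):
        # test_path forces a single flat directory (the old behavior);
        # base_test_dir defaults to /tmp/cephtest
        conf = ctx.teuthology_config
        if 'test_path' in conf:
            return conf['test_path']
        return conf.get('base_test_dir', '/tmp/cephtest')

    def get_testdir(ctx):
        # {basedir}/{rundir}, where rundir is the run name or user-timestamp
        conf = ctx.teuthology_config
        if 'test_path' in conf:
            return conf['test_path']
        rundir = ctx.name or '{user}-{stamp}'.format(
            user=getpass.getuser(), stamp=time.strftime('%Y%m%d%H%M%S'))
        return '{base}/{run}'.format(base=get_testdir_base(ctx), run=rundir)

When test_path is set the two helpers return the same path, which is exactly the case the test_basedir != testdir guard just below (and the extra existence check in check_conflict) has to handle.
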
run.wait( ctx.cluster.run( args=[ - 'mkdir', '-m0755', '--', - '/tmp/cephtest', + 'mkdir', '-m0755', '-p', '--', + test_basedir, ], - wait=False, - ) + wait=False, + ) ) + # only create testdir if it's not set to basedir + if test_basedir != testdir: + run.wait( + ctx.cluster.run( + args=[ + 'mkdir', '-m0755', '--', + testdir, + ], + wait=False, + ) + ) try: yield @@ -36,7 +50,7 @@ def base(ctx, config): args=[ 'rmdir', '--', - '/tmp/cephtest', + testdir, ], wait=False, ), @@ -154,9 +168,36 @@ def connect(ctx, config): def check_conflict(ctx, config): log.info('Checking for old test directory...') + test_basedir = teuthology.get_testdir_base(ctx) processes = ctx.cluster.run( args=[ - 'test', '!', '-e', '/tmp/cephtest', + 'test', '!', '-e', test_basedir, ], wait=False, ) for proc in processes: assert isinstance(proc.exitstatus, gevent.event.AsyncResult) try: proc.exitstatus.get() except run.CommandFailedError: # base dir exists + r = proc.remote.run( + args=[ + 'ls', test_basedir, run.Raw('|'), 'wc', '-l' + ], + stdout=StringIO(), + ) + + if int(r.stdout.getvalue()) > 0: + log.error('WARNING: Host %s has stale test directories; these need to be investigated and cleaned up!', + proc.remote.shortname) + + # testdir might be the same as base dir (if test_path is set) + # need to bail out in that case if the testdir exists + testdir = teuthology.get_testdir(ctx) + processes = ctx.cluster.run( + args=[ + 'test', '!', '-e', testdir, ], wait=False, ) @@ -166,7 +207,7 @@ def check_conflict(ctx, config): try: proc.exitstatus.get() except run.CommandFailedError: - log.error('Host %s has stale cephtest directory, check your lock and reboot to clean up.', proc.remote.shortname) + log.error('Host %s has a stale test directory %s; check your lock and clean up.', proc.remote.shortname, testdir) failed = True if failed: raise RuntimeError('Stale jobs detected, aborting.') @@ -174,11 +215,12 @@ @contextlib.contextmanager def archive(ctx, config): log.info('Creating archive directory...') + testdir = teuthology.get_testdir(ctx) + archive_dir = '{tdir}/archive'.format(tdir=testdir) run.wait( ctx.cluster.run( args=[ - 'install', '-d', '-m0755', '--', - '/tmp/cephtest/archive', + 'install', '-d', '-m0755', '--', archive_dir, ], wait=False, ) ) @@ -193,7 +235,7 @@ def archive(ctx, config): os.mkdir(logdir) for remote in ctx.cluster.remotes.iterkeys(): path = os.path.join(logdir, remote.shortname) - teuthology.pull_directory(remote, '/tmp/cephtest/archive', path) + teuthology.pull_directory(remote, archive_dir, path) log.info('Removing archive directory...') run.wait( ctx.cluster.run( args=[ 'rm', '-rf', '--', - '/tmp/cephtest/archive', + archive_dir, ], wait=False, ), @@ -211,13 +253,14 @@ def archive(ctx, config): @contextlib.contextmanager def coredump(ctx, config): log.info('Enabling coredump saving...') + archive_dir = '{tdir}/archive'.format(tdir=teuthology.get_testdir(ctx)) run.wait( ctx.cluster.run( args=[ 'install', '-d', '-m0755', '--', - '/tmp/cephtest/archive/coredump', + '{adir}/coredump'.format(adir=archive_dir), run.Raw('&&'), - 'sudo', 'sysctl', '-w', 'kernel.core_pattern=/tmp/cephtest/archive/coredump/%t.%p.core', + 'sudo', 'sysctl', '-w', 'kernel.core_pattern={adir}/coredump/%t.%p.core'.format(adir=archive_dir), ], wait=False, ) @@ -235,7 +278,7 @@ def coredump(ctx, config): 'rmdir', '--ignore-fail-on-non-empty', '--', - '/tmp/cephtest/archive/coredump', + '{adir}/coredump'.format(adir=archive_dir), ], wait=False, ) @@ -246,7
@@ def coredump(ctx, config): for remote in ctx.cluster.remotes.iterkeys(): r = remote.run( args=[ - 'if', 'test', '!', '-e', '/tmp/cephtest/archive/coredump', run.Raw(';'), 'then', + 'if', 'test', '!', '-e', '{adir}/coredump'.format(adir=archive_dir), run.Raw(';'), 'then', 'echo', 'OK', run.Raw(';'), 'fi', ], @@ -268,11 +311,12 @@ def syslog(ctx, config): log.info('Starting syslog monitoring...') + archive_dir = '{tdir}/archive'.format(tdir=teuthology.get_testdir(ctx)) run.wait( ctx.cluster.run( args=[ 'mkdir', '-m0755', '--', - '/tmp/cephtest/archive/syslog', + '{adir}/syslog'.format(adir=archive_dir), ], wait=False, ) @@ -280,9 +324,9 @@ def syslog(ctx, config): CONF = '/etc/rsyslog.d/80-cephtest.conf' conf_fp = StringIO(""" -kern.* -/tmp/cephtest/archive/syslog/kern.log;RSYSLOG_FileFormat -*.*;kern.none -/tmp/cephtest/archive/syslog/misc.log;RSYSLOG_FileFormat -""") +kern.* -{adir}/syslog/kern.log;RSYSLOG_FileFormat +*.*;kern.none -{adir}/syslog/misc.log;RSYSLOG_FileFormat +""".format(adir=archive_dir)) try: for rem in ctx.cluster.remotes.iterkeys(): teuthology.sudo_write_file( @@ -336,7 +380,7 @@ kern.* -/tmp/cephtest/archive/syslog/kern.log;RSYSLOG_FileFormat args=[ 'egrep', '\\bBUG\\b|\\bINFO\\b|\\bDEADLOCK\\b', - run.Raw('/tmp/cephtest/archive/syslog/*.log'), + run.Raw('{adir}/syslog/*.log'.format(adir=archive_dir)), run.Raw('|'), 'grep', '-v', 'task .* blocked for more than .* seconds', run.Raw('|'), @@ -377,7 +421,7 @@ kern.* -/tmp/cephtest/archive/syslog/kern.log;RSYSLOG_FileFormat ctx.cluster.run( args=[ 'find', - '/tmp/cephtest/archive/syslog', + '{adir}/syslog'.format(adir=archive_dir), '-name', '*.log', '-print0', diff --git a/teuthology/task/kclient.py b/teuthology/task/kclient.py index 7ef67347bfaa2..867ee1e61f077 100644 --- a/teuthology/task/kclient.py +++ b/teuthology/task/kclient.py @@ -40,8 +40,10 @@ def task(ctx, config): for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] clients = list(teuthology.get_clients(ctx=ctx, roles=config)) + testdir = teuthology.get_testdir(ctx) + for id_, remote in clients: - mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_)) + mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) log.info('Mounting kclient client.{id} at {remote} {mnt}...'.format( id=id_, remote=remote, mnt=mnt)) @@ -51,8 +53,8 @@ def task(ctx, config): ips = [host for (host, port) in (remote_.ssh.get_transport().getpeername() for (remote_, roles) in remotes_and_roles)] mons = teuthology.get_mons(roles, ips).values() - secret = '/tmp/cephtest/data/client.{id}.secret'.format(id=id_) - teuthology.write_secret_file(remote, 'client.{id}'.format(id=id_), secret) + secret = '{tdir}/data/client.{id}.secret'.format(tdir=testdir, id=id_) + teuthology.write_secret_file(ctx, remote, 'client.{id}'.format(id=id_), secret) remote.run( args=[ @@ -65,10 +67,10 @@ def task(ctx, config): remote.run( args=[ 'sudo', - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/binary/usr/local/sbin/mount.ceph', + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + '{tdir}/binary/usr/local/sbin/mount.ceph'.format(tdir=testdir), '{mons}:/'.format(mons=','.join(mons)), mnt, '-v', @@ -84,7 +86,7 @@ def task(ctx, config): log.info('Unmounting kernel clients...') for id_, remote in clients: log.debug('Unmounting client client.{id}...'.format(id=id_)) - mnt =
os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_)) + mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) remote.run( args=[ 'sudo', diff --git a/teuthology/task/kcon_most.py b/teuthology/task/kcon_most.py index 5836f2faa9ca4..f9817313c867e 100644 --- a/teuthology/task/kcon_most.py +++ b/teuthology/task/kcon_most.py @@ -35,6 +35,8 @@ def task(ctx, config): for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] clients = list(teuthology.get_clients(ctx=ctx, roles=config)) + testdir = teuthology.get_testdir(ctx) + for id_, remote in clients: # TODO: Don't have to run this more than once per node (remote) log.info('Enable logging on client.{id} at {remote} ...'.format( @@ -42,7 +44,7 @@ def task(ctx, config): remote.run( args=[ 'sudo', - '/tmp/cephtest/kcon_most', + '{tdir}/kcon_most'.format(tdir=testdir), 'on' ], ) @@ -56,7 +58,7 @@ def task(ctx, config): remote.run( args=[ 'sudo', - '/tmp/cephtest/kcon_most', + '{tdir}/kcon_most'.format(tdir=testdir), 'off' ], ) diff --git a/teuthology/task/knfsd.py b/teuthology/task/knfsd.py index 0f4764ca013b6..dc46dac1c08ac 100644 --- a/teuthology/task/knfsd.py +++ b/teuthology/task/knfsd.py @@ -63,7 +63,7 @@ def task(ctx, config): clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys())) for id_, remote in clients: - mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_)) + mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_)) client_config = config.get("client.%s" % id_) if client_config is None: client_config = {} @@ -127,7 +127,7 @@ def task(ctx, config): log.info('Unexporting nfs server...') for id_, remote in clients: log.debug('Unexporting client client.{id}...'.format(id=id_)) - mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_)) + mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_)) remote.run( args=[ 'sudo', diff --git a/teuthology/task/lockfile.py b/teuthology/task/lockfile.py index ea988bcafa309..9e4d40b1f1d9a 100644 --- a/teuthology/task/lockfile.py +++ b/teuthology/task/lockfile.py @@ -2,6 +2,7 @@ import logging import os from ..orchestra import run +from teuthology import misc as teuthology import time import gevent @@ -66,6 +67,7 @@ def task(ctx, config): if badconfig: raise KeyError("bad config {op_}".format(op_=op)) + testdir = teuthology.get_testdir(ctx) clients = set(clients) files = set(files) lock_procs = list() @@ -73,22 +75,22 @@ def task(ctx, config): (client_remote,) = ctx.cluster.only(client).remotes.iterkeys() log.info("got a client remote") (_, _, client_id) = client.partition('.') - filepath = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=client_id), op["lockfile"]) + filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), op["lockfile"]) proc = client_remote.run( args=[ - 'mkdir', '-p', '/tmp/cephtest/archive/lockfile', + 'mkdir', '-p', '{tdir}/archive/lockfile'.format(tdir=testdir), run.Raw('&&'), - 'mkdir', '-p', '/tmp/cephtest/lockfile', + 'mkdir', '-p', '{tdir}/lockfile'.format(tdir=testdir), run.Raw('&&'), 'wget', '-nv', '--no-check-certificate', 'https://raw.github.com/gregsfortytwo/FileLocker/master/sclockandhold.cpp', - '-O', '/tmp/cephtest/lockfile/sclockandhold.cpp', + '-O', '{tdir}/lockfile/sclockandhold.cpp'.format(tdir=testdir), run.Raw('&&'), - 'g++', '/tmp/cephtest/lockfile/sclockandhold.cpp', - '-o', '/tmp/cephtest/lockfile/sclockandhold' + 'g++', '{tdir}/lockfile/sclockandhold.cpp'.format(tdir=testdir), + '-o', '{tdir}/lockfile/sclockandhold'.format(tdir=testdir) ], 
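
# Note (illustrative): the binary built above gets invoked further down
# through the same testdir-relative wrapper chain used by most tasks in
# this patch -- {tdir}/enable-coredump, then
# {tdir}/binary/usr/local/bin/ceph-coverage {tdir}/archive/coverage,
# plus (here) {tdir}/daemon-helper kill -- before the command itself.
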
logger=log.getChild('lockfile_client.{id}'.format(id=client_id)), wait=False @@ -107,7 +109,7 @@ def task(ctx, config): (_, _, client_id) = client.partition('.') file_procs = list() for lockfile in files: - filepath = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=client_id), lockfile) + filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), lockfile) proc = client_remote.run( args=[ 'sudo', @@ -121,7 +123,7 @@ def task(ctx, config): run.wait(file_procs) file_procs = list() for lockfile in files: - filepath = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=client_id), lockfile) + filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), lockfile) proc = client_remote.run( args=[ 'sudo', 'chown', 'ubuntu.ubuntu', filepath @@ -162,10 +164,10 @@ def task(ctx, config): for client in clients: (client_remote,) = ctx.cluster.only(client).remotes.iterkeys() (_, _, client_id) = client.partition('.') - filepath = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=client_id), op["lockfile"]) + filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), op["lockfile"]) proc = client_remote.run( args=[ - 'rm', '-rf', '/tmp/cephtest/lockfile', + 'rm', '-rf', '{tdir}/lockfile'.format(tdir=testdir), run.Raw(';'), 'sudo', 'rm', '-rf', filepath ], @@ -181,7 +183,8 @@ def lock_one(op, ctx): result = None (client_remote,) = ctx.cluster.only(op['client']).remotes.iterkeys() (_, _, client_id) = op['client'].partition('.') - filepath = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=client_id), op["lockfile"]) + testdir = teuthology.get_testdir(ctx) + filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), op["lockfile"]) if "maxwait" in op: timeout = gevent.Timeout(seconds=float(op["maxwait"])) @@ -189,12 +192,12 @@ def lock_one(op, ctx): try: proc = client_remote.run( args=[ - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/daemon-helper', + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + '{tdir}/daemon-helper'.format(tdir=testdir), 'kill', - '/tmp/cephtest/lockfile/sclockandhold', + '{tdir}/lockfile/sclockandhold'.format(tdir=testdir), filepath, '{holdtime}'.format(holdtime=op["holdtime"]), '{offset}'.format(offset=op.get("offset", '0')), diff --git a/teuthology/task/locktest.py b/teuthology/task/locktest.py index 724f0a85aac68..e5f479338cea6 100755 --- a/teuthology/task/locktest.py +++ b/teuthology/task/locktest.py @@ -1,6 +1,7 @@ import logging from ..orchestra import run +from teuthology import misc as teuthology log = logging.getLogger(__name__) @@ -29,8 +30,9 @@ def task(ctx, config): (client,) = ctx.cluster.only(config[1]).remotes ( _, _, host_id) = config[0].partition('.') ( _, _, client_id) = config[1].partition('.') - hostmnt = '/tmp/cephtest/mnt.{id}'.format(id=host_id) - clientmnt = '/tmp/cephtest/mnt.{id}'.format(id=client_id) + testdir = teuthology.get_testdir(ctx) + hostmnt = '{tdir}/mnt.{id}'.format(tdir=testdir, id=host_id) + clientmnt = '{tdir}/mnt.{id}'.format(tdir=testdir, id=client_id) try: for client_name in config: @@ -39,17 +41,17 @@ def task(ctx, config): args=[ # explicitly does not support multiple autotest tasks # in a single run; the result archival would conflict - 'mkdir', '/tmp/cephtest/archive/locktest', + 'mkdir', '{tdir}/archive/locktest'.format(tdir=testdir), run.Raw('&&'), - 'mkdir', '/tmp/cephtest/locktest', + 'mkdir', 
'{tdir}/locktest'.format(tdir=testdir), run.Raw('&&'), 'wget', '-nv', 'https://raw.github.com/gregsfortytwo/xfstests-ceph/master/src/locktest.c', - '-O', '/tmp/cephtest/locktest/locktest.c', + '-O', '{tdir}/locktest/locktest.c'.format(tdir=testdir), run.Raw('&&'), - 'g++', '/tmp/cephtest/locktest/locktest.c', - '-o', '/tmp/cephtest/locktest/locktest' + 'g++', '{tdir}/locktest/locktest.c'.format(tdir=testdir), + '-o', '{tdir}/locktest/locktest'.format(tdir=testdir) ], logger=log.getChild('locktest_client.{id}'.format(id=client_name)), ) @@ -67,7 +69,7 @@ def task(ctx, config): log.info('starting on host') hostproc = host.run( args=[ - '/tmp/cephtest/locktest/locktest', + '{tdir}/locktest/locktest'.format(tdir=testdir), '-p', '6788', '-d', '{mnt}/locktestfile'.format(mnt=hostmnt), @@ -79,7 +81,7 @@ def task(ctx, config): (_,_,hostaddr) = host.name.partition('@') clientproc = client.run( args=[ - '/tmp/cephtest/locktest/locktest', + '{tdir}/locktest/locktest'.format(tdir=testdir), '-p', '6788', '-d', '-h', hostaddr, @@ -100,26 +102,26 @@ def task(ctx, config): log.info('cleaning up host dir') host.run( args=[ - 'mkdir', '-p', '/tmp/cephtest/locktest', + 'mkdir', '-p', '{tdir}/locktest'.format(tdir=testdir), run.Raw('&&'), - 'rm', '-f', '/tmp/cephtest/locktest/locktest.c', + 'rm', '-f', '{tdir}/locktest/locktest.c'.format(tdir=testdir), run.Raw('&&'), - 'rm', '-f', '/tmp/cephtest/locktest/locktest', + 'rm', '-f', '{tdir}/locktest/locktest'.format(tdir=testdir), run.Raw('&&'), - 'rmdir', '/tmp/cephtest/locktest' + 'rmdir', '{tdir}/locktest'.format(tdir=testdir) ], logger=log.getChild('.{id}'.format(id=config[0])), ) log.info('cleaning up client dir') client.run( args=[ - 'mkdir', '-p', '/tmp/cephtest/locktest', + 'mkdir', '-p', '{tdir}/locktest'.format(tdir=testdir), run.Raw('&&'), - 'rm', '-f', '/tmp/cephtest/locktest/locktest.c', + 'rm', '-f', '{tdir}/locktest/locktest.c'.format(tdir=testdir), run.Raw('&&'), - 'rm', '-f', '/tmp/cephtest/locktest/locktest', + 'rm', '-f', '{tdir}/locktest/locktest'.format(tdir=testdir), run.Raw('&&'), - 'rmdir', '/tmp/cephtest/locktest' + 'rmdir', '{tdir}/locktest'.format(tdir=testdir) ], logger=log.getChild('.{id}'.format(\ id=config[1])), diff --git a/teuthology/task/lost_unfound.py b/teuthology/task/lost_unfound.py index 09932e451a2d3..68baad945fd97 100644 --- a/teuthology/task/lost_unfound.py +++ b/teuthology/task/lost_unfound.py @@ -6,15 +6,16 @@ from teuthology import misc as teuthology log = logging.getLogger(__name__) -def rados(remote, cmd): +def rados(ctx, remote, cmd): + testdir = teuthology.get_testdir(ctx) log.info("rados %s" % ' '.join(cmd)) pre = [ - 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib', - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/binary/usr/local/bin/rados', - '-c', '/tmp/cephtest/ceph.conf', + 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir), + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/rados'.format(tdir=testdir), + '-c', '{tdir}/ceph.conf'.format(tdir=testdir), ]; pre.extend(cmd) proc = remote.run( @@ -56,7 +57,7 @@ def task(ctx, config): manager.mark_out_osd(2) # kludge to make sure they get a map - rados(mon, ['-p', 'data', 'put', 'dummy', dummyfile]) + rados(ctx, mon, ['-p', 'data', 'put', 'dummy', dummyfile]) manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
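
# For illustration: with the reworked helper above, a call like
#   rados(ctx, mon, ['-p', 'data', 'put', 'dummy', dummyfile])
# now executes roughly (with <tdir> standing for the resolved per-run
# test directory, e.g. /tmp/cephtest/<rundir>):
#   LD_LIBRARY_PATH=<tdir>/binary/usr/local/lib \
#   <tdir>/enable-coredump \
#   <tdir>/binary/usr/local/bin/ceph-coverage <tdir>/archive/coverage \
#   <tdir>/binary/usr/local/bin/rados -c <tdir>/ceph.conf \
#   -p data put dummy <dummyfile>
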
manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') @@ -64,9 +65,9 @@ def task(ctx, config): # create old objects for f in range(1, 10): - rados(mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile]) - rados(mon, ['-p', 'data', 'put', 'existed_%d' % f, dummyfile]) - rados(mon, ['-p', 'data', 'rm', 'existed_%d' % f]) + rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', 'data', 'put', 'existed_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', 'data', 'rm', 'existed_%d' % f]) # delay recovery, and make the pg log very long (to prevent backfill) manager.raw_cluster_cmd( @@ -79,9 +80,9 @@ def task(ctx, config): manager.mark_down_osd(0) for f in range(1, 10): - rados(mon, ['-p', 'data', 'put', 'new_%d' % f, dummyfile]) - rados(mon, ['-p', 'data', 'put', 'existed_%d' % f, dummyfile]) - rados(mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', 'data', 'put', 'new_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', 'data', 'put', 'existed_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile]) # bring osd.0 back up, let it peer, but don't replicate the new # objects... @@ -152,11 +153,11 @@ def task(ctx, config): # verify result for f in range(1, 10): - err = rados(mon, ['-p', 'data', 'get', 'new_%d' % f, '-']) + err = rados(ctx, mon, ['-p', 'data', 'get', 'new_%d' % f, '-']) assert err - err = rados(mon, ['-p', 'data', 'get', 'existed_%d' % f, '-']) + err = rados(ctx, mon, ['-p', 'data', 'get', 'existed_%d' % f, '-']) assert err - err = rados(mon, ['-p', 'data', 'get', 'existing_%d' % f, '-']) + err = rados(ctx, mon, ['-p', 'data', 'get', 'existing_%d' % f, '-']) assert not err # see if osd.1 can cope diff --git a/teuthology/task/manypools.py b/teuthology/task/manypools.py index 9a69094b889cd..c8a010b8eaf43 100644 --- a/teuthology/task/manypools.py +++ b/teuthology/task/manypools.py @@ -28,6 +28,8 @@ def task(ctx, config): time: 360 """ + testdir = teuthology.get_testdir(ctx) + log.info('creating {n} pools'.format(n=config)) poolnum = int(config) @@ -51,17 +53,17 @@ def task(ctx, config): log.info('creating pool{num} on {role}'.format(num=poolnum, role=role_)) proc = remote.run( args=[ - 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib', - '/tmp/cephtest/binary/usr/local/bin/rados', - '-c', '/tmp/cephtest/ceph.conf', - '-k', '/tmp/cephtest/data/{role}.keyring'.format(role=role_), + 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/rados'.format(tdir=testdir), + '-c', '{tdir}/ceph.conf'.format(tdir=testdir), + '-k', '{tdir}/data/{role}.keyring'.format(tdir=testdir, role=role_), '--name', role_, 'mkpool', 'pool{num}'.format(num=poolnum), '-1', run.Raw('&&'), - 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib', - '/tmp/cephtest/binary/usr/local/bin/rados', - '-c', '/tmp/cephtest/ceph.conf', - '-k', '/tmp/cephtest/data/{role}.keyring'.format(role=role_), + 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/rados'.format(tdir=testdir), + '-c', '{tdir}/ceph.conf'.format(tdir=testdir), + '-k', '{tdir}/data/{role}.keyring'.format(tdir=testdir, role=role_), '--name', role_, '--pool', 'pool{num}'.format(num=poolnum), 'bench', '0', 'write', '-t', '16', '--block-size', '1' diff --git a/teuthology/task/mpi.py b/teuthology/task/mpi.py index 67d444f935a1d..b6e760c4d94d6 100644 --- a/teuthology/task/mpi.py +++ b/teuthology/task/mpi.py @@ -43,14 +43,14 @@ def task(ctx, config): - ceph-fuse: - pexec: 
clients: - - ln -s /tmp/cephtest/mnt.* /tmp/cephtest/gmnt + - ln -s {testdir}/mnt.* {testdir}/gmnt - ssh_keys: - mpi: exec: fsx-mpi - workdir: /tmp/cephtest/gmnt + workdir: {testdir}/gmnt - pexec: clients: - - rm -f /tmp/cephtest/gmnt + - rm -f {testdir}/gmnt """ assert isinstance(config, dict), 'task mpi got invalid config' @@ -89,13 +89,17 @@ def task(ctx, config): log.info('mpi rank 0 is: {name}'.format(name=master_remote.name)) + testdir = teuthology.get_testdir(ctx) + # write out the mpi hosts file log.info('mpi nodes: [%s]' % (', '.join(hosts))) - teuthology.write_file(remote=master_remote, path='/tmp/cephtest/mpi-hosts', data='\n'.join(hosts)) + teuthology.write_file(remote=master_remote, + path='{tdir}/mpi-hosts'.format(tdir=testdir), + data='\n'.join(hosts)) log.info('mpiexec on {name}: {cmd}'.format(name=master_remote.name, cmd=mpiexec)) - args=['mpiexec', '-f', '/tmp/cephtest/mpi-hosts'] + args=['mpiexec', '-f', '{tdir}/mpi-hosts'.format(tdir=testdir)] args.extend(workdir) args.extend(mpiexec.split(' ')) master_remote.run(args=args, ) log.info('mpi task completed') - master_remote.run(args=['rm', '/tmp/cephtest/mpi-hosts']) + master_remote.run(args=['rm', '{tdir}/mpi-hosts'.format(tdir=testdir)]) diff --git a/teuthology/task/nfs.py b/teuthology/task/nfs.py index ed92f5f88ff9b..52123f577a7d4 100644 --- a/teuthology/task/nfs.py +++ b/teuthology/task/nfs.py @@ -50,8 +50,9 @@ def task(ctx, config): clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys())) + testdir = teuthology.get_testdir(ctx) for id_, remote in clients: - mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_)) + mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) client_config = config.get("client.%s" % id_) if client_config is None: client_config = {} @@ -61,7 +62,7 @@ def task(ctx, config): server = client_config.get('server'); svr_id = server[len('client.'):] - svr_mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=svr_id)) + svr_mnt = os.path.join(testdir, 'mnt.{id}'.format(id=svr_id)) svr_remote = None all_config = ['client.{id}'.format(id=tmpid) @@ -109,7 +110,7 @@ def task(ctx, config): log.info('Unmounting nfs clients...') for id_, remote in clients: log.debug('Unmounting nfs client client.{id}...'.format(id=id_)) - mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_)) + mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) remote.run( args=[ 'sudo', diff --git a/teuthology/task/object_source_down.py b/teuthology/task/object_source_down.py index dda6f04f927e6..e138e9bc58998 100644 --- a/teuthology/task/object_source_down.py +++ b/teuthology/task/object_source_down.py @@ -6,15 +6,15 @@ from teuthology import misc as teuthology log = logging.getLogger(__name__) -def rados(remote, cmd): +def rados(testdir, remote, cmd): log.info("rados %s" % ' '.join(cmd)) pre = [ - 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib', - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/binary/usr/local/bin/rados', - '-c', '/tmp/cephtest/ceph.conf', + 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir), + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/rados'.format(tdir=testdir), + '-c', '{tdir}/ceph.conf'.format(tdir=testdir), ]; pre.extend(cmd) proc = remote.run( @@ -77,12 +77,14 @@ def task(ctx, config): '--osd-recovery-delay-start 10000 
--osd-min-pg-log-entries 100000000' ) + testdir = teuthology.get_testdir(ctx) + # kludge to make sure they get a map - rados(mon, ['-p', 'data', 'put', 'dummy', dummyfile]) + rados(testdir, mon, ['-p', 'data', 'put', 'dummy', dummyfile]) # create old objects for f in range(1, 10): - rados(mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile]) + rados(testdir, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile]) manager.mark_out_osd(3) manager.wait_till_active() diff --git a/teuthology/task/omapbench.py b/teuthology/task/omapbench.py index e01d42ff924a7..3d69c7ecdf3f2 100644 --- a/teuthology/task/omapbench.py +++ b/teuthology/task/omapbench.py @@ -2,6 +2,7 @@ import contextlib import logging from ..orchestra import run +from teuthology import misc as teuthology log = logging.getLogger(__name__) @@ -41,6 +42,7 @@ def task(ctx, config): assert isinstance(config, dict), \ "please list clients to run on" omapbench = {} + testdir = teuthology.get_testdir(ctx) print(str(config.get('increment',-1))) for role in config.get('clients', ['client.0']): assert isinstance(role, basestring) @@ -51,13 +53,13 @@ def task(ctx, config): proc = remote.run( args=[ "/bin/sh", "-c", - " ".join(['CEPH_CONF=/tmp/cephtest/ceph.conf', - 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib', - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/binary/usr/local/bin/omapbench', - '-k', '/tmp/cephtest/data/{role}.keyring'.format(role=role), + " ".join(['CEPH_CONF={tdir}/ceph.conf', + 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib', + '{tdir}/enable-coredump', + '{tdir}/binary/usr/local/bin/ceph-coverage', + '{tdir}/archive/coverage', + '{tdir}/binary/usr/local/bin/omapbench', + '-k', '{tdir}/data/{role}.keyring', '--name', role[len(PREFIX):], '-t', str(config.get('threads', 30)), '-o', str(config.get('objects', 1000)), @@ -66,7 +68,7 @@ def task(ctx, config): '--valsize', str(config.get('valsize',1000)), '--inc', str(config.get('increment',10)), '--omaptype', str(config.get('omaptype','uniform')) - ]), + ]).format(tdir=testdir, role=role), ], logger=log.getChild('omapbench.{id}'.format(id=id_)), stdin=run.PIPE, diff --git a/teuthology/task/osd_backfill.py b/teuthology/task/osd_backfill.py index 5710757763692..7849ad5bd1316 100644 --- a/teuthology/task/osd_backfill.py +++ b/teuthology/task/osd_backfill.py @@ -7,15 +7,16 @@ from teuthology import misc as teuthology log = logging.getLogger(__name__) -def rados_start(remote, cmd): +def rados_start(ctx, remote, cmd): log.info("rados %s" % ' '.join(cmd)) + testdir = teuthology.get_testdir(ctx) pre = [ - 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib', - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/binary/usr/local/bin/rados', - '-c', '/tmp/cephtest/ceph.conf', + 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir), + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/rados'.format(tdir=testdir), + '-c', '{tdir}/ceph.conf'.format(tdir=testdir), ]; pre.extend(cmd) proc = remote.run( @@ -53,7 +54,7 @@ def task(ctx, config): manager.wait_for_clean() # write some data - p = rados_start(mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096', + p = rados_start(ctx, mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096',
'--no-cleanup']) err = p.exitstatus.get(); log.info('err is %d' % err) @@ -71,7 +72,7 @@ def task(ctx, config): manager.wait_for_recovery() # write some new data - p = rados_start(mon, ['-p', 'data', 'bench', '30', 'write', '-b', '4096', + p = rados_start(ctx, mon, ['-p', 'data', 'bench', '30', 'write', '-b', '4096', '--no-cleanup']) time.sleep(15) diff --git a/teuthology/task/osd_recovery.py b/teuthology/task/osd_recovery.py index a495c600fbc6f..7d2ee01f24cdc 100644 --- a/teuthology/task/osd_recovery.py +++ b/teuthology/task/osd_recovery.py @@ -7,15 +7,15 @@ from teuthology import misc as teuthology log = logging.getLogger(__name__) -def rados_start(remote, cmd): +def rados_start(testdir, remote, cmd): log.info("rados %s" % ' '.join(cmd)) pre = [ - 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib', - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/binary/usr/local/bin/rados', - '-c', '/tmp/cephtest/ceph.conf', + 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir), + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/rados'.format(tdir=testdir), + '-c', '{tdir}/ceph.conf'.format(tdir=testdir), ]; pre.extend(cmd) proc = remote.run( @@ -32,6 +32,7 @@ def task(ctx, config): config = {} assert isinstance(config, dict), \ 'task only accepts a dict for configuration' + testdir = teuthology.get_testdir(ctx) first_mon = teuthology.get_first_mon(ctx, config) (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys() @@ -63,7 +64,7 @@ def task(ctx, config): manager.raw_cluster_cmd('osd', 'unset', 'nodown') # write some new data - p = rados_start(mon, ['-p', 'rbd', 'bench', '60', 'write', '-b', '4096', + p = rados_start(testdir, mon, ['-p', 'rbd', 'bench', '60', 'write', '-b', '4096', '--no-cleanup']) time.sleep(15) @@ -86,7 +87,7 @@ def task(ctx, config): manager.wait_for_active_or_down() # write some more (make sure osd.2 really is divergent) - p = rados_start(mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096']) + p = rados_start(testdir, mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096']) p.exitstatus.get(); # revive divergent osd @@ -108,6 +109,7 @@ def test_incomplete_pgs(ctx, config): """ Test handling of incomplete pgs. Requires 4 osds. 
""" + testdir = teuthology.get_testdir(ctx) if config is None: config = {} assert isinstance(config, dict), \ @@ -156,7 +158,7 @@ def test_incomplete_pgs(ctx, config): # few objects in metadata pool (with pg log, normal recovery) for f in range(1, 20): - p = rados_start(mon, ['-p', 'metadata', 'put', + p = rados_start(testdir, mon, ['-p', 'metadata', 'put', 'foo.%d' % f, '/etc/passwd']) p.exitstatus.get() diff --git a/teuthology/task/peer.py b/teuthology/task/peer.py index 0d1cf2ee20219..a5b901d943361 100644 --- a/teuthology/task/peer.py +++ b/teuthology/task/peer.py @@ -7,15 +7,16 @@ from teuthology import misc as teuthology log = logging.getLogger(__name__) -def rados(remote, cmd): +def rados(ctx, remote, cmd): + testdir = teuthology.get_testdir(ctx) log.info("rados %s" % ' '.join(cmd)) pre = [ - 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib', - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/binary/usr/local/bin/rados', - '-c', '/tmp/cephtest/ceph.conf', + 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir), + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/rados'.format(tdir=testdir), + '-c', '{tdir}/ceph.conf'.format(tdir=testdir), ]; pre.extend(cmd) proc = remote.run( @@ -58,7 +59,7 @@ def task(ctx, config): manager.mark_down_osd(2) # kludge to make sure they get a map - rados(mon, ['-p', 'data', 'get', 'dummy', '-']) + rados(ctx, mon, ['-p', 'data', 'get', 'dummy', '-']) manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats') manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats') diff --git a/teuthology/task/pexec.py b/teuthology/task/pexec.py index de0fb519c0347..0cb520149653f 100644 --- a/teuthology/task/pexec.py +++ b/teuthology/task/pexec.py @@ -89,14 +89,14 @@ def task(ctx, config): tasks: - pexec: all: - - grep FAIL /tmp/cephtest/archive/log/* + - grep FAIL {testdir}/archive/log/* Or if you want to run in parallel on all clients: tasks: - pexec: clients: - - dd if=/dev/zero of=/tmp/cephtest/mnt.* count=1024 bs=1024 + - dd if=/dev/zero of={testdir}/mnt.* count=1024 bs=1024 You can also ensure that parallel commands are synchronized with the special 'barrier' statement: @@ -104,7 +104,7 @@ def task(ctx, config): tasks: - pexec: clients: - - cd /tmp/cephtest/mnt.* + - cd {testdir}/mnt.* - while true; do - barrier - dd if=/dev/zero of=./foo count=1024 bs=1024 diff --git a/teuthology/task/qemu.py b/teuthology/task/qemu.py index 8a119aae23dd2..1d0def9f0dd1c 100644 --- a/teuthology/task/qemu.py +++ b/teuthology/task/qemu.py @@ -17,14 +17,15 @@ DEFAULT_MEM = 1024 # in megabytes @contextlib.contextmanager def create_dirs(ctx, config): + testdir = teuthology.get_testdir(ctx) for client, client_config in config.iteritems(): assert 'test' in client_config, 'You must specify a test to run' (remote,) = ctx.cluster.only(client).remotes.keys() remote.run( args=[ 'install', '-d', '-m0755', '--', - '/tmp/cephtest/qemu', - '/tmp/cephtest/archive/qemu', + '{tdir}/qemu'.format(tdir=testdir), + '{tdir}/archive/qemu'.format(tdir=testdir), ] ) try: @@ -35,18 +36,19 @@ def create_dirs(ctx, config): (remote,) = ctx.cluster.only(client).remotes.keys() remote.run( args=[ - 'rmdir', '/tmp/cephtest/qemu', run.Raw('||'), 'true', + 'rmdir', '{tdir}/qemu'.format(tdir=testdir), run.Raw('||'), 'true', ] ) @contextlib.contextmanager def 
generate_iso(ctx, config): log.info('generating iso...') + testdir = teuthology.get_testdir(ctx) for client, client_config in config.iteritems(): assert 'test' in client_config, 'You must specify a test to run' src_dir = os.path.dirname(__file__) - userdata_path = os.path.join('/tmp/cephtest/qemu', 'userdata.' + client) - metadata_path = os.path.join('/tmp/cephtest/qemu', 'metadata.' + client) + userdata_path = os.path.join(testdir, 'qemu', 'userdata.' + client) + metadata_path = os.path.join(testdir, 'qemu', 'metadata.' + client) with file(os.path.join(src_dir, 'userdata_setup.yaml'), 'rb') as f: test_setup = ''.join(f.readlines()) @@ -81,7 +83,7 @@ def generate_iso(ctx, config): with file(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f: teuthology.write_file(remote, metadata_path, f) - test_file = '/tmp/cephtest/qemu/{client}.test.sh'.format(client=client) + test_file = '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client) remote.run( args=[ 'wget', '-nv', '-O', test_file, @@ -94,7 +96,7 @@ def generate_iso(ctx, config): args=[ 'genisoimage', '-quiet', '-input-charset', 'utf-8', '-volid', 'cidata', '-joliet', '-rock', - '-o', '/tmp/cephtest/qemu/{client}.iso'.format(client=client), + '-o', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client), '-graft-points', 'user-data={userdata}'.format(userdata=userdata_path), 'meta-data={metadata}'.format(metadata=metadata_path), @@ -109,19 +111,20 @@ def generate_iso(ctx, config): remote.run( args=[ 'rm', '-f', - '/tmp/cephtest/qemu/{client}.iso'.format(client=client), - os.path.join('/tmp/cephtest/qemu', 'userdata.' + client), - os.path.join('/tmp/cephtest/qemu', 'metadata.' + client), - '/tmp/cephtest/qemu/{client}.test.sh'.format(client=client), + '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client), + os.path.join(testdir, 'qemu', 'userdata.' + client), + os.path.join(testdir, 'qemu', 'metadata.' 
+ client), + '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client), ], ) @contextlib.contextmanager def download_image(ctx, config): log.info('downloading base image') + testdir = teuthology.get_testdir(ctx) for client, client_config in config.iteritems(): (remote,) = ctx.cluster.only(client).remotes.keys() - base_file = '/tmp/cephtest/qemu/base.{client}.qcow2'.format(client=client) + base_file = '{tdir}/qemu/base.{client}.qcow2'.format(tdir=testdir, client=client) remote.run( args=[ 'wget', '-nv', '-O', base_file, DEFAULT_IMAGE_URL, @@ -132,7 +135,8 @@ def download_image(ctx, config): finally: log.debug('cleaning up base image files') for client in config.iterkeys(): - base_file = '/tmp/cephtest/qemu/base.{client}.qcow2'.format( + base_file = '{tdir}/qemu/base.{client}.qcow2'.format( + tdir=testdir, client=client, ) (remote,) = ctx.cluster.only(client).remotes.keys() @@ -145,22 +149,23 @@ def download_image(ctx, config): @contextlib.contextmanager def run_qemu(ctx, config): procs = [] + testdir = teuthology.get_testdir(ctx) for client, client_config in config.iteritems(): (remote,) = ctx.cluster.only(client).remotes.keys() - log_dir = '/tmp/cephtest/archive/qemu/{client}'.format(client=client) + log_dir = '{tdir}/archive/qemu/{client}'.format(tdir=testdir, client=client) remote.run( args=[ 'mkdir', log_dir, ] ) - base_file = '/tmp/cephtest/qemu/base.{client}.qcow2'.format(client=client) + base_file = '{tdir}/qemu/base.{client}.qcow2'.format(tdir=testdir, client=client) args=[ - run.Raw('LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib'), - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/daemon-helper', + run.Raw('LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir)), + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + '{tdir}/daemon-helper'.format(tdir=testdir), 'term', 'kvm', '-enable-kvm', '-nographic', '-m', str(client_config.get('memory', DEFAULT_MEM)), @@ -168,7 +173,7 @@ def run_qemu(ctx, config): '-drive', 'file={base},format=qcow2,if=virtio'.format(base=base_file), # cd holding metadata for cloud-init - '-cdrom', '/tmp/cephtest/qemu/{client}.iso'.format(client=client), + '-cdrom', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client), # virtio 9p fs for logging '-fsdev', 'local,id=log,path={log},security_model=none'.format(log=log_dir), @@ -190,7 +195,7 @@ def run_qemu(ctx, config): args.extend([ '-drive', 'file=rbd:rbd/{img}:conf={conf}:id={id},format=rbd,if=virtio,cache={cachemode}'.format( - conf='/tmp/cephtest/ceph.conf', + conf='{tdir}/ceph.conf'.format(tdir=testdir), img='{client}.{num}'.format(client=client, num=i), id=client[len('client.'):], cachemode=cachemode, @@ -219,7 +224,8 @@ def run_qemu(ctx, config): remote.run( args=[ 'test', '-f', - '/tmp/cephtest/archive/qemu/{client}/success'.format( + '{tdir}/archive/qemu/{client}/success'.format( + tdir=testdir, client=client ), ], diff --git a/teuthology/task/rados.py b/teuthology/task/rados.py index e43701a3adde7..277f0e39729f5 100644 --- a/teuthology/task/rados.py +++ b/teuthology/task/rados.py @@ -2,6 +2,7 @@ import contextlib import logging from ..orchestra import run +from teuthology import misc as teuthology log = logging.getLogger(__name__) @@ -50,13 +51,14 @@ def task(ctx, config): object_size = int(config.get('object_size', 4000000)) op_weights = config.get('op_weights', 
{}) + testdir = teuthology.get_testdir(ctx) args = [ - 'CEPH_CONF=/tmp/cephtest/ceph.conf', - 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib', - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/binary/usr/local/bin/testrados', + 'CEPH_CONF={tdir}/ceph.conf'.format(tdir=testdir), + 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir), + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/testrados'.format(tdir=testdir), '--op', 'read', str(op_weights.get('read', 100)), '--op', 'write', str(op_weights.get('write', 100)), '--op', 'delete', str(op_weights.get('delete', 10)), diff --git a/teuthology/task/radosbench.py b/teuthology/task/radosbench.py index b1b213a383da9..39caef5463270 100644 --- a/teuthology/task/radosbench.py +++ b/teuthology/task/radosbench.py @@ -2,6 +2,7 @@ import contextlib import logging from ..orchestra import run +from teuthology import misc as teuthology log = logging.getLogger(__name__) @@ -31,6 +32,8 @@ def task(ctx, config): "please list clients to run on" radosbench = {} + testdir = teuthology.get_testdir(ctx) + for role in config.get('clients', ['client.0']): assert isinstance(role, basestring) PREFIX = 'client.' @@ -42,16 +45,16 @@ def task(ctx, config): proc = remote.run( args=[ "/bin/sh", "-c", - " ".join(['LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib', - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/binary/usr/local/bin/rados', - '-c', '/tmp/cephtest/ceph.conf', - '-k', '/tmp/cephtest/data/{role}.keyring'.format(role=role), + " ".join(['LD_LIBRARY_PATH={tdir}/binary/usr/local/lib', + '{tdir}/enable-coredump', + '{tdir}/binary/usr/local/bin/ceph-coverage', + '{tdir}/archive/coverage', + '{tdir}/binary/usr/local/bin/rados', + '-c', '{tdir}/ceph.conf', + '-k', '{tdir}/data/{role}.keyring', '--name', role, 'mkpool', str(config.get('pool', 'data')) - ]), + ]).format(tdir=testdir, role=role), ], logger=log.getChild('radosbench.{id}'.format(id=id_)), stdin=run.PIPE, @@ -62,17 +65,17 @@ def task(ctx, config): proc = remote.run( args=[ "/bin/sh", "-c", - " ".join(['LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib', - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/binary/usr/local/bin/rados', - '-c', '/tmp/cephtest/ceph.conf', - '-k', '/tmp/cephtest/data/{role}.keyring'.format(role=role), + " ".join(['LD_LIBRARY_PATH={tdir}/binary/usr/local/lib', + '{tdir}/enable-coredump', + '{tdir}/binary/usr/local/bin/ceph-coverage', + '{tdir}/archive/coverage', + '{tdir}/binary/usr/local/bin/rados', + '-c', '{tdir}/ceph.conf', + '-k', '{tdir}/data/{role}.keyring', '--name', role, '-p' , str(config.get('pool', 'data')), 'bench', str(config.get('time', 360)), 'write', - ]), + ]).format(tdir=testdir, role=role), ], logger=log.getChild('radosbench.{id}'.format(id=id_)), stdin=run.PIPE, @@ -90,16 +93,16 @@ def task(ctx, config): proc = remote.run( args=[ "/bin/sh", "-c", - " ".join(['LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib', - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/binary/usr/local/bin/rados', - '-c',
'/tmp/cephtest/ceph.conf', - '-k', '/tmp/cephtest/data/{role}.keyring'.format(role=role), + " ".join(['LD_LIBRARY_PATH={tdir}/binary/usr/local/lib', + '{tdir}/enable-coredump', + '{tdir}/binary/usr/local/bin/ceph-coverage', + '{tdir}/archive/coverage', + '{tdir}/binary/usr/local/bin/rados', + '-c', '{tdir}/ceph.conf', + '-k', '{tdir}/data/{role}.keyring', '--name', role, 'rmpool', str(config.get('pool', 'data')) - ]), + ]).format(tdir=testdir, role=role), ], logger=log.getChild('radosbench.{id}'.format(id=id_)), stdin=run.PIPE, diff --git a/teuthology/task/radosgw-admin.py b/teuthology/task/radosgw-admin.py index 6bc4b33b09aad..b1007133009d5 100644 --- a/teuthology/task/radosgw-admin.py +++ b/teuthology/task/radosgw-admin.py @@ -27,13 +27,14 @@ def successful_ops(out): def rgwadmin(ctx, client, cmd): log.info('radosgw-admin: %s' % cmd) + testdir = teuthology.get_testdir(ctx) pre = [ - 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib', - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/binary/usr/local/bin/radosgw-admin', - '-c', '/tmp/cephtest/ceph.conf', + 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir), + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/radosgw-admin'.format(tdir=testdir), + '-c', '{tdir}/ceph.conf'.format(tdir=testdir), '--log-to-stderr', '--format', 'json', ] diff --git a/teuthology/task/rbd.py b/teuthology/task/rbd.py index 4a58c860dfaa5..4461c1f66b450 100644 --- a/teuthology/task/rbd.py +++ b/teuthology/task/rbd.py @@ -43,6 +43,7 @@ def create_image(ctx, config): else: images = [(role, None) for role in config] + testdir = teuthology.get_testdir(ctx) for role, properties in images: if properties is None: properties = {} @@ -53,12 +54,12 @@ def create_image(ctx, config): log.info('Creating image {name} with size {size}'.format(name=name, size=size)) args = [ - 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib', - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/binary/usr/local/bin/rbd', - '-c', '/tmp/cephtest/ceph.conf', + 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir), + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/rbd'.format(tdir=testdir), + '-c', '{tdir}/ceph.conf'.format(tdir=testdir), '-p', 'rbd', 'create', '--size', str(size), @@ -80,12 +81,12 @@ def create_image(ctx, config): (remote,) = ctx.cluster.only(role).remotes.keys() remote.run( args=[ - 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib', - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/binary/usr/local/bin/rbd', - '-c', '/tmp/cephtest/ceph.conf', + 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir), + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/rbd'.format(tdir=testdir), + '-c', '{tdir}/ceph.conf'.format(tdir=testdir), '-p', 'rbd', 'rm', name, @@ -158,6 +159,9 @@ def dev_create(ctx, config): role_images = [(role, None)
for role in config] log.info('Creating rbd block devices...') + + testdir = teuthology.get_testdir(ctx) + for role, image in role_images: if image is None: image = default_image_name(role) (remote,) = ctx.cluster.only(role).remotes.keys() remote.run( args=[ 'echo', - 'KERNEL=="rbd[0-9]*", PROGRAM="/tmp/cephtest/binary/usr/local/bin/ceph-rbdnamer %n", SYMLINK+="rbd/%c{1}/%c{2}"', + 'KERNEL=="rbd[0-9]*", PROGRAM="{tdir}/binary/usr/local/bin/ceph-rbdnamer %n", SYMLINK+="rbd/%c{{1}}/%c{{2}}"'.format(tdir=testdir), run.Raw('>'), - '/tmp/cephtest/51-rbd.rules', + '{tdir}/51-rbd.rules'.format(tdir=testdir), ], ) remote.run( args=[ 'sudo', 'mv', - '/tmp/cephtest/51-rbd.rules', + '{tdir}/51-rbd.rules'.format(tdir=testdir), '/etc/udev/rules.d/', ], ) - secretfile = '/tmp/cephtest/data/{role}.secret'.format(role=role) - teuthology.write_secret_file(remote, role, secretfile) + secretfile = '{tdir}/data/{role}.secret'.format(tdir=testdir, role=role) + teuthology.write_secret_file(ctx, remote, role, secretfile) remote.run( args=[ 'sudo', - 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib', - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/binary/usr/local/bin/rbd', - '-c', '/tmp/cephtest/ceph.conf', + 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir), + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/rbd'.format(tdir=testdir), + '-c', '{tdir}/ceph.conf'.format(tdir=testdir), '--user', role.rsplit('.')[-1], '--secret', secretfile, '-p', 'rbd', @@ -216,12 +220,12 @@ def dev_create(ctx, config): remote.run( args=[ 'sudo', - 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib', - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/binary/usr/local/bin/rbd', - '-c', '/tmp/cephtest/ceph.conf', + 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir), + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/rbd'.format(tdir=testdir), + '-c', '{tdir}/ceph.conf'.format(tdir=testdir), '-p', 'rbd', 'unmap', '/dev/rbd/rbd/{imgname}'.format(imgname=image), @@ -311,13 +315,15 @@ def mount(ctx, config): id_ = role[len(PREFIX):] return id_ - mnt_template = '/tmp/cephtest/mnt.{id}' + testdir = teuthology.get_testdir(ctx) + + mnt_template = '{tdir}/mnt.{id}' for role, image in role_images: if image is None: image = default_image_name(role) (remote,) = ctx.cluster.only(role).remotes.keys() id_ = strip_client_prefix(role) - mnt = mnt_template.format(id=id_) + mnt = mnt_template.format(tdir=testdir, id=id_) remote.run( args=[ 'mkdir', @@ -410,6 +416,7 @@ def run_xfstests(ctx, config): yield def run_xfstests_one_client(ctx, role, properties): + testdir = teuthology.get_testdir(ctx) try: count = properties.get('count') test_dev = properties.get('test_dev') @@ -428,7 +435,7 @@ def run_xfstests_one_client(ctx, role, properties): (remote,) = ctx.cluster.only(role).remotes.keys() # Fetch the test script - test_root = '/tmp/cephtest' + test_root = teuthology.get_testdir(ctx) test_script = 'run_xfstests.sh' test_path = os.path.join(test_root, test_script) @@ -453,10 +460,10 @@ def run_xfstests_one_client(ctx, role, properties): # readlink -f
in order to get their canonical # pathname (so it matches what the kernel remembers). args = [ - 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib', - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', + 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir), + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), '/usr/bin/sudo', '/bin/bash', test_path, diff --git a/teuthology/task/rbd_fsx.py b/teuthology/task/rbd_fsx.py index d2973babddbf6..b6cea5b3252a4 100644 --- a/teuthology/task/rbd_fsx.py +++ b/teuthology/task/rbd_fsx.py @@ -2,6 +2,7 @@ import contextlib import logging from teuthology.parallel import parallel +from teuthology import misc as teuthology log = logging.getLogger(__name__) @@ -37,19 +38,20 @@ def task(ctx, config): yield def _run_one_client(ctx, config, role): + testdir = teuthology.get_testdir(ctx) (remote,) = ctx.cluster.only(role).remotes.iterkeys() remote.run( args=[ - 'CEPH_CONF=/tmp/cephtest/ceph.conf', - 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib', - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/binary/usr/local/bin/test_librbd_fsx', + 'CEPH_CONF={tdir}/ceph.conf'.format(tdir=testdir), + 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir), + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/test_librbd_fsx'.format(tdir=testdir), '-d', '-W', '-R', # mmap doesn't work with rbd '-p', str(config.get('progress_interval', 100)), # show progress - '-P', '/tmp/cephtest/archive', + '-P', '{tdir}/archive'.format(tdir=testdir), '-t', str(config.get('truncbdy',1)), '-l', str(config.get('size', 1073741824)), '-S', str(config.get('seed', 0)), diff --git a/teuthology/task/recovery_bench.py b/teuthology/task/recovery_bench.py index ea4fb765ee3a1..7dd94720782e7 100644 --- a/teuthology/task/recovery_bench.py +++ b/teuthology/task/recovery_bench.py @@ -99,15 +99,17 @@ class RecoveryBencher: osd = str(random.choice(self.osds)) (osd_remote,) = self.ceph_manager.ctx.cluster.only('osd.%s' % osd).remotes.iterkeys() + testdir = teuthology.get_testdir(self.ceph_manager.ctx) + # create the objects osd_remote.run( args=[ - 'env', 'CEPH_CONF=/tmp/cephtest/ceph.conf', - 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib', - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - '/tmp/cephtest/archive/coverage', - '/tmp/cephtest/binary/usr/local/bin/smalliobench', + 'env', 'CEPH_CONF={tdir}/ceph.conf'.format(tdir=testdir), + 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir), + '{tdir}/enable-coredump'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + '{tdir}/binary/usr/local/bin/smalliobench'.format(tdir=testdir), '--use-prefix', 'recovery_bench', '--init-only', '1', '--num-objects', str(num_objects), @@ -120,12 +122,12 @@ class RecoveryBencher: log.info('non-recovery (baseline)') p = osd_remote.run( args=[ - 'env', 'CEPH_CONF=/tmp/cephtest/ceph.conf', - 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib', - '/tmp/cephtest/enable-coredump', - '/tmp/cephtest/binary/usr/local/bin/ceph-coverage', - 
-                '/tmp/cephtest/archive/coverage',
-                '/tmp/cephtest/binary/usr/local/bin/smalliobench',
+                'env', 'CEPH_CONF={tdir}/ceph.conf'.format(tdir=testdir),
+                'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+                '{tdir}/enable-coredump'.format(tdir=testdir),
+                '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+                '{tdir}/archive/coverage'.format(tdir=testdir),
+                '{tdir}/binary/usr/local/bin/smalliobench'.format(tdir=testdir),
                 '--use-prefix', 'recovery_bench',
                 '--do-not-init', '1',
                 '--duration', str(duration),
@@ -144,12 +146,12 @@ class RecoveryBencher:
         log.info('recovery active')
         p = osd_remote.run(
             args=[
-                'env', 'CEPH_CONF=/tmp/cephtest/ceph.conf',
-                'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
-                '/tmp/cephtest/enable-coredump',
-                '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
-                '/tmp/cephtest/archive/coverage',
-                '/tmp/cephtest/binary/usr/local/bin/smalliobench',
+                'env', 'CEPH_CONF={tdir}/ceph.conf'.format(tdir=testdir),
+                'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+                '{tdir}/enable-coredump'.format(tdir=testdir),
+                '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+                '{tdir}/archive/coverage'.format(tdir=testdir),
+                '{tdir}/binary/usr/local/bin/smalliobench'.format(tdir=testdir),
                 '--use-prefix', 'recovery_bench',
                 '--do-not-init', '1',
                 '--duration', str(duration),
diff --git a/teuthology/task/rgw-logsocket.py b/teuthology/task/rgw-logsocket.py
index 0f6b1c7f0ab83..fff5b7b698be3 100644
--- a/teuthology/task/rgw-logsocket.py
+++ b/teuthology/task/rgw-logsocket.py
@@ -28,16 +28,17 @@ def configure(ctx, config):
 @contextlib.contextmanager
 def run_tests(ctx, config):
     assert isinstance(config, dict)
+    testdir = teuthology.get_testdir(ctx)
     for client, client_config in config.iteritems():
         client_config['extra_args'] = [
             's3tests.functional.test_s3:test_bucket_list_return_data',
         ]
 #        args = [
-#            'S3TEST_CONF=/tmp/cephtest/archive/s3-tests.{client}.conf'.format(client=client),
-#            '/tmp/cephtest/s3-tests/virtualenv/bin/nosetests',
+#            'S3TEST_CONF={tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client),
+#            '{tdir}/s3-tests/virtualenv/bin/nosetests'.format(tdir=testdir),
 #            '-w',
-#            '/tmp/cephtest/s3-tests',
+#            '{tdir}/s3-tests'.format(tdir=testdir),
 #            '-v',
 #            's3tests.functional.test_s3:test_bucket_list_return_data',
 #            ]
@@ -57,7 +58,7 @@ def run_tests(ctx, config):
                 args = [
                     'netcat',
                     '-w', '5',
-                    '-U', '/tmp/cephtest/rgw.opslog.sock',
+                    '-U', '{tdir}/rgw.opslog.sock'.format(tdir=testdir),
                 ],
                 stdout = netcat_out,
             )
diff --git a/teuthology/task/rgw.py b/teuthology/task/rgw.py
index 04ed675db5f1a..5ed76e33eed42 100644
--- a/teuthology/task/rgw.py
+++ b/teuthology/task/rgw.py
@@ -12,16 +12,17 @@ log = logging.getLogger(__name__)
 @contextlib.contextmanager
 def create_dirs(ctx, config):
     log.info('Creating apache directories...')
+    testdir = teuthology.get_testdir(ctx)
     for client in config.iterkeys():
         ctx.cluster.only(client).run(
             args=[
                 'mkdir',
                 '-p',
-                '/tmp/cephtest/apache/htdocs',
-                '/tmp/cephtest/apache/tmp',
+                '{tdir}/apache/htdocs'.format(tdir=testdir),
+                '{tdir}/apache/tmp'.format(tdir=testdir),
                 run.Raw('&&'),
                 'mkdir',
-                '/tmp/cephtest/archive/apache',
+                '{tdir}/archive/apache'.format(tdir=testdir),
             ],
         )
     try:
@@ -33,13 +34,13 @@ def create_dirs(ctx, config):
             args=[
                 'rm',
                 '-rf',
-                '/tmp/cephtest/apache/tmp',
+                '{tdir}/apache/tmp'.format(tdir=testdir),
                 run.Raw('&&'),
                 'rmdir',
-                '/tmp/cephtest/apache/htdocs',
+                '{tdir}/apache/htdocs'.format(tdir=testdir),
                 run.Raw('&&'),
                 'rmdir',
-                '/tmp/cephtest/apache',
+                '{tdir}/apache'.format(tdir=testdir),
             ],
         )
 
@@ -47,30 +48,31 @@
 @contextlib.contextmanager
 def ship_config(ctx, config):
     assert isinstance(config, dict)
+    testdir = teuthology.get_testdir(ctx)
     log.info('Shipping apache config and rgw.fcgi...')
-    src = os.path.join(os.path.dirname(__file__), 'apache.conf')
+    src = os.path.join(os.path.dirname(__file__), 'apache.conf.template')
     for client in config.iterkeys():
         (remote,) = ctx.cluster.only(client).remotes.keys()
         with file(src, 'rb') as f:
             teuthology.write_file(
                 remote=remote,
-                path='/tmp/cephtest/apache/apache.conf',
-                data=f,
+                path='{tdir}/apache/apache.conf'.format(tdir=testdir),
+                data=f.read().format(testdir=testdir),
             )
         teuthology.write_file(
             remote=remote,
-            path='/tmp/cephtest/apache/htdocs/rgw.fcgi',
+            path='{tdir}/apache/htdocs/rgw.fcgi'.format(tdir=testdir),
             data="""#!/bin/sh
 ulimit -c unlimited
-export LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib
-exec /tmp/cephtest/binary/usr/local/bin/radosgw -f -c /tmp/cephtest/ceph.conf
-"""
+export LD_LIBRARY_PATH={tdir}/binary/usr/local/lib
+exec {tdir}/binary/usr/local/bin/radosgw -f -c {tdir}/ceph.conf
+""".format(tdir=testdir)
             )
         remote.run(
             args=[
                 'chmod',
                 'a=rx',
-                '/tmp/cephtest/apache/htdocs/rgw.fcgi',
+                '{tdir}/apache/htdocs/rgw.fcgi'.format(tdir=testdir),
             ],
         )
     try:
@@ -82,11 +84,11 @@ exec /tmp/cephtest/binary/usr/local/bin/radosgw -f -c /tmp/cephtest/ceph.conf
             args=[
                 'rm',
                 '-f',
-                '/tmp/cephtest/apache/apache.conf',
+                '{tdir}/apache/apache.conf'.format(tdir=testdir),
                 run.Raw('&&'),
                 'rm',
                 '-f',
-                '/tmp/cephtest/apache/htdocs/rgw.fcgi',
+                '{tdir}/apache/htdocs/rgw.fcgi'.format(tdir=testdir),
             ],
         )
 
@@ -94,6 +96,7 @@ exec /tmp/cephtest/binary/usr/local/bin/radosgw -f -c /tmp/cephtest/ceph.conf
 @contextlib.contextmanager
 def start_rgw(ctx, config):
     log.info('Starting rgw...')
+    testdir = teuthology.get_testdir(ctx)
     rgws = {}
     for client in config.iterkeys():
         (remote,) = ctx.cluster.only(client).remotes.iterkeys()
@@ -104,22 +107,22 @@ def start_rgw(ctx, config):
         log.info("rgw %s config is %s", client, client_config)
 
         run_cmd=[
-            'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
-            '/tmp/cephtest/enable-coredump',
-            '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
-            '/tmp/cephtest/archive/coverage',
-            '/tmp/cephtest/daemon-helper',
+            'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+            '{tdir}/enable-coredump'.format(tdir=testdir),
+            '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+            '{tdir}/archive/coverage'.format(tdir=testdir),
+            '{tdir}/daemon-helper'.format(tdir=testdir),
             'term',
             ]
         run_cmd_tail=[
-            '/tmp/cephtest/binary/usr/local/bin/radosgw',
-            '-c', '/tmp/cephtest/ceph.conf',
-            '--log-file', '/tmp/cephtest/archive/log/rgw.log',
-            '--rgw_ops_log_socket_path', '/tmp/cephtest/rgw.opslog.sock',
-            '/tmp/cephtest/apache/apache.conf',
+            '{tdir}/binary/usr/local/bin/radosgw'.format(tdir=testdir),
+            '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+            '--log-file', '{tdir}/archive/log/rgw.log'.format(tdir=testdir),
+            '--rgw_ops_log_socket_path', '{tdir}/rgw.opslog.sock'.format(tdir=testdir),
+            '{tdir}/apache/apache.conf'.format(tdir=testdir),
             '--foreground',
             run.Raw('>'),
-            '/tmp/cephtest/archive/log/rgw.stdout',
+            '{tdir}/archive/log/rgw.stdout'.format(tdir=testdir),
             run.Raw('2>&1'),
             ]
 
@@ -151,7 +154,7 @@ def start_rgw(ctx, config):
             args=[
                 'rm',
                 '-rf',
-                '/tmp/cephtest/rgw.opslog.sock',
+                '{tdir}/rgw.opslog.sock'.format(tdir=testdir),
             ],
         )
 
@@ -161,18 +164,19 @@
 @contextlib.contextmanager
 def start_apache(ctx, config):
     log.info('Starting apache...')
+    testdir = teuthology.get_testdir(ctx)
     apaches = {}
     for client in config.iterkeys():
         (remote,) = ctx.cluster.only(client).remotes.keys()
         proc = remote.run(
             args=[
-                '/tmp/cephtest/enable-coredump',
-                '/tmp/cephtest/daemon-helper',
+                '{tdir}/enable-coredump'.format(tdir=testdir),
+                '{tdir}/daemon-helper'.format(tdir=testdir),
                 'kill',
                 'apache2',
                 '-X',
                 '-f',
-                '/tmp/cephtest/apache/apache.conf',
+                '{tdir}/apache/apache.conf'.format(tdir=testdir),
             ],
             logger=log.getChild(client),
             stdin=run.PIPE,
diff --git a/teuthology/task/s3readwrite.py b/teuthology/task/s3readwrite.py
index 5bb222652fa39..c517b225d5286 100644
--- a/teuthology/task/s3readwrite.py
+++ b/teuthology/task/s3readwrite.py
@@ -25,7 +25,7 @@ def download(ctx, config):
                 'git', 'clone',
 #                'https://github.com/ceph/s3-tests.git',
                 'git://ceph.com/git/s3-tests.git',
-                '/tmp/cephtest/s3-tests',
+                '{tdir}/s3-tests'.format(tdir=teuthology.get_testdir(ctx)),
             ],
         )
     try:
@@ -37,7 +37,7 @@ def download(ctx, config):
             args=[
                 'rm',
                 '-rf',
-                '/tmp/cephtest/s3-tests',
+                '{tdir}/s3-tests'.format(tdir=teuthology.get_testdir(ctx)),
             ],
         )
 
@@ -52,6 +52,7 @@ def _config_user(s3tests_conf, section, user):
 def create_users(ctx, config):
     assert isinstance(config, dict)
     log.info('Creating rgw users...')
+    testdir = teuthology.get_testdir(ctx)
     for client in config['clients']:
         s3tests_conf = config['s3tests_conf'][client]
         s3tests_conf.setdefault('readwrite', {})
@@ -68,12 +69,12 @@ def create_users(ctx, config):
             _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
             ctx.cluster.only(client).run(
                 args=[
-                    'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
-                    '/tmp/cephtest/enable-coredump',
-                    '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
-                    '/tmp/cephtest/archive/coverage',
-                    '/tmp/cephtest/binary/usr/local/bin/radosgw-admin',
-                    '-c', '/tmp/cephtest/ceph.conf',
+                    'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+                    '{tdir}/enable-coredump'.format(tdir=testdir),
+                    '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+                    '{tdir}/archive/coverage'.format(tdir=testdir),
+                    '{tdir}/binary/usr/local/bin/radosgw-admin'.format(tdir=testdir),
+                    '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
                     'user', 'create',
                     '--uid', s3tests_conf[section]['user_id'],
                     '--display-name', s3tests_conf[section]['display_name'],
@@ -111,7 +112,7 @@ def configure(ctx, config):
         remote.run(
             args=[
                 'cd',
-                '/tmp/cephtest/s3-tests',
+                '{tdir}/s3-tests'.format(tdir=teuthology.get_testdir(ctx)),
                 run.Raw('&&'),
                 './bootstrap',
             ],
@@ -124,7 +125,7 @@
         yaml.safe_dump(conf, conf_fp, default_flow_style=False)
         teuthology.write_file(
             remote=remote,
-            path='/tmp/cephtest/archive/s3readwrite.{client}.config.yaml'.format(client=client),
+            path='{tdir}/archive/s3readwrite.{client}.config.yaml'.format(tdir=teuthology.get_testdir(ctx), client=client),
             data=conf_fp.getvalue(),
         )
     yield
 
@@ -133,11 +134,12 @@
 @contextlib.contextmanager
 def run_tests(ctx, config):
     assert isinstance(config, dict)
+    testdir = teuthology.get_testdir(ctx)
     for client, client_config in config.iteritems():
         (remote,) = ctx.cluster.only(client).remotes.keys()
-        conf = teuthology.get_file(remote, '/tmp/cephtest/archive/s3readwrite.{client}.config.yaml'.format(client=client))
+        conf = teuthology.get_file(remote, '{tdir}/archive/s3readwrite.{client}.config.yaml'.format(tdir=testdir, client=client))
         args = [
-            '/tmp/cephtest/s3-tests/virtualenv/bin/s3tests-test-readwrite',
+            '{tdir}/s3-tests/virtualenv/bin/s3tests-test-readwrite'.format(tdir=testdir),
             ]
         if client_config is not None and 'extra_args' in client_config:
             args.extend(client_config['extra_args'])
diff --git a/teuthology/task/s3roundtrip.py b/teuthology/task/s3roundtrip.py
index 152b04a789755..10c8b75cdeb72 100644
--- a/teuthology/task/s3roundtrip.py
+++ b/teuthology/task/s3roundtrip.py
@@ -19,13 +19,14 @@ log = logging.getLogger(__name__)
 def download(ctx, config):
     assert isinstance(config, list)
     log.info('Downloading s3-tests...')
+    testdir = teuthology.get_testdir(ctx)
     for client in config:
         ctx.cluster.only(client).run(
             args=[
                 'git', 'clone',
 #                'https://github.com/ceph/s3-tests.git',
                 'git://ceph.com/git/s3-tests.git',
-                '/tmp/cephtest/s3-tests',
+                '{tdir}/s3-tests'.format(tdir=testdir),
             ],
         )
     try:
@@ -37,7 +38,7 @@ def download(ctx, config):
             args=[
                 'rm',
                 '-rf',
-                '/tmp/cephtest/s3-tests',
+                '{tdir}/s3-tests'.format(tdir=testdir),
             ],
         )
 
@@ -52,6 +53,7 @@ def _config_user(s3tests_conf, section, user):
 def create_users(ctx, config):
     assert isinstance(config, dict)
     log.info('Creating rgw users...')
+    testdir = teuthology.get_testdir(ctx)
     for client in config['clients']:
         s3tests_conf = config['s3tests_conf'][client]
         s3tests_conf.setdefault('roundtrip', {})
@@ -68,12 +70,12 @@ def create_users(ctx, config):
             _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
             ctx.cluster.only(client).run(
                 args=[
-                    'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
-                    '/tmp/cephtest/enable-coredump',
-                    '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
-                    '/tmp/cephtest/archive/coverage',
-                    '/tmp/cephtest/binary/usr/local/bin/radosgw-admin',
-                    '-c', '/tmp/cephtest/ceph.conf',
+                    'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+                    '{tdir}/enable-coredump'.format(tdir=testdir),
+                    '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+                    '{tdir}/archive/coverage'.format(tdir=testdir),
+                    '{tdir}/binary/usr/local/bin/radosgw-admin'.format(tdir=testdir),
+                    '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
                     'user', 'create',
                     '--uid', s3tests_conf[section]['user_id'],
                     '--display-name', s3tests_conf[section]['display_name'],
@@ -89,6 +91,7 @@
 def configure(ctx, config):
     assert isinstance(config, dict)
     log.info('Configuring s3-roundtrip-tests...')
+    testdir = teuthology.get_testdir(ctx)
     for client, properties in config['clients'].iteritems():
         s3tests_conf = config['s3tests_conf'][client]
         if properties is not None and 'rgw_server' in properties:
@@ -111,7 +114,7 @@ def configure(ctx, config):
         remote.run(
             args=[
                 'cd',
-                '/tmp/cephtest/s3-tests',
+                '{tdir}/s3-tests'.format(tdir=testdir),
                 run.Raw('&&'),
                 './bootstrap',
             ],
@@ -124,7 +127,7 @@
         yaml.safe_dump(conf, conf_fp, default_flow_style=False)
         teuthology.write_file(
             remote=remote,
-            path='/tmp/cephtest/archive/s3roundtrip.{client}.config.yaml'.format(client=client),
+            path='{tdir}/archive/s3roundtrip.{client}.config.yaml'.format(tdir=testdir, client=client),
             data=conf_fp.getvalue(),
         )
     yield
 
@@ -133,11 +136,12 @@
 @contextlib.contextmanager
 def run_tests(ctx, config):
     assert isinstance(config, dict)
+    testdir = teuthology.get_testdir(ctx)
     for client, client_config in config.iteritems():
         (remote,) = ctx.cluster.only(client).remotes.keys()
-        conf = teuthology.get_file(remote, '/tmp/cephtest/archive/s3roundtrip.{client}.config.yaml'.format(client=client))
+        conf = teuthology.get_file(remote, '{tdir}/archive/s3roundtrip.{client}.config.yaml'.format(tdir=testdir, client=client))
         args = [
-            '/tmp/cephtest/s3-tests/virtualenv/bin/s3tests-test-roundtrip',
+            '{tdir}/s3-tests/virtualenv/bin/s3tests-test-roundtrip'.format(tdir=testdir),
             ]
         if client_config is not None and 'extra_args' in client_config:
             args.extend(client_config['extra_args'])
diff --git a/teuthology/task/s3tests.py b/teuthology/task/s3tests.py
index 0df77d03eddb9..6f4b984e7b126 100644
--- a/teuthology/task/s3tests.py
+++ b/teuthology/task/s3tests.py
@@ -18,6 +18,7 @@ log = logging.getLogger(__name__)
 def do_download(ctx, config):
     assert isinstance(config, dict)
     log.info('Downloading s3-tests...')
+    testdir = teuthology.get_testdir(ctx)
     for (client, cconf) in config.items():
         branch = cconf.get('branch', 'master')
         sha1 = cconf.get('sha1')
@@ -27,13 +28,13 @@ def do_download(ctx, config):
                 '-b', branch,
 #                'https://github.com/ceph/s3-tests.git',
                 'git://ceph.com/git/s3-tests.git',
-                '/tmp/cephtest/s3-tests',
+                '{tdir}/s3-tests'.format(tdir=testdir),
             ],
         )
         if sha1 is not None:
             ctx.cluster.only(client).run(
                 args=[
-                    'cd', '/tmp/cephtest/s3-tests',
+                    'cd', '{tdir}/s3-tests'.format(tdir=testdir),
                     run.Raw('&&'),
                     'git', 'reset', '--hard', sha1,
                 ],
@@ -47,7 +48,7 @@ def do_download(ctx, config):
             args=[
                 'rm',
                 '-rf',
-                '/tmp/cephtest/s3-tests',
+                '{tdir}/s3-tests'.format(tdir=testdir),
             ],
         )
 
@@ -65,6 +66,7 @@ def _config_user(s3tests_conf, section, user):
 def do_create_users(ctx, config):
     assert isinstance(config, dict)
     log.info('Creating rgw users...')
+    testdir = teuthology.get_testdir(ctx)
     for client in config['clients']:
         s3tests_conf = config['s3tests_conf'][client]
         s3tests_conf.setdefault('fixtures', {})
@@ -73,12 +75,12 @@ def do_create_users(ctx, config):
             _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
             ctx.cluster.only(client).run(
                 args=[
-                    'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
-                    '/tmp/cephtest/enable-coredump',
-                    '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
-                    '/tmp/cephtest/archive/coverage',
-                    '/tmp/cephtest/binary/usr/local/bin/radosgw-admin',
-                    '-c', '/tmp/cephtest/ceph.conf',
+                    'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+                    '{tdir}/enable-coredump'.format(tdir=testdir),
+                    '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+                    '{tdir}/archive/coverage'.format(tdir=testdir),
+                    '{tdir}/binary/usr/local/bin/radosgw-admin'.format(tdir=testdir),
+                    '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
                     'user', 'create',
                     '--uid', s3tests_conf[section]['user_id'],
                     '--display-name', s3tests_conf[section]['display_name'],
@@ -96,6 +98,7 @@ def create_users(ctx, config):
 def do_configure(ctx, config):
     assert isinstance(config, dict)
     log.info('Configuring s3-tests...')
+    testdir = teuthology.get_testdir(ctx)
     for client, properties in config['clients'].iteritems():
         s3tests_conf = config['s3tests_conf'][client]
         if properties is not None and 'rgw_server' in properties:
@@ -114,7 +117,7 @@ def do_configure(ctx, config):
         remote.run(
             args=[
                 'cd',
-                '/tmp/cephtest/s3-tests',
+                '{tdir}/s3-tests'.format(tdir=testdir),
                 run.Raw('&&'),
                 './bootstrap',
             ],
@@ -123,7 +126,7 @@ def do_configure(ctx, config):
         s3tests_conf.write(conf_fp)
         teuthology.write_file(
             remote=remote,
-            path='/tmp/cephtest/archive/s3-tests.{client}.conf'.format(client=client),
+            path='{tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client),
             data=conf_fp.getvalue(),
         )
     yield
 
@@ -134,12 +137,13 @@ def configure(ctx, config):
 
 def do_run_tests(ctx, config):
     assert isinstance(config, dict)
+    testdir = teuthology.get_testdir(ctx)
     for client, client_config in config.iteritems():
         args = [
-            'S3TEST_CONF=/tmp/cephtest/archive/s3-tests.{client}.conf'.format(client=client),
-            '/tmp/cephtest/s3-tests/virtualenv/bin/nosetests',
+            'S3TEST_CONF={tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client),
+            '{tdir}/s3-tests/virtualenv/bin/nosetests'.format(tdir=testdir),
             '-w',
-            '/tmp/cephtest/s3-tests',
+            '{tdir}/s3-tests'.format(tdir=testdir),
             '-v',
             '-a', '!fails_on_rgw',
             ]
diff --git a/teuthology/task/scrub_test.py b/teuthology/task/scrub_test.py
index 3375c56f5d5f5..515c2f43c2289 100644
--- a/teuthology/task/scrub_test.py
+++ b/teuthology/task/scrub_test.py
@@ -59,7 +59,7 @@ def task(ctx, config):
     log.info('messing with PG %s on osd %d' % (victim, osd))
 
     (osd_remote,) = ctx.cluster.only('osd.%d' % osd).remotes.iterkeys()
-    data_path = os.path.join('/tmp/cephtest/data',
+    data_path = os.path.join('{tdir}/data'.format(tdir=teuthology.get_testdir(ctx)),
                              'osd.{id}.data'.format(id=osd),
                              'current',
                              '{pg}_head'.format(pg=victim)
diff --git a/teuthology/task/swift.py b/teuthology/task/swift.py
index f76fb5a57fc5f..3518ee163bfc5 100644
--- a/teuthology/task/swift.py
+++ b/teuthology/task/swift.py
@@ -15,6 +15,7 @@ log = logging.getLogger(__name__)
 
 @contextlib.contextmanager
 def download(ctx, config):
+    testdir = teuthology.get_testdir(ctx)
     assert isinstance(config, list)
     log.info('Downloading swift...')
     for client in config:
@@ -22,7 +23,7 @@ def download(ctx, config):
             args=[
                 'git', 'clone',
                 'git://ceph.com/git/swift.git',
-                '/tmp/cephtest/swift',
+                '{tdir}/swift'.format(tdir=testdir),
             ],
         )
     try:
@@ -34,7 +35,7 @@ def download(ctx, config):
             args=[
                 'rm',
                 '-rf',
-                '/tmp/cephtest/swift',
+                '{tdir}/swift'.format(tdir=testdir),
             ],
         )
 
@@ -49,18 +50,19 @@ def _config_user(testswift_conf, account, user, suffix):
 def create_users(ctx, config):
     assert isinstance(config, dict)
     log.info('Creating rgw users...')
+    testdir = teuthology.get_testdir(ctx)
     for client in config['clients']:
         testswift_conf = config['testswift_conf'][client]
         for user, suffix in [('foo', ''), ('bar', '2')]:
             _config_user(testswift_conf, '{user}.{client}'.format(user=user, client=client), user, suffix)
             ctx.cluster.only(client).run(
                 args=[
-                    'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
-                    '/tmp/cephtest/enable-coredump',
-                    '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
-                    '/tmp/cephtest/archive/coverage',
-                    '/tmp/cephtest/binary/usr/local/bin/radosgw-admin',
-                    '-c', '/tmp/cephtest/ceph.conf',
+                    'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+                    '{tdir}/enable-coredump'.format(tdir=testdir),
+                    '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+                    '{tdir}/archive/coverage'.format(tdir=testdir),
+                    '{tdir}/binary/usr/local/bin/radosgw-admin'.format(tdir=testdir),
+                    '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
                     'user', 'create',
                     '--subuser', '{account}:{user}'.format(account=testswift_conf['func_test']['account{s}'.format(s=suffix)],user=user),
                     '--display-name', testswift_conf['func_test']['display_name{s}'.format(s=suffix)],
@@ -76,6 +78,7 @@
 def configure(ctx, config):
     assert isinstance(config, dict)
     log.info('Configuring testswift...')
+    testdir = teuthology.get_testdir(ctx)
     for client, properties in config['clients'].iteritems():
         print 'client={c}'.format(c=client)
         print 'config={c}'.format(c=config)
@@ -97,7 +100,7 @@ def configure(ctx, config):
         remote.run(
             args=[
                 'cd',
-                '/tmp/cephtest/swift',
+                '{tdir}/swift'.format(tdir=testdir),
                 run.Raw('&&'),
                 './bootstrap',
             ],
@@ -106,7 +109,7 @@ def configure(ctx, config):
         testswift_conf.write(conf_fp)
         teuthology.write_file(
             remote=remote,
-            path='/tmp/cephtest/archive/testswift.{client}.conf'.format(client=client),
+            path='{tdir}/archive/testswift.{client}.conf'.format(tdir=testdir, client=client),
             data=conf_fp.getvalue(),
         )
     yield
 
@@ -115,12 +118,13 @@
 @contextlib.contextmanager
 def run_tests(ctx, config):
     assert isinstance(config, dict)
+    testdir = teuthology.get_testdir(ctx)
     for client, client_config in config.iteritems():
         args = [
-            'SWIFT_TEST_CONFIG_FILE=/tmp/cephtest/archive/testswift.{client}.conf'.format(client=client),
-            '/tmp/cephtest/swift/virtualenv/bin/nosetests',
+            'SWIFT_TEST_CONFIG_FILE={tdir}/archive/testswift.{client}.conf'.format(tdir=testdir, client=client),
+            '{tdir}/swift/virtualenv/bin/nosetests'.format(tdir=testdir),
             '-w',
-            '/tmp/cephtest/swift/test/functional',
+            '{tdir}/swift/test/functional'.format(tdir=testdir),
             '-v',
             '-a', '!fails_on_rgw',
             ]
diff --git a/teuthology/task/watch_notify_stress.py b/teuthology/task/watch_notify_stress.py
index e15c731132076..26a4ed7316bcd 100644
--- a/teuthology/task/watch_notify_stress.py
+++ b/teuthology/task/watch_notify_stress.py
@@ -3,6 +3,7 @@ import logging
 
 import proc_thrasher
 from ..orchestra import run
+from teuthology import misc as teuthology
 
 log = logging.getLogger(__name__)
 
@@ -30,6 +31,9 @@ def task(ctx, config):
     testwatch = {}
 
     remotes = []
+
+    testdir = teuthology.get_testdir(ctx)
+
     for role in config.get('clients', ['client.0']):
         assert isinstance(role, basestring)
         PREFIX = 'client.'
@@ -39,11 +43,11 @@ def task(ctx, config):
         remotes.append(remote)
 
         args =['CEPH_CLIENT_ID={id_}'.format(id_=id_),
-               'CEPH_CONF=/tmp/cephtest/ceph.conf',
+               'CEPH_CONF={tdir}/ceph.conf'.format(tdir=testdir),
                'CEPH_ARGS="{flags}"'.format(flags=config.get('flags', '')),
-               'LD_PRELOAD=/tmp/cephtest/binary/usr/local/lib/librados.so.2',
-               '/tmp/cephtest/daemon-helper', 'kill',
-               '/tmp/cephtest/binary/usr/local/bin/multi_stress_watch foo foo'
+               'LD_PRELOAD={tdir}/binary/usr/local/lib/librados.so.2'.format(tdir=testdir),
+               '{tdir}/daemon-helper'.format(tdir=testdir), 'kill',
+               '{tdir}/binary/usr/local/bin/multi_stress_watch foo foo'.format(tdir=testdir)
               ]
 
         log.info("args are %s" % (args,))
diff --git a/teuthology/task/workunit.py b/teuthology/task/workunit.py
index aae28eeaa6002..c4b5067fd2a95 100644
--- a/teuthology/task/workunit.py
+++ b/teuthology/task/workunit.py
@@ -140,7 +140,7 @@ def _make_scratch_dir(ctx, role, subdir):
     log.debug("getting remote for {id} role {role_}".format(id=id_, role_=role))
     (remote,) = ctx.cluster.only(role).remotes.iterkeys()
     dir_owner = remote.shortname.split('@', 1)[0]
-    mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
+    mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
     # if neither kclient nor ceph-fuse are required for a workunit,
     # mnt may not exist. Stat and create the directory if it doesn't.
     try:
@@ -212,20 +212,21 @@ def _spawn_on_all_clients(ctx, refspec, tests, env, subdir):
             p.spawn(_run_tests, ctx, refspec, role, [unit], env, subdir)
 
 def _run_tests(ctx, refspec, role, tests, env, subdir=None):
+    testdir = teuthology.get_testdir(ctx)
     assert isinstance(role, basestring)
     PREFIX = 'client.'
     assert role.startswith(PREFIX)
     id_ = role[len(PREFIX):]
     (remote,) = ctx.cluster.only(role).remotes.iterkeys()
-    mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
+    mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
     # subdir so we can remove and recreate this a lot without sudo
     if subdir is None:
         scratch_tmp = os.path.join(mnt, 'client.{id}'.format(id=id_), 'tmp')
     else:
         scratch_tmp = os.path.join(mnt, subdir)
-    srcdir = '/tmp/cephtest/workunit.{role}'.format(role=role)
-    secretfile = '/tmp/cephtest/data/{role}.secret'.format(role=role)
-    teuthology.write_secret_file(remote, role, secretfile)
+    srcdir = '{tdir}/workunit.{role}'.format(tdir=testdir, role=role)
+    secretfile = '{tdir}/data/{role}.secret'.format(tdir=testdir, role=role)
+    teuthology.write_secret_file(ctx, remote, role, secretfile)
 
     ceph_ref = ctx.summary.get('ceph-sha1', 'master')
 
@@ -249,11 +250,13 @@ def _run_tests(ctx, refspec, role, tests, env, subdir=None):
             'if', 'test', '-e', 'Makefile', run.Raw(';'), 'then', 'make', run.Raw(';'), 'fi',
             run.Raw('&&'),
             'find', '-executable', '-type', 'f', '-printf', r'%P\0'.format(srcdir=srcdir),
-            run.Raw('>/tmp/cephtest/workunits.list'),
+            run.Raw('>{tdir}/workunits.list'.format(tdir=testdir)),
         ],
     )
-    workunits = sorted(teuthology.get_file(remote, '/tmp/cephtest/workunits.list').split('\0'))
+    workunits = sorted(teuthology.get_file(
+        remote,
+        '{tdir}/workunits.list'.format(tdir=testdir)).split('\0'))
     assert workunits
 
     try:
@@ -272,13 +275,13 @@ def _run_tests(ctx, refspec, role, tests, env, subdir=None):
             'cd', '--', scratch_tmp,
             run.Raw('&&'),
             run.Raw('CEPH_REF={ref}'.format(ref=ceph_ref)),
-            run.Raw('PATH="$PATH:/tmp/cephtest/binary/usr/local/bin"'),
-            run.Raw('LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/tmp/cephtest/binary/usr/local/lib"'),
-            run.Raw('CEPH_JAVA_PATH="/tmp/cephtest/binary/usr/local/share/java"'),
-            run.Raw('CEPH_CONF="/tmp/cephtest/ceph.conf"'),
+            run.Raw('PATH="$PATH:{tdir}/binary/usr/local/bin"'.format(tdir=testdir)),
+            run.Raw('LD_LIBRARY_PATH="$LD_LIBRARY_PATH:{tdir}/binary/usr/local/lib"'.format(tdir=testdir)),
+            run.Raw('CEPH_JAVA_PATH="{tdir}/binary/usr/local/share/java"'.format(tdir=testdir)),
+            run.Raw('CEPH_CONF="{tdir}/ceph.conf"'.format(tdir=testdir)),
             run.Raw('CEPH_SECRET_FILE="{file}"'.format(file=secretfile)),
             run.Raw('CEPH_ID="{id}"'.format(id=id_)),
-            run.Raw('PYTHONPATH="$PYTHONPATH:/tmp/cephtest/binary/usr/local/lib/python2.7/dist-packages:/tmp/cephtest/binary/usr/local/lib/python2.6/dist-packages"'),
+            run.Raw('PYTHONPATH="$PYTHONPATH:{tdir}/binary/usr/local/lib/python2.7/dist-packages:{tdir}/binary/usr/local/lib/python2.6/dist-packages"'.format(tdir=testdir)),
         ]
         if env is not None:
             for var, val in env.iteritems():
                 env_arg = '{var}={val}'.format(var=var, val=quoted_val)
                 args.append(run.Raw(env_arg))
         args.extend([
-            '/tmp/cephtest/enable-coredump',
-            '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
-            '/tmp/cephtest/archive/coverage',
+            '{tdir}/enable-coredump'.format(tdir=testdir),
+            '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+            '{tdir}/archive/coverage'.format(tdir=testdir),
             '{srcdir}/{workunit}'.format(
                 srcdir=srcdir,
                 workunit=workunit,
@@ -307,6 +310,6 @@ def _run_tests(ctx, refspec, role, tests, env, subdir=None):
         remote.run(
             logger=log.getChild(role),
            args=[
-                'rm', '-rf', '--', '/tmp/cephtest/workunits.list', srcdir,
+                'rm', '-rf', '--', '{tdir}/workunits.list'.format(tdir=testdir), srcdir,
             ],
         )
-- 
2.39.5