cleanups *will* be performed. Later on, we can let tasks communicate
the subtasks they wish to invoke to the top-level runner, avoiding
this issue.
+
+Test Sandbox Directory
+======================
+
+Teuthology currently places most test files and mount points in a sandbox
+directory, which defaults to ``/tmp/cephtest/{rundir}``. The ``{rundir}`` part
+is the name of the run (as given by ``--name``), or ``user@host-timestamp``
+if no name is specified. To change the location of the sandbox directory, the
+following options can be specified in ``$HOME/.teuthology.yaml``:
+
+ base_test_dir: <directory>
+
+The ``base_test_dir`` option sets the base directory to use for the individual
+run directories. If not specified, it defaults to ``/tmp/cephtest``.
+
+ test_path: <directory>
+
+The ``test_path`` option sets the complete path to use for the test directory,
+overriding ``base_test_dir`` and the per-run subdirectory. This allows the old
+behavior, where ``/tmp/cephtest`` itself was used as the sandbox directory.
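+
+For example, a ``$HOME/.teuthology.yaml`` might contain one of the following
+(the paths shown are illustrative; choose locations that suit your hosts):
+
+ base_test_dir: /srv/teuthology
+
+or, to restore the old fixed-path behavior:
+
+ test_path: /tmp/cephtest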
+++ /dev/null
-[global]
- keyring = /tmp/cephtest/ceph.keyring
- log file = /tmp/cephtest/archive/log/$name.log
- chdir = ""
- pid file = $name.pid
- auth supported = cephx
- admin socket = /tmp/cephtest/asok.$name
-
- filestore xattr use omap = true
-
- mon clock drift allowed = .250
-
- osd crush chooseleaf type = 0
-
-[mon]
- mon data = /tmp/cephtest/data/mon.$id
- mon cluster log file = /tmp/cephtest/archive/log/cluster.mon.$id.log
-
-[osd]
- osd data = /tmp/cephtest/data/osd.$id.data
- osd journal = /tmp/cephtest/data/osd.$id.journal
- osd journal size = 100
- keyring = /tmp/cephtest/data/osd.$id.keyring
- osd class dir = /tmp/cephtest/binary/usr/local/lib/rados-classes
-
- osd scrub load threshold = 5.0
- osd scrub max interval = 600
-
- osd recover clone overlap = true
- osd recovery max chunk = 1048576
-
-[mds]
- keyring = /tmp/cephtest/data/mds.$id.keyring
- lockdep = 1
- mds debug scatterstat = true
- mds verify scatter = true
- mds debug frag = true
-
-[client]
- keyring = /tmp/cephtest/data/client.$id.keyring
- rgw socket path = /tmp/cephtest/apache/tmp/fastcgi_sock/rgw_sock
- rgw cache enabled = true
- rgw enable ops log = true
- rgw enable usage log = true
-
-[client.admin]
- keyring = /tmp/cephtest/ceph.keyring
--- /dev/null
+[global]
+ keyring = {testdir}/ceph.keyring
+ log file = {testdir}/archive/log/$name.log
+ chdir = ""
+ pid file = $name.pid
+ auth supported = cephx
+ admin socket = {testdir}/asok.$name
+
+ filestore xattr use omap = true
+
+ mon clock drift allowed = .250
+
+ osd crush chooseleaf type = 0
+
+[mon]
+ mon data = {testdir}/data/mon.$id
+ mon cluster log file = {testdir}/archive/log/cluster.mon.$id.log
+
+[osd]
+ osd data = {testdir}/data/osd.$id.data
+ osd journal = {testdir}/data/osd.$id.journal
+ osd journal size = 100
+ keyring = {testdir}/data/osd.$id.keyring
+ osd class dir = {testdir}/binary/usr/local/lib/rados-classes
+
+ osd scrub load threshold = 5.0
+ osd scrub max interval = 600
+
+ osd recover clone overlap = true
+ osd recovery max chunk = 1048576
+
+[mds]
+ keyring = {testdir}/data/mds.$id.keyring
+ lockdep = 1
+ mds debug scatterstat = true
+ mds verify scatter = true
+ mds debug frag = true
+
+[client]
+ keyring = {testdir}/data/client.$id.keyring
+ rgw socket path = {testdir}/apache/tmp/fastcgi_sock/rgw_sock
+ rgw cache enabled = true
+ rgw enable ops log = true
+ rgw enable usage log = true
+
+[client.admin]
+ keyring = {testdir}/ceph.keyring
log = logging.getLogger(__name__)
+import datetime
+stamp = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
+
+def get_testdir(ctx):
+ if 'test_path' in ctx.teuthology_config:
+ return ctx.teuthology_config['test_path']
+
+ basedir = ctx.teuthology_config.get('base_test_dir', '/tmp/cephtest')
+
+ if hasattr(ctx, 'name') and ctx.name:
+ log.debug('with name basedir: {b}'.format(b=basedir))
+ return '{basedir}/{rundir}'.format(
+ basedir=basedir,
+ rundir=ctx.name)
+ else:
+ log.debug('basedir: {b}'.format(b=basedir))
+ return '{basedir}/{user}-{stamp}'.format(
+ basedir=basedir,
+ user=get_user(),
+ stamp=stamp)
+
+def get_testdir_base(ctx):
+ if 'test_path' in ctx.teuthology_config:
+ return ctx.teuthology_config['test_path']
+ return ctx.teuthology_config.get('base_test_dir', '/tmp/cephtest')
+
def get_ceph_binary_url(package=None,
branch=None, tag=None, sha1=None, dist=None,
flavor=None, format=None, arch=None):
yield subsystem
yield capability
-def skeleton_config(roles, ips):
+def skeleton_config(ctx, roles, ips):
"""
Returns a ConfigObj that's prefilled with a skeleton config.
Use conf.write to write it out, override .filename first if you want.
"""
- path = os.path.join(os.path.dirname(__file__), 'ceph.conf')
- conf = configobj.ConfigObj(path, file_error=True)
+ path = os.path.join(os.path.dirname(__file__), 'ceph.conf.template')
+    with open(path, 'r') as t:
+        skconf = t.read().format(testdir=get_testdir(ctx))
+ conf = configobj.ConfigObj(StringIO(skconf), file_error=True)
mons = get_mons(roles=roles, ips=ips)
for role, addr in mons.iteritems():
conf.setdefault(role, {})
num = sum(sum(1 for role in hostroles if role.startswith(prefix)) for hostroles in roles)
return num
-def create_simple_monmap(remote, conf):
+def create_simple_monmap(ctx, remote, conf):
"""
Writes a simple monmap based on current ceph.conf into <tmpdir>/monmap.
assert addresses, "There are no monitors in config!"
log.debug('Ceph mon addresses: %s', addresses)
+ testdir = get_testdir(ctx)
args = [
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/monmaptool',
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/monmaptool'.format(tdir=testdir),
'--create',
'--clobber',
]
args.extend(('--add', name, addr))
args.extend([
'--print',
- '/tmp/cephtest/monmap',
+ '{tdir}/monmap'.format(tdir=testdir),
])
remote.run(
args=args,
pass
return retval
-def wait_until_healthy(remote):
+def wait_until_healthy(ctx, remote):
"""Wait until a Ceph cluster is healthy."""
+ testdir = get_testdir(ctx)
while True:
r = remote.run(
args=[
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/ceph',
- '-c', '/tmp/cephtest/ceph.conf',
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph'.format(tdir=testdir),
+ '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
'health',
'--concise',
],
break
time.sleep(1)
-def wait_until_osds_up(cluster, remote):
+def wait_until_osds_up(ctx, cluster, remote):
"""Wait until all Ceph OSDs are booted."""
num_osds = num_instances_of_type(cluster, 'osd')
+ testdir = get_testdir(ctx)
while True:
r = remote.run(
args=[
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/ceph',
- '-c', '/tmp/cephtest/ceph.conf',
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph'.format(tdir=testdir),
+ '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
'--concise',
'osd', 'dump', '--format=json'
],
log.debug('waited {elapsed}'.format(elapsed=str(time.time() - starttime)))
time.sleep(1)
-def write_secret_file(remote, role, filename):
+def write_secret_file(ctx, remote, role, filename):
+ testdir = get_testdir(ctx)
remote.run(
args=[
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/ceph-authtool',
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-authtool'.format(tdir=testdir),
'--name={role}'.format(role=role),
'--print-key',
- '/tmp/cephtest/data/{role}.keyring'.format(role=role),
+ '{tdir}/data/{role}.keyring'.format(tdir=testdir, role=role),
run.Raw('>'),
filename,
],
return a
return b
-def get_valgrind_args(name, v):
+def get_valgrind_args(testdir, name, v):
if v is None:
return []
if not isinstance(v, list):
v = [v]
- val_path = '/tmp/cephtest/archive/log/valgrind'
+ val_path = '{tdir}/archive/log/valgrind'.format(tdir=testdir)
if '--tool=memcheck' in v or '--tool=helgrind' in v:
extra_args = [
- '/tmp/cephtest/chdir-coredump',
+ '{tdir}/chdir-coredump'.format(tdir=testdir),
'valgrind',
- '--suppressions=/tmp/cephtest/valgrind.supp',
+ '--suppressions={tdir}/valgrind.supp'.format(tdir=testdir),
'--xml=yes',
'--xml-file={vdir}/{n}.log'.format(vdir=val_path, n=name)
]
else:
extra_args = [
- '/tmp/cephtest/chdir-coredump',
+ '{tdir}/chdir-coredump'.format(tdir=testdir),
'valgrind',
- '--suppressions=/tmp/cephtest/valgrind.supp',
+ '--suppressions={tdir}/valgrind.supp'.format(tdir=testdir),
'--log-file={vdir}/{n}.log'.format(vdir=val_path, n=name)
]
extra_args.extend(v)
import argparse
import yaml
+from teuthology import misc as teuthology
+
def parse_args():
from teuthology.run import config_file
from teuthology.run import MergeConfig
from .orchestra import run
ctx.cluster.run(
args=[
- 'grep', '/tmp/cephtest/data/', '/etc/mtab', run.Raw('|'),
+ 'grep',
+ '{tdir}/data/'.format(tdir=teuthology.get_testdir(ctx)),
+ '/etc/mtab',
+ run.Raw('|'),
'awk', '{print $2}', run.Raw('|'),
'xargs', '-r',
'sudo', 'umount', run.Raw(';'),
for remote in ctx.cluster.remotes.iterkeys():
proc = remote.run(
args=[
- 'sudo', 'rm', '-rf', '/tmp/cephtest',
+ 'sudo', 'rm', '-rf',
+ teuthology.get_testdir(ctx),
],
wait=False,
)
for client, tests in config.iteritems():
p.spawn(_run_tests, ctx, client, tests)
-def _socket_command(remote, socket_path, command, args):
+def _socket_command(ctx, remote, socket_path, command, args):
"""
Run an admin socket command and return the result as a string.
"""
json_fp = StringIO()
+ testdir = teuthology.get_testdir(ctx)
remote.run(
args=[
- 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/ceph',
- '-k', '/tmp/cephtest/ceph.keyring',
- '-c', '/tmp/cephtest/ceph.conf',
+ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph'.format(tdir=testdir),
+ '-k', '{tdir}/ceph.keyring'.format(tdir=testdir),
+ '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
'--admin-daemon', socket_path,
command,
] + args,
return json.loads(out)
def _run_tests(ctx, client, tests):
+ testdir = teuthology.get_testdir(ctx)
log.debug('Running admin socket tests on %s', client)
(remote,) = ctx.cluster.only(client).remotes.iterkeys()
- socket_path = '/tmp/cephtest/asok.{name}'.format(name=client)
+ socket_path = '{tdir}/asok.{name}'.format(tdir=testdir, name=client)
try:
tmp_dir = os.path.join(
- '/tmp/cephtest/',
+ testdir,
'admin_socket_{client}'.format(client=client),
)
remote.run(
args = config.get('args', [])
assert isinstance(args, list), \
'admin socket command args must be a list'
- sock_out = _socket_command(remote, socket_path, command, args)
+ sock_out = _socket_command(ctx, remote, socket_path, command, args)
if test_path is not None:
remote.run(
args=[
+++ /dev/null
-LoadModule env_module /usr/lib/apache2/modules/mod_env.so
-LoadModule rewrite_module /usr/lib/apache2/modules/mod_rewrite.so
-LoadModule fastcgi_module /usr/lib/apache2/modules/mod_fastcgi.so
-
-Listen 7280
-ServerName rgwtest.example.com
-
-ServerRoot /tmp/cephtest/apache
-ErrorLog /tmp/cephtest/archive/apache/error.log
-LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"" combined
-CustomLog /tmp/cephtest/archive/apache/access.log combined
-PidFile /tmp/cephtest/apache/tmp/apache.pid
-DocumentRoot /tmp/cephtest/apache/htdocs
-FastCgiIPCDir /tmp/cephtest/apache/tmp/fastcgi_sock
-FastCgiExternalServer /tmp/cephtest/apache/htdocs/rgw.fcgi -socket rgw_sock
-RewriteEngine On
-
-RewriteRule ^/([a-zA-Z0-9-_.]*)([/]?.*) /rgw.fcgi?page=$1&params=$2&%{QUERY_STRING} [E=HTTP_AUTHORIZATION:%{HTTP:Authorization},L]
-
-# Set fastcgi environment variables.
-# Note that this is separate from Unix environment variables!
-SetEnv RGW_LOG_LEVEL 20
-SetEnv RGW_PRINT_CONTINUE yes
-SetEnv RGW_SHOULD_LOG yes
-
-<Directory /tmp/cephtest/apache/htdocs>
- Options +ExecCGI
- AllowOverride All
- SetHandler fastcgi-script
-</Directory>
-
-AllowEncodedSlashes On
-ServerSignature Off
--- /dev/null
+LoadModule env_module /usr/lib/apache2/modules/mod_env.so
+LoadModule rewrite_module /usr/lib/apache2/modules/mod_rewrite.so
+LoadModule fastcgi_module /usr/lib/apache2/modules/mod_fastcgi.so
+
+Listen 7280
+ServerName rgwtest.example.com
+
+ServerRoot {testdir}/apache
+ErrorLog {testdir}/archive/apache/error.log
+LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"" combined
+CustomLog {testdir}/archive/apache/access.log combined
+PidFile {testdir}/apache/tmp/apache.pid
+DocumentRoot {testdir}/apache/htdocs
+FastCgiIPCDir {testdir}/apache/tmp/fastcgi_sock
+FastCgiExternalServer {testdir}/apache/htdocs/rgw.fcgi -socket rgw_sock
+RewriteEngine On
+
+RewriteRule ^/([a-zA-Z0-9-_.]*)([/]?.*) /rgw.fcgi?page=$1&params=$2&%{QUERY_STRING} [E=HTTP_AUTHORIZATION:%{HTTP:Authorization},L]
+
+# Set fastcgi environment variables.
+# Note that this is separate from Unix environment variables!
+SetEnv RGW_LOG_LEVEL 20
+SetEnv RGW_PRINT_CONTINUE yes
+SetEnv RGW_SHOULD_LOG yes
+
+<Directory {testdir}/apache/htdocs>
+ Options +ExecCGI
+ AllowOverride All
+ SetHandler fastcgi-script
+</Directory>
+
+AllowEncodedSlashes On
+ServerSignature Off
assert isinstance(config, dict)
config = teuthology.replace_all_with_clients(ctx.cluster, config)
log.info('Setting up autotest...')
+ testdir = teuthology.get_testdir(ctx)
with parallel() as p:
for role in config.iterkeys():
(remote,) = ctx.cluster.only(role).remotes.keys()
- p.spawn(_download, remote)
+ p.spawn(_download, testdir, remote)
log.info('Making a separate scratch dir for every client...')
for role in config.iterkeys():
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
(remote,) = ctx.cluster.only(role).remotes.iterkeys()
- mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
scratch = os.path.join(mnt, 'client.{id}'.format(id=id_))
remote.run(
args=[
with parallel() as p:
for role, tests in config.iteritems():
(remote,) = ctx.cluster.only(role).remotes.keys()
- p.spawn(_run_tests, remote, role, tests)
+ p.spawn(_run_tests, testdir, remote, role, tests)
-def _download(remote):
+def _download(testdir, remote):
remote.run(
args=[
# explicitly does not support multiple autotest tasks
# in a single run; the result archival would conflict
- 'mkdir', '/tmp/cephtest/archive/autotest',
+ 'mkdir', '{tdir}/archive/autotest'.format(tdir=testdir),
run.Raw('&&'),
- 'mkdir', '/tmp/cephtest/autotest',
+ 'mkdir', '{tdir}/autotest'.format(tdir=testdir),
run.Raw('&&'),
'wget',
'-nv',
'-O-',
run.Raw('|'),
'tar',
- '-C', '/tmp/cephtest/autotest',
+ '-C', '{tdir}/autotest'.format(tdir=testdir),
'-x',
'-z',
'-f-',
],
)
-def _run_tests(remote, role, tests):
+def _run_tests(testdir, remote, role, tests):
assert isinstance(role, basestring)
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
- mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
scratch = os.path.join(mnt, 'client.{id}'.format(id=id_))
assert isinstance(tests, list)
testname=testname,
id=id_,
)
- control = '/tmp/cephtest/control.{tag}'.format(tag=tag)
+ control = '{tdir}/control.{tag}'.format(tdir=testdir, tag=tag)
teuthology.write_file(
remote=remote,
path=control,
url=testname,
dir=scratch,
# TODO perhaps tag
- # results will be in /tmp/cephtest/autotest/client/results/dbench
- # or /tmp/cephtest/autotest/client/results/dbench.{tag}
+ # results will be in {testdir}/autotest/client/results/dbench
+ # or {testdir}/autotest/client/results/dbench.{tag}
)),
),
)
remote.run(
args=[
- '/tmp/cephtest/autotest/client/bin/autotest',
+ '{tdir}/autotest/client/bin/autotest'.format(tdir=testdir),
'--verbose',
'--harness=simple',
'--tag={tag}'.format(tag=tag),
args=[
'mv',
'--',
- '/tmp/cephtest/autotest/client/results/{tag}'.format(tag=tag),
- '/tmp/cephtest/archive/autotest/{tag}'.format(tag=tag),
+ '{tdir}/autotest/client/results/{tag}'.format(tdir=testdir, tag=tag),
+ '{tdir}/archive/autotest/{tag}'.format(tdir=testdir, tag=tag),
],
)
remote.run(
args=[
- 'rm', '-rf', '--', '/tmp/cephtest/autotest',
+ 'rm', '-rf', '--', '{tdir}/autotest'.format(tdir=testdir),
],
)
log = logging.getLogger(__name__)
blktrace = '/usr/sbin/blktrace'
-log_dir = '/tmp/cephtest/archive/performance/blktrace'
daemon_signal = 'term'
@contextlib.contextmanager
def setup(ctx, config):
osds = ctx.cluster.only(teuthology.is_type('osd'))
+ log_dir = '{tdir}/archive/performance/blktrace'.format(tdir=teuthology.get_testdir(ctx))
+
for remote, roles_for_host in osds.remotes.iteritems():
log.info('Creating %s on %s' % (log_dir,remote.name))
remote.run(
@contextlib.contextmanager
def execute(ctx, config):
procs = []
+    testdir = teuthology.get_testdir(ctx)
+ log_dir = '{tdir}/archive/performance/blktrace'.format(tdir=testdir)
+
osds = ctx.cluster.only(teuthology.is_type('osd'))
for remote, roles_for_host in osds.remotes.iteritems():
roles_to_devs = ctx.disk_config.remote_to_roles_to_dev[remote]
'cd',
log_dir,
run.Raw(';'),
- '/tmp/cephtest/daemon-helper',
+ '{tdir}/daemon-helper'.format(tdir=testdir),
daemon_signal,
'sudo',
blktrace,
log.info('Mounting ceph-fuse clients...')
fuse_daemons = {}
+ testdir = teuthology.get_testdir(ctx)
+
if config is None:
config = dict(('client.{id}'.format(id=id_), None)
for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))
for id_, remote in clients:
- mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
log.info('Mounting ceph-fuse client.{id} at {remote} {mnt}...'.format(
id=id_, remote=remote,mnt=mnt))
)
run_cmd=[
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/daemon-helper',
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/daemon-helper'.format(tdir=testdir),
daemon_signal,
]
run_cmd_tail=[
- '/tmp/cephtest/binary/usr/local/bin/ceph-fuse',
+ '{tdir}/binary/usr/local/bin/ceph-fuse'.format(tdir=testdir),
'-f',
'--name', 'client.{id}'.format(id=id_),
- '-c', '/tmp/cephtest/ceph.conf',
+ '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
# TODO ceph-fuse doesn't understand dash dash '--',
mnt,
]
fuse_daemons[id_] = proc
for id_, remote in clients:
- mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
teuthology.wait_until_fuse_mounted(
remote=remote,
fuse=fuse_daemons[id_],
mountpoint=mnt,
)
- remote.run(args=['sudo', 'chmod', '1777', '/tmp/cephtest/mnt.{id}'.format(id=id_)],)
+ remote.run(args=['sudo', 'chmod', '1777', '{tdir}/mnt.{id}'.format(tdir=testdir, id=id_)],)
try:
yield
finally:
log.info('Unmounting ceph-fuse clients...')
for id_, remote in clients:
- mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
try:
remote.run(
args=[
run.wait(fuse_daemons.itervalues())
for id_, remote in clients:
- mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
remote.run(
args=[
'rmdir',
@contextlib.contextmanager
def ceph_log(ctx, config):
log.info('Creating log directories...')
+ archive_dir = '{tdir}/archive'.format(tdir=teuthology.get_testdir(ctx))
run.wait(
ctx.cluster.run(
args=[
'install', '-d', '-m0755', '--',
- '/tmp/cephtest/archive/log',
- '/tmp/cephtest/archive/log/valgrind',
- '/tmp/cephtest/archive/profiling-logger',
+ '{adir}/log'.format(adir=archive_dir),
+ '{adir}/log/valgrind'.format(adir=archive_dir),
+ '{adir}/profiling-logger'.format(adir=archive_dir),
],
wait=False,
)
ctx.cluster.run(
args=[
'find',
- '/tmp/cephtest/archive/log',
+ '{adir}/log'.format(adir=archive_dir),
'-name',
'*.log',
'-print0',
assert config is None
FILES = ['daemon-helper', 'enable-coredump', 'chdir-coredump',
'valgrind.supp', 'kcon_most']
+ testdir = teuthology.get_testdir(ctx)
for filename in FILES:
log.info('Shipping %r...', filename)
src = os.path.join(os.path.dirname(__file__), filename)
- dst = os.path.join('/tmp/cephtest', filename)
+ dst = os.path.join(testdir, filename)
with file(src, 'rb') as f:
for rem in ctx.cluster.remotes.iterkeys():
teuthology.write_file(
finally:
log.info('Removing shipped files: %s...', ' '.join(FILES))
filenames = (
- os.path.join('/tmp/cephtest', filename)
+ os.path.join(testdir, filename)
for filename in FILES
)
run.wait(
),
)
-def _download_binaries(remote, ceph_bindir_url):
+def _download_binaries(ctx, remote, ceph_bindir_url):
+ testdir = teuthology.get_testdir(ctx)
remote.run(
args=[
- 'install', '-d', '-m0755', '--', '/tmp/cephtest/binary',
+ 'install', '-d', '-m0755', '--', '{tdir}/binary'.format(tdir=testdir),
run.Raw('&&'),
'uname', '-m',
run.Raw('|'),
# need to use --input-file to make wget respect --base
'--input-file=-',
run.Raw('|'),
- 'tar', '-xzf', '-', '-C', '/tmp/cephtest/binary',
+ 'tar', '-xzf', '-', '-C', '{tdir}/binary'.format(tdir=testdir),
],
)
path = config.get('path')
tmpdir = None
+ testdir = teuthology.get_testdir(ctx)
+
if path is None:
        # fetch from gitbuilder
log.info('Fetching and unpacking ceph binaries from gitbuilder...')
with parallel() as p:
for remote in ctx.cluster.remotes.iterkeys():
- p.spawn(_download_binaries, remote, ceph_bindir_url)
+ p.spawn(_download_binaries, ctx, remote, ceph_bindir_url)
else:
with tempfile.TemporaryFile(prefix='teuthology-tarball-', suffix='.tgz') as tar_fp:
tmpdir = tempfile.mkdtemp(prefix='teuthology-tarball-')
tar_fp.seek(0)
writes = ctx.cluster.run(
args=[
- 'install', '-d', '-m0755', '--', '/tmp/cephtest/binary',
+ 'install', '-d', '-m0755', '--', '{tdir}/binary'.format(tdir=testdir),
run.Raw('&&'),
- 'tar', '-xzf', '-', '-C', '/tmp/cephtest/binary'
+ 'tar', '-xzf', '-', '-C', '{tdir}/binary'.format(tdir=testdir)
],
stdin=run.PIPE,
wait=False,
'rm',
'-rf',
'--',
- '/tmp/cephtest/binary',
+ '{tdir}/binary'.format(tdir=testdir),
],
wait=False,
),
@contextlib.contextmanager
def valgrind_post(ctx, config):
+ testdir = teuthology.get_testdir(ctx)
try:
yield
finally:
lookup_procs = list()
- val_path = '/tmp/cephtest/archive/log/valgrind'
+ val_path = '{tdir}/archive/log/valgrind'.format(tdir=testdir)
log.info('Checking for errors in any valgrind logs...');
for remote in ctx.cluster.remotes.iterkeys():
#look at valgrind logs for each node
@contextlib.contextmanager
def cluster(ctx, config):
+ testdir = teuthology.get_testdir(ctx)
log.info('Creating ceph cluster...')
run.wait(
ctx.cluster.run(
args=[
'install', '-d', '-m0755', '--',
- '/tmp/cephtest/data',
+ '{tdir}/data'.format(tdir=testdir),
],
wait=False,
)
remotes_and_roles = ctx.cluster.remotes.items()
roles = [roles for (remote, roles) in remotes_and_roles]
ips = [host for (host, port) in (remote.ssh.get_transport().getpeername() for (remote, roles) in remotes_and_roles)]
- conf = teuthology.skeleton_config(roles=roles, ips=ips)
+ conf = teuthology.skeleton_config(ctx, roles=roles, ips=ips)
for remote, roles_to_journals in remote_to_roles_to_journals.iteritems():
for role, journal in roles_to_journals.iteritems():
key = "osd." + str(role)
'python',
'-c',
'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
- '/tmp/cephtest/ceph.conf',
+ '{tdir}/ceph.conf'.format(tdir=testdir),
],
stdin=run.PIPE,
wait=False,
teuthology.feed_many_stdins_and_close(conf_fp, writes)
run.wait(writes)
- coverage_dir = '/tmp/cephtest/archive/coverage'
+ coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
firstmon = teuthology.get_first_mon(ctx, config)
log.info('Setting up %s...' % firstmon)
ctx.cluster.only(firstmon).run(
args=[
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
coverage_dir,
- '/tmp/cephtest/binary/usr/local/bin/ceph-authtool',
+ '{tdir}/binary/usr/local/bin/ceph-authtool'.format(tdir=testdir),
'--create-keyring',
- '/tmp/cephtest/ceph.keyring',
+ '{tdir}/ceph.keyring'.format(tdir=testdir),
],
)
ctx.cluster.only(firstmon).run(
args=[
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
coverage_dir,
- '/tmp/cephtest/binary/usr/local/bin/ceph-authtool',
+ '{tdir}/binary/usr/local/bin/ceph-authtool'.format(tdir=testdir),
'--gen-key',
'--name=mon.',
- '/tmp/cephtest/ceph.keyring',
+ '{tdir}/ceph.keyring'.format(tdir=testdir),
],
)
(mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
teuthology.create_simple_monmap(
+ ctx,
remote=mon0_remote,
conf=conf,
)
log.info('Creating admin key on %s...' % firstmon)
ctx.cluster.only(firstmon).run(
args=[
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
coverage_dir,
- '/tmp/cephtest/binary/usr/local/bin/ceph-authtool',
+ '{tdir}/binary/usr/local/bin/ceph-authtool'.format(tdir=testdir),
'--gen-key',
'--name=client.admin',
'--set-uid=0',
'--cap', 'mon', 'allow *',
'--cap', 'osd', 'allow *',
'--cap', 'mds', 'allow',
- '/tmp/cephtest/ceph.keyring',
+ '{tdir}/ceph.keyring'.format(tdir=testdir),
],
)
log.info('Copying monmap to all nodes...')
keyring = teuthology.get_file(
remote=mon0_remote,
- path='/tmp/cephtest/ceph.keyring',
+ path='{tdir}/ceph.keyring'.format(tdir=testdir),
)
monmap = teuthology.get_file(
remote=mon0_remote,
- path='/tmp/cephtest/monmap',
+ path='{tdir}/monmap'.format(tdir=testdir),
)
for rem in ctx.cluster.remotes.iterkeys():
log.info('Sending monmap to node {remote}'.format(remote=rem))
teuthology.write_file(
remote=rem,
- path='/tmp/cephtest/ceph.keyring',
+ path='{tdir}/ceph.keyring'.format(tdir=testdir),
data=keyring,
)
teuthology.write_file(
remote=rem,
- path='/tmp/cephtest/monmap',
+ path='{tdir}/monmap'.format(tdir=testdir),
data=monmap,
)
run.wait(
mons.run(
args=[
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
coverage_dir,
- '/tmp/cephtest/binary/usr/local/bin/osdmaptool',
+ '{tdir}/binary/usr/local/bin/osdmaptool'.format(tdir=testdir),
'-c',
- '/tmp/cephtest/ceph.conf',
+ '{tdir}/ceph.conf'.format(tdir=testdir),
'--clobber',
'--createsimple', '{num:d}'.format(
num=teuthology.num_instances_of_type(ctx.cluster, 'osd'),
),
- '/tmp/cephtest/osdmap',
+ '{tdir}/osdmap'.format(tdir=testdir),
'--pg_bits', '2',
'--pgp_bits', '4',
],
for id_ in teuthology.roles_of_type(roles_for_host, 'osd'):
remote.run(
args=[
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
coverage_dir,
- '/tmp/cephtest/binary/usr/local/bin/ceph-authtool',
+ '{tdir}/binary/usr/local/bin/ceph-authtool'.format(tdir=testdir),
'--create-keyring',
'--gen-key',
'--name=osd.{id}'.format(id=id_),
- '/tmp/cephtest/data/osd.{id}.keyring'.format(id=id_),
+ '{tdir}/data/osd.{id}.keyring'.format(tdir=testdir, id=id_),
],
)
for id_ in teuthology.roles_of_type(roles_for_host, 'mds'):
remote.run(
args=[
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
coverage_dir,
- '/tmp/cephtest/binary/usr/local/bin/ceph-authtool',
+ '{tdir}/binary/usr/local/bin/ceph-authtool'.format(tdir=testdir),
'--create-keyring',
'--gen-key',
'--name=mds.{id}'.format(id=id_),
- '/tmp/cephtest/data/mds.{id}.keyring'.format(id=id_),
+ '{tdir}/data/mds.{id}.keyring'.format(tdir=testdir, id=id_),
],
)
for id_ in teuthology.roles_of_type(roles_for_host, 'client'):
remote.run(
args=[
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
coverage_dir,
- '/tmp/cephtest/binary/usr/local/bin/ceph-authtool',
+ '{tdir}/binary/usr/local/bin/ceph-authtool'.format(tdir=testdir),
'--create-keyring',
'--gen-key',
# TODO this --name= is not really obeyed, all unknown "types" are munged to "client"
'--name=client.{id}'.format(id=id_),
- '/tmp/cephtest/data/client.{id}.keyring'.format(id=id_),
+ '{tdir}/data/client.{id}.keyring'.format(tdir=testdir, id=id_),
],
)
for id_ in teuthology.roles_of_type(roles_for_host, type_):
data = teuthology.get_file(
remote=remote,
- path='/tmp/cephtest/data/{type}.{id}.keyring'.format(
+ path='{tdir}/data/{type}.{id}.keyring'.format(
+ tdir=testdir,
type=type_,
id=id_,
),
args=[
'cat',
run.Raw('>>'),
- '/tmp/cephtest/ceph.keyring',
+ '{tdir}/ceph.keyring'.format(tdir=testdir),
],
stdin=run.PIPE,
wait=False,
run.wait(
mons.run(
args=[
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
coverage_dir,
- '/tmp/cephtest/binary/usr/local/bin/ceph-authtool',
- '/tmp/cephtest/ceph.keyring',
+ '{tdir}/binary/usr/local/bin/ceph-authtool'.format(tdir=testdir),
+ '{tdir}/ceph.keyring'.format(tdir=testdir),
'--name={type}.{id}'.format(
type=type_,
id=id_,
for id_ in teuthology.roles_of_type(roles_for_host, 'mon'):
remote.run(
args=[
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
coverage_dir,
- '/tmp/cephtest/binary/usr/local/bin/ceph-mon',
+ '{tdir}/binary/usr/local/bin/ceph-mon'.format(tdir=testdir),
'--mkfs',
'-i', id_,
- '-c', '/tmp/cephtest/ceph.conf',
- '--monmap=/tmp/cephtest/monmap',
- '--osdmap=/tmp/cephtest/osdmap',
- '--keyring=/tmp/cephtest/ceph.keyring',
+ '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+ '--monmap={tdir}/monmap'.format(tdir=testdir),
+ '--osdmap={tdir}/osdmap'.format(tdir=testdir),
+ '--keyring={tdir}/ceph.keyring'.format(tdir=testdir),
],
)
remote.run(
args=[
'mkdir',
- os.path.join('/tmp/cephtest/data', 'osd.{id}.data'.format(id=id_)),
+ os.path.join('{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_)),
],
)
if roles_to_devs.get(id_):
'-t', fs,
'-o', ','.join(mount_options),
dev,
- os.path.join('/tmp/cephtest/data', 'osd.{id}.data'.format(id=id_)),
+ os.path.join('{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_)),
]
)
remote.run(
args=[
'sudo', 'chown', '-R', 'ubuntu.ubuntu',
- os.path.join('/tmp/cephtest/data', 'osd.{id}.data'.format(id=id_))
+ os.path.join('{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_))
]
)
remote.run(
args=[
'sudo', 'chmod', '-R', '755',
- os.path.join('/tmp/cephtest/data', 'osd.{id}.data'.format(id=id_))
+ os.path.join('{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_))
]
)
devs_to_clean[remote].append(
os.path.join(
- '/tmp/cephtest/data', 'osd.{id}.data'.format(id=id_)
+ '{tdir}/data'.format(tdir=testdir), 'osd.{id}.data'.format(id=id_)
)
)
remote.run(
args=[
'MALLOC_CHECK_=3',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
coverage_dir,
- '/tmp/cephtest/binary/usr/local/bin/ceph-osd',
+ '{tdir}/binary/usr/local/bin/ceph-osd'.format(tdir=testdir),
'--mkfs',
'-i', id_,
- '-c', '/tmp/cephtest/ceph.conf',
- '--monmap', '/tmp/cephtest/monmap',
+ '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+ '--monmap', '{tdir}/monmap'.format(tdir=testdir),
],
)
run.wait(
args=[
'rm',
'--',
- '/tmp/cephtest/monmap',
- '/tmp/cephtest/osdmap',
+ '{tdir}/monmap'.format(tdir=testdir),
+ '{tdir}/osdmap'.format(tdir=testdir),
],
wait=False,
),
def first_in_ceph_log(pattern, excludes):
args = [
'egrep', pattern,
- '/tmp/cephtest/archive/log/cluster.%s.log' % firstmon,
+ '%s/archive/log/cluster.%s.log' % (testdir, firstmon),
]
for exclude in excludes:
args.extend([run.Raw('|'), 'egrep', '-v', exclude])
for role in roles:
if role.startswith('mon.'):
teuthology.pull_directory_tarball(remote,
- '/tmp/cephtest/data/%s' % role,
+ '%s/data/%s' % (testdir, role),
path + '/' + role + '.tgz')
log.info('Cleaning ceph cluster...')
'rm',
'-rf',
'--',
- '/tmp/cephtest/ceph.conf',
- '/tmp/cephtest/ceph.keyring',
- '/tmp/cephtest/data',
- '/tmp/cephtest/monmap',
- run.Raw('/tmp/cephtest/asok.*')
+ '{tdir}/ceph.conf'.format(tdir=testdir),
+ '{tdir}/ceph.keyring'.format(tdir=testdir),
+ '{tdir}/data'.format(tdir=testdir),
+ '{tdir}/monmap'.format(tdir=testdir),
+ run.Raw('{tdir}/asok.*'.format(tdir=testdir))
],
wait=False,
),
@contextlib.contextmanager
def run_daemon(ctx, config, type_):
log.info('Starting %s daemons...' % type_)
+ testdir = teuthology.get_testdir(ctx)
daemons = ctx.cluster.only(teuthology.is_type(type_))
- coverage_dir = '/tmp/cephtest/archive/coverage'
+ coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
daemon_signal = 'kill'
if config.get('coverage') or config.get('valgrind') is not None:
num_active += 1
run_cmd = [
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
coverage_dir,
- '/tmp/cephtest/daemon-helper',
+ '{tdir}/daemon-helper'.format(tdir=testdir),
daemon_signal,
]
run_cmd_tail = [
- '/tmp/cephtest/binary/usr/local/bin/ceph-%s' % type_,
+ '%s/binary/usr/local/bin/ceph-%s' % (testdir, type_),
'-f',
'-i', id_,
- '-c', '/tmp/cephtest/ceph.conf']
+ '-c', '{tdir}/ceph.conf'.format(tdir=testdir)]
if config.get('valgrind') is not None:
valgrind_args = None
valgrind_args = config['valgrind'][type_]
if name in config['valgrind']:
valgrind_args = config['valgrind'][name]
- run_cmd.extend(teuthology.get_valgrind_args(name, valgrind_args))
+ run_cmd.extend(teuthology.get_valgrind_args(testdir, name, valgrind_args))
if type_ in config.get('cpu_profile', []):
- profile_path = '/tmp/cephtest/archive/log/%s.%s.prof' % (type_, id_)
+ profile_path = '%s/archive/log/%s.%s.prof' % (testdir, type_, id_)
run_cmd.extend([ 'env', 'CPUPROFILE=%s' % profile_path ])
run_cmd.extend(run_cmd_tail)
firstmon = teuthology.get_first_mon(ctx, config)
(mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
mon0_remote.run(args=[
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
coverage_dir,
- '/tmp/cephtest/binary/usr/local/bin/ceph',
- '-c', '/tmp/cephtest/ceph.conf',
+ '{tdir}/binary/usr/local/bin/ceph'.format(tdir=testdir),
+ '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
'mds', 'set_max_mds', str(num_active)])
try:
firstmon = teuthology.get_first_mon(ctx, config)
(mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
teuthology.wait_until_osds_up(
+ ctx,
cluster=ctx.cluster,
remote=mon0_remote
)
teuthology.wait_until_healthy(
+ ctx,
remote=mon0_remote,
)
flavor = 'gcov'
ctx.summary['flavor'] = flavor
-
+
+ testdir = teuthology.get_testdir(ctx)
if config.get('coverage'):
- coverage_dir = '/tmp/cephtest/archive/coverage'
+ coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
log.info('Creating coverage directory...')
run.wait(
ctx.cluster.run(
self.pools['data'] = self.get_pool_property('data', 'pg_num')
def raw_cluster_cmd(self, *args):
+ testdir = teuthology.get_testdir(self.ctx)
ceph_args = [
- 'LD_LIBRARY_PRELOAD=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/ceph',
- '-k', '/tmp/cephtest/ceph.keyring',
- '-c', '/tmp/cephtest/ceph.conf',
+ 'LD_LIBRARY_PRELOAD={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph'.format(tdir=testdir),
+ '-k', '{tdir}/ceph.keyring'.format(tdir=testdir),
+ '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
'--concise',
]
ceph_args.extend(args)
#!/bin/sh
set -e
+testdir=$(realpath "$(dirname "$0")")
# valgrind only dumps to cwd, so cwd there...
-cd /tmp/cephtest/archive/coredump
+cd ${testdir}/archive/coredump
exec "$@"
clients = teuthology.replace_all_with_clients(ctx.cluster,
config['clients'])
+ testdir = teuthology.get_testdir(ctx)
+
try:
for client, tests in clients.iteritems():
(remote,) = ctx.cluster.only(client).remotes.iterkeys()
- client_dir = '/tmp/cephtest/archive/cram.{role}'.format(role=client)
+ client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
remote.run(
args=[
'mkdir', '--', client_dir,
run.Raw('&&'),
- 'virtualenv', '/tmp/cephtest/virtualenv',
+ 'virtualenv', '{tdir}/virtualenv'.format(tdir=testdir),
run.Raw('&&'),
- '/tmp/cephtest/virtualenv/bin/pip',
+ '{tdir}/virtualenv/bin/pip'.format(tdir=testdir),
'install', 'cram',
],
)
finally:
for client, tests in clients.iteritems():
(remote,) = ctx.cluster.only(client).remotes.iterkeys()
- client_dir = '/tmp/cephtest/archive/cram.{role}'.format(role=client)
+ client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
test_files = set([test.rsplit('/', 1)[1] for test in tests])
# remove test files unless they failed
remote.run(
args=[
'rm', '-rf', '--',
- '/tmp/cephtest/virtualenv',
+ '{tdir}/virtualenv'.format(tdir=testdir),
run.Raw(';'),
'rmdir', '--ignore-fail-on-non-empty', client_dir,
],
(remote,) = ctx.cluster.only(role).remotes.iterkeys()
ceph_ref = ctx.summary.get('ceph-sha1', 'master')
+ testdir = teuthology.get_testdir(ctx)
log.info('Running tests for %s...', role)
remote.run(
args=[
run.Raw('CEPH_REF={ref}'.format(ref=ceph_ref)),
- run.Raw('PATH="$PATH:/tmp/cephtest/binary/usr/local/bin"'),
- run.Raw('LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/tmp/cephtest/binary/usr/local/lib"'),
- run.Raw('CEPH_CONF="/tmp/cephtest/ceph.conf"'),
+ run.Raw('PATH="$PATH:{tdir}/binary/usr/local/bin"'.format(tdir=testdir)),
+ run.Raw('LD_LIBRARY_PATH="$LD_LIBRARY_PATH:{tdir}/binary/usr/local/lib"'.format(tdir=testdir)),
+ run.Raw('CEPH_CONF="{tdir}/ceph.conf"'.format(tdir=testdir)),
run.Raw('CEPH_ID="{id}"'.format(id=id_)),
- run.Raw('PYTHONPATH="$PYTHONPATH:/tmp/cephtest/binary/usr/local/lib/python2.7/dist-packages:/tmp/cephtest/binary/usr/local/lib/python2.6/dist-packages"'),
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/virtualenv/bin/cram',
+ run.Raw('PYTHONPATH="$PYTHONPATH:{tdir}/binary/usr/local/lib/python2.7/dist-packages:{tdir}/binary/usr/local/lib/python2.6/dist-packages"'.format(tdir=testdir)),
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/virtualenv/bin/cram'.format(tdir=testdir),
'-v', '--',
- run.Raw('/tmp/cephtest/archive/cram.{role}/*.t'.format(role=role)),
+ run.Raw('{tdir}/archive/cram.{role}/*.t'.format(tdir=testdir, role=role)),
],
logger=log.getChild(role),
)
@contextlib.contextmanager
def task(ctx, config):
"""
- Die if /tmp/cephtest/err exists or if an OSD dumps core
+ Die if {testdir}/err exists or if an OSD dumps core
"""
if config is None:
config = {}
while len(manager.get_osd_status()['up']) < num_osds:
time.sleep(10)
- log_path = '/tmp/cephtest/archive/log'
+ testdir = teuthology.get_testdir(ctx)
+ log_path = '{tdir}/archive/log'.format(tdir=testdir)
while True:
for i in range(num_osds):
(osd_remote,) = ctx.cluster.only('osd.%d' % i).remotes.iterkeys()
p = osd_remote.run(
- args = [ 'test', '-e', '/tmp/cephtest/err' ],
+ args = [ 'test', '-e', '{tdir}/err'.format(tdir=testdir) ],
wait=True,
check_status=False,
)
log.info("osd %d has an error" % i)
raise Exception("osd %d error" % i)
- log_path = '/tmp/cephtest/archive/log/osd.%d.log' % i
+ log_path = '%s/archive/log/osd.%d.log' % (testdir, i)
p = osd_remote.run(
args = [
log = logging.getLogger(__name__)
-def rados(remote, cmd, wait=True):
+def rados(testdir, remote, cmd, wait=True):
log.info("rados %s" % ' '.join(cmd))
pre = [
- 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/rados',
- '-c', '/tmp/cephtest/ceph.conf',
+ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/rados'.format(tdir=testdir),
+ '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
];
pre.extend(cmd)
proc = remote.run(
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
+ testdir = teuthology.get_testdir(ctx)
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
log.info('writing initial objects')
# write 1000 objects
for i in range(1000):
- rados(mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile])
+ rados(testdir, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile])
manager.wait_for_clean()
# write 1 (divergent) object
log.info('writing divergent object existing_0')
rados(
- mon, ['-p', 'foo', 'put', 'existing_0', dummyfile2],
+ testdir, mon, ['-p', 'foo', 'put', 'existing_0', dummyfile2],
wait=False)
time.sleep(10)
mon.run(
# write 1 non-divergent object (ensure that old divergent one is divergent)
log.info('writing non-divergent object existing_1')
- rados(mon, ['-p', 'foo', 'put', 'existing_1', dummyfile2])
+ rados(testdir, mon, ['-p', 'foo', 'put', 'existing_1', dummyfile2])
manager.wait_for_recovery()
manager.mark_in_osd(divergent)
log.info('wait for peering')
- rados(mon, ['-p', 'foo', 'put', 'foo', dummyfile])
+ rados(testdir, mon, ['-p', 'foo', 'put', 'foo', dummyfile])
log.info("killing divergent %d", divergent)
manager.kill_osd(divergent)
manager.set_config(i, osd_recovery_delay_start=0)
log.info('reading existing_0')
- exit_status = rados(mon,
+ exit_status = rados(testdir, mon,
['-p', 'foo', 'get', 'existing_0',
'-o', '/tmp/existing'])
assert exit_status is 0
client = clients[0];
(remote,) = ctx.cluster.only(client).remotes.iterkeys()
- dir = '/tmp/cephtest/data/test.%s' % client
+ testdir = teuthology.get_testdir(ctx)
+
+ dir = '%s/data/test.%s' % (testdir, client)
seed = str(int(random.uniform(1,100)))
args=[
'cd', dir,
run.Raw('&&'),
- run.Raw('PATH="/tmp/cephtest/binary/usr/local/bin:$PATH"'),
+ run.Raw('PATH="{tdir}/binary/usr/local/bin:$PATH"'.format(tdir=testdir)),
'./run_seed_to_range.sh', seed, '50', '300',
],
wait=False,
if result != 0:
remote.run(
args=[
- 'cp', '-a', dir, '/tmp/cephtest/archive/idempotent_failure',
+ 'cp', '-a', dir, '{tdir}/archive/idempotent_failure'.format(tdir=testdir),
])
raise Exception("./run_seed_to_range.sh errored out")
## Add required entries to conf/hadoop-env.sh
def write_hadoop_env(ctx, config):
- hadoopEnvFile = "/tmp/cephtest/hadoop/conf/hadoop-env.sh"
+ hadoopEnvFile = "{tdir}/hadoop/conf/hadoop-env.sh".format(tdir=teuthology.get_testdir(ctx))
hadoopNodes = ctx.cluster.only(teuthology.is_type('hadoop'))
for remote, roles_for_host in hadoopNodes.remotes.iteritems():
teuthology.write_file(remote, hadoopEnvFile,
'''export JAVA_HOME=/usr/lib/jvm/default-java
-export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp/cephtest/binary/usr/local/lib:/usr/lib
-export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/tmp/cephtest/binary/usr/local/lib/libcephfs.jar:/tmp/cephtest/hadoop/build/hadoop-core*.jar
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:{tdir}/binary/usr/local/lib:/usr/lib
+export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:{tdir}/binary/usr/local/lib/libcephfs.jar:{tdir}/hadoop/build/hadoop-core*.jar
export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS"
export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS"
export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS"
-''' )
+'''.format(tdir=teuthology.get_testdir(ctx)) )
log.info("wrote file: " + hadoopEnvFile + " to host: " + str(remote))
## Add required entries to conf/core-site.xml
def write_core_site(ctx, config):
- coreSiteFile = "/tmp/cephtest/hadoop/conf/core-site.xml"
+ testdir = teuthology.get_testdir(ctx)
+ coreSiteFile = "{tdir}/hadoop/conf/core-site.xml".format(tdir=testdir)
hadoopNodes = ctx.cluster.only(teuthology.is_type('hadoop'))
for remote, roles_for_host in hadoopNodes.remotes.iteritems():
</property>
<property>
<name>ceph.conf.file</name>
- <value>/tmp/cephtest/ceph.conf</value>
+ <value>{tdir}/ceph.conf</value>
</property>
</configuration>
-'''.format(default_fs=default_fs_string))
+'''.format(tdir=teuthology.get_testdir(ctx), default_fs=default_fs_string))
log.info("wrote file: " + coreSiteFile + " to host: " + str(remote))
## Add required entries to conf/mapred-site.xml
def write_mapred_site(ctx):
- mapredSiteFile = "/tmp/cephtest/hadoop/conf/mapred-site.xml"
+ mapredSiteFile = "{tdir}/hadoop/conf/mapred-site.xml".format(tdir=teuthology.get_testdir(ctx))
master_ip = get_hadoop_master_ip(ctx)
log.info('adding host {remote} as jobtracker'.format(remote=master_ip))
## Add required entries to conf/hdfs-site.xml
def write_hdfs_site(ctx):
- hdfsSiteFile = "/tmp/cephtest/hadoop/conf/hdfs-site.xml"
+ hdfsSiteFile = "{tdir}/hadoop/conf/hdfs-site.xml".format(tdir=teuthology.get_testdir(ctx))
hadoopNodes = ctx.cluster.only(teuthology.is_type('hadoop'))
for remote, roles_for_host in hadoopNodes.remotes.iteritems():
def write_slaves(ctx):
log.info('Setting up slave nodes...')
- slavesFile = "/tmp/cephtest/hadoop/conf/slaves"
+ slavesFile = "{tdir}/hadoop/conf/slaves".format(tdir=teuthology.get_testdir(ctx))
tmpFile = StringIO()
slaves = ctx.cluster.only(teuthology.is_type('hadoop.slave'))
## Add required entries to conf/masters
## These nodes host JobTrackers and Namenodes
def write_master(ctx):
- mastersFile = "/tmp/cephtest/hadoop/conf/masters"
+ mastersFile = "{tdir}/hadoop/conf/masters".format(tdir=teuthology.get_testdir(ctx))
master = _get_master(ctx)
remote, _ = master
master = _get_master(ctx)
remote, _ = master
remote.run(
- args=["/tmp/cephtest/hadoop/bin/hadoop","namenode","-format"],
+ args=["{tdir}/hadoop/bin/hadoop".format(tdir=teuthology.get_testdir(ctx)),
+ "namenode",
+ "-format"],
wait=True,
)
),
)
-def _start_hadoop(remote, config):
+def _start_hadoop(ctx, remote, config):
+ testdir = teuthology.get_testdir(ctx)
if config.get('hdfs'):
remote.run(
- args=['/tmp/cephtest/hadoop/bin/start-dfs.sh', ],
+ args=['{tdir}/hadoop/bin/start-dfs.sh'.format(tdir=testdir), ],
wait=True,
)
log.info('done starting hdfs')
remote.run(
- args=['/tmp/cephtest/hadoop/bin/start-mapred.sh', ],
+ args=['{tdir}/hadoop/bin/start-mapred.sh'.format(tdir=testdir), ],
wait=True,
)
log.info('done starting mapred')
-def _stop_hadoop(remote, config):
+def _stop_hadoop(ctx, remote, config):
+ testdir = teuthology.get_testdir(ctx)
remote.run(
- args=['/tmp/cephtest/hadoop/bin/stop-mapred.sh', ],
+ args=['{tdir}/hadoop/bin/stop-mapred.sh'.format(tdir=testdir), ],
wait=True,
)
if config.get('hdfs'):
remote.run(
- args=['/tmp/cephtest/hadoop/bin/stop-dfs.sh', ],
+ args=['{tdir}/hadoop/bin/stop-dfs.sh'.format(tdir=testdir), ],
wait=True,
)
remote, _ = master
log.info('Starting hadoop on {remote}\n'.format(remote=remote.ssh.get_transport().getpeername()[0]))
- _start_hadoop(remote, config)
+ _start_hadoop(ctx, remote, config)
try:
yield
finally:
log.info('Running stop-mapred.sh on {remote}'.format(remote=remote.ssh.get_transport().getpeername()[0]))
- _stop_hadoop(remote, config)
+ _stop_hadoop(ctx, remote, config)
-# download and untar the most recent hadoop binaries into /tmp/cephtest/hadoop
-def _download_hadoop_binaries(remote, hadoop_url):
+# download and untar the most recent hadoop binaries into {testdir}/hadoop
+def _download_hadoop_binaries(ctx, remote, hadoop_url):
log.info('_download_hadoop_binaries: path %s' % hadoop_url)
fileName = 'hadoop.tgz'
+ testdir = teuthology.get_testdir(ctx)
remote.run(
args=[
- 'mkdir', '-p', '-m0755', '/tmp/cephtest/hadoop',
+ 'mkdir', '-p', '-m0755', '{tdir}/hadoop'.format(tdir=testdir),
run.Raw('&&'),
'echo',
'{fileName}'.format(fileName=fileName),
# need to use --input-file to make wget respect --base
'--input-file=-',
run.Raw('|'),
- 'tar', '-xzf', '-', '-C', '/tmp/cephtest/hadoop',
+ 'tar', '-xzf', '-', '-C', '{tdir}/hadoop'.format(tdir=testdir),
],
)
with parallel() as p:
hadoopNodes = ctx.cluster.only(teuthology.is_type('hadoop'))
for remote in hadoopNodes.remotes.iterkeys():
- p.spawn(_download_hadoop_binaries, remote, hadoop_bindir_url)
+ p.spawn(_download_hadoop_binaries, ctx, remote, hadoop_bindir_url)
try:
yield
log.info('Removing hadoop binaries...')
run.wait(
ctx.cluster.run(
- args=[ 'rm', '-rf', '--', '/tmp/cephtest/hadoop'],
+ args=[ 'rm', '-rf', '--', '{tdir}/hadoop'.format(tdir=teuthology.get_testdir(ctx))],
wait=False,
),
)
master = _get_master(ctx)
remote, _ = master
remote.run(
- args=["/tmp/cephtest/hadoop/bin/hadoop","dfsadmin","-safemode", "wait"],
+ args=["{tdir}/hadoop/bin/hadoop".format(tdir=teuthology.get_testdir(ctx)),
+ "dfsadmin",
+ "-safemode",
+ "wait"],
wait=True,
)
else:
- mkdir -p /tmp/hadoop_input
- wget http://ceph.com/qa/hadoop_input_files.tar -O /tmp/hadoop_input/files.tar
- cd /tmp/hadoop_input/; tar -xf /tmp/hadoop_input/files.tar
- - /tmp/cephtest/hadoop/bin/hadoop fs -mkdir wordcount_input
- - /tmp/cephtest/hadoop/bin/hadoop fs -put /tmp/hadoop_input/*txt wordcount_input/
- - /tmp/cephtest/hadoop/bin/hadoop jar /tmp/cephtest/hadoop/build/hadoop-example*jar wordcount wordcount_input wordcount_output
+ - {tdir}/hadoop/bin/hadoop fs -mkdir wordcount_input
+ - {tdir}/hadoop/bin/hadoop fs -put /tmp/hadoop_input/*txt wordcount_input/
+ - {tdir}/hadoop/bin/hadoop jar {tdir}/hadoop/build/hadoop-example*jar wordcount wordcount_input wordcount_output
- rm -rf /tmp/hadoop_input
- """
+ """.format(tdir=teuthology.get_testdir(ctx))
dist = 'precise'
format = 'jar'
arch = 'x86_64'
@contextlib.contextmanager
def base(ctx, config):
log.info('Creating base directory...')
+ test_basedir = teuthology.get_testdir_base(ctx)
+ testdir = teuthology.get_testdir(ctx)
+ # make base dir if it doesn't exist
run.wait(
ctx.cluster.run(
args=[
- 'mkdir', '-m0755', '--',
- '/tmp/cephtest',
+ 'mkdir', '-m0755', '-p', '--',
+ test_basedir,
],
- wait=False,
- )
+ wait=False,
+ )
)
+    # only create testdir if it's not the same as the base dir
+ if test_basedir != testdir:
+ run.wait(
+ ctx.cluster.run(
+ args=[
+ 'mkdir', '-m0755', '--',
+ testdir,
+ ],
+ wait=False,
+ )
+ )
try:
yield
args=[
'rmdir',
'--',
- '/tmp/cephtest',
+ testdir,
],
wait=False,
),
def check_conflict(ctx, config):
log.info('Checking for old test directory...')
+ test_basedir = teuthology.get_testdir_base(ctx)
processes = ctx.cluster.run(
args=[
- 'test', '!', '-e', '/tmp/cephtest',
+ 'test', '!', '-e', test_basedir,
+ ],
+ wait=False,
+ )
+ for proc in processes:
+ assert isinstance(proc.exitstatus, gevent.event.AsyncResult)
+ try:
+ proc.exitstatus.get()
+ except run.CommandFailedError:
+ # base dir exists
+ r = proc.remote.run(
+ args=[
+ 'ls', test_basedir, run.Raw('|'), 'wc', '-l'
+ ],
+ stdout=StringIO(),
+ )
+
+ if int(r.stdout.getvalue()) > 0:
+ log.error('WARNING: Host %s has stale test directories, these need to be investigated and cleaned up!',
+ proc.remote.shortname)
+
+ # testdir might be the same as base dir (if test_path is set)
+ # need to bail out in that case if the testdir exists
+ testdir = teuthology.get_testdir(ctx)
+ processes = ctx.cluster.run(
+ args=[
+ 'test', '!', '-e', testdir,
],
wait=False,
)
try:
proc.exitstatus.get()
except run.CommandFailedError:
- log.error('Host %s has stale cephtest directory, check your lock and reboot to clean up.', proc.remote.shortname)
+        log.error('Host %s has stale test directory %s; check your lock and clean up.', proc.remote.shortname, testdir)
failed = True
if failed:
raise RuntimeError('Stale jobs detected, aborting.')
@contextlib.contextmanager
def archive(ctx, config):
log.info('Creating archive directory...')
+ testdir = teuthology.get_testdir(ctx)
+ archive_dir = '{tdir}/archive'.format(tdir=testdir)
run.wait(
ctx.cluster.run(
args=[
- 'install', '-d', '-m0755', '--',
- '/tmp/cephtest/archive',
+ 'install', '-d', '-m0755', '--', archive_dir,
],
wait=False,
)
os.mkdir(logdir)
for remote in ctx.cluster.remotes.iterkeys():
path = os.path.join(logdir, remote.shortname)
- teuthology.pull_directory(remote, '/tmp/cephtest/archive', path)
+ teuthology.pull_directory(remote, archive_dir, path)
log.info('Removing archive directory...')
run.wait(
'rm',
'-rf',
'--',
- '/tmp/cephtest/archive',
+ archive_dir,
],
wait=False,
),
@contextlib.contextmanager
def coredump(ctx, config):
log.info('Enabling coredump saving...')
+ archive_dir = '{tdir}/archive'.format(tdir=teuthology.get_testdir(ctx))
run.wait(
ctx.cluster.run(
args=[
'install', '-d', '-m0755', '--',
- '/tmp/cephtest/archive/coredump',
+ '{adir}/coredump'.format(adir=archive_dir),
run.Raw('&&'),
- 'sudo', 'sysctl', '-w', 'kernel.core_pattern=/tmp/cephtest/archive/coredump/%t.%p.core',
+ 'sudo', 'sysctl', '-w', 'kernel.core_pattern={adir}/coredump/%t.%p.core'.format(adir=archive_dir),
],
wait=False,
)
'rmdir',
'--ignore-fail-on-non-empty',
'--',
- '/tmp/cephtest/archive/coredump',
+ '{adir}/coredump'.format(adir=archive_dir),
],
wait=False,
)
for remote in ctx.cluster.remotes.iterkeys():
r = remote.run(
args=[
- 'if', 'test', '!', '-e', '/tmp/cephtest/archive/coredump', run.Raw(';'), 'then',
+ 'if', 'test', '!', '-e', '{adir}/coredump'.format(adir=archive_dir), run.Raw(';'), 'then',
'echo', 'OK', run.Raw(';'),
'fi',
],
log.info('Starting syslog monitoring...')
+ archive_dir = '{tdir}/archive'.format(tdir=teuthology.get_testdir(ctx))
run.wait(
ctx.cluster.run(
args=[
'mkdir', '-m0755', '--',
- '/tmp/cephtest/archive/syslog',
+ '{adir}/syslog'.format(adir=archive_dir),
],
wait=False,
)
CONF = '/etc/rsyslog.d/80-cephtest.conf'
conf_fp = StringIO("""
-kern.* -/tmp/cephtest/archive/syslog/kern.log;RSYSLOG_FileFormat
-*.*;kern.none -/tmp/cephtest/archive/syslog/misc.log;RSYSLOG_FileFormat
-""")
+kern.* -{adir}/syslog/kern.log;RSYSLOG_FileFormat
+*.*;kern.none -{adir}/syslog/misc.log;RSYSLOG_FileFormat
+""".format(adir=archive_dir))
try:
for rem in ctx.cluster.remotes.iterkeys():
teuthology.sudo_write_file(
args=[
'egrep',
'\\bBUG\\b|\\bINFO\\b|\\bDEADLOCK\\b',
- run.Raw('/tmp/cephtest/archive/syslog/*.log'),
+                run.Raw('{adir}/syslog/*.log'.format(adir=archive_dir)),
run.Raw('|'),
'grep', '-v', 'task .* blocked for more than .* seconds',
run.Raw('|'),
ctx.cluster.run(
args=[
'find',
- '/tmp/cephtest/archive/syslog',
+                    '{adir}/syslog'.format(adir=archive_dir),
'-name',
'*.log',
'-print0',
for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
clients = list(teuthology.get_clients(ctx=ctx, roles=config))
+ testdir = teuthology.get_testdir(ctx)
+
for id_, remote in clients:
- mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
log.info('Mounting kclient client.{id} at {remote} {mnt}...'.format(
id=id_, remote=remote, mnt=mnt))
ips = [host for (host, port) in (remote_.ssh.get_transport().getpeername() for (remote_, roles) in remotes_and_roles)]
mons = teuthology.get_mons(roles, ips).values()
- secret = '/tmp/cephtest/data/client.{id}.secret'.format(id=id_)
- teuthology.write_secret_file(remote, 'client.{id}'.format(id=id_), secret)
+ secret = '{tdir}/data/client.{id}.secret'.format(tdir=testdir, id=id_)
+ teuthology.write_secret_file(ctx, remote, 'client.{id}'.format(id=id_), secret)
remote.run(
args=[
remote.run(
args=[
'sudo',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/sbin/mount.ceph',
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/sbin/mount.ceph'.format(tdir=testdir),
'{mons}:/'.format(mons=','.join(mons)),
mnt,
'-v',
log.info('Unmounting kernel clients...')
for id_, remote in clients:
log.debug('Unmounting client client.{id}...'.format(id=id_))
- mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
remote.run(
args=[
'sudo',
for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
clients = list(teuthology.get_clients(ctx=ctx, roles=config))
+ testdir = teuthology.get_testdir(ctx)
+
for id_, remote in clients:
# TODO: Don't have to run this more than once per node (remote)
log.info('Enable logging on client.{id} at {remote} ...'.format(
remote.run(
args=[
'sudo',
- '/tmp/cephtest/kcon_most',
+ '{tdir}/kcon_most'.format(tdir=testdir),
'on'
],
)
remote.run(
args=[
'sudo',
- '/tmp/cephtest/kcon_most',
+ '{tdir}/kcon_most'.format(tdir=testdir),
'off'
],
)
clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))
for id_, remote in clients:
- mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
+ mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
client_config = config.get("client.%s" % id_)
if client_config is None:
client_config = {}
log.info('Unexporting nfs server...')
for id_, remote in clients:
log.debug('Unexporting client client.{id}...'.format(id=id_))
- mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
+ mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
remote.run(
args=[
'sudo',
import os
from ..orchestra import run
+from teuthology import misc as teuthology
import time
import gevent
if badconfig:
raise KeyError("bad config {op_}".format(op_=op))
+ testdir = teuthology.get_testdir(ctx)
clients = set(clients)
files = set(files)
lock_procs = list()
(client_remote,) = ctx.cluster.only(client).remotes.iterkeys()
log.info("got a client remote")
(_, _, client_id) = client.partition('.')
- filepath = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=client_id), op["lockfile"])
+ filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), op["lockfile"])
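+ # fetch and build the sclockandhold helper inside the test dir; it is used below to take and hold file locks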
proc = client_remote.run(
args=[
- 'mkdir', '-p', '/tmp/cephtest/archive/lockfile',
+ 'mkdir', '-p', '{tdir}/archive/lockfile'.format(tdir=testdir),
run.Raw('&&'),
- 'mkdir', '-p', '/tmp/cephtest/lockfile',
+ 'mkdir', '-p', '{tdir}/lockfile'.format(tdir=testdir),
run.Raw('&&'),
'wget',
'-nv',
'--no-check-certificate',
'https://raw.github.com/gregsfortytwo/FileLocker/master/sclockandhold.cpp',
- '-O', '/tmp/cephtest/lockfile/sclockandhold.cpp',
+ '-O', '{tdir}/lockfile/sclockandhold.cpp'.format(tdir=testdir),
run.Raw('&&'),
- 'g++', '/tmp/cephtest/lockfile/sclockandhold.cpp',
- '-o', '/tmp/cephtest/lockfile/sclockandhold'
+ 'g++', '{tdir}/lockfile/sclockandhold.cpp'.format(tdir=testdir),
+ '-o', '{tdir}/lockfile/sclockandhold'.format(tdir=testdir)
],
logger=log.getChild('lockfile_client.{id}'.format(id=client_id)),
wait=False
(_, _, client_id) = client.partition('.')
file_procs = list()
for lockfile in files:
- filepath = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=client_id), lockfile)
+ filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), lockfile)
proc = client_remote.run(
args=[
'sudo',
run.wait(file_procs)
file_procs = list()
for lockfile in files:
- filepath = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=client_id), lockfile)
+ filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), lockfile)
proc = client_remote.run(
args=[
'sudo', 'chown', 'ubuntu.ubuntu', filepath
for client in clients:
(client_remote,) = ctx.cluster.only(client).remotes.iterkeys()
(_, _, client_id) = client.partition('.')
- filepath = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=client_id), op["lockfile"])
+ filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), op["lockfile"])
proc = client_remote.run(
args=[
- 'rm', '-rf', '/tmp/cephtest/lockfile',
+ 'rm', '-rf', '{tdir}/lockfile'.format(tdir=testdir),
run.Raw(';'),
'sudo', 'rm', '-rf', filepath
],
result = None
(client_remote,) = ctx.cluster.only(op['client']).remotes.iterkeys()
(_, _, client_id) = op['client'].partition('.')
- filepath = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=client_id), op["lockfile"])
+ testdir = teuthology.get_testdir(ctx)
+ filepath = os.path.join(testdir, 'mnt.{id}'.format(id=client_id), op["lockfile"])
if "maxwait" in op:
timeout = gevent.Timeout(seconds=float(op["maxwait"]))
try:
proc = client_remote.run(
args=[
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/daemon-helper',
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/daemon-helper'.format(tdir=testdir),
'kill',
- '/tmp/cephtest/lockfile/sclockandhold',
+ '{tdir}/lockfile/sclockandhold'.format(tdir=testdir),
filepath,
'{holdtime}'.format(holdtime=op["holdtime"]),
'{offset}'.format(offset=op.get("offset", '0')),
import logging
from ..orchestra import run
+from teuthology import misc as teuthology
log = logging.getLogger(__name__)
(client,) = ctx.cluster.only(config[1]).remotes
( _, _, host_id) = config[0].partition('.')
( _, _, client_id) = config[1].partition('.')
- hostmnt = '/tmp/cephtest/mnt.{id}'.format(id=host_id)
- clientmnt = '/tmp/cephtest/mnt.{id}'.format(id=client_id)
+ testdir = teuthology.get_testdir(ctx)
+ hostmnt = '{tdir}/mnt.{id}'.format(tdir=testdir, id=host_id)
+ clientmnt = '{tdir}/mnt.{id}'.format(tdir=testdir, id=client_id)
try:
for client_name in config:
args=[
# explicitly does not support multiple autotest tasks
# in a single run; the result archival would conflict
- 'mkdir', '/tmp/cephtest/archive/locktest',
+ 'mkdir', '{tdir}/archive/locktest'.format(tdir=testdir),
run.Raw('&&'),
- 'mkdir', '/tmp/cephtest/locktest',
+ 'mkdir', '{tdir}/locktest'.format(tdir=testdir),
run.Raw('&&'),
'wget',
'-nv',
'https://raw.github.com/gregsfortytwo/xfstests-ceph/master/src/locktest.c',
- '-O', '/tmp/cephtest/locktest/locktest.c',
+ '-O', '{tdir}/locktest/locktest.c'.format(tdir=testdir),
run.Raw('&&'),
- 'g++', '/tmp/cephtest/locktest/locktest.c',
- '-o', '/tmp/cephtest/locktest/locktest'
+ 'g++', '{tdir}/locktest/locktest.c'.format(tdir=testdir),
+ '-o', '{tdir}/locktest/locktest'.format(tdir=testdir)
],
logger=log.getChild('locktest_client.{id}'.format(id=client_name)),
)
log.info('starting on host')
hostproc = host.run(
args=[
- '/tmp/cephtest/locktest/locktest',
+ '{tdir}/locktest/locktest'.format(tdir=testdir),
'-p', '6788',
'-d',
'{mnt}/locktestfile'.format(mnt=hostmnt),
(_,_,hostaddr) = host.name.partition('@')
clientproc = client.run(
args=[
- '/tmp/cephtest/locktest/locktest',
+ '{tdir}/locktest/locktest'.format(tdir=testdir),
'-p', '6788',
'-d',
'-h', hostaddr,
log.info('cleaning up host dir')
host.run(
args=[
- 'mkdir', '-p', '/tmp/cephtest/locktest',
+ 'mkdir', '-p', '{tdir}/locktest'.format(tdir=testdir),
run.Raw('&&'),
- 'rm', '-f', '/tmp/cephtest/locktest/locktest.c',
+ 'rm', '-f', '{tdir}/locktest/locktest.c'.format(tdir=testdir),
run.Raw('&&'),
- 'rm', '-f', '/tmp/cephtest/locktest/locktest',
+ 'rm', '-f', '{tdir}/locktest/locktest'.format(tdir=testdir),
run.Raw('&&'),
- 'rmdir', '/tmp/cephtest/locktest'
+ 'rmdir', '{tdir}/locktest'.format(tdir=testdir)
],
logger=log.getChild('.{id}'.format(id=config[0])),
)
log.info('cleaning up client dir')
client.run(
args=[
- 'mkdir', '-p', '/tmp/cephtest/locktest',
+ 'mkdir', '-p', '{tdir}/locktest'.format(tdir=testdir),
run.Raw('&&'),
- 'rm', '-f', '/tmp/cephtest/locktest/locktest.c',
+ 'rm', '-f', '{tdir}/locktest/locktest.c'.format(tdir=testdir),
run.Raw('&&'),
- 'rm', '-f', '/tmp/cephtest/locktest/locktest',
+ 'rm', '-f', '{tdir}/locktest/locktest'.format(tdir=testdir),
run.Raw('&&'),
- 'rmdir', '/tmp/cephtest/locktest'
+ 'rmdir', '{tdir}/locktest'.format(tdir=testdir)
],
logger=log.getChild('.{id}'.format(\
id=config[1])),
log = logging.getLogger(__name__)
-def rados(remote, cmd):
+def rados(ctx, remote, cmd):
+ testdir = teuthology.get_testdir(ctx)
log.info("rados %s" % ' '.join(cmd))
pre = [
- 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/rados',
- '-c', '/tmp/cephtest/ceph.conf',
+ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/rados'.format(tdir=testdir),
+ '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
];
pre.extend(cmd)
proc = remote.run(
manager.mark_out_osd(2)
# kludge to make sure they get a map
- rados(mon, ['-p', 'data', 'put', 'dummy', dummyfile])
+ rados(ctx, mon, ['-p', 'data', 'put', 'dummy', dummyfile])
manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
# create old objects
for f in range(1, 10):
- rados(mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile])
- rados(mon, ['-p', 'data', 'put', 'existed_%d' % f, dummyfile])
- rados(mon, ['-p', 'data', 'rm', 'existed_%d' % f])
+ rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile])
+ rados(ctx, mon, ['-p', 'data', 'put', 'existed_%d' % f, dummyfile])
+ rados(ctx, mon, ['-p', 'data', 'rm', 'existed_%d' % f])
# delay recovery, and make the pg log very long (to prevent backfill)
manager.raw_cluster_cmd(
manager.mark_down_osd(0)
for f in range(1, 10):
- rados(mon, ['-p', 'data', 'put', 'new_%d' % f, dummyfile])
- rados(mon, ['-p', 'data', 'put', 'existed_%d' % f, dummyfile])
- rados(mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile])
+ rados(ctx, mon, ['-p', 'data', 'put', 'new_%d' % f, dummyfile])
+ rados(ctx, mon, ['-p', 'data', 'put', 'existed_%d' % f, dummyfile])
+ rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile])
# bring osd.0 back up, let it peer, but don't replicate the new
# objects...
# verify result
for f in range(1, 10):
- err = rados(mon, ['-p', 'data', 'get', 'new_%d' % f, '-'])
+ err = rados(ctx, mon, ['-p', 'data', 'get', 'new_%d' % f, '-'])
assert err
- err = rados(mon, ['-p', 'data', 'get', 'existed_%d' % f, '-'])
+ err = rados(ctx, mon, ['-p', 'data', 'get', 'existed_%d' % f, '-'])
assert err
- err = rados(mon, ['-p', 'data', 'get', 'existing_%d' % f, '-'])
+ err = rados(ctx, mon, ['-p', 'data', 'get', 'existing_%d' % f, '-'])
assert not err
# see if osd.1 can cope
time: 360
"""
+ testdir = teuthology.get_testdir(ctx)
+
log.info('creating {n} pools'.format(n=config))
poolnum = int(config)
log.info('creating pool{num} on {role}'.format(num=poolnum, role=role_))
proc = remote.run(
args=[
- 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/binary/usr/local/bin/rados',
- '-c', '/tmp/cephtest/ceph.conf',
- '-k', '/tmp/cephtest/data/{role}.keyring'.format(role=role_),
+ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/rados'.format(tdir=testdir),
+ '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+ '-k', '{tdir}/data/{role}.keyring'.format(tdir=testdir, role=role_),
'--name', role_,
'mkpool', 'pool{num}'.format(num=poolnum), '-1',
run.Raw('&&'),
- 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/binary/usr/local/bin/rados',
- '-c', '/tmp/cephtest/ceph.conf',
- '-k', '/tmp/cephtest/data/{role}.keyring'.format(role=role_),
+ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/rados'.format(tdir=testdir),
+ '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+ '-k', '{tdir}/data/{role}.keyring'.format(tdir=testdir, role=role_),
'--name', role_,
'--pool', 'pool{num}'.format(num=poolnum),
'bench', '0', 'write', '-t', '16', '--block-size', '1'
- ceph-fuse:
- pexec:
clients:
- - ln -s /tmp/cephtest/mnt.* /tmp/cephtest/gmnt
+ - ln -s {testdir}/mnt.* {testdir}/gmnt
- ssh_keys:
- mpi:
exec: fsx-mpi
- workdir: /tmp/cephtest/gmnt
+ workdir: {testdir}/gmnt
- pexec:
clients:
- - rm -f /tmp/cephtest/gmnt
+ - rm -f {testdir}/gmnt
"""
assert isinstance(config, dict), 'task mpi got invalid config'
log.info('mpi rank 0 is: {name}'.format(name=master_remote.name))
+ testdir = teuthology.get_testdir(ctx)
+
# write out the mpi hosts file
log.info('mpi nodes: [%s]' % (', '.join(hosts)))
- teuthology.write_file(remote=master_remote, path='/tmp/cephtest/mpi-hosts', data='\n'.join(hosts))
+ teuthology.write_file(remote=master_remote,
+ path='{tdir}/mpi-hosts'.format(tdir=testdir),
+ data='\n'.join(hosts))
log.info('mpiexec on {name}: {cmd}'.format(name=master_remote.name, cmd=mpiexec))
- args=['mpiexec', '-f', '/tmp/cephtest/mpi-hosts']
+ args=['mpiexec', '-f', '{tdir}/mpi-hosts'.format(tdir=testdir)]
args.extend(workdir)
args.extend(mpiexec.split(' '))
master_remote.run(args=args, )
log.info('mpi task completed')
- master_remote.run(args=['rm', '/tmp/cephtest/mpi-hosts'])
+ master_remote.run(args=['rm', '{tdir}/mpi-hosts'.format(tdir=testdir)])
clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))
+ testdir = teuthology.get_testdir(ctx)
for id_, remote in clients:
- mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
client_config = config.get("client.%s" % id_)
if client_config is None:
client_config = {}
server = client_config.get('server');
svr_id = server[len('client.'):]
- svr_mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=svr_id))
+ svr_mnt = os.path.join(testdir, 'mnt.{id}'.format(id=svr_id))
svr_remote = None
all_config = ['client.{id}'.format(id=tmpid)
log.info('Unmounting nfs clients...')
for id_, remote in clients:
log.debug('Unmounting nfs client client.{id}...'.format(id=id_))
- mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
remote.run(
args=[
'sudo',
log = logging.getLogger(__name__)
-def rados(remote, cmd):
+def rados(testdir, remote, cmd):
log.info("rados %s" % ' '.join(cmd))
pre = [
- 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/rados',
- '-c', '/tmp/cephtest/ceph.conf',
+ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/rados'.format(tdir=testdir),
+ '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
];
pre.extend(cmd)
proc = remote.run(
'--osd-recovery-delay-start 10000 --osd-min-pg-log-entries 100000000'
)
+ testdir = teuthology.get_testdir(ctx)
+
# kludge to make sure they get a map
- rados(mon, ['-p', 'data', 'put', 'dummy', dummyfile])
+ rados(testdir, mon, ['-p', 'data', 'put', 'dummy', dummyfile])
# create old objects
for f in range(1, 10):
- rados(mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile])
+ rados(testdir, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile])
manager.mark_out_osd(3)
manager.wait_till_active()
import logging
from ..orchestra import run
+from teuthology import misc as teuthology
log = logging.getLogger(__name__)
assert isinstance(config, dict), \
"please list clients to run on"
omapbench = {}
+ testdir = teuthology.get_testdir(ctx)
print(str(config.get('increment',-1)))
for role in config.get('clients', ['client.0']):
assert isinstance(role, basestring)
proc = remote.run(
args=[
"/bin/sh", "-c",
- " ".join(['CEPH_CONF=/tmp/cephtest/ceph.conf',
- 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/omapbench',
- '-k', '/tmp/cephtest/data/{role}.keyring'.format(role=role),
+ " ".join(['CEPH_CONF={tdir}/ceph.conf',
+ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib',
+ '{tdir}/enable-coredump',
+ '{tdir}/binary/usr/local/bin/ceph-coverage',
+ '{tdir}/archive/coverage',
+ '{tdir}/binary/usr/local/bin/omapbench',
+ '-k', '{tdir}/data/{role}.keyring'.format(tdir=testdir, role=role),
'--name', role[len(PREFIX):],
'-t', str(config.get('threads', 30)),
'-o', str(config.get('objects', 1000)),
'--valsize', str(config.get('valsize',1000)),
'--inc', str(config.get('increment',10)),
'--omaptype', str(config.get('omaptype','uniform'))
- ]),
+ ]).format(tdir=testdir),
],
logger=log.getChild('omapbench.{id}'.format(id=id_)),
stdin=run.PIPE,
log = logging.getLogger(__name__)
-def rados_start(remote, cmd):
+def rados_start(ctx, remote, cmd):
log.info("rados %s" % ' '.join(cmd))
+ testdir = teuthology.get_testdir(ctx)
pre = [
- 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/rados',
- '-c', '/tmp/cephtest/ceph.conf',
+ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/rados'.format(tdir=testdir),
+ '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
];
pre.extend(cmd)
proc = remote.run(
manager.wait_for_clean()
# write some data
- p = rados_start(mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096',
+ p = rados_start(ctx, mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096',
'--no-cleanup'])
err = p.exitstatus.get();
log.info('err is %d' % err)
manager.wait_for_recovery()
# write some new data
- p = rados_start(mon, ['-p', 'data', 'bench', '30', 'write', '-b', '4096',
+ p = rados_start(ctx, mon, ['-p', 'data', 'bench', '30', 'write', '-b', '4096',
'--no-cleanup'])
time.sleep(15)
log = logging.getLogger(__name__)
-def rados_start(remote, cmd):
+def rados_start(testdir, remote, cmd):
log.info("rados %s" % ' '.join(cmd))
pre = [
- 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/rados',
- '-c', '/tmp/cephtest/ceph.conf',
+ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/rados'.format(tdir=testdir),
+ '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
];
pre.extend(cmd)
proc = remote.run(
config = {}
assert isinstance(config, dict), \
'task only accepts a dict for configuration'
+ testdir = teuthology.get_testdir(ctx)
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
manager.raw_cluster_cmd('osd', 'unset', 'nodown')
# write some new data
- p = rados_start(mon, ['-p', 'rbd', 'bench', '60', 'write', '-b', '4096',
+ p = rados_start(testdir, mon, ['-p', 'rbd', 'bench', '60', 'write', '-b', '4096',
'--no-cleanup'])
time.sleep(15)
manager.wait_for_active_or_down()
# write some more (make sure osd.2 really is divergent)
- p = rados_start(mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096'])
+ p = rados_start(testdir, mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096'])
p.exitstatus.get();
# revive divergent osd
"""
Test handling of incomplete pgs. Requires 4 osds.
"""
+ testdir = teuthology.get_testdir(ctx)
if config is None:
config = {}
assert isinstance(config, dict), \
# few objects in metadata pool (with pg log, normal recovery)
for f in range(1, 20):
- p = rados_start(mon, ['-p', 'metadata', 'put',
+ p = rados_start(testdir, mon, ['-p', 'metadata', 'put',
'foo.%d' % f, '/etc/passwd'])
p.exitstatus.get()
log = logging.getLogger(__name__)
-def rados(remote, cmd):
+def rados(ctx, remote, cmd):
+ testdir = teuthology.get_testdir(ctx)
log.info("rados %s" % ' '.join(cmd))
pre = [
- 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/rados',
- '-c', '/tmp/cephtest/ceph.conf',
+ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/rados'.format(tdir=testdir),
+ '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
];
pre.extend(cmd)
proc = remote.run(
manager.mark_down_osd(2)
# kludge to make sure they get a map
- rados(mon, ['-p', 'data', 'get', 'dummy', '-'])
+ rados(ctx, mon, ['-p', 'data', 'get', 'dummy', '-'])
manager.raw_cluster_cmd('tell', 'osd.0', 'flush_pg_stats')
manager.raw_cluster_cmd('tell', 'osd.1', 'flush_pg_stats')
tasks:
- pexec:
all:
- - grep FAIL /tmp/cephtest/archive/log/*
+ - grep FAIL {testdir}/archive/log/*
Or if you want to run in parallel on all clients:
tasks:
- pexec:
clients:
- - dd if=/dev/zero of=/tmp/cephtest/mnt.* count=1024 bs=1024
+ - dd if=/dev/zero of={testdir}/mnt.* count=1024 bs=1024
You can also ensure that parallel commands are synchronized with the
special 'barrier' statement:
tasks:
- pexec:
clients:
- - cd /tmp/cephtest/mnt.*
+ - cd {testdir}/mnt.*
- while true; do
- barrier
- dd if=/dev/zero of=./foo count=1024 bs=1024
@contextlib.contextmanager
def create_dirs(ctx, config):
+ testdir = teuthology.get_testdir(ctx)
for client, client_config in config.iteritems():
assert 'test' in client_config, 'You must specify a test to run'
(remote,) = ctx.cluster.only(client).remotes.keys()
remote.run(
args=[
'install', '-d', '-m0755', '--',
- '/tmp/cephtest/qemu',
- '/tmp/cephtest/archive/qemu',
+ '{tdir}/qemu'.format(tdir=testdir),
+ '{tdir}/archive/qemu'.format(tdir=testdir),
]
)
try:
(remote,) = ctx.cluster.only(client).remotes.keys()
remote.run(
args=[
- 'rmdir', '/tmp/cephtest/qemu', run.Raw('||'), 'true',
+ 'rmdir', '{tdir}/qemu'.format(tdir=testdir), run.Raw('||'), 'true',
]
)
@contextlib.contextmanager
def generate_iso(ctx, config):
log.info('generating iso...')
+ testdir = teuthology.get_testdir(ctx)
for client, client_config in config.iteritems():
assert 'test' in client_config, 'You must specify a test to run'
src_dir = os.path.dirname(__file__)
- userdata_path = os.path.join('/tmp/cephtest/qemu', 'userdata.' + client)
- metadata_path = os.path.join('/tmp/cephtest/qemu', 'metadata.' + client)
+ userdata_path = os.path.join(testdir, 'qemu', 'userdata.' + client)
+ metadata_path = os.path.join(testdir, 'qemu', 'metadata.' + client)
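+ # cloud-init user-data/meta-data are staged under <testdir>/qemu and later rolled into the boot ISO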
with file(os.path.join(src_dir, 'userdata_setup.yaml'), 'rb') as f:
test_setup = ''.join(f.readlines())
with file(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f:
teuthology.write_file(remote, metadata_path, f)
- test_file = '/tmp/cephtest/qemu/{client}.test.sh'.format(client=client)
+ test_file = '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client)
remote.run(
args=[
'wget', '-nv', '-O', test_file,
args=[
'genisoimage', '-quiet', '-input-charset', 'utf-8',
'-volid', 'cidata', '-joliet', '-rock',
- '-o', '/tmp/cephtest/qemu/{client}.iso'.format(client=client),
+ '-o', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
'-graft-points',
'user-data={userdata}'.format(userdata=userdata_path),
'meta-data={metadata}'.format(metadata=metadata_path),
remote.run(
args=[
'rm', '-f',
- '/tmp/cephtest/qemu/{client}.iso'.format(client=client),
- os.path.join('/tmp/cephtest/qemu', 'userdata.' + client),
- os.path.join('/tmp/cephtest/qemu', 'metadata.' + client),
- '/tmp/cephtest/qemu/{client}.test.sh'.format(client=client),
+ '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
+ os.path.join(testdir, 'qemu', 'userdata.' + client),
+ os.path.join(testdir, 'qemu', 'metadata.' + client),
+ '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client),
],
)
@contextlib.contextmanager
def download_image(ctx, config):
log.info('downloading base image')
+ testdir = teuthology.get_testdir(ctx)
for client, client_config in config.iteritems():
(remote,) = ctx.cluster.only(client).remotes.keys()
- base_file = '/tmp/cephtest/qemu/base.{client}.qcow2'.format(client=client)
+ base_file = '{tdir}/qemu/base.{client}.qcow2'.format(tdir=testdir, client=client)
remote.run(
args=[
'wget', '-nv', '-O', base_file, DEFAULT_IMAGE_URL,
finally:
log.debug('cleaning up base image files')
for client in config.iterkeys():
- base_file = '/tmp/cephtest/qemu/base.{client}.qcow2'.format(
+ base_file = '{tdir}/qemu/base.{client}.qcow2'.format(
+ tdir=testdir,
client=client,
)
(remote,) = ctx.cluster.only(client).remotes.keys()
@contextlib.contextmanager
def run_qemu(ctx, config):
procs = []
+ testdir = teuthology.get_testdir(ctx)
for client, client_config in config.iteritems():
(remote,) = ctx.cluster.only(client).remotes.keys()
- log_dir = '/tmp/cephtest/archive/qemu/{client}'.format(client=client)
+ log_dir = '{tdir}/archive/qemu/{client}'.format(tdir=testdir, client=client)
remote.run(
args=[
'mkdir', log_dir,
]
)
- base_file = '/tmp/cephtest/qemu/base.{client}.qcow2'.format(client=client)
+ base_file = '{tdir}/qemu/base.{client}.qcow2'.format(tdir=testdir, client=client)
args=[
- run.Raw('LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib'),
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/daemon-helper',
+ run.Raw('LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir)),
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/daemon-helper'.format(tdir=testdir),
'term',
'kvm', '-enable-kvm', '-nographic',
'-m', str(client_config.get('memory', DEFAULT_MEM)),
'-drive',
'file={base},format=qcow2,if=virtio'.format(base=base_file),
# cd holding metadata for cloud-init
- '-cdrom', '/tmp/cephtest/qemu/{client}.iso'.format(client=client),
+ '-cdrom', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client),
# virtio 9p fs for logging
'-fsdev',
'local,id=log,path={log},security_model=none'.format(log=log_dir),
args.extend([
'-drive',
'file=rbd:rbd/{img}:conf={conf}:id={id},format=rbd,if=virtio,cache={cachemode}'.format(
- conf='/tmp/cephtest/ceph.conf',
+ conf='{tdir}/ceph.conf'.format(tdir=testdir),
img='{client}.{num}'.format(client=client, num=i),
id=client[len('client.'):],
cachemode=cachemode,
remote.run(
args=[
'test', '-f',
- '/tmp/cephtest/archive/qemu/{client}/success'.format(
+ '{tdir}/archive/qemu/{client}/success'.format(
+ tdir=testdir,
client=client
),
],
import logging
from ..orchestra import run
+from teuthology import misc as teuthology
log = logging.getLogger(__name__)
object_size = int(config.get('object_size', 4000000))
op_weights = config.get('op_weights', {})
+ testdir = teuthology.get_testdir(ctx)
args = [
- 'CEPH_CONF=/tmp/cephtest/ceph.conf',
- 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/testrados',
+ 'CEPH_CONF={tdir}/ceph.conf'.format(tdir=testdir),
+ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/testrados'.format(tdir=testdir),
'--op', 'read', str(op_weights.get('read', 100)),
'--op', 'write', str(op_weights.get('write', 100)),
'--op', 'delete', str(op_weights.get('delete', 10)),
import logging
from ..orchestra import run
+from teuthology import misc as teuthology
log = logging.getLogger(__name__)
"please list clients to run on"
radosbench = {}
+ testdir = teuthology.get_testdir(ctx)
+
for role in config.get('clients', ['client.0']):
assert isinstance(role, basestring)
PREFIX = 'client.'
proc = remote.run(
args=[
"/bin/sh", "-c",
- " ".join(['LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/rados',
- '-c', '/tmp/cephtest/ceph.conf',
- '-k', '/tmp/cephtest/data/{role}.keyring'.format(role=role),
+ " ".join(['LD_LIBRARY_PATH={tdir}/binary/usr/local/lib',
+ '{tdir}/enable-coredump',
+ '{tdir}/binary/usr/local/bin/ceph-coverage',
+ '{tdir}/archive/coverage',
+ '{tdir}/binary/usr/local/bin/rados',
+ '-c', '{tdir}/ceph.conf',
+ '-k', '{tdir}/data/{role}.keyring'.format(tdir=testdir, role=role),
'--name', role,
'mkpool', str(config.get('pool', 'data'))
- ]),
+ ]).format(tdir=testdir),
],
logger=log.getChild('radosbench.{id}'.format(id=id_)),
stdin=run.PIPE,
proc = remote.run(
args=[
"/bin/sh", "-c",
- " ".join(['LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/rados',
- '-c', '/tmp/cephtest/ceph.conf',
- '-k', '/tmp/cephtest/data/{role}.keyring'.format(role=role),
+ " ".join(['LD_LIBRARY_PATH={tdir}/binary/usr/local/lib',
+ '{tdir}/enable-coredump',
+ '{tdir}/binary/usr/local/bin/ceph-coverage',
+ '{tdir}/archive/coverage',
+ '{tdir}/binary/usr/local/bin/rados',
+ '-c', '{tdir}/ceph.conf',
+ '-k', '{tdir}/data/{role}.keyring'.format(tdir=testdir, role=role),
'--name', role,
'-p' , str(config.get('pool', 'data')),
'bench', str(config.get('time', 360)), 'write',
- ]),
+ ]).format(tdir=testdir),
],
logger=log.getChild('radosbench.{id}'.format(id=id_)),
stdin=run.PIPE,
proc = remote.run(
args=[
"/bin/sh", "-c",
- " ".join(['LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/rados',
- '-c', '/tmp/cephtest/ceph.conf',
- '-k', '/tmp/cephtest/data/{role}.keyring'.format(role=role),
+ " ".join(['LD_LIBRARY_PATH={tdir}/binary/usr/local/lib',
+ '{tdir}/enable-coredump',
+ '{tdir}/binary/usr/local/bin/ceph-coverage',
+ '{tdir}/archive/coverage',
+ '{tdir}/binary/usr/local/bin/rados',
+ '-c', '{tdir}/ceph.conf',
+ '-k', '{tdir}/data/{role}.keyring'.format(tdir=testdir, role=role),
'--name', role,
'rmpool', str(config.get('pool', 'data'))
- ]),
+ ]).format(tdir=testdir),
],
logger=log.getChild('radosbench.{id}'.format(id=id_)),
stdin=run.PIPE,
def rgwadmin(ctx, client, cmd):
log.info('radosgw-admin: %s' % cmd)
+ testdir = teuthology.get_testdir(ctx)
pre = [
- 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/radosgw-admin',
- '-c', '/tmp/cephtest/ceph.conf',
+ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/radosgw-admin'.format(tdir=testdir),
+ '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
'--log-to-stderr',
'--format', 'json',
]
else:
images = [(role, None) for role in config]
+ testdir = teuthology.get_testdir(ctx)
for role, properties in images:
if properties is None:
properties = {}
log.info('Creating image {name} with size {size}'.format(name=name,
size=size))
args = [
- 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/rbd',
- '-c', '/tmp/cephtest/ceph.conf',
+ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/rbd'.format(tdir=testdir),
+ '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
'-p', 'rbd',
'create',
'--size', str(size),
(remote,) = ctx.cluster.only(role).remotes.keys()
remote.run(
args=[
- 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/rbd',
- '-c', '/tmp/cephtest/ceph.conf',
+ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/rbd'.format(tdir=testdir),
+ '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
'-p', 'rbd',
'rm',
name,
role_images = [(role, None) for role in config]
log.info('Creating rbd block devices...')
+
+ testdir = teuthology.get_testdir(ctx)
+
for role, image in role_images:
if image is None:
image = default_image_name(role)
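+ # write a udev rule so /dev/rbd/<pool>/<image> symlinks appear; udev's %c{N} braces must stay literal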
remote.run(
args=[
'echo',
- 'KERNEL=="rbd[0-9]*", PROGRAM="/tmp/cephtest/binary/usr/local/bin/ceph-rbdnamer %n", SYMLINK+="rbd/%c{1}/%c{2}"',
+ 'KERNEL=="rbd[0-9]*", PROGRAM="{tdir}/binary/usr/local/bin/ceph-rbdnamer %n", SYMLINK+="rbd/%c{1}/%c{2}"'.format(tdir=testdir),
run.Raw('>'),
- '/tmp/cephtest/51-rbd.rules',
+ '{tdir}/51-rbd.rules'.format(tdir=testdir),
],
)
remote.run(
args=[
'sudo',
'mv',
- '/tmp/cephtest/51-rbd.rules',
+ '{tdir}/51-rbd.rules'.format(tdir=testdir),
'/etc/udev/rules.d/',
],
)
- secretfile = '/tmp/cephtest/data/{role}.secret'.format(role=role)
- teuthology.write_secret_file(remote, role, secretfile)
+ secretfile = '{tdir}/data/{role}.secret'.format(tdir=testdir, role=role)
+ teuthology.write_secret_file(ctx, remote, role, secretfile)
remote.run(
args=[
'sudo',
- 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/rbd',
- '-c', '/tmp/cephtest/ceph.conf',
+ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/rbd'.format(tdir=testdir),
+ '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
'--user', role.rsplit('.')[-1],
'--secret', secretfile,
'-p', 'rbd',
remote.run(
args=[
'sudo',
- 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/rbd',
- '-c', '/tmp/cephtest/ceph.conf',
+ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/rbd'.format(tdir=testdir),
+ '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
'-p', 'rbd',
'unmap',
'/dev/rbd/rbd/{imgname}'.format(imgname=image),
id_ = role[len(PREFIX):]
return id_
- mnt_template = '/tmp/cephtest/mnt.{id}'
+ testdir = teuthology.get_testdir(ctx)
+
+ mnt_template = '{tdir}/mnt.{id}'
for role, image in role_images:
if image is None:
image = default_image_name(role)
(remote,) = ctx.cluster.only(role).remotes.keys()
id_ = strip_client_prefix(role)
- mnt = mnt_template.format(id=id_)
+ mnt = mnt_template.format(tdir=testdir, id=id_)
remote.run(
args=[
'mkdir',
yield
def run_xfstests_one_client(ctx, role, properties):
+ testdir = teuthology.get_testdir(ctx)
try:
count = properties.get('count')
test_dev = properties.get('test_dev')
(remote,) = ctx.cluster.only(role).remotes.keys()
# Fetch the test script
- test_root = '/tmp/cephtest'
+ test_root = teuthology.get_testdir(ctx)
test_script = 'run_xfstests.sh'
test_path = os.path.join(test_root, test_script)
# readlink -f <path> in order to get their canonical
# pathname (so it matches what the kernel remembers).
args = [
- 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
+ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
'/usr/bin/sudo',
'/bin/bash',
test_path,
import logging
from teuthology.parallel import parallel
+from teuthology import misc as teuthology
log = logging.getLogger(__name__)
yield
def _run_one_client(ctx, config, role):
+ testdir = teuthology.get_testdir(ctx)
(remote,) = ctx.cluster.only(role).remotes.iterkeys()
remote.run(
args=[
- 'CEPH_CONF=/tmp/cephtest/ceph.conf',
- 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/test_librbd_fsx',
+ 'CEPH_CONF={tdir}/ceph.conf'.format(tdir=testdir),
+ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/test_librbd_fsx'.format(tdir=testdir),
'-d',
'-W', '-R', # mmap doesn't work with rbd
'-p', str(config.get('progress_interval', 100)), # show progress
- '-P', '/tmp/cephtest/archive',
+ '-P', '{tdir}/archive'.format(tdir=testdir),
'-t', str(config.get('truncbdy',1)),
'-l', str(config.get('size', 1073741824)),
'-S', str(config.get('seed', 0)),
osd = str(random.choice(self.osds))
(osd_remote,) = self.ceph_manager.ctx.cluster.only('osd.%s' % osd).remotes.iterkeys()
+ testdir = teuthology.get_testdir(self.ceph_manager.ctx)
+
# create the objects
osd_remote.run(
args=[
- 'env', 'CEPH_CONF=/tmp/cephtest/ceph.conf',
- 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/smalliobench',
+ 'env', 'CEPH_CONF={tdir}/ceph.conf'.format(tdir=testdir),
+ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/smalliobench'.format(tdir=testdir),
'--use-prefix', 'recovery_bench',
'--init-only', '1',
'--num-objects', str(num_objects),
log.info('non-recovery (baseline)')
p = osd_remote.run(
args=[
- 'env', 'CEPH_CONF=/tmp/cephtest/ceph.conf',
- 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/smalliobench',
+ 'env', 'CEPH_CONF={tdir}/ceph.conf'.format(tdir=testdir),
+ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/smalliobench'.format(tdir=testdir),
'--use-prefix', 'recovery_bench',
'--do-not-init', '1',
'--duration', str(duration),
log.info('recovery active')
p = osd_remote.run(
args=[
- 'env', 'CEPH_CONF=/tmp/cephtest/ceph.conf',
- 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/smalliobench',
+ 'env', 'CEPH_CONF={tdir}/ceph.conf'.format(tdir=testdir),
+ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/smalliobench'.format(tdir=testdir),
'--use-prefix', 'recovery_bench',
'--do-not-init', '1',
'--duration', str(duration),
@contextlib.contextmanager
def run_tests(ctx, config):
assert isinstance(config, dict)
+ testdir = teuthology.get_testdir(ctx)
for client, client_config in config.iteritems():
client_config['extra_args'] = [
's3tests.functional.test_s3:test_bucket_list_return_data',
]
# args = [
-# 'S3TEST_CONF=/tmp/cephtest/archive/s3-tests.{client}.conf'.format(client=client),
-# '/tmp/cephtest/s3-tests/virtualenv/bin/nosetests',
+# 'S3TEST_CONF={tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client),
+# '{tdir}/s3-tests/virtualenv/bin/nosetests'.format(tdir=testdir),
# '-w',
-# '/tmp/cephtest/s3-tests',
+# '{tdir}/s3-tests'.format(tdir=testdir),
# '-v',
# 's3tests.functional.test_s3:test_bucket_list_return_data',
# ]
args = [
'netcat',
'-w', '5',
- '-U', '/tmp/cephtest/rgw.opslog.sock',
+ '-U', '{tdir}/rgw.opslog.sock'.format(tdir=testdir),
],
stdout = netcat_out,
)
@contextlib.contextmanager
def create_dirs(ctx, config):
log.info('Creating apache directories...')
+ testdir = teuthology.get_testdir(ctx)
for client in config.iterkeys():
ctx.cluster.only(client).run(
args=[
'mkdir',
'-p',
- '/tmp/cephtest/apache/htdocs',
- '/tmp/cephtest/apache/tmp',
+ '{tdir}/apache/htdocs'.format(tdir=testdir),
+ '{tdir}/apache/tmp'.format(tdir=testdir),
run.Raw('&&'),
'mkdir',
- '/tmp/cephtest/archive/apache',
+ '{tdir}/archive/apache'.format(tdir=testdir),
],
)
try:
args=[
'rm',
'-rf',
- '/tmp/cephtest/apache/tmp',
+ '{tdir}/apache/tmp'.format(tdir=testdir),
run.Raw('&&'),
'rmdir',
- '/tmp/cephtest/apache/htdocs',
+ '{tdir}/apache/htdocs'.format(tdir=testdir),
run.Raw('&&'),
'rmdir',
- '/tmp/cephtest/apache',
+ '{tdir}/apache'.format(tdir=testdir),
],
)
@contextlib.contextmanager
def ship_config(ctx, config):
assert isinstance(config, dict)
+ testdir = teuthology.get_testdir(ctx)
log.info('Shipping apache config and rgw.fcgi...')
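+ # apache.conf.template is rendered with the run's test dir before being written to each client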
- src = os.path.join(os.path.dirname(__file__), 'apache.conf')
+ src = os.path.join(os.path.dirname(__file__), 'apache.conf.template')
for client in config.iterkeys():
(remote,) = ctx.cluster.only(client).remotes.keys()
with file(src, 'rb') as f:
teuthology.write_file(
remote=remote,
- path='/tmp/cephtest/apache/apache.conf',
- data=f,
+ path='{tdir}/apache/apache.conf'.format(tdir=testdir),
+ data=f.read().format(testdir=testdir),
)
teuthology.write_file(
remote=remote,
- path='/tmp/cephtest/apache/htdocs/rgw.fcgi',
+ path='{tdir}/apache/htdocs/rgw.fcgi'.format(tdir=testdir),
data="""#!/bin/sh
ulimit -c unlimited
-export LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib
-exec /tmp/cephtest/binary/usr/local/bin/radosgw -f -c /tmp/cephtest/ceph.conf
-"""
+export LD_LIBRARY_PATH={tdir}/binary/usr/local/lib
+exec {tdir}/binary/usr/local/bin/radosgw -f -c {tdir}/ceph.conf
+""".format(tdir=testdir)
)
remote.run(
args=[
'chmod',
'a=rx',
- '/tmp/cephtest/apache/htdocs/rgw.fcgi',
+ '{tdir}/apache/htdocs/rgw.fcgi'.format(tdir=testdir),
],
)
try:
args=[
'rm',
'-f',
- '/tmp/cephtest/apache/apache.conf',
+ '{tdir}/apache/apache.conf'.format(tdir=testdir),
run.Raw('&&'),
'rm',
'-f',
- '/tmp/cephtest/apache/htdocs/rgw.fcgi',
+ '{tdir}/apache/htdocs/rgw.fcgi'.format(tdir=testdir),
],
)
@contextlib.contextmanager
def start_rgw(ctx, config):
log.info('Starting rgw...')
+ testdir = teuthology.get_testdir(ctx)
rgws = {}
for client in config.iterkeys():
(remote,) = ctx.cluster.only(client).remotes.iterkeys()
log.info("rgw %s config is %s", client, client_config)
run_cmd=[
- 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/daemon-helper',
+ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/daemon-helper'.format(tdir=testdir),
'term',
]
run_cmd_tail=[
- '/tmp/cephtest/binary/usr/local/bin/radosgw',
- '-c', '/tmp/cephtest/ceph.conf',
- '--log-file', '/tmp/cephtest/archive/log/rgw.log',
- '--rgw_ops_log_socket_path', '/tmp/cephtest/rgw.opslog.sock',
- '/tmp/cephtest/apache/apache.conf',
+ '{tdir}/binary/usr/local/bin/radosgw'.format(tdir=testdir),
+ '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+ '--log-file', '{tdir}/archive/log/rgw.log'.format(tdir=testdir),
+ '--rgw_ops_log_socket_path', '{tdir}/rgw.opslog.sock'.format(tdir=testdir),
+ '{tdir}/apache/apache.conf'.format(tdir=testdir),
'--foreground',
run.Raw('>'),
- '/tmp/cephtest/archive/log/rgw.stdout',
+ '{tdir}/archive/log/rgw.stdout'.format(tdir=testdir),
run.Raw('2>&1'),
]
args=[
'rm',
'-rf',
- '/tmp/cephtest/rgw.opslog.sock',
+ '{tdir}/rgw.opslog.sock'.format(tdir=testdir),
],
)
@contextlib.contextmanager
def start_apache(ctx, config):
log.info('Starting apache...')
+ testdir = teuthology.get_testdir(ctx)
apaches = {}
for client in config.iterkeys():
(remote,) = ctx.cluster.only(client).remotes.keys()
proc = remote.run(
args=[
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/daemon-helper',
- 'kill',
- 'apache2',
- '-X',
- '-f',
- '/tmp/cephtest/apache/apache.conf',
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/daemon-helper'.format(tdir=testdir),
+ 'kill',
+ 'apache2',
+ '-X',
+ '-f',
+ '{tdir}/apache/apache.conf'.format(tdir=testdir),
],
logger=log.getChild(client),
stdin=run.PIPE,
'git', 'clone',
# 'https://github.com/ceph/s3-tests.git',
'git://ceph.com/git/s3-tests.git',
- '/tmp/cephtest/s3-tests',
+ '{tdir}/s3-tests'.format(tdir=teuthology.get_testdir(ctx)),
],
)
try:
args=[
'rm',
'-rf',
- '/tmp/cephtest/s3-tests',
+ '{tdir}/s3-tests'.format(tdir=teuthology.get_testdir(ctx)),
],
)
def create_users(ctx, config):
assert isinstance(config, dict)
log.info('Creating rgw users...')
+ testdir = teuthology.get_testdir(ctx)
for client in config['clients']:
s3tests_conf = config['s3tests_conf'][client]
s3tests_conf.setdefault('readwrite', {})
_config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
ctx.cluster.only(client).run(
args=[
- 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/radosgw-admin',
- '-c', '/tmp/cephtest/ceph.conf',
+ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/radosgw-admin'.format(tdir=testdir),
+ '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
'user', 'create',
'--uid', s3tests_conf[section]['user_id'],
'--display-name', s3tests_conf[section]['display_name'],
remote.run(
args=[
'cd',
- '/tmp/cephtest/s3-tests',
+ '{tdir}/s3-tests'.format(tdir=teuthology.get_testdir(ctx)),
run.Raw('&&'),
'./bootstrap',
],
yaml.safe_dump(conf, conf_fp, default_flow_style=False)
teuthology.write_file(
remote=remote,
- path='/tmp/cephtest/archive/s3readwrite.{client}.config.yaml'.format(client=client),
+ path='{tdir}/archive/s3readwrite.{client}.config.yaml'.format(tdir=teuthology.get_testdir(ctx), client=client),
data=conf_fp.getvalue(),
)
yield
@contextlib.contextmanager
def run_tests(ctx, config):
assert isinstance(config, dict)
+ testdir = teuthology.get_testdir(ctx)
for client, client_config in config.iteritems():
(remote,) = ctx.cluster.only(client).remotes.keys()
- conf = teuthology.get_file(remote, '/tmp/cephtest/archive/s3readwrite.{client}.config.yaml'.format(client=client))
+ conf = teuthology.get_file(remote, '{tdir}/archive/s3readwrite.{client}.config.yaml'.format(tdir=testdir, client=client))
args = [
- '/tmp/cephtest/s3-tests/virtualenv/bin/s3tests-test-readwrite',
+ '{tdir}/s3-tests/virtualenv/bin/s3tests-test-readwrite'.format(tdir=testdir),
]
if client_config is not None and 'extra_args' in client_config:
args.extend(client_config['extra_args'])
def download(ctx, config):
assert isinstance(config, list)
log.info('Downloading s3-tests...')
+ testdir = teuthology.get_testdir(ctx)
for client in config:
ctx.cluster.only(client).run(
args=[
'git', 'clone',
# 'https://github.com/ceph/s3-tests.git',
'git://ceph.com/git/s3-tests.git',
- '/tmp/cephtest/s3-tests',
+ '{tdir}/s3-tests'.format(tdir=testdir),
],
)
try:
args=[
'rm',
'-rf',
- '/tmp/cephtest/s3-tests',
+ '{tdir}/s3-tests'.format(tdir=testdir),
],
)
def create_users(ctx, config):
assert isinstance(config, dict)
log.info('Creating rgw users...')
+ testdir = teuthology.get_testdir(ctx)
for client in config['clients']:
s3tests_conf = config['s3tests_conf'][client]
s3tests_conf.setdefault('roundtrip', {})
_config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
ctx.cluster.only(client).run(
args=[
- 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/radosgw-admin',
- '-c', '/tmp/cephtest/ceph.conf',
+ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/radosgw-admin'.format(tdir=testdir),
+ '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
'user', 'create',
'--uid', s3tests_conf[section]['user_id'],
'--display-name', s3tests_conf[section]['display_name'],
def configure(ctx, config):
assert isinstance(config, dict)
log.info('Configuring s3-roundtrip-tests...')
+ testdir = teuthology.get_testdir(ctx)
for client, properties in config['clients'].iteritems():
s3tests_conf = config['s3tests_conf'][client]
if properties is not None and 'rgw_server' in properties:
remote.run(
args=[
'cd',
- '/tmp/cephtest/s3-tests',
+ '{tdir}/s3-tests'.format(tdir=testdir),
run.Raw('&&'),
'./bootstrap',
],
yaml.safe_dump(conf, conf_fp, default_flow_style=False)
teuthology.write_file(
remote=remote,
- path='/tmp/cephtest/archive/s3roundtrip.{client}.config.yaml'.format(client=client),
+ path='{tdir}/archive/s3roundtrip.{client}.config.yaml'.format(tdir=testdir, client=client),
data=conf_fp.getvalue(),
)
yield
@contextlib.contextmanager
def run_tests(ctx, config):
assert isinstance(config, dict)
+ testdir = teuthology.get_testdir(ctx)
for client, client_config in config.iteritems():
(remote,) = ctx.cluster.only(client).remotes.keys()
- conf = teuthology.get_file(remote, '/tmp/cephtest/archive/s3roundtrip.{client}.config.yaml'.format(client=client))
+ conf = teuthology.get_file(remote, '{tdir}/archive/s3roundtrip.{client}.config.yaml'.format(tdir=testdir, client=client))
args = [
- '/tmp/cephtest/s3-tests/virtualenv/bin/s3tests-test-roundtrip',
+ '{tdir}/s3-tests/virtualenv/bin/s3tests-test-roundtrip'.format(tdir=testdir),
]
if client_config is not None and 'extra_args' in client_config:
args.extend(client_config['extra_args'])
def do_download(ctx, config):
assert isinstance(config, dict)
log.info('Downloading s3-tests...')
+ testdir = teuthology.get_testdir(ctx)
for (client, cconf) in config.items():
branch = cconf.get('branch', 'master')
sha1 = cconf.get('sha1')
'-b', branch,
# 'https://github.com/ceph/s3-tests.git',
'git://ceph.com/git/s3-tests.git',
- '/tmp/cephtest/s3-tests',
+ '{tdir}/s3-tests'.format(tdir=testdir),
],
)
if sha1 is not None:
ctx.cluster.only(client).run(
args=[
- 'cd', '/tmp/cephtest/s3-tests',
+ 'cd', '{tdir}/s3-tests'.format(tdir=testdir),
run.Raw('&&'),
'git', 'reset', '--hard', sha1,
],
args=[
'rm',
'-rf',
- '/tmp/cephtest/s3-tests',
+ '{tdir}/s3-tests'.format(tdir=testdir),
],
)
def do_create_users(ctx, config):
assert isinstance(config, dict)
log.info('Creating rgw users...')
+ testdir = teuthology.get_testdir(ctx)
for client in config['clients']:
s3tests_conf = config['s3tests_conf'][client]
s3tests_conf.setdefault('fixtures', {})
_config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
ctx.cluster.only(client).run(
args=[
- 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/radosgw-admin',
- '-c', '/tmp/cephtest/ceph.conf',
+ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/radosgw-admin'.format(tdir=testdir),
+ '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
'user', 'create',
'--uid', s3tests_conf[section]['user_id'],
'--display-name', s3tests_conf[section]['display_name'],
def do_configure(ctx, config):
assert isinstance(config, dict)
log.info('Configuring s3-tests...')
+ testdir = teuthology.get_testdir(ctx)
for client, properties in config['clients'].iteritems():
s3tests_conf = config['s3tests_conf'][client]
if properties is not None and 'rgw_server' in properties:
remote.run(
args=[
'cd',
- '/tmp/cephtest/s3-tests',
+ '{tdir}/s3-tests'.format(tdir=testdir),
run.Raw('&&'),
'./bootstrap',
],
s3tests_conf.write(conf_fp)
teuthology.write_file(
remote=remote,
- path='/tmp/cephtest/archive/s3-tests.{client}.conf'.format(client=client),
+ path='{tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client),
data=conf_fp.getvalue(),
)
yield
def do_run_tests(ctx, config):
assert isinstance(config, dict)
+ testdir = teuthology.get_testdir(ctx)
for client, client_config in config.iteritems():
args = [
- 'S3TEST_CONF=/tmp/cephtest/archive/s3-tests.{client}.conf'.format(client=client),
- '/tmp/cephtest/s3-tests/virtualenv/bin/nosetests',
+ 'S3TEST_CONF={tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client),
+ '{tdir}/s3-tests/virtualenv/bin/nosetests'.format(tdir=testdir),
'-w',
- '/tmp/cephtest/s3-tests',
+ '{tdir}/s3-tests'.format(tdir=testdir),
'-v',
'-a', '!fails_on_rgw',
]
log.info('messing with PG %s on osd %d' % (victim, osd))
(osd_remote,) = ctx.cluster.only('osd.%d' % osd).remotes.iterkeys()
- data_path = os.path.join('/tmp/cephtest/data',
+ data_path = os.path.join('{tdir}/data'.format(tdir=teuthology.get_testdir(ctx)),
'osd.{id}.data'.format(id=osd),
'current',
'{pg}_head'.format(pg=victim)
@contextlib.contextmanager
def download(ctx, config):
+ testdir = teuthology.get_testdir(ctx)
assert isinstance(config, list)
log.info('Downloading swift...')
for client in config:
args=[
'git', 'clone',
'git://ceph.com/git/swift.git',
- '/tmp/cephtest/swift',
+ '{tdir}/swift'.format(tdir=testdir),
],
)
try:
args=[
'rm',
'-rf',
- '/tmp/cephtest/swift',
+ '{tdir}/swift'.format(tdir=testdir),
],
)
def create_users(ctx, config):
assert isinstance(config, dict)
log.info('Creating rgw users...')
+ testdir = teuthology.get_testdir(ctx)
for client in config['clients']:
testswift_conf = config['testswift_conf'][client]
for user, suffix in [('foo', ''), ('bar', '2')]:
_config_user(testswift_conf, '{user}.{client}'.format(user=user, client=client), user, suffix)
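+ # create the swift subuser for this test account via radosgw-admin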
ctx.cluster.only(client).run(
args=[
- 'LD_LIBRARY_PATH=/tmp/cephtest/binary/usr/local/lib',
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
- '/tmp/cephtest/binary/usr/local/bin/radosgw-admin',
- '-c', '/tmp/cephtest/ceph.conf',
+ 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/radosgw-admin'.format(tdir=testdir),
+ '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
'user', 'create',
'--subuser', '{account}:{user}'.format(account=testswift_conf['func_test']['account{s}'.format(s=suffix)], user=user),
'--display-name', testswift_conf['func_test']['display_name{s}'.format(s=suffix)],
def configure(ctx, config):
assert isinstance(config, dict)
log.info('Configuring testswift...')
+ testdir = teuthology.get_testdir(ctx)
for client, properties in config['clients'].iteritems():
print 'client={c}'.format(c=client)
print 'config={c}'.format(c=config)
remote.run(
args=[
'cd',
- '/tmp/cephtest/swift',
+ '{tdir}/swift'.format(tdir=testdir),
run.Raw('&&'),
'./bootstrap',
],
testswift_conf.write(conf_fp)
teuthology.write_file(
remote=remote,
- path='/tmp/cephtest/archive/testswift.{client}.conf'.format(client=client),
+ path='{tdir}/archive/testswift.{client}.conf'.format(tdir=testdir, client=client),
data=conf_fp.getvalue(),
)
yield
@contextlib.contextmanager
def run_tests(ctx, config):
assert isinstance(config, dict)
+ testdir = teuthology.get_testdir(ctx)
for client, client_config in config.iteritems():
args = [
- 'SWIFT_TEST_CONFIG_FILE=/tmp/cephtest/archive/testswift.{client}.conf'.format(client=client),
- '/tmp/cephtest/swift/virtualenv/bin/nosetests',
+ 'SWIFT_TEST_CONFIG_FILE={tdir}/archive/testswift.{client}.conf'.format(tdir=testdir, client=client),
+ '{tdir}/swift/virtualenv/bin/nosetests'.format(tdir=testdir),
'-w',
- '/tmp/cephtest/swift/test/functional',
+ '{tdir}/swift/test/functional'.format(tdir=testdir),
'-v',
'-a', '!fails_on_rgw',
]
import proc_thrasher
from ..orchestra import run
+from teuthology import misc as teuthology
log = logging.getLogger(__name__)
testwatch = {}
remotes = []
+
+ testdir = teuthology.get_testdir(ctx)
+
for role in config.get('clients', ['client.0']):
assert isinstance(role, basestring)
PREFIX = 'client.'
remotes.append(remote)
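+ # run multi_stress_watch under daemon-helper, preloading the librados built in the test dir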
args =['CEPH_CLIENT_ID={id_}'.format(id_=id_),
- 'CEPH_CONF=/tmp/cephtest/ceph.conf',
+ 'CEPH_CONF={tdir}/ceph.conf'.format(tdir=testdir),
'CEPH_ARGS="{flags}"'.format(flags=config.get('flags', '')),
- 'LD_PRELOAD=/tmp/cephtest/binary/usr/local/lib/librados.so.2',
- '/tmp/cephtest/daemon-helper', 'kill',
- '/tmp/cephtest/binary/usr/local/bin/multi_stress_watch foo foo'
+ 'LD_PRELOAD={tdir}/binary/usr/local/lib/librados.so.2'.format(tdir=testdir),
+ '{tdir}/daemon-helper'.format(tdir=testdir), 'kill',
+ '{tdir}/binary/usr/local/bin/multi_stress_watch foo foo'.format(tdir=testdir)
]
log.info("args are %s" % (args,))
log.debug("getting remote for {id} role {role_}".format(id=id_, role_=role))
(remote,) = ctx.cluster.only(role).remotes.iterkeys()
dir_owner = remote.shortname.split('@', 1)[0]
- mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
+ mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
# if neither kclient nor ceph-fuse are required for a workunit,
# mnt may not exist. Stat and create the directory if it doesn't.
try:
p.spawn(_run_tests, ctx, refspec, role, [unit], env, subdir)
def _run_tests(ctx, refspec, role, tests, env, subdir=None):
+ testdir = teuthology.get_testdir(ctx)
assert isinstance(role, basestring)
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
(remote,) = ctx.cluster.only(role).remotes.iterkeys()
- mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
# subdir so we can remove and recreate this a lot without sudo
if subdir is None:
scratch_tmp = os.path.join(mnt, 'client.{id}'.format(id=id_), 'tmp')
else:
scratch_tmp = os.path.join(mnt, subdir)
- srcdir = '/tmp/cephtest/workunit.{role}'.format(role=role)
- secretfile = '/tmp/cephtest/data/{role}.secret'.format(role=role)
- teuthology.write_secret_file(remote, role, secretfile)
+ srcdir = '{tdir}/workunit.{role}'.format(tdir=testdir, role=role)
+ secretfile = '{tdir}/data/{role}.secret'.format(tdir=testdir, role=role)
+ teuthology.write_secret_file(ctx, remote, role, secretfile)
ceph_ref = ctx.summary.get('ceph-sha1', 'master')
'if', 'test', '-e', 'Makefile', run.Raw(';'), 'then', 'make', run.Raw(';'), 'fi',
run.Raw('&&'),
'find', '-executable', '-type', 'f', '-printf', r'%P\0'.format(srcdir=srcdir),
- run.Raw('>/tmp/cephtest/workunits.list'),
+ run.Raw('>{tdir}/workunits.list'.format(tdir=testdir)),
],
)
- workunits = sorted(teuthology.get_file(remote, '/tmp/cephtest/workunits.list').split('\0'))
+ workunits = sorted(teuthology.get_file(
+ remote,
+ '{tdir}/workunits.list'.format(tdir=testdir)).split('\0'))
assert workunits
try:
'cd', '--', scratch_tmp,
run.Raw('&&'),
run.Raw('CEPH_REF={ref}'.format(ref=ceph_ref)),
- run.Raw('PATH="$PATH:/tmp/cephtest/binary/usr/local/bin"'),
- run.Raw('LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/tmp/cephtest/binary/usr/local/lib"'),
- run.Raw('CEPH_JAVA_PATH="/tmp/cephtest/binary/usr/local/share/java"'),
- run.Raw('CEPH_CONF="/tmp/cephtest/ceph.conf"'),
+ run.Raw('PATH="$PATH:{tdir}/binary/usr/local/bin"'.format(tdir=testdir)),
+ run.Raw('LD_LIBRARY_PATH="$LD_LIBRARY_PATH:{tdir}/binary/usr/local/lib"'.format(tdir=testdir)),
+ run.Raw('CEPH_JAVA_PATH="{tdir}/binary/usr/local/share/java"'.format(tdir=testdir)),
+ run.Raw('CEPH_CONF="{tdir}/ceph.conf"'.format(tdir=testdir)),
run.Raw('CEPH_SECRET_FILE="{file}"'.format(file=secretfile)),
run.Raw('CEPH_ID="{id}"'.format(id=id_)),
- run.Raw('PYTHONPATH="$PYTHONPATH:/tmp/cephtest/binary/usr/local/lib/python2.7/dist-packages:/tmp/cephtest/binary/usr/local/lib/python2.6/dist-packages"'),
+ run.Raw('PYTHONPATH="$PYTHONPATH:{tdir}/binary/usr/local/lib/python2.7/dist-packages:{tdir}/binary/usr/local/lib/python2.6/dist-packages"'.format(tdir=testdir)),
]
if env is not None:
for var, val in env.iteritems():
env_arg = '{var}={val}'.format(var=var, val=quoted_val)
args.append(run.Raw(env_arg))
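+ # finally the coredump and coverage wrappers, then the workunit script itself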
args.extend([
- '/tmp/cephtest/enable-coredump',
- '/tmp/cephtest/binary/usr/local/bin/ceph-coverage',
- '/tmp/cephtest/archive/coverage',
+ '{tdir}/enable-coredump'.format(tdir=testdir),
+ '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ '{tdir}/archive/coverage'.format(tdir=testdir),
'{srcdir}/{workunit}'.format(
srcdir=srcdir,
workunit=workunit,
remote.run(
logger=log.getChild(role),
args=[
- 'rm', '-rf', '--', '/tmp/cephtest/workunits.list', srcdir,
+ 'rm', '-rf', '--', '{tdir}/workunits.list'.format(tdir=testdir), srcdir,
],
)