[global]
- keyring = {testdir}/ceph.keyring
log file = {testdir}/archive/log/$name.log
chdir = ""
pid file = $name.pid
osd journal = {testdir}/data/osd.$id.journal
osd journal size = 100
keyring = {testdir}/data/osd.$id.keyring
- osd class dir = {testdir}/binary/usr/local/lib/rados-classes
+ osd class dir = /usr/lib/rados-classes
osd scrub load threshold = 5.0
osd scrub max interval = 600
mds debug frag = true
[client]
- keyring = {testdir}/data/client.$id.keyring
rgw socket path = {testdir}/apache/tmp/fastcgi_sock/rgw_sock
rgw cache enabled = true
rgw enable ops log = true
rgw enable usage log = true
-
-[client.admin]
- keyring = {testdir}/ceph.keyring
testdir = get_testdir(ctx)
args = [
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/monmaptool'.format(tdir=testdir),
+ 'monmaptool',
'--create',
'--clobber',
]
stdin=data,
)
-def sudo_write_file(remote, path, data):
+def sudo_write_file(remote, path, data, perms=None):
+ permargs = []
+ if perms:
+ permargs=[run.Raw('&&'), 'sudo', 'chmod', perms, path]
remote.run(
args=[
'sudo',
'-c',
'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
path,
- ],
+ ] + permargs,
stdin=data,
)
-def get_file(remote, path):
+def get_file(remote, path, sudo=False):
"""
Read a file from remote host into memory.
"""
r = remote.run(
args=[
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph'.format(tdir=testdir),
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+ 'ceph',
'health',
'--concise',
],
r = remote.run(
args=[
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph'.format(tdir=testdir),
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+ 'ceph',
'--concise',
'osd', 'dump', '--format=json'
],
remote.run(
args=[
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-authtool'.format(tdir=testdir),
+ 'ceph-authtool',
'--name={role}'.format(role=role),
'--print-key',
'{tdir}/data/{role}.keyring'.format(tdir=testdir, role=role),
log.info('Waiting for %s to restart syslog...', name)
proc.exitstatus.get()
+def remove_installed_packages(ctx, log):
+ from teuthology.task import ceph as ceph_task
+
+ debs = ['ceph', 'ceph-test', 'ceph-fuse', 'python-ceph']
+ ceph_task.remove_debs(ctx, debs)
+ ceph_task.remove_sources(ctx)
+
def remove_testing_tree(ctx, log):
from teuthology.misc import get_testdir_base
+ from .orchestra import run
nodes = {}
for remote in ctx.cluster.remotes.iterkeys():
proc = remote.run(
args=[
- 'sudo', 'rm', '-rf',
- get_testdir_base(ctx),
+ 'sudo', 'rm', '-rf', get_testdir_base(ctx),
+ # just for old time's sake
+ run.Raw('&&'),
+ 'sudo', 'rm', '-rf', '/tmp/cephtest',
+ run.Raw('&&'),
+ 'sudo', 'rm', '-rf', '/etc/ceph',
],
wait=False,
)
log.info('Clearing filesystem of test data...')
remove_testing_tree(ctx, log)
log.info('Filesystem Cleared.')
+ remove_installed_packages(ctx, log)
+ log.info('Installed packages removed.')
class CommandFailedError(Exception):
- def __init__(self, command, exitstatus):
+ def __init__(self, command, exitstatus, node=None):
self.command = command
self.exitstatus = exitstatus
+ self.node = node
def __str__(self):
- return "Command failed with status {status}: {command!r}".format(
+ return "Command failed on {node} with status {status}: {command!r}".format(
+ node=self.node,
status=self.exitstatus,
command=self.command,
)
# signal; sadly SSH does not tell us which signal
raise CommandCrashedError(command=r.command)
if status != 0:
- raise CommandFailedError(command=r.command, exitstatus=status)
+ (host,port) = client.get_transport().getpeername()
+ raise CommandFailedError(command=r.command, exitstatus=status, node=host)
return status
if wait:
testdir = teuthology.get_testdir(ctx)
remote.run(
args=[
- 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph'.format(tdir=testdir),
- '-k', '{tdir}/ceph.keyring'.format(tdir=testdir),
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+ 'ceph',
'--admin-daemon', socket_path,
command,
] + args,
from teuthology import misc as teuthology
from ..orchestra import run
+from teuthology.task import ceph as ceph_task
log = logging.getLogger(__name__)
clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))
for id_, remote in clients:
+ # install ceph fuse package
+ ceph_task.install_debs(ctx, ['ceph-fuse'], config.get('branch', 'master'))
+
mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
log.info('Mounting ceph-fuse client.{id} at {remote} {mnt}...'.format(
id=id_, remote=remote,mnt=mnt))
run_cmd=[
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'{tdir}/daemon-helper'.format(tdir=testdir),
daemon_signal,
]
run_cmd_tail=[
- '{tdir}/binary/usr/local/bin/ceph-fuse'.format(tdir=testdir),
+ 'ceph-fuse',
'-f',
'--name', 'client.{id}'.format(id=id_),
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
# TODO ceph-fuse doesn't understand dash dash '--',
mnt,
]
mnt,
],
)
+
+ # remove ceph-fuse package
+ ceph_task.remove_debs(ctx, ['ceph-fuse'])
import argparse
import contextlib
-import errno
import logging
import os
-import shutil
-import subprocess
import sys
-import tempfile
from teuthology import misc as teuthology
from teuthology import contextutil
),
)
-def _download_binaries(ctx, remote, ceph_bindir_url):
- testdir = teuthology.get_testdir(ctx)
- remote.run(
- args=[
- 'install', '-d', '-m0755', '--', '{tdir}/binary'.format(tdir=testdir),
- run.Raw('&&'),
- 'uname', '-m',
- run.Raw('|'),
- 'sed', '-e', 's/^/ceph./; s/$/.tgz/',
- run.Raw('|'),
- 'wget',
- '-nv',
- '-O-',
- '--base={url}'.format(url=ceph_bindir_url),
- # need to use --input-file to make wget respect --base
- '--input-file=-',
- run.Raw('|'),
- 'tar', '-xzf', '-', '-C', '{tdir}/binary'.format(tdir=testdir),
- ],
+def _update_deb_package_list_and_install(remote, debs, branch):
+ """
+ updates the package list so that apt-get can
+ download the appropriate packages
+ """
+
+ # run gpg just to initialize
+ r = remote.run(
+ args=[
+ 'gpg', '-K',
+ ],
+ )
+
+ # check for ceph release key
+ r = remote.run(
+ args=[
+ 'apt-key', 'list', run.Raw('|'), 'grep', 'Ceph',
+ ],
+ stdout=StringIO(),
)
+ if r.stdout.getvalue().find('Ceph Release Key') == -1:
+ # if it doesn't exist, add it
+ remote.run(
+ args=[
+ 'wget', '-q', '-O-',
+ 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc',
+ run.Raw('|'),
+ 'sudo', 'apt-key', 'add', '-',
+ ],
+ stdout=StringIO(),
+ )
-@contextlib.contextmanager
-def binaries(ctx, config):
- path = config.get('path')
- tmpdir = None
+ # get ubuntu release (precise, quantal, etc.)
+ r = remote.run(
+ args=['lsb_release', '-sc'],
+ stdout=StringIO(),
+ )
- testdir = teuthology.get_testdir(ctx)
+ out = r.stdout.getvalue().strip()
+ log.info("release type:" + out)
- if path is None:
- # fetch from gitbuilder gitbuilder
- log.info('Fetching and unpacking ceph binaries from gitbuilder...')
- sha1, ceph_bindir_url = teuthology.get_ceph_binary_url(
- package='ceph',
- branch=config.get('branch'),
- tag=config.get('tag'),
- sha1=config.get('sha1'),
- flavor=config.get('flavor'),
- format=config.get('format'),
- dist=config.get('dist'),
- arch=config.get('arch'),
+ remote.run(
+ args=[
+ 'echo', 'deb',
+ 'http://gitbuilder.ceph.com/ceph-deb-' + out + '-x86_64-basic/ref/' + branch,
+ out, 'main', run.Raw('|'),
+ 'sudo', 'tee', '/etc/apt/sources.list.d/ceph.list'
+ ],
+ stdout=StringIO(),
+ )
+ remote.run(
+ args=[
+ 'sudo', 'apt-get', 'update', run.Raw('&&'),
+ 'sudo', 'apt-get', '-y', '--force-yes',
+ 'install',
+ ] + debs,
+ stdout=StringIO(),
)
- ctx.summary['ceph-sha1'] = sha1
- if ctx.archive is not None:
- with file(os.path.join(ctx.archive, 'ceph-sha1'), 'w') as f:
- f.write(sha1 + '\n')
- with parallel() as p:
- for remote in ctx.cluster.remotes.iterkeys():
- p.spawn(_download_binaries, ctx, remote, ceph_bindir_url)
- else:
- with tempfile.TemporaryFile(prefix='teuthology-tarball-', suffix='.tgz') as tar_fp:
- tmpdir = tempfile.mkdtemp(prefix='teuthology-tarball-')
- try:
- log.info('Installing %s to %s...' % (path, tmpdir))
- subprocess.check_call(
- args=[
- 'make',
- 'install',
- 'DESTDIR={tmpdir}'.format(tmpdir=tmpdir),
- ],
- cwd=path,
- )
- try:
- os.symlink('.', os.path.join(tmpdir, 'usr', 'local'))
- except OSError as e:
- if e.errno == errno.EEXIST:
- pass
- else:
- raise
- log.info('Building ceph binary tarball from %s...', tmpdir)
- subprocess.check_call(
- args=[
- 'tar',
- 'cz',
- '.',
- ],
- cwd=tmpdir,
- stdout=tar_fp,
- )
- finally:
- shutil.rmtree(tmpdir, ignore_errors=True)
- log.info('Pushing tarball...')
- tar_fp.seek(0)
- writes = ctx.cluster.run(
+def install_debs(ctx, debs, branch):
+ """
+ installs Debian packages.
+ The following items were added to the config yaml file:
+
+ install-deb: true
+ deb-branch: argonaut
+
+ It is probably possible to get the deb-branch value from somewhere else,
+ it was added for expediency.
+ """
+ log.info("Installing ceph debian packages: {debs}".format(debs=', '.join(debs)))
+ with parallel() as p:
+ for remote in ctx.cluster.remotes.iterkeys():
+ p.spawn(_update_deb_package_list_and_install, remote, debs, branch)
+
+def _remove_deb(remote, debs):
+ for d in debs:
+ r = remote.run(
args=[
- 'install', '-d', '-m0755', '--', '{tdir}/binary'.format(tdir=testdir),
- run.Raw('&&'),
- 'tar', '-xzf', '-', '-C', '{tdir}/binary'.format(tdir=testdir)
+ 'sudo', 'dpkg', '-l', d,
],
- stdin=run.PIPE,
+ stdout=StringIO(),
wait=False,
)
- teuthology.feed_many_stdins_and_close(tar_fp, writes)
- run.wait(writes)
+ if r.exitstatus.get() == 0:
+ remote.run(
+ args=[
+ 'sudo', 'apt-get', '-y', '--force-yes',
+ 'purge',
+ ],
+ stdout=StringIO(),
+ )
+ remote.run(
+ args=[
+ 'sudo', 'apt-get', '-y', '--force-yes',
+ 'autoremove',
+ ],
+ stdout=StringIO(),
+ )
+
+def remove_debs(ctx, debs):
+ log.info("Removing/purging debian packages {debs}".format(debs=', '.join(debs)))
+ with parallel() as p:
+ for remote in ctx.cluster.remotes.iterkeys():
+ p.spawn(_remove_deb, remote, debs)
+
+def _remove_sources_list(remote):
+ remote.run(
+ args=[
+ 'sudo', 'rm', '-f', '/etc/apt/sources.list.d/ceph.list', run.Raw('&&'),
+ 'sudo', 'apt-get', 'update',
+ ],
+ stdout=StringIO(),
+ )
+
+def remove_sources(ctx):
+ log.info("Removing ceph sources list from apt")
+ with parallel() as p:
+ for remote in ctx.cluster.remotes.iterkeys():
+ p.spawn(_remove_sources_list, remote)
+
+
+@contextlib.contextmanager
+def binaries(ctx, config):
+ debs = ['ceph', 'python-ceph', 'ceph-test']
+ branch = config.get('branch', 'master')
+ log.info('branch: {b}'.format(b=branch))
+ install_debs(ctx, debs, branch)
try:
yield
finally:
- log.info('Removing ceph binaries...')
- run.wait(
- ctx.cluster.run(
- args=[
- 'rm',
- '-rf',
- '--',
- '{tdir}/binary'.format(tdir=testdir),
- ],
- wait=False,
- ),
- )
-
+ remove_debs(ctx, debs)
+ remove_sources(ctx)
def assign_devs(roles, devs):
return dict(zip(roles, devs))
ctx.ceph = argparse.Namespace()
ctx.ceph.conf = conf
+ conf_path = config.get('conf_path', '/etc/ceph/ceph.conf')
+ keyring_path = config.get('keyring_path', '/etc/ceph/ceph.keyring')
+
log.info('Writing configs...')
conf_fp = StringIO()
conf.write(conf_fp)
conf_fp.seek(0)
writes = ctx.cluster.run(
args=[
- 'python',
+ 'sudo', 'mkdir', '-p', '/etc/ceph', run.Raw('&&'),
+ 'sudo', 'chmod', '0755', '/etc/ceph', run.Raw('&&'),
+ 'sudo', 'python',
'-c',
'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
- '{tdir}/ceph.conf'.format(tdir=testdir),
+ conf_path,
+ run.Raw('&&'),
+ 'sudo', 'chmod', '0644', conf_path,
],
stdin=run.PIPE,
wait=False,
log.info('Setting up %s...' % firstmon)
ctx.cluster.only(firstmon).run(
args=[
+ 'sudo',
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
coverage_dir,
- '{tdir}/binary/usr/local/bin/ceph-authtool'.format(tdir=testdir),
+ 'ceph-authtool',
'--create-keyring',
- '{tdir}/ceph.keyring'.format(tdir=testdir),
+ keyring_path,
],
)
ctx.cluster.only(firstmon).run(
args=[
+ 'sudo',
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
coverage_dir,
- '{tdir}/binary/usr/local/bin/ceph-authtool'.format(tdir=testdir),
+ 'ceph-authtool',
'--gen-key',
'--name=mon.',
- '{tdir}/ceph.keyring'.format(tdir=testdir),
+ keyring_path,
+ ],
+ )
+ ctx.cluster.only(firstmon).run(
+ args=[
+ 'sudo',
+ 'chmod',
+ '0644',
+ keyring_path,
],
)
(mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
log.info('Creating admin key on %s...' % firstmon)
ctx.cluster.only(firstmon).run(
args=[
+ 'sudo',
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
coverage_dir,
- '{tdir}/binary/usr/local/bin/ceph-authtool'.format(tdir=testdir),
+ 'ceph-authtool',
'--gen-key',
'--name=client.admin',
'--set-uid=0',
'--cap', 'mon', 'allow *',
'--cap', 'osd', 'allow *',
'--cap', 'mds', 'allow',
- '{tdir}/ceph.keyring'.format(tdir=testdir),
+ keyring_path,
],
)
log.info('Copying monmap to all nodes...')
keyring = teuthology.get_file(
remote=mon0_remote,
- path='{tdir}/ceph.keyring'.format(tdir=testdir),
+ path=keyring_path,
)
monmap = teuthology.get_file(
remote=mon0_remote,
for rem in ctx.cluster.remotes.iterkeys():
# copy mon key and initial monmap
log.info('Sending monmap to node {remote}'.format(remote=rem))
- teuthology.write_file(
+ teuthology.sudo_write_file(
remote=rem,
- path='{tdir}/ceph.keyring'.format(tdir=testdir),
+ path=keyring_path,
data=keyring,
+ perms='0644'
)
teuthology.write_file(
remote=rem,
mons.run(
args=[
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
coverage_dir,
- '{tdir}/binary/usr/local/bin/osdmaptool'.format(tdir=testdir),
- '-c',
- '{tdir}/ceph.conf'.format(tdir=testdir),
+ 'osdmaptool',
+ '-c', conf_path,
'--clobber',
'--createsimple', '{num:d}'.format(
num=teuthology.num_instances_of_type(ctx.cluster, 'osd'),
remote.run(
args=[
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
coverage_dir,
- '{tdir}/binary/usr/local/bin/ceph-authtool'.format(tdir=testdir),
+ 'ceph-authtool',
'--create-keyring',
'--gen-key',
'--name=osd.{id}'.format(id=id_),
remote.run(
args=[
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
coverage_dir,
- '{tdir}/binary/usr/local/bin/ceph-authtool'.format(tdir=testdir),
+ 'ceph-authtool',
'--create-keyring',
'--gen-key',
'--name=mds.{id}'.format(id=id_),
remote.run(
args=[
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
coverage_dir,
- '{tdir}/binary/usr/local/bin/ceph-authtool'.format(tdir=testdir),
+ 'ceph-authtool',
'--create-keyring',
'--gen-key',
# TODO this --name= is not really obeyed, all unknown "types" are munged to "client"
log.info('Adding keys to all mons...')
writes = mons.run(
args=[
- 'cat',
- run.Raw('>>'),
- '{tdir}/ceph.keyring'.format(tdir=testdir),
+ 'sudo', 'tee', '-a',
+ keyring_path,
],
stdin=run.PIPE,
wait=False,
+ stdout=StringIO(),
)
keys_fp.seek(0)
teuthology.feed_many_stdins_and_close(keys_fp, writes)
run.wait(
mons.run(
args=[
+ 'sudo',
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
coverage_dir,
- '{tdir}/binary/usr/local/bin/ceph-authtool'.format(tdir=testdir),
- '{tdir}/ceph.keyring'.format(tdir=testdir),
+ 'ceph-authtool',
+ keyring_path,
'--name={type}.{id}'.format(
type=type_,
id=id_,
remote.run(
args=[
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
coverage_dir,
- '{tdir}/binary/usr/local/bin/ceph-mon'.format(tdir=testdir),
+ 'ceph-mon',
'--mkfs',
'-i', id_,
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
'--monmap={tdir}/monmap'.format(tdir=testdir),
'--osdmap={tdir}/osdmap'.format(tdir=testdir),
- '--keyring={tdir}/ceph.keyring'.format(tdir=testdir),
+ '--keyring={kpath}'.format(kpath=keyring_path),
],
)
args=[
'sudo',
'apt-get', 'install', '-y', package
- ]
+ ],
+ stdout=StringIO(),
)
remote.run(args= ['yes', run.Raw('|')] + ['sudo'] + mkfs + [dev])
log.info('mount %s on %s -o %s' % (dev, remote,
args=[
'MALLOC_CHECK_=3',
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
coverage_dir,
- '{tdir}/binary/usr/local/bin/ceph-osd'.format(tdir=testdir),
+ 'ceph-osd',
'--mkfs',
'-i', id_,
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
'--monmap', '{tdir}/monmap'.format(tdir=testdir),
],
)
run.wait(
ctx.cluster.run(
args=[
+ 'sudo',
'rm',
'-rf',
'--',
- '{tdir}/ceph.conf'.format(tdir=testdir),
- '{tdir}/ceph.keyring'.format(tdir=testdir),
+ conf_path,
+ keyring_path,
'{tdir}/data'.format(tdir=testdir),
'{tdir}/monmap'.format(tdir=testdir),
run.Raw('{tdir}/asok.*'.format(tdir=testdir))
run_cmd = [
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
coverage_dir,
'{tdir}/daemon-helper'.format(tdir=testdir),
daemon_signal,
]
run_cmd_tail = [
- '%s/binary/usr/local/bin/ceph-%s' % (testdir, type_),
+ 'ceph-%s' % (type_),
'-f',
- '-i', id_,
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir)]
+ '-i', id_]
if config.get('valgrind') is not None:
valgrind_args = None
if type_ == 'mds':
firstmon = teuthology.get_first_mon(ctx, config)
(mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
+
mon0_remote.run(args=[
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
coverage_dir,
- '{tdir}/binary/usr/local/bin/ceph'.format(tdir=testdir),
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+ 'ceph',
'mds', 'set_max_mds', str(num_active)])
try:
lambda: ceph_log(ctx=ctx, config=None),
lambda: ship_utilities(ctx=ctx, config=None),
lambda: binaries(ctx=ctx, config=dict(
- branch=config.get('branch'),
+ branch=config.get('branch', 'master'),
tag=config.get('tag'),
sha1=config.get('sha1'),
- path=config.get('path'),
flavor=flavor,
dist=config.get('dist', dist),
format=format,
def raw_cluster_cmd(self, *args):
testdir = teuthology.get_testdir(self.ctx)
ceph_args = [
- 'LD_LIBRARY_PRELOAD={tdir}/binary/usr/local/lib'.format(tdir=testdir),
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph'.format(tdir=testdir),
- '-k', '{tdir}/ceph.keyring'.format(tdir=testdir),
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+ 'ceph',
'--concise',
]
ceph_args.extend(args)
def do_rados(self, remote, cmd):
testdir = teuthology.get_testdir(self.ctx)
pre = [
- 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/rados'.format(tdir=testdir),
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+ 'rados',
];
pre.extend(cmd)
proc = remote.run(
remote = _remote
assert remote is not None
args=[
- 'LD_LIBRARY_PRELOAD={tdir}/binary/usr/local/lib'.format(tdir=testdir),
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph'.format(tdir=testdir),
- '-k', '{tdir}/ceph.keyring'.format(tdir=testdir),
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+ 'ceph',
'--admin-daemon',
"%s/asok.osd.%s"%(testdir,str(osdnum),)]
args.extend(command)
remote.run(
args=[
run.Raw('CEPH_REF={ref}'.format(ref=ceph_ref)),
- run.Raw('PATH="$PATH:{tdir}/binary/usr/local/bin"'.format(tdir=testdir)),
- run.Raw('LD_LIBRARY_PATH="$LD_LIBRARY_PATH:{tdir}/binary/usr/local/lib"'.format(tdir=testdir)),
- run.Raw('CEPH_CONF="{tdir}/ceph.conf"'.format(tdir=testdir)),
run.Raw('CEPH_ID="{id}"'.format(id=id_)),
run.Raw('PYTHONPATH="$PYTHONPATH:{tdir}/binary/usr/local/lib/python2.7/dist-packages:{tdir}/binary/usr/local/lib/python2.6/dist-packages"'.format(tdir=testdir)),
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'{tdir}/virtualenv/bin/cram'.format(tdir=testdir),
'-v', '--',
def rados(testdir, remote, cmd, wait=True):
log.info("rados %s" % ' '.join(cmd))
pre = [
- 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/rados'.format(tdir=testdir),
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+ 'rados',
];
pre.extend(cmd)
proc = remote.run(
args=[
'cd', dir,
run.Raw('&&'),
- run.Raw('PATH="{tdir}/binary/usr/local/bin:$PATH"'.format(tdir=testdir)),
'./run_seed_to_range.sh', seed, '50', '300',
],
wait=False,
for remote, roles_for_host in hadoopNodes.remotes.iteritems():
teuthology.write_file(remote, hadoopEnvFile,
'''export JAVA_HOME=/usr/lib/jvm/default-java
-export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:{tdir}/binary/usr/local/lib:/usr/lib
export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:{tdir}/binary/usr/local/lib/libcephfs.jar:{tdir}/hadoop/build/hadoop-core*.jar
export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
<name>fs.default.name</name>
<value>{default_fs}</value>
</property>
- <property>
- <name>ceph.conf.file</name>
- <value>{tdir}/ceph.conf</value>
- </property>
</configuration>
'''.format(tdir=teuthology.get_testdir(ctx), default_fs=default_fs_string))
args=[
'sudo',
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/sbin/mount.ceph'.format(tdir=testdir),
+ '/sbin/mount.ceph',
'{mons}:/'.format(mons=','.join(mons)),
mnt,
'-v',
proc = client_remote.run(
args=[
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'{tdir}/daemon-helper'.format(tdir=testdir),
'kill',
testdir = teuthology.get_testdir(ctx)
log.info("rados %s" % ' '.join(cmd))
pre = [
- 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/rados'.format(tdir=testdir),
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+ 'rados',
];
pre.extend(cmd)
proc = remote.run(
log.info('creating pool{num} on {role}'.format(num=poolnum, role=role_))
proc = remote.run(
args=[
- 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/rados'.format(tdir=testdir),
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
- '-k', '{tdir}/data/{role}.keyring'.format(tdir=testdir, role=role_),
+ 'rados',
'--name', role_,
'mkpool', 'pool{num}'.format(num=poolnum), '-1',
run.Raw('&&'),
- 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/rados'.format(tdir=testdir),
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
- '-k', '{tdir}/data/{role}.keyring'.format(tdir=testdir, role=role_),
+ 'rados',
'--name', role_,
'--pool', 'pool{num}'.format(num=poolnum),
'bench', '0', 'write', '-t', '16', '--block-size', '1'
def rados(testdir, remote, cmd):
log.info("rados %s" % ' '.join(cmd))
pre = [
- 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/rados'.format(tdir=testdir),
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+ 'rados',
];
pre.extend(cmd)
proc = remote.run(
proc = remote.run(
args=[
"/bin/sh", "-c",
- " ".join(['CEPH_CONF={tdir}/ceph.conf',
- 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib',
- '{tdir}/enable-coredump',
- '{tdir}/binary/usr/local/bin/ceph-coverage',
+ " ".join(['{tdir}/enable-coredump',
+ 'ceph-coverage',
'{tdir}/archive/coverage',
- '{tdir}/binary/usr/local/bin/omapbench',
- '-k', '{tdir}/data/{role}.keyring'.format(role=role),
+ 'omapbench',
'--name', role[len(PREFIX):],
'-t', str(config.get('threads', 30)),
'-o', str(config.get('objects', 1000)),
log.info("rados %s" % ' '.join(cmd))
testdir = teuthology.get_testdir(ctx)
pre = [
- 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/rados'.format(tdir=testdir),
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+ 'rados',
];
pre.extend(cmd)
proc = remote.run(
def rados_start(testdir, remote, cmd):
log.info("rados %s" % ' '.join(cmd))
pre = [
- 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/rados'.format(tdir=testdir),
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+ 'rados',
];
pre.extend(cmd)
proc = remote.run(
testdir = teuthology.get_testdir(ctx)
log.info("rados %s" % ' '.join(cmd))
pre = [
- 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/rados'.format(tdir=testdir),
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+ 'rados',
];
pre.extend(cmd)
proc = remote.run(
base_file = '{tdir}/qemu/base.{client}.qcow2'.format(tdir=testdir, client=client)
args=[
- run.Raw('LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir)),
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'{tdir}/daemon-helper'.format(tdir=testdir),
'term',
for i in xrange(client_config.get('num_rbd', DEFAULT_NUM_RBD)):
args.extend([
'-drive',
- 'file=rbd:rbd/{img}:conf={conf}:id={id},format=rbd,if=virtio,cache={cachemode}'.format(
- conf='{tdir}/ceph.conf'.format(tdir=testdir),
+ 'file=rbd:rbd/{img}:id={id},format=rbd,if=virtio,cache={cachemode}'.format(
img='{client}.{num}'.format(client=client, num=i),
id=client[len('client.'):],
cachemode=cachemode,
op_weights = config.get('op_weights', {})
testdir = teuthology.get_testdir(ctx)
args = [
- 'CEPH_CONF={tdir}/ceph.conf'.format(tdir=testdir),
- 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph_test_rados'.format(tdir=testdir),
+ 'ceph_test_rados',
'--op', 'read', str(op_weights.get('read', 100)),
'--op', 'write', str(op_weights.get('write', 100)),
'--op', 'delete', str(op_weights.get('delete', 10)),
proc = remote.run(
args=[
"/bin/sh", "-c",
- " ".join(['LD_LIBRARY_PATH={tdir}/binary/usr/local/lib',
- '{tdir}/enable-coredump',
- '{tdir}/binary/usr/local/bin/ceph-coverage',
+ " ".join(['{tdir}/enable-coredump',
+ 'ceph-coverage',
'{tdir}/archive/coverage',
- '{tdir}/binary/usr/local/bin/rados',
- '-c', '{tdir}/ceph.conf',
- '-k', '{tdir}/data/{role}.keyring'.format(role=role),
+ 'rados',
'--name', role,
'mkpool', str(config.get('pool', 'data'))
]).format(tdir=testdir),
proc = remote.run(
args=[
"/bin/sh", "-c",
- " ".join(['LD_LIBRARY_PATH={tdir}/binary/usr/local/lib',
- '{tdir}/enable-coredump',
- '{tdir}/binary/usr/local/bin/ceph-coverage',
+ " ".join(['{tdir}/enable-coredump',
+ 'ceph-coverage',
'{tdir}/archive/coverage',
- '{tdir}/binary/usr/local/bin/rados',
- '-c', '{tdir}/ceph.conf',
- '-k', '{tdir}/data/%s.keyring' % role,
+ 'rados',
'--name', role,
'-p' , str(config.get('pool', 'data')),
'bench', str(config.get('time', 360)), 'write',
proc = remote.run(
args=[
"/bin/sh", "-c",
- " ".join(['LD_LIBRARY_PATH={tdir}/binary/usr/local/lib',
- '{tdir}/enable-coredump',
- '{tdir}/binary/usr/local/bin/ceph-coverage',
+ " ".join(['{tdir}/enable-coredump',
+ 'ceph-coverage',
'{tdir}/archive/coverage',
- '{tdir}/binary/usr/local/bin/rados',
- '-c', '{tdir}/ceph.conf',
- '-k', '{tdir}/data/{role}.keyring'.format(role=role),
+ 'rados',
'--name', role,
'rmpool', str(config.get('pool', 'data'))
]).format(tdir=testdir),
log.info('radosgw-admin: %s' % cmd)
testdir = teuthology.get_testdir(ctx)
pre = [
- 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/radosgw-admin'.format(tdir=testdir),
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+ 'radosgw-admin',
'--log-to-stderr',
'--format', 'json',
]
log.info('Creating image {name} with size {size}'.format(name=name,
size=size))
args = [
- 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/rbd'.format(tdir=testdir),
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+ 'rbd',
'-p', 'rbd',
'create',
'--size', str(size),
(remote,) = ctx.cluster.only(role).remotes.keys()
remote.run(
args=[
- 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/rbd'.format(tdir=testdir),
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+ 'rbd',
'-p', 'rbd',
'rm',
name,
remote.run(
args=[
'echo',
- 'KERNEL=="rbd[0-9]*", PROGRAM="%s/binary/usr/local/bin/ceph-rbdnamer %%n", SYMLINK+="rbd/%%c{1}/%%c{2}"' % testdir,
+ 'KERNEL=="rbd[0-9]*", PROGRAM="/usr/bin/ceph-rbdnamer %%n", SYMLINK+="rbd/%%c{1}/%%c{2}"',
run.Raw('>'),
'{tdir}/51-rbd.rules'.format(tdir=testdir),
],
remote.run(
args=[
- 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
'sudo',
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/rbd'.format(tdir=testdir),
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+ 'rbd',
'--user', role.rsplit('.')[-1],
'--secret', secretfile,
'-p', 'rbd',
(remote,) = ctx.cluster.only(role).remotes.keys()
remote.run(
args=[
- 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
'sudo',
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/rbd'.format(tdir=testdir),
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+ 'rbd',
'-p', 'rbd',
'unmap',
'/dev/rbd/rbd/{imgname}'.format(imgname=image),
# readlink -f <path> in order to get their canonical
# pathname (so it matches what the kernel remembers).
args = [
- 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'/usr/bin/sudo',
'/bin/bash',
(remote,) = ctx.cluster.only(role).remotes.iterkeys()
remote.run(
args=[
- 'CEPH_CONF={tdir}/ceph.conf'.format(tdir=testdir),
- 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph_test_librbd_fsx'.format(tdir=testdir),
+ 'ceph_test_librbd_fsx',
'-d',
'-W', '-R', # mmap doesn't work with rbd
'-p', str(config.get('progress_interval', 100)), # show progress
# create the objects
osd_remote.run(
args=[
- 'env', 'CEPH_CONF={tdir}/ceph.conf'.format(tdir=testdir),
- 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/smalliobench'.format(tdir=testdir),
+ 'smalliobench',
'--use-prefix', 'recovery_bench',
'--init-only', '1',
'--num-objects', str(num_objects),
log.info('non-recovery (baseline)')
p = osd_remote.run(
args=[
- 'env', 'CEPH_CONF={tdir}/ceph.conf'.format(tdir=testdir),
- 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/smalliobench'.format(tdir=testdir),
+ 'smalliobench',
'--use-prefix', 'recovery_bench',
'--do-not-init', '1',
'--duration', str(duration),
log.info('recovery active')
p = osd_remote.run(
args=[
- 'env', 'CEPH_CONF={tdir}/ceph.conf'.format(tdir=testdir),
- 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/smalliobench'.format(tdir=testdir),
+ 'smalliobench',
'--use-prefix', 'recovery_bench',
'--do-not-init', '1',
'--duration', str(duration),
path='{tdir}/apache/htdocs/rgw.fcgi'.format(tdir=testdir),
data="""#!/bin/sh
ulimit -c unlimited
-export LD_LIBRARY_PATH={tdir}/binary/usr/local/lib
-exec {tdir}/binary/usr/local/bin/radosgw -f -c {tdir}/ceph.conf
+exec radosgw -f
""".format(tdir=testdir)
)
remote.run(
log.info("rgw %s config is %s", client, client_config)
run_cmd=[
- 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'{tdir}/daemon-helper'.format(tdir=testdir),
'term',
]
run_cmd_tail=[
- '{tdir}/binary/usr/local/bin/radosgw'.format(tdir=testdir),
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+ 'radosgw',
'--log-file', '{tdir}/archive/log/rgw.log'.format(tdir=testdir),
'--rgw_ops_log_socket_path', '{tdir}/rgw.opslog.sock'.format(tdir=testdir),
'{tdir}/apache/apache.conf'.format(tdir=testdir),
_config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
ctx.cluster.only(client).run(
args=[
- 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/radosgw-admin'.format(tdir=testdir),
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+ 'radosgw-admin',
'user', 'create',
'--uid', s3tests_conf[section]['user_id'],
'--display-name', s3tests_conf[section]['display_name'],
_config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
ctx.cluster.only(client).run(
args=[
- 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/radosgw-admin'.format(tdir=testdir),
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+ 'radosgw-admin',
'user', 'create',
'--uid', s3tests_conf[section]['user_id'],
'--display-name', s3tests_conf[section]['display_name'],
_config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
ctx.cluster.only(client).run(
args=[
- 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/radosgw-admin'.format(tdir=testdir),
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+ 'radosgw-admin',
'user', 'create',
'--uid', s3tests_conf[section]['user_id'],
'--display-name', s3tests_conf[section]['display_name'],
_config_user(testswift_conf, '{user}.{client}'.format(user=user, client=client), user, suffix)
ctx.cluster.only(client).run(
args=[
- 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir),
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/radosgw-admin'.format(tdir=testdir),
- '-c', '{tdir}/ceph.conf'.format(tdir=testdir),
+ 'radosgw-admin',
'user', 'create',
'--subuser', '{account}:{user}'.format(account=testswift_conf['func_test']['account{s}'.format(s=suffix)],user=user),
'--display-name', testswift_conf['func_test']['display_name{s}'.format(s=suffix)],
remotes.append(remote)
args =['CEPH_CLIENT_ID={id_}'.format(id_=id_),
- 'CEPH_CONF={tdir}/ceph.conf'.format(tdir=testdir),
'CEPH_ARGS="{flags}"'.format(flags=config.get('flags', '')),
- 'LD_PRELOAD={tdir}/binary/usr/local/lib/librados.so.2'.format(tdir=testdir),
'{tdir}/daemon-helper'.format(tdir=testdir), 'kill',
- '{tdir}/binary/usr/local/bin/multi_stress_watch foo foo'.format(tdir=testdir)
+ 'multi_stress_watch foo foo'
]
log.info("args are %s" % (args,))
'cd', '--', scratch_tmp,
run.Raw('&&'),
run.Raw('CEPH_REF={ref}'.format(ref=ceph_ref)),
- run.Raw('PATH="$PATH:{tdir}/binary/usr/local/bin"'.format(tdir=testdir)),
- run.Raw('LD_LIBRARY_PATH="$LD_LIBRARY_PATH:{tdir}/binary/usr/local/lib"'.format(tdir=testdir)),
run.Raw('CEPH_JAVA_PATH="{tdir}/binary/usr/local/share/java"'.format(tdir=testdir)),
- run.Raw('CEPH_CONF="{tdir}/ceph.conf"'.format(tdir=testdir)),
run.Raw('CEPH_SECRET_FILE="{file}"'.format(file=secretfile)),
run.Raw('CEPH_ID="{id}"'.format(id=id_)),
run.Raw('PYTHONPATH="$PYTHONPATH:{tdir}/binary/usr/local/lib/python2.7/dist-packages:{tdir}/binary/usr/local/lib/python2.6/dist-packages"'.format(tdir=testdir)),
args.append(run.Raw(env_arg))
args.extend([
'{tdir}/enable-coredump'.format(tdir=testdir),
- '{tdir}/binary/usr/local/bin/ceph-coverage'.format(tdir=testdir),
+ 'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'{srcdir}/{workunit}'.format(
srcdir=srcdir,