import logging
+import time
+
import ceph_manager
from teuthology import misc as teuthology
-import time
+from teuthology.task_util.rados import rados
log = logging.getLogger(__name__)
-
-def rados(testdir, remote, cmd, wait=True):
- log.info("rados %s" % ' '.join(cmd))
- pre = [
- '{tdir}/adjust-ulimits'.format(tdir=testdir),
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'rados',
- ];
- pre.extend(cmd)
- proc = remote.run(
- args=pre,
- check_status=False,
- wait=wait
- )
- if wait:
- return proc.exitstatus
- else:
- return proc
-
def task(ctx, config):
"""
Test handling of divergent entries with prior_version
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
- testdir = teuthology.get_testdir(ctx)
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
log.info('writing initial objects')
# write 1000 objects
for i in range(1000):
- rados(testdir, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile])
+ rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile])
manager.wait_for_clean()
# write 1 (divergent) object
log.info('writing divergent object existing_0')
rados(
- testdir, mon, ['-p', 'foo', 'put', 'existing_0', dummyfile2],
+ ctx, mon, ['-p', 'foo', 'put', 'existing_0', dummyfile2],
wait=False)
time.sleep(10)
mon.run(
# write 1 non-divergent object (ensures the divergent write above really is divergent)
log.info('writing non-divergent object existing_1')
- rados(testdir, mon, ['-p', 'foo', 'put', 'existing_1', dummyfile2])
+ rados(ctx, mon, ['-p', 'foo', 'put', 'existing_1', dummyfile2])
manager.wait_for_recovery()
manager.mark_in_osd(divergent)
log.info('wait for peering')
- rados(testdir, mon, ['-p', 'foo', 'put', 'foo', dummyfile])
+ rados(ctx, mon, ['-p', 'foo', 'put', 'foo', dummyfile])
log.info("killing divergent %d", divergent)
manager.kill_osd(divergent)
manager.set_config(i, osd_recovery_delay_start=0)
log.info('reading existing_0')
- exit_status = rados(testdir, mon,
+ exit_status = rados(ctx, mon,
['-p', 'foo', 'get', 'existing_0',
'-o', '/tmp/existing'])
assert exit_status == 0
import logging
import ceph_manager
from teuthology import misc as teuthology
-
+from teuthology.task_util.rados import rados
log = logging.getLogger(__name__)
-
-def rados(ctx, remote, cmd):
- testdir = teuthology.get_testdir(ctx)
- log.info("rados %s" % ' '.join(cmd))
- pre = [
- '{tdir}/adjust-ulimits'.format(tdir=testdir),
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'rados',
- ];
- pre.extend(cmd)
- proc = remote.run(
- args=pre,
- check_status=False
- )
- return proc.exitstatus
-
def task(ctx, config):
"""
Test handling of lost objects.
import logging
import ceph_manager
from teuthology import misc as teuthology
-
+from teuthology.task_util.rados import rados
log = logging.getLogger(__name__)
-
-def rados(testdir, remote, cmd):
- log.info("rados %s" % ' '.join(cmd))
- pre = [
- '{tdir}/adjust-ulimits'.format(tdir=testdir),
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'rados',
- ];
- pre.extend(cmd)
- proc = remote.run(
- args=pre,
- check_status=False
- )
- return proc.exitstatus
-
def task(ctx, config):
"""
Test handling of object location going down
'--osd-recovery-delay-start 10000 --osd-min-pg-log-entries 100000000'
)
- testdir = teuthology.get_testdir(ctx)
-
# kludge to make sure they get a map
- rados(testdir, mon, ['-p', 'data', 'put', 'dummy', dummyfile])
+ rados(ctx, mon, ['-p', 'data', 'put', 'dummy', dummyfile])
# create old objects
for f in range(1, 10):
- rados(testdir, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile])
+ rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile])
manager.mark_out_osd(3)
manager.wait_till_active()
from cStringIO import StringIO
import logging
-import ceph_manager
-from teuthology import misc as teuthology
import time
+
+import ceph_manager
from ..orchestra import run
+from teuthology.task_util.rados import rados
+from teuthology import misc as teuthology
log = logging.getLogger(__name__)
-
-def rados(testdir, remote, cmd, wait=True):
- log.info("rados %s" % ' '.join(cmd))
- pre = [
- '{tdir}/adjust-ulimits'.format(tdir=testdir),
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'rados',
- ];
- pre.extend(cmd)
- proc = remote.run(
- args=pre,
- check_status=False,
- wait=wait
- )
- if wait:
- return proc.exitstatus
- else:
- return proc
-
def task(ctx, config):
"""
Test handling of osd_failsafe_nearfull_ratio and osd_failsafe_full_ratio
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
- testdir = teuthology.get_testdir(ctx)
manager = ceph_manager.CephManager(
mon,
ctx=ctx,
log.info('3. Verify write failure when exceeding full_ratio')
# Write data should fail
- ret = rados(testdir, mon, ['-p', 'foo', 'put', 'newfile1', dummyfile])
+ ret = rados(ctx, mon, ['-p', 'foo', 'put', 'newfile1', dummyfile])
assert ret != 0, 'Expected write failure but it succeeded with exit status 0'
# Put back default
log.info('4. Verify write success when NOT exceeding full_ratio')
# Write should succeed
- ret = rados(testdir, mon, ['-p', 'foo', 'put', 'newfile2', dummyfile2])
+ ret = rados(ctx, mon, ['-p', 'foo', 'put', 'newfile2', dummyfile2])
assert ret == 0, 'Expected write to succeed, but got exit status %d' % ret
log.info('5. Verify warning messages again when exceeding nearfull_ratio')
import logging
-import ceph_manager
import json
-from teuthology import misc as teuthology
+import ceph_manager
+from teuthology import misc as teuthology
+from teuthology.task_util.rados import rados
log = logging.getLogger(__name__)
-
-def rados(ctx, remote, cmd):
- testdir = teuthology.get_testdir(ctx)
- log.info("rados %s" % ' '.join(cmd))
- pre = [
- '{tdir}/adjust-ulimits'.format(tdir=testdir),
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'rados',
- ];
- pre.extend(cmd)
- proc = remote.run(
- args=pre,
- check_status=False
- )
- return proc.exitstatus
-
def task(ctx, config):
"""
Test peering.
from cStringIO import StringIO
+import ceph_manager
+from ..orchestra import run
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology.task_util.rgw import rgwadmin
-from ..orchestra import run
-import ceph_manager
+from teuthology.task_util.rados import rados
log = logging.getLogger(__name__)
-# this was lifted from lost_unfound.py
-def rados(ctx, remote, cmd):
- testdir = teuthology.get_testdir(ctx)
- log.info("rados %s" % ' '.join(cmd))
- pre = [
- '{tdir}/adjust-ulimits'.format(tdir=testdir),
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'rados',
- ];
- pre.extend(cmd)
- proc = remote.run(
- args=pre,
- check_status=False
- )
-
- return proc.exitstatus
-
@contextlib.contextmanager
def create_dirs(ctx, config):
log.info('Creating apache directories...')
--- /dev/null
+++ b/teuthology/task_util/rados.py
+import logging
+
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+def rados(ctx, remote, cmd, wait=True, check_status=False):
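+    """
+    Run the rados CLI on the given remote, e.g.:
+
+        rados(ctx, remote, ['-p', 'foo', 'put', 'obj_0', '/tmp/dummy'])
+
+    If wait is True (the default), block and return the command's exit
+    status; otherwise return the still-running process object.
+    """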
+ testdir = teuthology.get_testdir(ctx)
+ log.info("rados %s" % ' '.join(cmd))
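+    # run the command under the ulimit and code-coverage wrappers
+    # installed in the test dir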
+ pre = [
+        '{tdir}/adjust-ulimits'.format(tdir=testdir),
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'rados',
+        ]
+ pre.extend(cmd)
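+    # check_status defaults to False so callers can assert on the exit
+    # status themselves (some tests expect a write to fail)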
+ proc = remote.run(
+ args=pre,
+ check_status=check_status,
+ wait=wait,
+ )
+ if wait:
+ return proc.exitstatus
+ else:
+ return proc
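
For reference, a minimal sketch of how the converted tasks call the shared helper (the pool and object names, `mon`, and the dummyfile paths follow the call sites above):

    from teuthology.task_util.rados import rados

    # blocking: returns the rados exit status
    ret = rados(ctx, mon, ['-p', 'foo', 'put', 'existing_1', dummyfile])
    assert ret == 0

    # non-blocking: returns the running process instead of an exit status
    proc = rados(ctx, mon, ['-p', 'foo', 'put', 'existing_0', dummyfile2], wait=False)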