add_subdirectory(src)
+add_subdirectory(qa)
+
add_subdirectory(doc)
if(WITH_MANPAGE)
add_subdirectory(man)
--- /dev/null
+set(CEPH_BUILD_VIRTUALENV $ENV{TMPDIR})
+if(NOT CEPH_BUILD_VIRTUALENV)
+ set(CEPH_BUILD_VIRTUALENV ${CMAKE_BINARY_DIR})
+endif()
+
+if(WITH_TESTS)
+ include(AddCephTest)
+ add_tox_test(qa flake8)
+endif()
def decode(s):
return s.decode('utf-8')
- def check_output(*args, **kwargs):
+ def check_output(*args, **kwargs): # noqa
return decode(subprocess.check_output(*args, **kwargs))
else:
def decode(s):
def check_transaction_ops(ops, enum, tnum):
- if len(ops) is 0:
+ if len(ops) == 0:
logging.warning("No ops found in entry {e} trans {t}".format(e=enum, t=tnum))
errors = 0
for onum in range(len(ops)):
os.unlink(TMPFILE)
journal_errors = check_journal(jsondict)
- if journal_errors is not 0:
+ if journal_errors != 0:
logging.error(jsondict)
ERRORS += journal_errors
for line in output.strip().split('\n'):
print(line)
linev = re.split('\s+', line)
- if linev[0] is '':
+ if linev[0] == '':
linev.pop(0)
print('linev %s' % linev)
weights.append(float(linev[2]))
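The hunks above replace identity tests against literals ("is 0", "is not 0", "is ''") with value comparisons. A minimal illustration of the difference, using throwaway values:

```python
# Illustration only, not part of the patch: "is" compares object identity,
# not value. Checks like "n is 0" often pass on CPython only because small
# integers and short strings happen to be cached/interned.
n = int("1024")
m = int("1024")
print(n == m)   # True: value equality, always well defined
print(n is m)   # False on CPython: two distinct int objects

# flake8 flags literal identity tests (F632), and Python 3.8+ emits a
# SyntaxWarning for them, hence the switch to == / != throughout the patch.
```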
import argparse
import contextlib
import logging
-import string
import httplib
from urlparse import urlparse
import json
from teuthology import misc as teuthology
from teuthology import contextutil
-from teuthology import safepath
from teuthology.orchestra import run
from teuthology.exceptions import ConfigError
# start the public endpoint
client_public_with_id = 'barbican.public' + '.' + client_id
- client_public_with_cluster = cluster_name + '.' + client_public_with_id
run_cmd = ['cd', get_barbican_dir(ctx), run.Raw('&&'),
'.', '.barbicanenv/bin/activate', run.Raw('&&'),
keystone_role = cconfig.get('use-keystone-role', None)
keystone_host, keystone_port = ctx.keystone.public_endpoints[keystone_role]
- keystone_url = 'http://{host}:{port}/v2.0'.format(host=keystone_host,
- port=keystone_port)
barbican_host, barbican_port = ctx.barbican.endpoints[cclient]
barbican_url = 'http://{host}:{port}'.format(host=barbican_host,
port=barbican_port)
config = all_clients
if isinstance(config, list):
config = dict.fromkeys(config)
- clients = config.keys()
overrides = ctx.config.get('overrides', {})
# merge each client section, not the top level.
import yaml
from teuthology import misc
-from teuthology.config import config as teuth_config
from teuthology.orchestra import run
from teuthology.task import Task
path = os.path.join(ctx.archive, 'remote')
try:
os.makedirs(path)
- except OSError as e:
+ except OSError:
pass
for remote in ctx.cluster.remotes.keys():
sub = os.path.join(path, remote.shortname)
try:
os.makedirs(sub)
- except OSError as e:
+ except OSError:
pass
try:
teuthology.pull_directory(remote, '/var/lib/ceph/crash',
os.path.join(sub, 'crash'))
- except ReadError as e:
+ except ReadError:
pass
path = os.path.join(ctx.archive, 'remote')
try:
os.makedirs(path)
- except OSError as e:
+ except OSError:
pass
for remote in ctx.cluster.remotes.keys():
sub = os.path.join(path, remote.shortname)
try:
os.makedirs(sub)
- except OSError as e:
+ except OSError:
pass
teuthology.pull_directory(remote, '/var/log/ceph',
os.path.join(sub, 'log'))
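The repeated "except OSError as e:" to "except OSError:" hunks drop exception bindings that are never read (flake8 reports them as unused locals, F841). A short sketch of the same mkdir-if-missing pattern; the path is invented for illustration:

```python
import os

archive_dir = "/tmp/example-archive"  # hypothetical path, illustration only

# Bind nothing when the exception object is not inspected:
try:
    os.makedirs(archive_dir)
except OSError:
    pass  # directory already exists (or a racing creator made it)

# On Python 3 the same intent can be written without a try block:
os.makedirs(archive_dir, exist_ok=True)
```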
@contextlib.contextmanager
def cephfs_setup(ctx, config):
cluster_name = config['cluster']
- testdir = teuthology.get_testdir(ctx)
- coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
first_mon = teuthology.get_first_mon(ctx, config, cluster_name)
(mon_remote,) = ctx.cluster.only(first_mon).remotes.keys()
from teuthology import misc as teuthology
from cephfs.fuse_mount import FuseMount
-from tasks.cephfs.filesystem import Filesystem
log = logging.getLogger(__name__)
Decrease the size of the pool
"""
pool = self.ceph_manager.get_pool()
- orig_pg_num = self.ceph_manager.get_pool_pg_num(pool)
+ _ = self.ceph_manager.get_pool_pg_num(pool)
self.log("Shrinking pool %s" % (pool,))
if self.ceph_manager.contract_pool(
pool,
Random action selector.
"""
chance_down = self.config.get('chance_down', 0.4)
- chance_test_min_size = self.config.get('chance_test_min_size', 0)
+ _ = self.config.get('chance_test_min_size', 0)
chance_test_backfill_full = \
self.config.get('chance_test_backfill_full', 0)
if isinstance(chance_down, int):
while True:
proc = self.admin_socket(service_type, service_id,
args, check_status=False, stdout=stdout)
- if proc.exitstatus is 0:
+ if proc.exitstatus == 0:
return proc
else:
tries += 1
import argparse
import configobj
import contextlib
-import errno
import logging
import os
import json
-import time
-import gevent
-import re
-import socket
import uuid
-from paramiko import SSHException
-from ceph_manager import CephManager, write_conf
+from ceph_manager import CephManager
from tarfile import ReadError
-from tasks.cephfs.filesystem import Filesystem
from teuthology import misc as teuthology
from teuthology import contextutil
-from teuthology import exceptions
from teuthology.orchestra import run
-import ceph_client as cclient
from teuthology.orchestra.daemon import DaemonGroup
-from tasks.daemonwatchdog import DaemonWatchdog
from teuthology.config import config as teuth_config
# these items we use from ceph.py should probably eventually move elsewhere
@contextlib.contextmanager
def download_cephadm(ctx, config, ref):
cluster_name = config['cluster']
- testdir = teuthology.get_testdir(ctx)
if config.get('cephadm_mode') != 'cephadm-package':
ref = config.get('cephadm_branch', ref)
path = os.path.join(ctx.archive, 'remote')
try:
os.makedirs(path)
- except OSError as e:
+ except OSError:
pass
for remote in ctx.cluster.remotes.keys():
sub = os.path.join(path, remote.name)
try:
os.makedirs(sub)
- except OSError as e:
+ except OSError:
pass
teuthology.pull_directory(remote, '/var/log/ceph/' + fsid,
os.path.join(sub, 'log'))
path = os.path.join(ctx.archive, 'remote')
try:
os.makedirs(path)
- except OSError as e:
+ except OSError:
pass
for remote in ctx.cluster.remotes.keys():
sub = os.path.join(path, remote.name)
try:
os.makedirs(sub)
- except OSError as e:
+ except OSError:
pass
try:
teuthology.pull_directory(remote,
'/var/lib/ceph/%s/crash' % fsid,
os.path.join(sub, 'crash'))
- except ReadError as e:
+ except ReadError:
pass
@contextlib.contextmanager
"""
cluster_name = config['cluster']
fsid = ctx.ceph[cluster_name].fsid
- testdir = teuthology.get_testdir(ctx)
num_mons = 1
try:
"""
cluster_name = config['cluster']
fsid = ctx.ceph[cluster_name].fsid
- testdir = teuthology.get_testdir(ctx)
try:
nodes = []
"""
cluster_name = config['cluster']
fsid = ctx.ceph[cluster_name].fsid
- testdir = teuthology.get_testdir(ctx)
nodes = []
daemons = {}
"""
Execute (shell) commands
"""
- testdir = teuthology.get_testdir(ctx)
cluster_name = config.get('cluster', 'ceph')
if 'all' in config and len(config) == 1:
healthy(ctx=ctx, config=dict(cluster=cluster))
if config.get('wait-for-osds-up', False):
for cluster in clusters:
- wait_for_osds_up(ctx=ctx, config=dict(cluster=cluster))
+ ctx.managers[cluster].wait_for_all_osds_up()
yield
@contextlib.contextmanager
def dirfrag_exists(self, ino, frag):
try:
self.rados(["stat", "{0:x}.{1:08x}".format(ino, frag)])
- except CommandFailedError as e:
+ except CommandFailedError:
return False
else:
return True
-
from StringIO import StringIO
import json
import time
from teuthology.orchestra import run
from teuthology.orchestra.run import CommandFailedError
from .mount import CephFSMount
-from tasks.cephfs.filesystem import Filesystem
log = logging.getLogger(__name__)
try:
self.inst = status['inst_str']
self.addr = status['addr_str']
- except KeyError as e:
+ except KeyError:
sessions = self.fs.rank_asok(['session', 'ls'])
for s in sessions:
if s['id'] == self.id:
from time import sleep
from StringIO import StringIO
from tasks.cephfs.cephfs_test_case import CephFSTestCase
-from tasks.cephfs.fuse_mount import FuseMount
-from teuthology.exceptions import CommandFailedError
from teuthology.misc import sudo_write_file
log = logging.getLogger(__name__)
path_prefix='')
args = ['du', '/']
- for path in path_to_files:
- args.append(path)
+ for p in path_to_files:
+ args.append(p)
du_output = self.get_cephfs_shell_cmd_output(args)
for expected_output in expected_patterns_in_output:
SESSION_AUTOCLOSE = 50
time_at_beg = time.time()
mount_a_gid = self.mount_a.get_global_id()
- mount_a_pid = self.mount_a.client_pid
+ _ = self.mount_a.client_pid
self.fs.set_var('session_timeout', SESSION_TIMEOUT)
self.fs.set_var('session_autoclose', SESSION_AUTOCLOSE)
self.assert_session_count(2, self.fs.mds_asok(['session', 'ls']))
status = self.fs.wait_for_daemons()
rank1 = self.fs.get_rank(rank=1, status=status)
- name1 = 'mds.'+rank1['name']
# Create a directory that is pre-exported to rank 1
self.mount_a.run_shell(["mkdir", "-p", "a/aa"])
import time
import signal
-import json
import logging
from random import randint
from cephfs_test_case import CephFSTestCase
from teuthology.exceptions import CommandFailedError
-from teuthology import misc as teuthology
from tasks.cephfs.fuse_mount import FuseMount
log = logging.getLogger(__name__)
log.info("status = {0}".format(status))
original_ranks = set([info['gid'] for info in status.get_ranks(fscid)])
- original_standbys = set([info['gid'] for info in status.get_standbys()])
+ _ = set([info['gid'] for info in status.get_standbys()])
oldmax = self.fs.get_var('max_mds')
self.assertTrue(n > oldmax)
log.info("status = {0}".format(status))
original_ranks = set([info['gid'] for info in status.get_ranks(fscid)])
- original_standbys = set([info['gid'] for info in status.get_standbys()])
+ _ = set([info['gid'] for info in status.get_standbys()])
oldmax = self.fs.get_var('max_mds')
self.assertTrue(n < oldmax)
def _confirm_no_replay(self):
status = self.fs.status()
- standby_count = len(list(status.get_standbys()))
+ _ = len(list(status.get_standbys()))
self.assertEqual(0, len(list(self.fs.get_replays(status=status))))
return status
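Several hunks above keep a call but bind its result to "_" rather than a named variable. A hypothetical sketch of the convention (the function below is invented for illustration):

```python
# "_" is the conventional throwaway name: the call is kept for its side
# effects or to exercise the API, while the name documents that the return
# value is deliberately ignored, so readers stop expecting it to be used.
def fetch_standby_count():
    # stand-in for something like len(list(status.get_standbys()))
    return 2

_ = fetch_standby_count()  # result intentionally unused
```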
import time
import json
import logging
-import time
log = logging.getLogger(__name__)
def get_pool_df(fs, name):
try:
return fs.get_pool_df(name)['objects'] > 0
- except RuntimeError as e:
+ except RuntimeError:
return False
self.wait_until_true(lambda: get_pool_df(self.fs, self.fs.metadata_pool_name), timeout=30)
out = self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'get',
pool_name, 'size',
'-f', 'json-pretty')
- j = json.loads(out)
- pool_size = int(j['size'])
+ _ = json.loads(out)
proc = self.mount_a.run_shell(['df', '.'])
output = proc.stdout.getvalue()
-
"""
Test our tools for recovering metadata from the data pool into an alternate pool
"""
-import json
import logging
-import os
-from textwrap import dedent
import traceback
-from collections import namedtuple, defaultdict
+from collections import namedtuple
from teuthology.orchestra.run import CommandFailedError
-from tasks.cephfs.cephfs_test_case import CephFSTestCase, for_teuthology
+from tasks.cephfs.cephfs_test_case import CephFSTestCase
log = logging.getLogger(__name__)
Test CephFS scrub (distinct from OSD scrub) functionality
"""
import logging
-import os
-import traceback
from collections import namedtuple
-from teuthology.orchestra.run import CommandFailedError
-from tasks.cephfs.cephfs_test_case import CephFSTestCase, for_teuthology
+from tasks.cephfs.cephfs_test_case import CephFSTestCase
log = logging.getLogger(__name__)
log.info("client_path: {0}".format(client_path))
log.info("Cloning repo into place")
- repo_path = TestScrubChecks.clone_repo(self.mount_a, client_path)
+ TestScrubChecks.clone_repo(self.mount_a, client_path)
out_json = self.fs.rank_tell(["scrub", "start", abs_test_path, "recursive"])
self.assertNotEqual(out_json, None)
log.info("client_path: {0}".format(client_path))
log.info("Cloning repo into place")
- repo_path = TestScrubChecks.clone_repo(self.mount_a, client_path)
+ _ = TestScrubChecks.clone_repo(self.mount_a, client_path)
out_json = self.fs.rank_tell(["scrub", "start", abs_test_path, "recursive"])
self.assertNotEqual(out_json, None)
log.info("client_path: {0}".format(client_path))
log.info("Cloning repo into place")
- repo_path = TestScrubChecks.clone_repo(self.mount_a, client_path)
+ _ = TestScrubChecks.clone_repo(self.mount_a, client_path)
out_json = self.fs.rank_tell(["scrub", "start", abs_test_path, "recursive"])
self.assertNotEqual(out_json, None)
success, errstring = validator(jout, 0)
if not success:
- raise AsokCommandFailedError(command, rout, jout, errstring)
+ raise AsokCommandFailedError(command, 0, jout, errstring)
return jout
def asok_command(self, mds_rank, command, validator):
-from StringIO import StringIO
import time
import json
import logging
import sys
import logging
import signal
-import time
-import errno
from textwrap import dedent
from tasks.cephfs.fuse_mount import FuseMount
from tasks.cephfs.cephfs_test_case import CephFSTestCase
self.fs.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit)])
try:
self.create_snap_dir(sname)
- except CommandFailedError as e:
+ except CommandFailedError:
# after reducing limit we expect the new snapshot creation to fail
pass
self.delete_dir_and_snaps("accounts", new_limit + 1)
from textwrap import dedent
import datetime
import gevent
-import datetime
from teuthology.orchestra.run import CommandFailedError, Raw
from tasks.cephfs.cephfs_test_case import CephFSTestCase, for_teuthology
size_unit = 1024 # small, numerous files
file_multiplier = 200
else:
- raise NotImplemented(throttle_type)
+ raise NotImplementedError(throttle_type)
# Pick up config changes
self.fs.mds_fail_restart()
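The throttle hunks replace "raise NotImplemented(...)" with "raise NotImplementedError(...)". NotImplemented is a constant meant to be returned from binary special methods, not an exception class, so raising it fails at the raise site itself. A small sketch with an invented function name:

```python
def throttle_limit(throttle_type):
    if throttle_type == "mds_max_purge_files":
        return 64  # made-up value, illustration only
    # Correct: NotImplementedError is a real exception class.
    raise NotImplementedError(throttle_type)
    # "raise NotImplemented(throttle_type)" would instead fail with
    # TypeError: 'NotImplementedType' object is not callable.
```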
num_strays_purging, mds_max_purge_files
))
else:
- raise NotImplemented(throttle_type)
+ raise NotImplementedError(throttle_type)
log.info("Waiting for purge to complete {0}/{1}, {2}/{3}".format(
num_strays_purging, num_strays,
import json
import logging
-import time
import os
from textwrap import dedent
from tasks.cephfs.cephfs_test_case import CephFSTestCase
volume_prefix = "/myprefix"
group_id = "grpid"
volume_id = "volid"
- mount_path = self._volume_client_python(vc_mount, dedent("""
+ self._volume_client_python(vc_mount, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
create_result = vc.create_volume(vp, 1024*1024*10, namespace_isolated=False)
print(create_result['mount_path'])
nsize = usedsize/2
try:
self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize))
- except CommandFailedError as ce:
+ except CommandFailedError:
raise RuntimeError("expected the 'fs subvolume resize' command to succeed")
# verify the quota
else:
raise RuntimeError('expected a yum based or a apt based system')
- proc = self.mount_a.client_remote.run(args=args, omit_sudo=False)
+ self.mount_a.client_remote.run(args=args, omit_sudo=False)
def create_reqd_users(self):
self.mount_a.client_remote.run(args=['sudo', 'useradd', 'fsgqa'],
from teuthology.task import Task
from teuthology import misc
-import ceph_manager
log = logging.getLogger(__name__)
for ns in namespace:
def object_name(i):
nslength = 0
- if namespace is not '':
+ if namespace != '':
nslength = len(namespace)
numstr = str(i)
fillerlen = l - nslength - len(prefix) - len(numstr)
import logging
import signal
import time
-import random
from gevent import sleep
from gevent.greenlet import Greenlet
for i in range(DIVERGENT_WRITE + DIVERGENT_REMOVE):
exit_status = rados(ctx, mon, ['-p', 'foo', 'get', 'existing_%d' % i,
'/tmp/existing'])
- assert exit_status is 0
+ assert exit_status == 0
log.info("success")
for i in range(DIVERGENT_WRITE + DIVERGENT_REMOVE):
exit_status = rados(ctx, mon, ['-p', 'foo', 'get', 'existing_%d' % i,
'/tmp/existing'])
- assert exit_status is 0
+ assert exit_status == 0
cmd = 'rm {file}'.format(file=expfile)
exp_remote.run(args=cmd, wait=True)
Dump_stuck command
"""
import logging
-import re
import time
import ceph_manager
import contextlib
from teuthology import misc as teuthology
-from teuthology import contextutil
log = logging.getLogger(__name__)
CephFS sub-tasks.
"""
-import contextlib
import logging
import re
-import time
from tasks.cephfs.filesystem import Filesystem
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology.orchestra import run
-from teuthology.orchestra.connection import split_user
from teuthology.packaging import install_package
from teuthology.packaging import remove_package
from teuthology.exceptions import ConfigError
# start the public endpoint
client_public_with_id = 'keystone.public' + '.' + client_id
- client_public_with_cluster = cluster_name + '.' + client_public_with_id
public_host, public_port = ctx.keystone.public_endpoints[client]
run_cmd = get_keystone_venved_cmd(ctx, 'keystone-wsgi-public',
-
+# FIXME: this file has many undefined vars which are accessed!
+# flake8: noqa
import logging
import contextlib
import time
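Two suppression levels appear in this patch: a trailing "# noqa" on individual lines (the check_output shim, the ceph_argparse star import) and a file-wide "# flake8: noqa" marker for the module noted above as accessing undefined names. Roughly how flake8 treats them, in a standalone sketch:

```python
# Per-line: a trailing "# noqa" silences every flake8 message on that line;
# "# noqa: E401" (for example) narrows the suppression to a single code.
import os, sys  # noqa

# Per-file: a line consisting of "# flake8: noqa" anywhere in a module makes
# flake8 skip the whole file, so one marker covers code that would otherwise
# need a suppression on nearly every line.
print(os.sep, sys.platform)
```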
def test_cephfs_evict_client_does_not_exist(self):
fs_id = self.get_fs_id()
- data = self._delete("/api/cephfs/{}/client/1234".format(fs_id))
+ self._delete("/api/cephfs/{}/client/1234".format(fs_id))
self.assertStatus(404)
def test_cephfs_get(self):
from __future__ import absolute_import
-import time
-from .helper import DashboardTestCase, JObj, JLeaf, JList
+from .helper import DashboardTestCase
class GaneshaTest(DashboardTestCase):
# -*- coding: utf-8 -*-
from __future__ import absolute_import
-import os
import json
from .helper import DashboardTestCase
id = self.create_image_in_trash('rbd', 'test_rbd')
self.assertStatus(200)
- img = self.get_image('rbd', None, 'test_rbd')
+ self.get_image('rbd', None, 'test_rbd')
self.assertStatus(404)
time.sleep(1)
assert cls.mgr_cluster is not None
if len(cls.mgr_cluster.mgr_ids) < cls.MGRS_REQUIRED:
- self.skipTest("Only have {0} manager daemons, "
- "{1} are required".format(
- len(cls.mgr_cluster.mgr_ids), cls.MGRS_REQUIRED))
+ cls.skipTest(
+ "Only have {0} manager daemons, {1} are required".format(
+ len(cls.mgr_cluster.mgr_ids), cls.MGRS_REQUIRED))
cls.setup_mgrs()
-import json
import logging
-from tempfile import NamedTemporaryFile
-from teuthology.exceptions import CommandFailedError
from mgr_test_case import MgrTestCase
log = logging.getLogger(__name__)
import errno
import json
import logging
-from tempfile import NamedTemporaryFile
from time import sleep
from teuthology.exceptions import CommandFailedError
Handle clock skews in monitors.
"""
import logging
-import contextlib
import ceph_manager
import time
-import gevent
-from StringIO import StringIO
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
import logging
import contextlib
-from teuthology import misc as teuthology
from cStringIO import StringIO
-from teuthology.orchestra import run
-from teuthology import contextutil
from paramiko import SSHException
import socket
import time
try:
self.packet_drop()
log.info('link down')
- except SSHException as e:
+ except SSHException:
log.debug('Failed to run command')
self.stop_event.wait(timeout=self.interval)
try:
delete_dev(self.remote, self.interface)
log.info('link up')
- except SSHException as e:
+ except SSHException:
log.debug('Failed to run command')
def begin(self, gname):
4. delete a pool, verify pgs go active.
"""
pg_num = config.get('pg_num', 1)
- pool_size = config.get('pool_size', 2)
from_primary = config.get('from_primary', True)
manager = ctx.managers['ceph']
def task(ctx, config):
assert isinstance(config, dict), \
'osd_max_pg_per_osd task only accepts a dict for config'
- manager = ctx.managers['ceph']
if config.get('test_create_from_mon', True):
test_create_from_mon(ctx, config)
else:
pool = manager.create_pool_with_unique_name(erasure_code_profile_name=profile_name)
osize = config.get('objectsize', 65536)
- if osize is 0:
+ if osize == 0:
objectsize = []
else:
objectsize = ['-O', str(osize)]
log.info('joining radosbench (timing out after %ss)', timeout)
run.wait(radosbench.itervalues(), timeout=timeout)
- if pool is not 'data' and create_pool:
+ if pool != 'data' and create_pool:
manager.remove_pool(pool)
# python qa/tasks/radosgw_admin.py [USER] HOSTNAME
#
-import copy
import json
import logging
import time
import boto.exception
import boto.s3.connection
import boto.s3.acl
-from boto.utils import RequestHook
import httplib2
-import util.rgw as rgw_utils
from util.rgw import rgwadmin, get_user_summary, get_user_successful_ops
display_name2='Fud'
display_name3='Bar'
email='foo@foo.com'
- email2='bar@bar.com'
access_key='9te6NH5mcdcq0Tc5i8i1'
secret_key='Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu'
access_key2='p5YnriCv1nAtykxBrupQ'
# TESTCASE 'zonegroup-info', 'zonegroup', 'get', 'get zonegroup info', 'succeeds'
(err, out) = rgwadmin(ctx, client, ['zonegroup', 'get'], check_status=True)
-import sys
-from tasks.radosgw_admin import task
from teuthology.config import config
from teuthology.orchestra import cluster, remote
import argparse;
grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //'
"""
-from cStringIO import StringIO
import logging
-import json
+
import boto.exception
import boto.s3.connection
import random
import string
-import util.rgw as rgw_utils
-
from teuthology import misc as teuthology
from teuthology import contextutil
from teuthology.config import config as teuth_config
from teuthology.orchestra import run
-from teuthology.orchestra.connection import split_user
log = logging.getLogger(__name__)
import contextlib
import logging
-from teuthology.orchestra import run
+from teuthology.exceptions import ConfigError
from teuthology.parallel import parallel
from teuthology import misc as teuthology
from gevent.greenlet import Greenlet
from gevent.event import Event
-from teuthology import misc
from teuthology.exceptions import CommandFailedError
-from teuthology.task import Task
from teuthology.orchestra import run
from tasks.thrasher import Thrasher
for i in range(DIVERGENT_WRITE + DIVERGENT_REMOVE):
exit_status = rados(ctx, mon, ['-p', 'foo', 'get', 'existing_%d' % i,
'/tmp/existing'])
- assert exit_status is 0
+ assert exit_status == 0
(remote,) = ctx.\
cluster.only('osd.{o}'.format(o=divergent)).remotes.keys()
"""
import argparse
import contextlib
-import json
import logging
-import os
-import errno
-import util.rgw as rgw_utils
from teuthology.orchestra import run
from teuthology import misc as teuthology
from teuthology.exceptions import ConfigError
from util import get_remote_for_role
from util.rgw import rgwadmin, wait_for_radosgw
-from util.rados import (rados, create_ec_pool,
- create_replicated_pool,
- create_cache_pool)
+from util.rados import (create_ec_pool,
+ create_replicated_pool,
+ create_cache_pool)
log = logging.getLogger(__name__)
rgw multisite configuration routines
"""
import argparse
-import contextlib
import logging
import random
import string
# insert zone args before the first |
pipe = args.index(run.Raw('|'))
args = args[0:pipe] + zone.zone_args() + args[pipe:]
- except ValueError, e:
+ except ValueError:
args += zone.zone_args()
self.daemon.command_kwargs['args'] = args
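The hunk above drops the Python 2-only "except ValueError, e:" spelling, which is a SyntaxError under Python 3. The portable forms, sketched with throwaway values:

```python
values = [1, 2, 3]

try:
    idx = values.index(99)
except ValueError:            # bind nothing when the exception is unused
    idx = None

try:
    idx = values.index(99)
except ValueError as exc:     # the "as" form parses on Python 2.6+ and 3
    print("lookup failed: %s" % exc)
    idx = None
```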
rgw multisite testing
"""
import logging
-import sys
import nose.core
import nose.config
import contextlib
import logging
-import time
from teuthology import misc
from teuthology.orchestra import run
from teuthology import contextutil
from teuthology.config import config as teuth_config
from teuthology.orchestra import run
-from teuthology.orchestra.connection import split_user
from teuthology.exceptions import ConfigError
log = logging.getLogger(__name__)
import random
import string
import yaml
-import socket
import getpass
from teuthology import misc as teuthology
-from teuthology.exceptions import ConfigError
from teuthology.task import Task
from teuthology.orchestra import run
-from teuthology.orchestra.remote import Remote
log = logging.getLogger(__name__)
stdout=StringIO()
)
- if gr is not 'All':
+ if gr != 'All':
self.ctx.cluster.only(client).run(
args=args + ['--tests'] + [gr] + extra_args,
stdout=StringIO()
from teuthology import contextutil
from teuthology.config import config as teuth_config
from teuthology.orchestra import run
-from teuthology.orchestra.connection import split_user
log = logging.getLogger(__name__)
from teuthology import misc as teuthology
from teuthology import contextutil
-from teuthology.config import config as teuth_config
+from teuthology.exceptions import ConfigError
from teuthology.orchestra import run
log = logging.getLogger(__name__)
config = all_clients
if isinstance(config, list):
config = dict.fromkeys(config)
- clients = config.keys()
overrides = ctx.config.get('overrides', {})
# merge each client section, not the top level.
import logging
from teuthology import misc as teuthology
-from teuthology import contextutil
from teuthology.orchestra import run
log = logging.getLogger(__name__)
from cStringIO import StringIO
import logging
import json
-import requests
import time
-from requests.packages.urllib3 import PoolManager
-from requests.packages.urllib3.util import Retry
-from urlparse import urlparse
-
-from teuthology.orchestra.connection import split_user
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
import re
import os
import time
-import json
import sys
import errno
from unittest import suite, loader
except Exception as e:
self.client_remote.run(args=[
'sudo',
- run.Raw('PATH=/usr/sbin:$PATH'),
+ Raw('PATH=/usr/sbin:$PATH'),
'lsof',
- run.Raw(';'),
+ Raw(';'),
'ps', 'auxf',
], timeout=(15*60), omit_sudo=False)
raise e
max_required_mgr = 0
require_memstore = False
- for suite, case in enumerate_methods(overall_suite):
+ for suite_, case in enumerate_methods(overall_suite):
max_required_mds = max(max_required_mds,
getattr(case, "MDSS_REQUIRED", 0))
max_required_clients = max(max_required_clients,
opt_teardown_cluster = False
global opt_log_ps_output
opt_log_ps_output = False
- opt_clear_old_log = False
use_kernel_client = False
args = sys.argv[1:]
elif f == '--log-ps-output':
opt_log_ps_output = True
elif f == '--clear-old-log':
- opt_clear_old_log = True
clear_old_log()
elif f == "--kclient":
use_kernel_client = True
import argparse
import os
-import sys
-import time
def main():
parser = argparse.ArgumentParser()
from __future__ import print_function
-import json
import subprocess
import shlex
-from StringIO import StringIO
import errno
import sys
import os
import re
-import rados
-from ceph_argparse import *
+from ceph_argparse import * # noqa
keyring_base = '/tmp/cephtest-caps.keyring'
import sys
if sys.version_info[0] == 2:
- from cStringIO import StringIO
-
range = xrange
elif sys.version_info[0] == 3:
- from io import StringIO
-
range = range
import rados as rados
if r != 0:
raise Exception
-import ConfigParser
-import contextlib
class _TrimIndentFile(object):
def __init__(self, fp):
bt = decode(binbt)
+ ind = 0
if bt['ino'] != ino:
raise VerifyFailure('inode mismatch: {bi} != {ino}\n\tbacktrace:\n\t\t{bt}\n\tfailed verify against:\n\t\t{i}, {v}'.format(
bi=bt['ancestors'][ind]['dname'], ino=ino, bt=bt, i=ino, v=values))
- ind = 0
for (n, i) in values:
if bt['ancestors'][ind]['dirino'] != i:
raise VerifyFailure('ancestor dirino mismatch: {b} != {ind}\n\tbacktrace:\n\t\t{bt}\n\tfailed verify against:\n\t\t{i}, {v}'.format(