-"""
+"""
Run an autotest test on the ceph cluster.
"""
import json
import logging
import os
+import six
+
from teuthology import misc as teuthology
from teuthology.parallel import parallel
from teuthology.orchestra import run
log.info('Making a separate scratch dir for every client...')
for role in config.keys():
- assert isinstance(role, basestring)
+ assert isinstance(role, six.string_types)
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
"""
Spawned to run test on remote site
"""
- assert isinstance(role, basestring)
+ assert isinstance(role, six.string_types)
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
# case we will see connection errors that we should ignore.
log.debug("Missed logrotate, node '{0}' is offline".format(
e.node))
- except EOFError as e:
+ except EOFError:
# Paramiko sometimes raises this when it fails to
# connect to a node during open_session. As with
# ConnectionLostError, we ignore this because nodes
# are allowed to get power cycled during tests.
log.debug("Missed logrotate, EOFError")
- except SSHException as e:
+ except SSHException:
log.debug("Missed logrotate, SSHException")
except socket.error as e:
if e.errno in (errno.EHOSTUNREACH, errno.ECONNRESET):
testdir = teuthology.get_testdir(ctx)
remote_logrotate_conf = '%s/logrotate.ceph-test.conf' % testdir
rotate_conf_path = os.path.join(os.path.dirname(__file__), 'logrotate.conf')
- with file(rotate_conf_path, 'rb') as f:
+ with open(rotate_conf_path, 'rb') as f:
conf = ""
for daemon, size in daemons.items():
log.info('writing logrotate stanza for {}'.format(daemon))
from teuthology.orchestra.remote import Remote
from teuthology.orchestra import run
from teuthology.exceptions import CommandFailedError
+import six
try:
from subprocess import DEVNULL # py3k
:param erasure_code_use_overwrites: if true, allow overwrites
"""
with self.lock:
- assert isinstance(pool_name, basestring)
+ assert isinstance(pool_name, six.string_types)
assert isinstance(pg_num, int)
assert pool_name not in self.pools
self.log("creating pool_name %s" % (pool_name,))
:param pool_name: Pool to be removed
"""
with self.lock:
- assert isinstance(pool_name, basestring)
+ assert isinstance(pool_name, six.string_types)
assert pool_name in self.pools
self.log("removing pool_name %s" % (pool_name,))
del self.pools[pool_name]
Return the number of pgs in the pool specified.
"""
with self.lock:
- assert isinstance(pool_name, basestring)
+ assert isinstance(pool_name, six.string_types)
if pool_name in self.pools:
return self.pools[pool_name]
return 0
:returns: property as an int value.
"""
with self.lock:
- assert isinstance(pool_name, basestring)
- assert isinstance(prop, basestring)
+ assert isinstance(pool_name, six.string_types)
+ assert isinstance(prop, six.string_types)
output = self.raw_cluster_cmd(
'osd',
'pool',
This routine retries if set operation fails.
"""
with self.lock:
- assert isinstance(pool_name, basestring)
- assert isinstance(prop, basestring)
+ assert isinstance(pool_name, six.string_types)
+ assert isinstance(prop, six.string_types)
assert isinstance(val, int)
tries = 0
while True:
Increase the number of pgs in a pool
"""
with self.lock:
- assert isinstance(pool_name, basestring)
+ assert isinstance(pool_name, six.string_types)
assert isinstance(by, int)
assert pool_name in self.pools
if self.get_num_creating() > 0:
with self.lock:
self.log('contract_pool %s by %s min %s' % (
pool_name, str(by), str(min_pgs)))
- assert isinstance(pool_name, basestring)
+ assert isinstance(pool_name, six.string_types)
assert isinstance(by, int)
assert pool_name in self.pools
if self.get_num_creating() > 0:
Set pgpnum property of pool_name pool.
"""
with self.lock:
- assert isinstance(pool_name, basestring)
+ assert isinstance(pool_name, six.string_types)
assert pool_name in self.pools
if not force and self.get_num_creating() > 0:
return False
"""
mdsmap = self.get_mds_map(status)
result = []
- for mds_status in sorted(mdsmap['info'].values(), lambda a, b: cmp(a['rank'], b['rank'])):
+ for mds_status in sorted(mdsmap['info'].values(),
+ key=lambda info: info['rank']):
if mds_status['state'] == state or state is None:
result.append(mds_status['name'])
def get_all_mds_rank(self, status=None):
mdsmap = self.get_mds_map(status)
result = []
- for mds_status in sorted(mdsmap['info'].values(), lambda a, b: cmp(a['rank'], b['rank'])):
+ for mds_status in sorted(mdsmap['info'].values(),
+ key=lambda info: info['rank']):
if mds_status['rank'] != -1 and mds_status['state'] != 'up:standby-replay':
result.append(mds_status['rank'])
"""
mdsmap = self.get_mds_map(status)
result = []
- for mds_status in sorted(mdsmap['info'].values(), lambda a, b: cmp(a['rank'], b['rank'])):
+ for mds_status in sorted(mdsmap['info'].values(),
+ key=lambda info: info['rank']):
if mds_status['rank'] != -1 and mds_status['state'] != 'up:standby-replay':
result.append(mds_status['name'])
self.mount_a.wait_until_mounted()
def test_dir_fsync(self):
- self._test_fsync(True);
+ self._test_fsync(True)
def test_create_fsync(self):
- self._test_fsync(False);
+ self._test_fsync(False)
def _test_fsync(self, dirfsync):
"""
p = self.mount_a.client_remote.sh('uname -r', wait=True)
dir_pin = self.mount_a.getfattr("1", "ceph.dir.pin")
log.debug("mount.getfattr('1','ceph.dir.pin'): %s " % dir_pin)
- if str(p.stdout.getvalue()) < "5" and not(dir_pin):
- self.skipTest("Kernel does not support getting the extended attribute ceph.dir.pin")
+ if str(p) < "5" and not(dir_pin):
+ self.skipTest("Kernel does not support getting the extended attribute ceph.dir.pin")
self.assertTrue(self.mount_a.getfattr("1", "ceph.dir.pin") == "1")
self.assertTrue(self.mount_a.getfattr("1/2", "ceph.dir.pin") == "0")
if (len(self.fs.get_active_names()) > 2):
self.tell_command(mds_rank, "scrub start /{0} repair".format(test_dir),
lambda j, r: self.json_validator(j, r, "return_code", 0))
- # wait a few second for background repair
- time.sleep(10)
+ # wait a few seconds for background repair
+ time.sleep(10)
- # fragstat should be fixed
- self.mount_a.run_shell(["sudo", "rmdir", test_dir])
+ # fragstat should be fixed
+ self.mount_a.run_shell(["sudo", "rmdir", test_dir])
@staticmethod
def json_validator(json_out, rc, element, expected_value):
# Mount objects, sorted by ID
if hasattr(ctx, 'mounts'):
- mounts = [v for k, v in sorted(ctx.mounts.items(), lambda a, b: cmp(a[0], b[0]))]
+ mounts = [v for k, v in sorted(ctx.mounts.items(), key=lambda mount: mount[0])]
else:
# The test configuration has a filesystem but no fuse/kclient mounts
mounts = []
import logging
import os
+import six
+
from tasks.util.workunit import get_refspec_after_overrides
from teuthology import misc as teuthology
:param ctx: Context
:param role: Roles
"""
- assert isinstance(role, basestring)
+ assert isinstance(role, six.string_types)
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
for (client, _) in config.items():
run_in_keystone_dir(ctx, client,
[ 'source',
- '{tvdir}/bin/activate'.format(tvdir=get_toxvenv_dir(ctx)),
+ '{tvdir}/bin/activate'.format(tvdir=get_toxvenv_dir(ctx)),
run.Raw('&&'),
'tox', '-e', 'venv', '--notest'
])
poolprocs=dict()
while (remaining_pools > 0):
log.info('{n} pools remaining to create'.format(n=remaining_pools))
- for remote, role_ in creator_remotes:
+ for remote, role_ in creator_remotes:
poolnum = remaining_pools
remaining_pools -= 1
if remaining_pools < 0:
continue
log.info('creating pool{num} on {role}'.format(num=poolnum, role=role_))
- proc = remote.run(
- args=[
- 'ceph',
- '--name', role_,
- 'osd', 'pool', 'create', 'pool{num}'.format(num=poolnum), '8',
- run.Raw('&&'),
- 'rados',
- '--name', role_,
- '--pool', 'pool{num}'.format(num=poolnum),
- 'bench', '0', 'write', '-t', '16', '--block-size', '1'
- ],
- wait = False
- )
+ proc = remote.run(
+ args=[
+ 'ceph',
+ '--name', role_,
+ 'osd', 'pool', 'create', 'pool{num}'.format(num=poolnum), '8',
+ run.Raw('&&'),
+ 'rados',
+ '--name', role_,
+ '--pool', 'pool{num}'.format(num=poolnum),
+ 'bench', '0', 'write', '-t', '16', '--block-size', '1'
+ ],
+ wait = False
+ )
log.info('waiting for pool and object creates')
poolprocs[remote] = proc
import contextlib
import logging
+import six
+
from teuthology.orchestra import run
from teuthology import misc as teuthology
testdir = teuthology.get_testdir(ctx)
print(str(config.get('increment',-1)))
for role in config.get('clients', ['client.0']):
- assert isinstance(role, basestring)
+ assert isinstance(role, six.string_types)
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
num_disks = client_config.get('disks', DEFAULT_NUM_DISKS)
if isinstance(num_disks, list):
num_disks = len(num_disks)
- for i in xrange(num_disks):
+ for i in range(num_disks):
create_config = {
client: {
'image_name':
userdata_path = os.path.join(testdir, 'qemu', 'userdata.' + client)
metadata_path = os.path.join(testdir, 'qemu', 'metadata.' + client)
- with file(os.path.join(src_dir, 'userdata_setup.yaml'), 'rb') as f:
+ with open(os.path.join(src_dir, 'userdata_setup.yaml'), 'r') as f:
test_setup = ''.join(f.readlines())
# configuring the commands to setup the nfs mount
mnt_dir = "/export/{client}".format(client=client)
mnt_dir=mnt_dir
)
- with file(os.path.join(src_dir, 'userdata_teardown.yaml'), 'rb') as f:
+ with open(os.path.join(src_dir, 'userdata_teardown.yaml'), 'r') as f:
test_teardown = ''.join(f.readlines())
user_data = test_setup
num_disks = client_config.get('disks', DEFAULT_NUM_DISKS)
if isinstance(num_disks, list):
num_disks = len(num_disks)
- for i in xrange(1, num_disks):
+ for i in range(1, num_disks):
dev_letter = chr(ord('a') + i)
user_data += """
- |
ceph_sha1=ctx.config.get('sha1'))
teuthology.write_file(remote, userdata_path, StringIO(user_data))
- with file(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f:
+ with open(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f:
teuthology.write_file(remote, metadata_path, f)
test_file = '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client)
num_disks = client_config.get('disks', DEFAULT_NUM_DISKS)
if isinstance(num_disks, list):
num_disks = len(num_disks)
- for i in xrange(num_disks):
+ for i in range(num_disks):
suffix = '-clone' if clone else ''
args.extend([
'-drive',
import gevent
from teuthology import misc as teuthology
+import six
+
from teuthology.orchestra import run
log = logging.getLogger(__name__)
existing_pools = config.get('pools', [])
created_pools = []
for role in config.get('clients', clients):
- assert isinstance(role, basestring)
+ assert isinstance(role, six.string_types)
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
from teuthology.orchestra import run
from teuthology import misc as teuthology
+import six
+
log = logging.getLogger(__name__)
@contextlib.contextmanager
create_pool = config.get('create_pool', True)
for role in config.get('clients', ['client.0']):
- assert isinstance(role, basestring)
+ assert isinstance(role, six.string_types)
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
from teuthology.orchestra import run
from teuthology import misc as teuthology
+import six
+
log = logging.getLogger(__name__)
log.info(' repetition =' + str(rep))
for role in config.get('clients', ['client.0']):
- assert isinstance(role, basestring)
+ assert isinstance(role, six.string_types)
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
x2 = s2['total']
except Exception as ex:
r.append("malformed summary looking for totals for user "
- + e['user'] + " " + str(ex))
+ + e['user'] + " " + str(ex))
break
usage_acc_validate_fields(r, x, x2, "summary: totals for user" + e['user'])
return r
assert len(out['placement_pools']) == orig_placement_pools + 1
zonecmd = ['zone', 'placement', 'rm',
- '--rgw-zone', 'default',
- '--placement-id', 'new-placement']
+ '--rgw-zone', 'default',
+ '--placement-id', 'new-placement']
(err, out) = rgwadmin(ctx, client, zonecmd, check_status=True)
ragweed_repo = ctx.config.get('ragweed_repo', teuth_config.ceph_git_base_url + 'ragweed.git')
if suite_branch in s3_branches:
branch = cconf.get('branch', 'ceph-' + suite_branch)
- else:
+ else:
branch = cconf.get('branch', suite_branch)
if not branch:
raise ValueError(
ragweed_conf[section].setdefault('user_id', user)
ragweed_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user))
ragweed_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user))
- ragweed_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20)))
+ ragweed_conf[section].setdefault('access_key', ''.join(random.choice(string.ascii_uppercase) for i in range(20)))
ragweed_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40)))
ragweed_conf['rgw']['host'] = 'localhost'
if properties is not None and 'slow_backend' in properties:
- ragweed_conf['fixtures']['slow backend'] = properties['slow_backend']
+ ragweed_conf['fixtures']['slow backend'] = properties['slow_backend']
conf_fp = StringIO()
ragweed_conf.write(conf_fp)
log.info('Configuring boto...')
boto_src = os.path.join(os.path.dirname(__file__), 'boto.cfg.template')
- for client, properties in config['clients'].iteritems():
- with file(boto_src, 'rb') as f:
+ for client, properties in config['clients'].items():
+ with open(boto_src, 'r') as f:
(remote,) = ctx.cluster.only(client).remotes.keys()
conf = f.read().format(
idle_timeout=config.get('idle_timeout', 30)
from teuthology.task.common_fs_utils import generic_mount
from teuthology.task.common_fs_utils import default_image_name
+import six
+
#V1 image unsupported but required for testing purposes
os.environ["RBD_FORCE_ALLOW_V1"] = "1"
except:
exc_info = sys.exc_info()
if exc_info:
- raise exc_info[0], exc_info[1], exc_info[2]
+ six.reraise(exc_info[0], exc_info[1], exc_info[2])
yield
def run_xfstests_one_client(ctx, role, properties):
s3tests_conf[section].setdefault('user_id', user)
s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user))
s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user))
- s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20)))
+ s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.ascii_uppercase) for i in range(20)))
s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40)))
@contextlib.contextmanager
s3tests_conf[section].setdefault('user_id', user)
s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user))
s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user))
- s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20)))
+ s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.ascii_uppercase) for i in range(20)))
s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40)))
@contextlib.contextmanager
s3tests_conf[section].setdefault('user_id', user)
s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user))
s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user))
- s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.uppercase) for i in xrange(20)))
+ s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.ascii_uppercase) for i in range(20)))
s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40)))
- s3tests_conf[section].setdefault('totp_serial', ''.join(random.choice(string.digits) for i in xrange(10)))
+ s3tests_conf[section].setdefault('totp_serial', ''.join(random.choice(string.digits) for i in range(10)))
s3tests_conf[section].setdefault('totp_seed', base64.b32encode(os.urandom(40)))
s3tests_conf[section].setdefault('totp_seconds', '5')
s3tests_conf['DEFAULT']['host'] = 'localhost'
if properties is not None and 'slow_backend' in properties:
- s3tests_conf['fixtures']['slow backend'] = properties['slow_backend']
+ s3tests_conf['fixtures']['slow backend'] = properties['slow_backend']
(remote,) = ctx.cluster.only(client).remotes.keys()
remote.run(
log.info('Configuring boto...')
boto_src = os.path.join(os.path.dirname(__file__), 'boto.cfg.template')
- for client, properties in config['clients'].iteritems():
- with file(boto_src, 'rb') as f:
+ for client, properties in config['clients'].items():
+ with open(boto_src, 'r') as f:
(remote,) = ctx.cluster.only(client).remotes.keys()
conf = f.read().format(
idle_timeout=config.get('idle_timeout', 30)
import sys
import time
+import six
+
from teuthology import misc as teuthology
from teuthology.orchestra import run
from teuthology.orchestra.daemon import DaemonGroup
:param roles: roles for this test (extracted from yaml files)
"""
for role in roles:
- assert isinstance(role, basestring)
+ assert isinstance(role, six.string_types)
PREFIX = 'samba.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
exc_info = sys.exc_info()
log.exception('Saw exception from %s.%s', d.role, d.id_)
if exc_info != (None, None, None):
- raise exc_info[0], exc_info[1], exc_info[2]
+ six.reraise(exc_info[0], exc_info[1], exc_info[2])
for id_, remote in samba_servers:
remote.run(
to_config(cconfig, params, 'identity', cpar)
to_config(cconfig, params, 'object-storage', cpar)
to_config(cconfig, params, 'object-storage-feature-enabled', cpar)
- cpar.write(file(local_conf, 'w+'))
+ cpar.write(open(local_conf, 'w+'))
remote.put_file(local_conf, tetcdir + '/tempest.conf')
yield
from teuthology.orchestra.run import Raw, quote
from teuthology.orchestra.daemon import DaemonGroup
from teuthology.config import config as teuth_config
-
+import six
import logging
log = logging.getLogger(__name__)
else:
# Sanity check that we've got a list of strings
for arg in args:
- if not isinstance(arg, basestring):
+ if not isinstance(arg, six.string_types):
raise RuntimeError("Oops, can't handle arg {0} type {1}".format(
arg, arg.__class__
))
env=env)
if stdin:
- if not isinstance(stdin, basestring):
+ if not isinstance(stdin, six.string_types):
raise RuntimeError("Can't handle non-string stdins on a vstart cluster")
# Hack: writing to stdin is not deadlock-safe, but it "always" works
import contextlib
import logging
+import six
+
from teuthology.orchestra import run
from teuthology.contextutil import safe_while
clients = config.get('clients', ['client.0'])
assert len(clients) == 1
role = clients[0]
- assert isinstance(role, basestring)
+ assert isinstance(role, six.string_types)
PREFIX = 'client.'
assert role.startswith(PREFIX)
(remote,) = ctx.cluster.only(role).remotes.keys()
import contextlib
import logging
+import six
from teuthology.orchestra import run
from teuthology.task import proc_thrasher
remotes = []
for role in config.get('clients', ['client.0']):
- assert isinstance(role, basestring)
+ assert isinstance(role, six.string_types)
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
import os
import re
-from tasks.util import get_remote_for_role
-from tasks.util.workunit import get_refspec_after_overrides
+import six
+
+from util import get_remote_for_role
+from util.workunit import get_refspec_after_overrides
from teuthology import misc
from teuthology.config import config as teuth_config
# Create scratch dirs for any non-all workunits
log.info('Making a separate scratch dir for every client...')
for role in clients.keys():
- assert isinstance(role, basestring)
+ assert isinstance(role, six.string_types)
if role == "all":
continue
to False is passed, the 'timeout' command is not used.
"""
testdir = misc.get_testdir(ctx)
- assert isinstance(role, basestring)
+ assert isinstance(role, six.string_types)
cluster, type_, id_ = misc.split_role(role)
assert type_ == 'client'
remote = get_remote_for_role(ctx, role)
[tox]
-envlist = flake8
+envlist = flake8-py2, flake8-py3
skipsdist = True
-[testenv:flake8]
+[testenv:flake8-py2]
+basepython = python2
+deps=
+ flake8
+commands=flake8 --select=F,E9 --exclude=venv,.tox
+
+[testenv:flake8-py3]
+basepython = python3
deps=
flake8
commands=flake8 --select=F,E9 --exclude=venv,.tox
import io
import re
+import six
from ceph_argparse import * # noqa
if isinstance(cmd, list):
self.cmd = ' '.join(cmd)
else:
- assert isinstance(cmd, str) or isinstance(cmd, unicode), \
+ assert isinstance(cmd, str) or isinstance(cmd, six.text_type), \
'cmd needs to be either a list or a str'
self.cmd = cmd
self.cmd = str(self.cmd)
def call(cmd):
if isinstance(cmd, list):
args = cmd
- elif isinstance(cmd, str) or isinstance(cmd, unicode):
+ elif isinstance(cmd, str) or isinstance(cmd, six.text_type):
args = shlex.split(cmd)
else:
assert False, 'cmd is not a string/unicode nor a list!'
'expected result doesn\'t match and no exception was thrown!'
with io.open(out_file, mode) as file:
- file.write(unicode(p.stdout.read()))
+ file.write(six.text_type(p.stdout.read()))
return p
self.args = []
for s in j['sig']:
if not isinstance(s, dict):
- assert isinstance(s, str) or isinstance(s,unicode), \
+ assert isinstance(s, str) or isinstance(s, six.text_type), \
'malformatted signature cid {0}: {1}\n{2}'.format(cid,s,j)
if len(self.sig) > 0:
self.sig += ' '
import json
import shlex
import subprocess
-import sys
-if sys.version_info[0] == 2:
- string = basestring
- unicode = unicode
-elif sys.version_info[0] == 3:
- string = str
- unicode = str
+import six
class UnexpectedReturn(Exception):
if isinstance(cmd, list):
self.cmd = ' '.join(cmd)
else:
- assert isinstance(cmd, string) or isinstance(cmd, unicode), \
+ assert isinstance(cmd, six.string_types) or isinstance(cmd, six.text_type), \
'cmd needs to be either a list or a str'
self.cmd = cmd
self.cmd = str(self.cmd)
def call(cmd):
if isinstance(cmd, list):
args = cmd
- elif isinstance(cmd, string) or isinstance(cmd, unicode):
+ elif isinstance(cmd, six.string_types) or isinstance(cmd, six.text_type):
args = shlex.split(cmd)
else:
assert False, 'cmd is not a string/unicode nor a list!'
keys = []
values = []
- for x in xrange(20000):
+ for x in range(20000):
keys.append(str(x))
values.append(buffer)
import sys
if sys.version_info[0] == 2:
- range = xrange
+ range = xrange # noqa
elif sys.version_info[0] == 3:
range = range