ctx.cluster.only(ceph_admin).run(
args=[
'git', 'clone', '-b', ceph_deploy_branch,
- 'git://ceph.com/ceph-deploy.git',
+                teuth_config.ceph_git_base_url + 'ceph-deploy.git',
'{tdir}/ceph-deploy'.format(tdir=testdir),
],
)
args=[
'git', 'clone',
'-b', branch,
-# 'https://github.com/ceph/s3-tests.git',
- 'git://ceph.com/git/s3-tests.git',
+                teuth_config.ceph_git_base_url + 's3-tests.git',
'{tdir}/s3-tests'.format(tdir=testdir),
],
)
users = {'s3': 'foo'}
cached_client_user_names = dict()
for client in config['clients']:
- cached_client_user_names[client] = dict()
+ cached_client_user_names[client] = dict()
s3tests_conf = config['s3tests_conf'][client]
s3tests_conf.setdefault('readwrite', {})
s3tests_conf['readwrite'].setdefault('bucket', 'rwtest-' + client + '-{random}-')
rwconf['files'].setdefault('stddev', 500)
for section, user in users.iteritems():
_config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client))
- log.debug('creating user {user} on {client}'.format(user=s3tests_conf[section]['user_id'],
+ log.debug('creating user {user} on {client}'.format(user=s3tests_conf[section]['user_id'],
client=client))
- # stash the 'delete_user' flag along with user name for easier cleanup
+ # stash the 'delete_user' flag along with user name for easier cleanup
delete_this_user = True
if 'delete_user' in s3tests_conf['s3']:
- delete_this_user = s3tests_conf['s3']['delete_user']
+ delete_this_user = s3tests_conf['s3']['delete_user']
log.debug('delete_user set to {flag} for {client}'.format(flag=delete_this_user,client=client))
cached_client_user_names[client][section+user] = (s3tests_conf[section]['user_id'], delete_this_user)
from teuthology import misc as teuthology
from teuthology import contextutil
+from ..config import config as teuth_config
from ..orchestra import run
from ..orchestra.connection import split_user
ctx.cluster.only(client).run(
args=[
'git', 'clone',
-# 'https://github.com/ceph/s3-tests.git',
- 'git://ceph.com/git/s3-tests.git',
+                teuth_config.ceph_git_base_url + 's3-tests.git',
'{tdir}/s3-tests'.format(tdir=testdir),
],
)
return_dict = None
client = ctx.ceph.conf.get(client_name, None)
if client:
- current_client_zone = client.get('rgw zone', None)
+ current_client_zone = client.get('rgw zone', None)
if current_client_zone:
(endpoint_host, endpoint_port) = ctx.rgw.role_endpoints.get(client_name,(None,None))
# pull out the radosgw_agent stuff
return_dict['port'] = endpoint_port
return_dict['host'] = endpoint_host
- # The s3tests expect the sync_agent_[addr|port} to be
+ # The s3tests expect the sync_agent_[addr|port} to be
# set on the non-master node for some reason
if not region_data['is master']:
(rgwagent_host,rgwagent_port) = ctx.radosgw_agent.endpoint
# we'll assume that there's only one sync relationship (source / destination) with client.X
# as the key for now
- # Iterate through all of the radosgw_agent (rgwa) configs and see if a
- # given client is involved in a relationship.
- # If a given client isn't, skip it
+ # Iterate through all of the radosgw_agent (rgwa) configs and see if a
+ # given client is involved in a relationship.
+ # If a given client isn't, skip it
this_client_in_rgwa_config = False
for rgwa in ctx.radosgw_agent.config.keys():
rgwa_data = ctx.radosgw_agent.config[rgwa]
args=[
'git', 'clone',
'-b', branch,
-# 'https://github.com/ceph/s3-tests.git',
- 'git://ceph.com/git/s3-tests.git',
+                teuth_config.ceph_git_base_url + 's3-tests.git',
'{tdir}/s3-tests'.format(tdir=testdir),
],
)
ctx.cluster.only(client).run(
args=[
'git', 'clone',
- 'git://ceph.com/git/swift.git',
+                teuth_config.ceph_git_base_url + 'swift.git',
'{tdir}/swift'.format(tdir=testdir),
],
)