stdout=StringIO(),
)
+
def delete_file(remote, path, sudo=False, force=False):
args = []
    if sudo:
        args.append('sudo')
    args.extend(['rm'])
    if force:
args.extend(['-f'])
args.extend([
- '--',
- path,
- ])
- proc = remote.run(
+ '--',
+ path,
+ ])
+ remote.run(
args=args,
stdout=StringIO(),
)
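
The hunk above drops the unused ``proc =`` binding and keeps the bare ``remote.run()`` call, since only the side effect matters. A rough local sketch of the same argv-building pattern, using ``subprocess`` instead of teuthology's remote runner (the helper name is made up):

import subprocess

def delete_local_file(path, sudo=False, force=False):
    # build the argument vector the same way delete_file does
    args = []
    if sudo:
        args.append('sudo')
    args.append('rm')
    if force:
        args.append('-f')
    args.extend(['--', path])
    # the result is not bound to a name; we run only for the side effect
    subprocess.check_call(args)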
+
def remove_lines_from_file(remote, path, line_is_valid_test, string_to_test_for):
# read in the specified file
in_data = get_file(remote, path, False)
data = proc.stdout.getvalue()
return data
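
For orientation, a minimal local sketch of the read-and-filter idea behind ``remove_lines_from_file``; the predicate semantics here are assumptions for illustration, not taken from the real helper:

def filter_lines(text, line_is_valid_test, string_to_test_for):
    kept = []
    for line in text.split('\n'):
        # keep lines the caller accepts, or lines that do not mention the
        # string being removed (assumed behaviour, for illustration only)
        if line_is_valid_test(line) or string_to_test_for not in line:
            kept.append(line)
    return '\n'.join(kept)

# example: drop comment lines that mention "debug"
cleaned = filter_lines("a=1\n# debug on\nb=2",
                       lambda l: not l.startswith('#'),
                       'debug')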
+
def create_file(remote, path, data="", permissions=str(644), sudo=False):
"""
    Create a file on the remote host.
    """
    args = []
    if sudo:
args.append('sudo')
args.extend([
- 'touch',
- path,
- run.Raw('&&'),
- 'chmod',
- permissions,
- '--',
- path
- ])
- proc = remote.run(
+ 'touch',
+ path,
+ run.Raw('&&'),
+ 'chmod',
+ permissions,
+ '--',
+ path
+ ])
+ remote.run(
args=args,
stdout=StringIO(),
)
if "" != data:
append_lines_to_file(remote, path, data, sudo)
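
A simplified local stand-in for the ``append_lines_to_file`` call above, writing to a local path instead of streaming to the remote host (name and behaviour are assumptions for illustration):

def append_lines_local(path, data):
    # append the payload only when there is something to write, mirroring
    # the ``if "" != data`` guard above
    if data:
        with open(path, 'a') as f:
            f.write(data)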
+
def get_file(remote, path, sudo=False):
"""
Read a file from remote host into memory.
host = ctx.teuthology_config.get('gitbuilder_host',
'gitbuilder.ceph.com')
dist_release = baseparms['dist_release']
- distro_release = baseparms['distro_release']
start_of_url = 'http://{host}/ceph-rpm-{distro_release}-{arch}-{flavor}/{uri}'.format(host=host, **baseparms)
ceph_release = 'ceph-release-{release}.{dist_release}.noarch'.format(
release=RELEASE, dist_release=dist_release)
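
The deleted ``distro_release`` local was never read: ``**baseparms`` already supplies that key to ``format()``. A small self-contained illustration (the dictionary values are made up):

baseparms = {
    'distro_release': 'el6',
    'arch': 'x86_64',
    'flavor': 'basic',
    'uri': 'ref/master',
}
# no separate distro_release variable is needed; format() reads it from
# the keyword-expanded dict
url = 'http://{host}/ceph-rpm-{distro_release}-{arch}-{flavor}/{uri}'.format(
    host='gitbuilder.ceph.com', **baseparms)
print(url)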
log = logging.getLogger(__name__)
+
@contextlib.contextmanager
def task(ctx, config):
"""
for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
clients = list(teuthology.get_clients(ctx=ctx, roles=config))
- testdir = teuthology.get_testdir(ctx)
-
for id_, remote in clients:
# TODO: Don't have to run this more than once per node (remote)
log.info('Enable logging on client.{id} at {remote} ...'.format(
import os
from teuthology import misc as teuthology
-from ..orchestra import run
log = logging.getLogger(__name__)
+
@contextlib.contextmanager
def task(ctx, config):
"""
assert err
# delete should fail because ``key`` still exists
- fails = False
try:
bucket.delete()
except boto.exception.S3ResponseError as e:
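
The test above expects ``bucket.delete()`` to raise while ``key`` still exists. A hedged sketch of that failure path with the boto 2 API (the 409 Conflict check is an assumption about what a caller might verify, not taken from the elided body):

import boto.exception

def bucket_delete_should_fail(bucket):
    # deleting a non-empty bucket is expected to raise; report whether it did
    try:
        bucket.delete()
    except boto.exception.S3ResponseError as e:
        # S3 reports a non-empty bucket as 409 Conflict (assumed here)
        return e.status == 409
    return False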
log = logging.getLogger(__name__)
+
@contextlib.contextmanager
def run_rest_api_daemon(ctx, api_clients):
if not hasattr(ctx, 'daemons'):
ctx.daemons = CephState()
remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
- testdir = teuthology.get_testdir(ctx)
- coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
for rems, roles in remotes.iteritems():
for whole_id_ in roles:
if whole_id_ in api_clients:
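
A plain-dict sketch of the role walk above, matching each host's roles against the clients that should run the REST API (host names and roles are illustrative):

remotes = {'host1': ['mon.a', 'client.0'], 'host2': ['client.1']}
api_clients = ['client.1']

for rem, roles in remotes.items():
    for role in roles:
        if role in api_clients:
            print('would start rest-api for %s on %s' % (role, rem))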
exc_info = sys.exc_info()
while stack:
mgr = stack.pop()
- endr = mgr.__exit__(*exc_info)
+ mgr.__exit__(*exc_info)
finally:
del exc_info
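
The deleted ``endr`` held the return value of ``__exit__`` (which signals exception suppression) but the code never consulted it, hence the removal. A minimal, self-contained sketch of the same unwind pattern:

import sys

class Tracer(object):
    def __init__(self, name):
        self.name = name
    def __enter__(self):
        return self
    def __exit__(self, *exc_info):
        print('exiting %s' % self.name)
        return False  # a true return here would mean "exception suppressed"

stack = []
for name in ('a', 'b', 'c'):
    mgr = Tracer(name)
    mgr.__enter__()
    stack.append(mgr)

try:
    raise RuntimeError('boom')
except RuntimeError:
    exc_info = sys.exc_info()
    # unwind in reverse order, forwarding the captured exception info
    while stack:
        mgr = stack.pop()
        mgr.__exit__(*exc_info)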
remotes = []
- testdir = teuthology.get_testdir(ctx)
-
for role in config.get('clients', ['client.0']):
assert isinstance(role, basestring)
PREFIX = 'client.'
def radosgw_agent_sync_all(ctx):
if ctx.radosgw_agent.procs:
for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- dest_zone = zone_for_client(ctx, agent_client)
+ zone_for_client(ctx, agent_client)
sync_host, sync_port = get_sync_agent(ctx, agent_client)
log.debug('doing a sync via {host1}'.format(host1=sync_host))
radosgw_agent_sync(ctx, sync_host, sync_port)
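
The new line keeps the ``zone_for_client()`` call but discards its result, which only matters if the call has side effects. A tiny, purely illustrative contrast (the helper below is hypothetical):

cache = {}

def zone_lookup(client):
    # hypothetical helper: populating the cache is the side effect
    cache[client] = 'zone-' + client
    return cache[client]

zone_lookup('client.0')          # result ignored, cache still populated
dest = zone_lookup('client.1')   # result used
print(cache)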