From 1634f3e4c24e3200aca816ec2add85457f0795b0 Mon Sep 17 00:00:00 2001
From: Tommi Virtanen
Date: Fri, 3 Jun 2011 09:40:18 -0700
Subject: [PATCH] Move autotest running into a task.

---
 dbench.py                   | 77 +----------------------------
 teuthology/task/autotest.py | 97 +++++++++++++++++++++++++++++++++++++
 2 files changed, 98 insertions(+), 76 deletions(-)
 create mode 100644 teuthology/task/autotest.py

diff --git a/dbench.py b/dbench.py
index 2eaaa48577..7a7fd5cd52 100644
--- a/dbench.py
+++ b/dbench.py
@@ -4,7 +4,6 @@ from orchestra import monkey; monkey.patch_all()
 from cStringIO import StringIO
 
 import bunch
-import json
 import logging
 import os
 import sys
@@ -415,81 +414,6 @@ if __name__ == '__main__':
 
     # TODO rbd
 
-    log.info('Setting up autotest...')
-    run.wait(
-        clients.run(
-            args=[
-                'mkdir', '/tmp/cephtest/autotest',
-                run.Raw('&&'),
-                'wget',
-                '-nv',
-                '--no-check-certificate',
-                'https://github.com/tv42/autotest/tarball/ceph',
-                '-O-',
-                run.Raw('|'),
-                'tar',
-                '-C', '/tmp/cephtest/autotest',
-                '-x',
-                '-z',
-                '-f-',
-                '--strip-components=1',
-                ],
-            wait=False,
-            ),
-        )
-
-    log.info('Making a separate scratch dir for every client...')
-    for idx, roles_for_host in enumerate(ROLES):
-        for id_ in teuthology.roles_of_type(roles_for_host, 'client'):
-            mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
-            scratch = os.path.join(mnt, 'client.{id}'.format(id=id_))
-            run.run(
-                client=connections[idx],
-                args=[
-                    'sudo',
-                    'install',
-                    '-d',
-                    '-m', '0755',
-                    '--owner={user}'.format(user='ubuntu'), #TODO
-                    '--',
-                    scratch,
-                    ],
-                )
-
-    testname = 'dbench' #TODO
-    log.info('Running autotest client test %s...', testname)
-    for id_ in teuthology.all_roles_of_type(ROLES, 'client'):
-        mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
-        scratch = os.path.join(mnt, 'client.{id}'.format(id=id_))
-        tag = '{testname}.client.{id}'.format(
-            testname=testname,
-            id=id_,
-            )
-        control = '/tmp/cephtest/control.{tag}'.format(tag=tag)
-        (rem,) = cluster.only('client.{id}'.format(id=id_)).remotes.keys()
-        teuthology.write_file(
-            remote=rem,
-            path=control,
-            data='import json; data=json.loads({data!r}); job.run_test(**data)'.format(
-                data=json.dumps(dict(
-                        url=testname,
-                        dir=scratch,
-                        # TODO perhaps tag
-                        # results will be in /tmp/cephtest/autotest/client/results/dbench
-                        # or /tmp/cephtest/autotest/client/results/dbench.{tag}
-                        )),
-                ),
-            )
-        rem.run(
-            args=[
-                '/tmp/cephtest/autotest/client/bin/autotest',
-                '--harness=simple',
-                '--tag={tag}'.format(tag=tag),
-                control,
-                run.Raw('3>&1'),
-                ],
-            )
-
     ctx = bunch.Bunch(
         cluster=cluster,
         )
@@ -497,6 +421,7 @@ if __name__ == '__main__':
     run_tasks(
         tasks=[
             {'cfuse': ['client.0']},
+            {'autotest': {'client.0': ['dbench']}},
             {'interactive': None},
             ],
         ctx=ctx,
diff --git a/teuthology/task/autotest.py b/teuthology/task/autotest.py
new file mode 100644
index 0000000000..864ac63e69
--- /dev/null
+++ b/teuthology/task/autotest.py
@@ -0,0 +1,97 @@
+import contextlib
+import json
+import logging
+import os
+
+from teuthology import misc as teuthology
+from orchestra import run
+
+log = logging.getLogger(__name__)
+
+def task(ctx, config):
+    assert isinstance(config, dict)
+
+    log.info('Setting up autotest...')
+    for role in config.iterkeys():
+        # TODO parallelize
+        ctx.cluster.only(role).run(
+            args=[
+                'mkdir', '/tmp/cephtest/autotest',
+                run.Raw('&&'),
+                'wget',
+                '-nv',
+                '--no-check-certificate',
+                'https://github.com/tv42/autotest/tarball/ceph',
+                '-O-',
+                run.Raw('|'),
+                'tar',
+                '-C', '/tmp/cephtest/autotest',
+                '-x',
+                '-z',
+                '-f-',
+                '--strip-components=1',
+                ],
+            )
+
+    log.info('Making a separate scratch dir for every client...')
+    for role in config.iterkeys():
+        assert isinstance(role, basestring)
+        PREFIX = 'client.'
+        assert role.startswith(PREFIX)
+        id_ = role[len(PREFIX):]
+        (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+        mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
+        scratch = os.path.join(mnt, 'client.{id}'.format(id=id_))
+        remote.run(
+            args=[
+                'sudo',
+                'install',
+                '-d',
+                '-m', '0755',
+                '--owner={user}'.format(user='ubuntu'), #TODO
+                '--',
+                scratch,
+                ],
+            )
+
+    # TODO parallelize
+    for role, tests in config.iteritems():
+        assert isinstance(role, basestring)
+        PREFIX = 'client.'
+        assert role.startswith(PREFIX)
+        id_ = role[len(PREFIX):]
+        (remote,) = ctx.cluster.only(role).remotes.iterkeys()
+        mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
+        scratch = os.path.join(mnt, 'client.{id}'.format(id=id_))
+
+        assert isinstance(tests, list)
+        for testname in tests:
+            log.info('Running autotest client test %s...', testname)
+
+            tag = '{testname}.client.{id}'.format(
+                testname=testname,
+                id=id_,
+                )
+            control = '/tmp/cephtest/control.{tag}'.format(tag=tag)
+            teuthology.write_file(
+                remote=remote,
+                path=control,
+                data='import json; data=json.loads({data!r}); job.run_test(**data)'.format(
+                    data=json.dumps(dict(
+                            url=testname,
+                            dir=scratch,
+                            # TODO perhaps tag
+                            # results will be in /tmp/cephtest/autotest/client/results/dbench
+                            # or /tmp/cephtest/autotest/client/results/dbench.{tag}
+                            )),
+                    ),
+                )
+            remote.run(
+                args=[
+                    '/tmp/cephtest/autotest/client/bin/autotest',
+                    '--harness=simple',
+                    '--tag={tag}'.format(tag=tag),
+                    control,
+                    run.Raw('3>&1'),
+                    ],
+                )
-- 
2.39.5
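
Reviewer note, not part of the patch: for anyone who wants to sanity-check the move without a live cluster, the sketch below mirrors the per-test plumbing the new teuthology/task/autotest.py performs for a single (role, test) pair, i.e. derive the per-client scratch directory, build the one-line autotest control file, and assemble the autotest command line. The helper names build_control and autotest_command are purely illustrative and do not exist in the patch, and the 3>&1 redirection that the task passes via run.Raw() is omitted because it only matters on the remote shell.

# Illustrative sketch only -- mirrors the new task's logic for one
# (role, test) pair, runnable locally without orchestra or a cluster.
import json
import os


def build_control(testname, role):
    # Same scratch-dir derivation as the task: /tmp/cephtest/mnt.<id>/client.<id>
    id_ = role[len('client.'):]
    mnt = os.path.join('/tmp/cephtest', 'mnt.{id}'.format(id=id_))
    scratch = os.path.join(mnt, 'client.{id}'.format(id=id_))
    # The control file is one line of Python that autotest's job object executes.
    data = json.dumps(dict(url=testname, dir=scratch))
    return 'import json; data=json.loads({data!r}); job.run_test(**data)'.format(data=data)


def autotest_command(testname, role):
    # Same tag and control-file path scheme as the task.
    id_ = role[len('client.'):]
    tag = '{testname}.client.{id}'.format(testname=testname, id=id_)
    control = '/tmp/cephtest/control.{tag}'.format(tag=tag)
    return [
        '/tmp/cephtest/autotest/client/bin/autotest',
        '--harness=simple',
        '--tag={tag}'.format(tag=tag),
        control,
        ]


if __name__ == '__main__':
    # With the config dbench.py now passes, {'autotest': {'client.0': ['dbench']}}:
    print(build_control('dbench', 'client.0'))
    print(autotest_command('dbench', 'client.0'))

For the dbench/client.0 pair this reproduces the same control-file text and command line that the removed inline code in dbench.py used to generate, which is a quick way to confirm the refactor is behaviour-preserving.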