From: Warren Usui
Date: Sat, 12 Oct 2013 08:28:27 +0000 (-0700)
Subject: Added docstrings, and improved some of the comments on several tasks.
X-Git-Tag: v0.94.10~27^2^2~364^2~498^2
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=a1d8225b7d95a4631c1c6b650c4fb86e72c91fba;p=ceph.git

Added docstrings, and improved some of the comments on several tasks.
---

diff --git a/teuthology/task/admin_socket.py b/teuthology/task/admin_socket.py
index dbb7a17cefcc5..20a670122a9b6 100644
--- a/teuthology/task/admin_socket.py
+++ b/teuthology/task/admin_socket.py
@@ -1,3 +1,6 @@
+"""
+Admin Socket task -- used in rados, powercycle, and smoke testing
+"""
 from cStringIO import StringIO
 import json
@@ -11,6 +14,7 @@ from teuthology.parallel import parallel
 
 log = logging.getLogger(__name__)
 
+
 def task(ctx, config):
     """
     Run an admin socket command, make sure the output is json, and run
@@ -54,18 +58,30 @@ def task(ctx, config):
     Note that there must be a ceph client with an admin socket running
     before this task is run. The tests are parallelized at the client
     level. Tests for a single client are run serially.
+
+    :param ctx: Context
+    :param config: Configuration
     """
     assert isinstance(config, dict), \
         'admin_socket task requires a dict for configuration'
     teuthology.replace_all_with_clients(ctx.cluster, config)
 
-    with parallel() as p:
+    with parallel() as ptask:
         for client, tests in config.iteritems():
-            p.spawn(_run_tests, ctx, client, tests)
+            ptask.spawn(_run_tests, ctx, client, tests)
+
 
 def _socket_command(ctx, remote, socket_path, command, args):
     """
     Run an admin socket command and return the result as a string.
+
+    :param ctx: Context
+    :param remote: Remote site
+    :param socket_path: path to socket
+    :param command: command to be run remotely
+    :param args: command arguments
+
+    :returns: output of command in json format
     """
     json_fp = StringIO()
     testdir = teuthology.get_testdir(ctx)
@@ -87,7 +103,8 @@ def _socket_command(ctx, remote, socket_path, command, args):
             break
         assert max_tries > 0
         max_tries -= 1
-        log.info('ceph cli returned an error, command not registered yet? sleeping and retrying ...')
+        log.info('ceph cli returned an error, command not registered yet?')
+        log.info('sleeping and retrying ...')
         time.sleep(1)
     out = json_fp.getvalue()
     json_fp.close()
@@ -95,6 +112,15 @@ def _socket_command(ctx, remote, socket_path, command, args):
     return json.loads(out)
 
 def _run_tests(ctx, client, tests):
+    """
+    Create a temp directory and wait for a client socket to be created.
+    For each test, copy the executable locally and run the test.
+    Remove temp directory when finished.
+
+    :param ctx: Context
+    :param client: client machine to run the test
+    :param tests: list of tests to run
+    """
     testdir = teuthology.get_testdir(ctx)
     log.debug('Running admin socket tests on %s', client)
     (remote,) = ctx.cluster.only(client).remotes.iterkeys()
@@ -164,4 +190,3 @@ def _run_tests(ctx, client, tests):
             'rm', '-rf', '--', tmp_dir,
         ],
     )
-
diff --git a/teuthology/task/autotest.py b/teuthology/task/autotest.py
index d1b22c6a4fd9c..24a7675df277f 100644
--- a/teuthology/task/autotest.py
+++ b/teuthology/task/autotest.py
@@ -1,3 +1,6 @@
+"""
+Run an autotest test on the ceph cluster.
+"""
 import json
 import logging
 import os
@@ -70,6 +73,9 @@ def task(ctx, config):
             p.spawn(_run_tests, testdir, remote, role, tests)
 
 def _download(testdir, remote):
+    """
+    Download the autotest suite. Does not explicitly support multiple tasks in a single run.
+ """ remote.run( args=[ # explicitly does not support multiple autotest tasks @@ -94,6 +100,9 @@ def _download(testdir, remote): ) def _run_tests(testdir, remote, role, tests): + """ + Spawned to run test on remote site + """ assert isinstance(role, basestring) PREFIX = 'client.' assert role.startswith(PREFIX) diff --git a/teuthology/task/ceph-deploy.py b/teuthology/task/ceph-deploy.py index 1e6427162debb..7ada79f7402ae 100644 --- a/teuthology/task/ceph-deploy.py +++ b/teuthology/task/ceph-deploy.py @@ -1,3 +1,6 @@ +""" +Execute ceph-deploy as a task +""" from cStringIO import StringIO import contextlib @@ -82,6 +85,7 @@ def is_healthy(ctx, config): time.sleep(1) def get_nodes_using_roles(ctx, config, role): + """Extract the names of nodes that match a given role from a cluster""" newl = [] for _remote, roles_for_host in ctx.cluster.remotes.iteritems(): for id_ in teuthology.roles_of_type(roles_for_host, role): @@ -95,6 +99,7 @@ def get_nodes_using_roles(ctx, config, role): return newl def get_dev_for_osd(ctx, config): + """Get a list of all osd device names.""" osd_devs = [] for remote, roles_for_host in ctx.cluster.remotes.iteritems(): host = remote.name.split('@')[-1] @@ -109,6 +114,7 @@ def get_dev_for_osd(ctx, config): return osd_devs def get_all_nodes(ctx, config): + """Return a string of node names separated by blanks""" nodelist = [] for t, k in ctx.config['targets'].iteritems(): host = t.split('@')[-1] @@ -118,6 +124,7 @@ def get_all_nodes(ctx, config): return nodelist def execute_ceph_deploy(ctx, config, cmd): + """Remotely execute a ceph_deploy command""" testdir = teuthology.get_testdir(ctx) ceph_admin = teuthology.get_first_mon(ctx, config) exec_cmd = cmd @@ -136,6 +143,7 @@ def execute_ceph_deploy(ctx, config, cmd): @contextlib.contextmanager def build_ceph_cluster(ctx, config): + """Build a ceph cluster""" log.info('Building ceph cluster using ceph-deploy...') testdir = teuthology.get_testdir(ctx) ceph_branch = None @@ -178,12 +186,12 @@ def build_ceph_cluster(ctx, config): if config.get('conf') is not None: confp = config.get('conf') for section, keys in confp.iteritems(): - lines = '[{section}]\n'.format(section=section) + lines = '[{section}]\n'.format(section=section) + teuthology.append_lines_to_file(remote, conf_path, lines, sudo=True) + for key, value in keys.iteritems(): + log.info("[%s] %s = %s" % (section, key, value)) + lines = '{key} = {value}\n'.format(key=key, value=value) teuthology.append_lines_to_file(remote, conf_path, lines, sudo=True) - for key, value in keys.iteritems(): - log.info("[%s] %s = %s" % (section, key, value)) - lines = '{key} = {value}\n'.format(key=key, value=value) - teuthology.append_lines_to_file(remote, conf_path, lines, sudo=True) estatus_install = execute_ceph_deploy(ctx, config, install_nodes) if estatus_install != 0: diff --git a/teuthology/task/ceph-fuse.py b/teuthology/task/ceph-fuse.py index eeff1c0e28541..6031313a45523 100644 --- a/teuthology/task/ceph-fuse.py +++ b/teuthology/task/ceph-fuse.py @@ -1,3 +1,6 @@ +""" +Ceph FUSE client task +""" import contextlib import logging import os @@ -41,6 +44,8 @@ def task(ctx, config): valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes] - interactive: + :param ctx: Context + :param config: Configuration """ log.info('Mounting ceph-fuse clients...') fuse_daemons = {} diff --git a/teuthology/task/ceph.py b/teuthology/task/ceph.py index 2337b0e6443f6..dcab9731033aa 100644 --- a/teuthology/task/ceph.py +++ b/teuthology/task/ceph.py @@ -1,3 +1,8 @@ +""" +Ceph cluster task. 
+
+Handle the setup, starting, and clean-up of a Ceph cluster.
+"""
 from cStringIO import StringIO
 
 import argparse
@@ -14,7 +19,19 @@ import ceph_client as cclient
 log = logging.getLogger(__name__)
 
 class DaemonState(object):
+    """
+    Daemon State. A daemon exists for each instance of each role.
+    """
     def __init__(self, remote, role, id_, *command_args, **command_kwargs):
+        """
+        Pass remote command information as parameters to remote site
+
+        :param remote: Remote site
+        :param role: Role (osd, rgw, mon, mds)
+        :param id_: Id within role (osd.1, osd.2, for example)
+        :param command_args: positional arguments (used in restart commands)
+        :param command_kwargs: keyword arguments (used in restart commands)
+        """
         self.remote = remote
         self.command_args = command_args
         self.command_kwargs = command_kwargs
@@ -25,6 +42,8 @@ class DaemonState(object):
 
     def stop(self):
         """
+        Stop this daemon instance.
+
         Note: this can raise a run.CommandFailedError,
         run.CommandCrashedError, or run.ConnectionLostError.
         """
@@ -38,6 +57,12 @@ class DaemonState(object):
         self.log.info('Stopped')
 
     def restart(self, *args, **kwargs):
+        """
+        Restart with a new command passed in the arguments
+
+        :param args: positional arguments passed to remote.run
+        :param kwargs: keyword arguments passed to remote.run
+        """
         self.log.info('Restarting')
         if self.proc is not None:
             self.log.debug('stopping old one...')
@@ -50,6 +75,11 @@ class DaemonState(object):
         self.log.info('Started')
 
     def restart_with_args(self, extra_args):
+        """
+        Restart, adding new parameters to the current command.
+
+        :param extra_args: Extra keyword arguments to be added.
+        """
         self.log.info('Restarting')
         if self.proc is not None:
             self.log.debug('stopping old one...')
@@ -65,25 +95,57 @@ class DaemonState(object):
         self.log.info('Started')
 
     def signal(self, sig):
+        """
+        Send a signal to the associated remote command
+
+        :param sig: signal to send
+        """
         self.proc.stdin.write(struct.pack('!b', sig))
         self.log.info('Sent signal %d', sig)
 
     def running(self):
+        """
+        Are we running?
+        :return: True if remote run command value is set, False otherwise.
+        """
         return self.proc is not None
 
     def reset(self):
+        """
+        Clear the remote run command value.
+        """
         self.proc = None
 
     def wait_for_exit(self):
+        """
+        Clear the remote run command value after waiting for exit.
+        """
         if self.proc:
             run.wait([self.proc])
             self.proc = None
 
 class CephState(object):
+    """
+    Collection of daemon state instances
+    """
     def __init__(self):
+        """
+        self.daemons is a dictionary indexed by role. Each entry is a
+        dictionary of DaemonState values indexed by an id parameter.
+        """
         self.daemons = {}
 
     def add_daemon(self, remote, role, id_, *args, **kwargs):
+        """
+        Add a daemon. If there already is a daemon for this id_ and role,
+        stop that daemon and restart it once the new value is set.
+
+        :param remote: Remote site
+        :param role: Role (osd, mds, mon, rgw, for example)
+        :param id_: Id (index into role dictionary)
+        :param args: DaemonState positional parameters
+        :param kwargs: DaemonState keyword parameters
+        """
         if role not in self.daemons:
             self.daemons[role] = {}
         if id_ in self.daemons[role]:
@@ -93,16 +155,35 @@ class CephState(object):
         self.daemons[role][id_].restart()
 
     def get_daemon(self, role, id_):
+        """
+        Get the daemon associated with this id_ for this role.
+
+        :param role: Role (osd, mds, mon, rgw, for example)
+        :param id_: Id (index into role dictionary)
+        """
         if role not in self.daemons:
             return None
         return self.daemons[role].get(str(id_), None)
 
     def iter_daemons_of_role(self, role):
+        """
+        Iterate through all daemon instances for this role. Returns a list
+        of the daemon values.
+
+        :param role: Role (osd, mds, mon, rgw, for example)
+        """
         return self.daemons.get(role, {}).values()
 
 @contextlib.contextmanager
 def ceph_log(ctx, config):
+    """
+    Create /var/log/ceph log directory that is open to everyone.
+    Add valgrind and profiling-logger directories.
+
+    :param ctx: Context
+    :param config: Configuration
+    """
     log.info('Making ceph log dir writeable by non-root...')
     run.wait(
         ctx.cluster.run(
@@ -148,6 +229,14 @@ def ceph_log(ctx, config):
 
 @contextlib.contextmanager
 def ship_utilities(ctx, config):
+    """
+    Write a copy of valgrind.supp to each of the remote sites. Set
+    executables used by Ceph in /usr/local/bin. When finished (upon exit of
+    the teuthology run), remove these files.
+
+    :param ctx: Context
+    :param config: Configuration
+    """
     assert config is None
     testdir = teuthology.get_testdir(ctx)
     filenames = []
@@ -207,10 +296,25 @@ def ship_utilities(ctx, config):
 
 def assign_devs(roles, devs):
+    """
+    Create a dictionary of devs indexed by roles
+
+    :param roles: List of roles
+    :param devs: Corresponding list of devices.
+    :returns: Dictionary of devs indexed by roles.
+    """
     return dict(zip(roles, devs))
 
 @contextlib.contextmanager
 def valgrind_post(ctx, config):
+    """
+    After the tests run, look through all the valgrind logs. Exceptions are
+    raised if textual errors occurred in the logs, or if valgrind exceptions
+    were detected in the logs.
+
+    :param ctx: Context
+    :param config: Configuration
+    """
     try:
         yield
     finally:
@@ -259,6 +363,13 @@ def valgrind_post(ctx, config):
 
 def mount_osd_data(ctx, remote, osd):
+    """
+    Mount a remote OSD
+
+    :param ctx: Context
+    :param remote: Remote site
+    :param osd: Osd name
+    """
     log.debug('Mounting data for osd.{o} on {r}'.format(o=osd, r=remote))
     if remote in ctx.disk_config.remote_to_roles_to_dev and osd in ctx.disk_config.remote_to_roles_to_dev[remote]:
         dev = ctx.disk_config.remote_to_roles_to_dev[remote][osd]
@@ -281,6 +392,12 @@ def mount_osd_data(ctx, remote, osd):
         )
 
 def make_admin_daemon_dir(ctx, remote):
+    """
+    Create /var/run/ceph directory on remote site.
+
+    :param ctx: Context
+    :param remote: Remote site
+    """
     remote.run(
         args=[
             'sudo',
@@ -290,6 +407,29 @@ def make_admin_daemon_dir(ctx, remote):
 
 @contextlib.contextmanager
 def cluster(ctx, config):
+    """
+    Handle the creation and removal of a ceph cluster.
+
+    On startup:
+        Create directories needed for the cluster.
+        Create remote journals for all osds.
+        Create and set keyring.
+        Copy the monmap to the test systems.
+        Setup mon nodes.
+        Setup mds nodes.
+        Mkfs osd nodes.
+        Add keyring information to monmaps
+        Mkfs mon nodes.
+
+    On exit:
+        If errors occurred, extract a failure message and store in ctx.summary.
+        Unmount all test files and temporary journaling files.
+        Save the monitor information and archive all ceph logs.
+        Clean up the keyring setup, and remove all monitor map and data files
+        left over.
+
+    :param ctx: Context
+    :param config: Configuration
+    """
     testdir = teuthology.get_testdir(ctx)
     log.info('Creating ceph cluster...')
     run.wait(
@@ -747,6 +887,14 @@ def cluster(ctx, config):
         log.info('Checking cluster log for badness...')
 
         def first_in_ceph_log(pattern, excludes):
+            """
+            Find the first occurrence of the pattern specified in the
+            Ceph log. Returns None if none found.
+
+            :param pattern: Pattern scanned for.
+            :param excludes: Patterns to ignore.
+            :return: First line of text (or None if not found)
+            """
             args = [
                 'sudo',
                 'egrep', pattern,
@@ -871,6 +1019,16 @@ def cluster(ctx, config):
 
 @contextlib.contextmanager
 def run_daemon(ctx, config, type_):
+    """
+    Run daemons for a role type. Handle the startup and termination of a
+    daemon.
+    On startup -- set coverages, cpu_profile, valgrind values for all remotes,
+    and a max_mds value for one mds.
+    On cleanup -- Stop all existing daemons of this type.
+
+    :param ctx: Context
+    :param config: Configuration
+    :param type_: Role type
+    """
     log.info('Starting %s daemons...' % type_)
     testdir = teuthology.get_testdir(ctx)
     daemons = ctx.cluster.only(teuthology.is_type(type_))
@@ -941,6 +1099,12 @@ def run_daemon(ctx, config, type_):
         teuthology.stop_daemons_of_type(ctx, type_)
 
 def healthy(ctx, config):
+    """
+    Wait for all osds to be up, and for the ceph health monitor to return HEALTH_OK.
+
+    :param ctx: Context
+    :param config: Configuration
+    """
     log.info('Waiting until ceph is healthy...')
     firstmon = teuthology.get_first_mon(ctx, config)
     (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
@@ -955,6 +1119,12 @@ def healthy(ctx, config):
     )
 
 def wait_for_osds_up(ctx, config):
+    """
+    Wait for all osds to come up.
+
+    :param ctx: Context
+    :param config: Configuration
+    """
     log.info('Waiting until ceph osds are all up...')
     firstmon = teuthology.get_first_mon(ctx, config)
     (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys()
@@ -965,6 +1135,12 @@ def wait_for_osds_up(ctx, config):
     )
 
 def wait_for_mon_quorum(ctx, config):
+    """
+    Check remote ceph status until all monitors are up.
+
+    :param ctx: Context
+    :param config: Configuration
+    """
     import json
     import time
 
@@ -990,7 +1166,7 @@ def wait_for_mon_quorum(ctx, config):
 
 @contextlib.contextmanager
 def restart(ctx, config):
-    """
+    """
    restart ceph daemons
 
    For example::
      tasks:
      - ceph.restart: [all]
 
    For example::
      tasks:
      - ceph.restart: [osd.0, mon.1]
 
    or::
 
      tasks:
      - ceph.restart:
          daemons: [osd.0, mon.1]
          wait-for-healthy: false
          wait-for-osds-up: true
 
-    """
-    if config is None:
-        config = {}
-    if isinstance(config, list):
-        config = { 'daemons': config }
-    if 'daemons' not in config:
-        config['daemons'] = []
-    type_daemon = ['mon', 'osd', 'mds', 'rgw']
-    for d in type_daemon:
-        type_ = d
-        for daemon in ctx.daemons.iter_daemons_of_role(type_):
-            config['daemons'].append(type_ + '.' + daemon.id_)
-
-    assert isinstance(config['daemons'], list)
-    daemons = dict.fromkeys(config['daemons'])
-    for i in daemons.keys():
-        type_ = i.split('.', 1)[0]
-        id_ = i.split('.', 1)[1]
-        ctx.daemons.get_daemon(type_, id_).stop()
-        ctx.daemons.get_daemon(type_, id_).restart()
-
-    if config.get('wait-for-healthy', True):
-        healthy(ctx=ctx, config=None)
-    if config.get('wait-for-osds-up', False):
-        wait_for_osds_up(ctx=ctx, config=None)
-    yield
+    :param ctx: Context
+    :param config: Configuration
+    """
+    if config is None:
+        config = {}
+    if isinstance(config, list):
+        config = { 'daemons': config }
+    if 'daemons' not in config:
+        config['daemons'] = []
+    type_daemon = ['mon', 'osd', 'mds', 'rgw']
+    for d in type_daemon:
+        type_ = d
+        for daemon in ctx.daemons.iter_daemons_of_role(type_):
+            config['daemons'].append(type_ + '.' + daemon.id_)
+
+    assert isinstance(config['daemons'], list)
+    daemons = dict.fromkeys(config['daemons'])
+    for i in daemons.keys():
+        type_ = i.split('.', 1)[0]
+        id_ = i.split('.', 1)[1]
+        ctx.daemons.get_daemon(type_, id_).stop()
+        ctx.daemons.get_daemon(type_, id_).restart()
+
+    if config.get('wait-for-healthy', True):
+        healthy(ctx=ctx, config=None)
+    if config.get('wait-for-osds-up', False):
+        wait_for_osds_up(ctx=ctx, config=None)
+    yield
 
 @contextlib.contextmanager
 def task(ctx, config):
@@ -1128,6 +1306,8 @@ def task(ctx, config):
     - ceph:
         log-whitelist: ['foo.*bar', 'bad message']
 
+    :param ctx: Context
+    :param config: Configuration
     """
     if config is None:
         config = {}
diff --git a/teuthology/task/ceph_client.py b/teuthology/task/ceph_client.py
index e2ee59dcaf02e..8935fc8719304 100644
--- a/teuthology/task/ceph_client.py
+++ b/teuthology/task/ceph_client.py
@@ -1,3 +1,6 @@
+"""
+Set up client keyring
+"""
 import logging
 
 from teuthology import misc as teuthology
@@ -6,6 +9,9 @@ from ..orchestra import run
 log = logging.getLogger(__name__)
 
 def create_keyring(ctx):
+    """
+    Set up keyring on remote sites
+    """
     log.info('Setting up client nodes...')
     clients = ctx.cluster.only(teuthology.is_type('client'))
     testdir = teuthology.get_testdir(ctx)
diff --git a/teuthology/task/chef.py b/teuthology/task/chef.py
index e32bb4b1857bc..db793c3939070 100644
--- a/teuthology/task/chef.py
+++ b/teuthology/task/chef.py
@@ -1,3 +1,6 @@
+"""
+Chef-solo task
+"""
 import logging
 
 from ..orchestra import run
diff --git a/teuthology/task/cifs-mount.py b/teuthology/task/cifs-mount.py
index 179738d1fea8a..ac58f31cc0a28 100644
--- a/teuthology/task/cifs-mount.py
+++ b/teuthology/task/cifs-mount.py
@@ -1,3 +1,6 @@
+"""
+Mount cifs clients. Unmount when finished.
+""" import contextlib import logging import os @@ -44,6 +47,9 @@ def task(ctx, config): - cifs-mount: client.0: share: cephfuse + + :param ctx: Context + :param config: Configuration """ log.info('Mounting cifs clients...') @@ -58,7 +64,7 @@ def task(ctx, config): from teuthology.task.samba import get_sambas samba_roles = ['samba.{id_}'.format(id_=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'samba')] sambas = list(get_sambas(ctx=ctx, roles=samba_roles)) - (ip, port) = sambas[0][1].ssh.get_transport().getpeername() + (ip, _) = sambas[0][1].ssh.get_transport().getpeername() log.info('samba ip: {ip}'.format(ip=ip)) for id_, remote in clients: diff --git a/teuthology/task/clock.py b/teuthology/task/clock.py index 15ebf1bdb6c59..d7a26c42cbcc4 100644 --- a/teuthology/task/clock.py +++ b/teuthology/task/clock.py @@ -1,3 +1,6 @@ +""" +Clock synchronizer +""" import logging import contextlib @@ -22,6 +25,8 @@ def task(ctx, config): to sync. + :param ctx: Context + :param config: Configuration """ log.info('Syncing clocks and checking initial clock skew...') @@ -68,6 +73,12 @@ def task(ctx, config): @contextlib.contextmanager def check(ctx, config): + """ + Run ntpdc at the start and the end of the task. + + :param ctx: Context + :param config: Configuration + """ log.info('Checking initial clock skew...') for rem in ctx.cluster.remotes.iterkeys(): rem.run( diff --git a/teuthology/task/cram.py b/teuthology/task/cram.py index 64358d5105e88..05824d26ab0e6 100644 --- a/teuthology/task/cram.py +++ b/teuthology/task/cram.py @@ -1,3 +1,6 @@ +""" +Cram tests +""" import logging import os @@ -34,6 +37,9 @@ def task(ctx, config): - cram: clients: all: [http://ceph.com/qa/test.t] + + :param ctx: Context + :param config: Configuration """ assert isinstance(config, dict) assert 'clients' in config and isinstance(config['clients'], dict), \ @@ -99,6 +105,12 @@ def task(ctx, config): ) def _run_tests(ctx, role): + """ + For each role, check to make sure it's a client, then run the cram on that client + + :param ctx: Context + :param role: Roles + """ assert isinstance(role, basestring) PREFIX = 'client.' assert role.startswith(PREFIX) diff --git a/teuthology/task/die_on_err.py b/teuthology/task/die_on_err.py index 959f4b6b99e30..1dfd37073628d 100644 --- a/teuthology/task/die_on_err.py +++ b/teuthology/task/die_on_err.py @@ -1,3 +1,6 @@ +""" +Raise exceptions on osd coredumps or test err directories +""" import contextlib import logging import time diff --git a/teuthology/task/divergent_priors.py b/teuthology/task/divergent_priors.py index 18aca5b579edf..432614f233cb1 100644 --- a/teuthology/task/divergent_priors.py +++ b/teuthology/task/divergent_priors.py @@ -1,3 +1,6 @@ +""" +Special case divergence test +""" import logging import time diff --git a/teuthology/task/dump_stuck.py b/teuthology/task/dump_stuck.py index f86feb41806b3..9e1780f01565a 100644 --- a/teuthology/task/dump_stuck.py +++ b/teuthology/task/dump_stuck.py @@ -1,3 +1,6 @@ +""" +Dump_stuck command +""" import logging import re import time @@ -9,6 +12,17 @@ from teuthology import misc as teuthology log = logging.getLogger(__name__) def check_stuck(manager, num_inactive, num_unclean, num_stale, timeout=10): + """ + Do checks. Make sure get_stuck_pgs return the right amout of information, then + extract health information from the raw_cluster_cmd and compare the results with + values passed in. This passes if all asserts pass. 
+
+    :param manager: Ceph manager
+    :param num_inactive: number of inactive placement groups that are stuck
+    :param num_unclean: number of unclean placement groups that are stuck
+    :param num_stale: number of stale placement groups that are stuck
+    :param timeout: timeout value for get_stuck_pgs calls
+    """
     inactive = manager.get_stuck_pgs('inactive', timeout)
     assert len(inactive) == num_inactive
     unclean = manager.get_stuck_pgs('unclean', timeout)
@@ -33,6 +47,8 @@ def task(ctx, config):
     """
     Test the dump_stuck command.
 
+    :param ctx: Context
+    :param config: Configuration
     """
     assert config is None, \
         'dump_stuck requires no configuration'
diff --git a/teuthology/task/exec.py b/teuthology/task/exec.py
index 4d384fcb6965d..f951f77a8a379 100644
--- a/teuthology/task/exec.py
+++ b/teuthology/task/exec.py
@@ -1,3 +1,6 @@
+"""
+Execute custom commands
+"""
 import logging
 
 from teuthology import misc as teuthology
@@ -17,6 +20,8 @@ def task(ctx, config):
               - echo 'module ceph +p' > /sys/kernel/debug/dynamic_debug/control
         - interactive:
 
+    :param ctx: Context
+    :param config: Configuration
     """
     log.info('Executing custom commands...')
     assert isinstance(config, dict), "task exec got invalid config"
diff --git a/teuthology/task/filestore_idempotent.py b/teuthology/task/filestore_idempotent.py
index fea790a765ceb..317e9a56fd0c6 100644
--- a/teuthology/task/filestore_idempotent.py
+++ b/teuthology/task/filestore_idempotent.py
@@ -1,3 +1,6 @@
+"""
+Filestore/filejournal handler
+"""
 import logging
 from ..orchestra import run
 import random
@@ -12,6 +15,9 @@ def task(ctx, config):
     Currently this is a kludge; we require the ceph task precedes us just
     so that we get the tarball installed to run the test binary.
+
+    :param ctx: Context
+    :param config: Configuration
     """
     assert config is None or isinstance(config, list) \
         or isinstance(config, dict), \
diff --git a/teuthology/task/interactive.py b/teuthology/task/interactive.py
index debbf51fbe7ed..c0aa7e0a11687 100644
--- a/teuthology/task/interactive.py
+++ b/teuthology/task/interactive.py
@@ -1,3 +1,6 @@
+"""
+Drop into a python shell
+"""
 import code
 import readline
 import rlcompleter
diff --git a/teuthology/task/kclient.py b/teuthology/task/kclient.py
index 9fcec524d9cd9..2229fe07ea1d5 100644
--- a/teuthology/task/kclient.py
+++ b/teuthology/task/kclient.py
@@ -1,3 +1,6 @@
+"""
+Mount/unmount a ``kernel`` client.
+"""
 import contextlib
 import logging
 import os
@@ -30,6 +33,9 @@ def task(ctx, config):
         - ceph-fuse: [client.0]
         - kclient: [client.1]
         - interactive:
+
+    :param ctx: Context
+    :param config: Configuration
     """
     log.info('Mounting kernel clients...')
     assert config is None or isinstance(config, list), \
diff --git a/teuthology/task/kcon_most.py b/teuthology/task/kcon_most.py
index ef5b06bfa9df9..819de3441104a 100644
--- a/teuthology/task/kcon_most.py
+++ b/teuthology/task/kcon_most.py
@@ -1,3 +1,6 @@
+"""
+Most ceph console logging
+"""
 import contextlib
 import logging
 
diff --git a/teuthology/task/kernel.py b/teuthology/task/kernel.py
index ce73f4d54a1a2..9d318055bde7e 100644
--- a/teuthology/task/kernel.py
+++ b/teuthology/task/kernel.py
@@ -1,3 +1,6 @@
+"""
+Kernel installation task
+"""
 from cStringIO import StringIO
 import logging
 
@@ -45,6 +48,9 @@ def normalize_config(ctx, config):
     If config is None or just specifies a version to use,
     it is applied to all nodes.
+
+    :param ctx: Context
+    :param config: Configuration
     """
     if config is None or \
             len(filter(lambda x: x in ['tag', 'branch', 'sha1', 'kdb',
@@ -74,6 +80,12 @@ def normalize_config(ctx, config):
 def _find_arch_and_dist(ctx):
     """
     Return the arch and distro value as a tuple.
+
+    Currently this only returns armv7l on the quantal distro or x86_64
+    on the precise distro
+
+    :param ctx: Context
+    :returns: arch,distro
     """
     info = ctx.config.get('machine_type', 'plana')
     if teuthology.is_arm(info):
@@ -81,6 +93,13 @@ def _find_arch_and_dist(ctx):
     return ('x86_64', 'precise')
 
 def validate_config(ctx, config):
+    """
+    Make sure that all kernels in the list of remote kernels
+    refer to the same kernel.
+
+    :param ctx: Context
+    :param config: Configuration
+    """
     for _, roles_for_host in ctx.cluster.remotes.iteritems():
         kernel = None
         for role in roles_for_host:
@@ -94,17 +113,27 @@ def validate_config(ctx, config):
             del config[role]
 
 def _vsplitter(version):
-    """kernels from Calxeda are named ...ceph-...highbank
-    kernels that we generate named ...-g
+    """Kernels from Calxeda are named ...ceph-...highbank.
+    Kernels that we generate are named ...-g.
 
     This routine finds the text in front of the sha1 that is used by
     need_to_install() to extract information from the kernel name.
-    """
 
+    :param version: Name of the kernel
+    """
     if version.endswith('highbank'):
         return 'ceph-'
     return '-g'
 
 def need_to_install(ctx, role, sha1):
+    """
+    Check to see if we need to install a kernel. Get the version
+    of the currently running kernel, extract the sha1 value from
+    its name, and compare it against the value passed in.
+
+    :param ctx: Context
+    :param role: machine associated with each role
+    :param sha1: sha1 to compare against (used in checking)
+    """
     ret = True
     log.info('Checking kernel version of {role}, want {sha1}...'.format(
         role=role,
@@ -137,6 +166,12 @@ def need_to_install(ctx, role, sha1):
     return ret
 
 def install_firmware(ctx, config):
+    """
+    Fetch the latest firmware from the upstream git repository.
+
+    :param ctx: Context
+    :param config: Configuration
+    """
     linux_firmware_git_upstream = 'git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git'
     uri = teuth_config.linux_firmware_git_url or linux_firmware_git_upstream
     fw_dir = '/lib/firmware/updates'
@@ -183,6 +218,12 @@ def install_firmware(ctx, config):
     )
 
 def download_deb(ctx, config):
+    """
+    Download a Debian kernel and copy the associated linux image.
+
+    :param ctx: Context
+    :param config: Configuration
+    """
     procs = {}
     for role, src in config.iteritems():
         (role_remote,) = ctx.cluster.only(role).remotes.keys()
@@ -239,6 +280,14 @@ def download_deb(ctx, config):
 
 
 def _no_grub_link(in_file, remote, kernel_ver):
+    """
+    Copy and link kernel related files if grub cannot be used
+    (as is the case in Arm kernels)
+
+    :param in_file: kernel file or image file to be copied.
+    :param remote: remote machine
+    :param kernel_ver: kernel version
+    """
     boot1 = '/boot/%s' % in_file
     boot2 = '%s.old' % boot1
     remote.run(
@@ -251,6 +300,17 @@ def _no_grub_link(in_file, remote, kernel_ver):
     )
 
 def install_and_reboot(ctx, config):
+    """
+    Install and reboot the kernel. This mostly performs remote
+    installation operations. The code does check for Arm images
+    and skips grub operations if the kernel is Arm. Otherwise, it
+    extracts kernel titles from submenu entries and makes the appropriate
+    grub calls. The assumptions here are somewhat simplified in that
+    it expects kernel entries to be present under submenu entries.
+
+    :param ctx: Context
+    :param config: Configuration
+    """
     procs = {}
     kernel_title = ''
     for role, src in config.iteritems():
@@ -390,6 +450,13 @@ def install_and_reboot(ctx, config):
         proc.exitstatus.get()
 
 def enable_disable_kdb(ctx, config):
+    """
+    Enable kdb on remote machines in use. Disable on those that are
+    not in use.
+
+    :param ctx: Context
+    :param config: Configuration
+    """
     for role, enable in config.iteritems():
         (role_remote,) = ctx.cluster.only(role).remotes.keys()
         if "mira" in role_remote.name:
@@ -417,6 +484,10 @@ def enable_disable_kdb(ctx, config):
 def wait_for_reboot(ctx, need_install, timeout):
     """
     Loop reconnecting and checking kernel versions until
     they're all correct or the timeout is exceeded.
+
+    :param ctx: Context
+    :param need_install: map of roles to kernels that still need to be installed
+    :param timeout: number of seconds before we time out
     """
     import time
     starttime = time.time()
@@ -484,6 +555,8 @@ def task(ctx, config):
           kernel:
             kdb: true
 
+    :param ctx: Context
+    :param config: Configuration
     """
     assert config is None or isinstance(config, dict), \
         "task kernel only supports a dictionary for configuration"
diff --git a/teuthology/task/knfsd.py b/teuthology/task/knfsd.py
index dc46dac1c08ac..55bb937f6c5cf 100644
--- a/teuthology/task/knfsd.py
+++ b/teuthology/task/knfsd.py
@@ -1,3 +1,6 @@
+"""
+Export/Unexport a ``nfs server`` client.
+"""
 import contextlib
 import logging
 import os
@@ -51,6 +54,8 @@ def task(ctx, config):
             ro,sync,wdelay,hide,nocrossmnt,secure,root_squash,no_all_squash,
             no_subtree_check,secure_locks,acl,anonuid=65534,anongid=65534
 
+    :param ctx: Context
+    :param config: Configuration
     """
     log.info('Exporting nfs server...')
 
@@ -114,7 +119,7 @@ def task(ctx, config):
         """
         Prevent bogus clients from old runs from accessing our export.
         Specify all node addresses for this run.
         """
-        ips = [host for (host, port) in (remote.ssh.get_transport().getpeername() for (remote, roles) in ctx.cluster.remotes.items())]
+        ips = [host for (host, _) in (remote.ssh.get_transport().getpeername() for (remote, roles) in ctx.cluster.remotes.items())]
         for ip in ips:
             args += [ '{ip}:{MNT}'.format(ip=ip, MNT=mnt) ]
 
diff --git a/teuthology/task/localdir.py b/teuthology/task/localdir.py
index 753362554cf48..8a84514651ce0 100644
--- a/teuthology/task/localdir.py
+++ b/teuthology/task/localdir.py
@@ -1,3 +1,6 @@
+"""
+Localdir
+"""
 import contextlib
 import logging
 import os
@@ -24,6 +27,8 @@ def task(ctx, config):
         - localdir: [client.2]
         - interactive:
 
+    :param ctx: Context
+    :param config: Configuration
     """
     log.info('Creating local mnt dirs...')
 
diff --git a/teuthology/task/lockfile.py b/teuthology/task/lockfile.py
index cf76950eb5f33..10ac1e8e91b29 100644
--- a/teuthology/task/lockfile.py
+++ b/teuthology/task/lockfile.py
@@ -1,3 +1,6 @@
+"""
+Locking tests
+"""
 import logging
 import os
 
@@ -37,6 +40,9 @@ def task(ctx, config):
     In the past this test would have failed; there was a bug where waitlocks weren't
     cleaned up if the process failed. More involved scenarios are also possible.
+
+    :param ctx: Context
+    :param config: Configuration
     """
     log.info('Starting lockfile')
     try:
@@ -177,6 +183,9 @@ def task(ctx, config):
 
 # task
 def lock_one(op, ctx):
+    """
+    Perform the individual lock
+    """
     log.debug('spinning up locker with op={op_}'.format(op_=op))
     timeout = None
     proc = None
diff --git a/teuthology/task/locktest.py b/teuthology/task/locktest.py
index 06c8451258042..929ad45043dca 100755
--- a/teuthology/task/locktest.py
+++ b/teuthology/task/locktest.py
@@ -1,3 +1,6 @@
+"""
+locktests
+"""
 import logging
 
 from ..orchestra import run
@@ -22,6 +25,9 @@ def task(ctx, config):
         [client.0, client.1]
 
     This task does not yield; there would be little point.
+
+    :param ctx: Context
+    :param config: Configuration
     """
 
     assert isinstance(config, list)
diff --git a/teuthology/task/lost_unfound.py b/teuthology/task/lost_unfound.py
index d7c0fc3c118c3..700a300bf033e 100644
--- a/teuthology/task/lost_unfound.py
+++ b/teuthology/task/lost_unfound.py
@@ -1,3 +1,6 @@
+"""
+Lost_unfound
+"""
 import logging
 import ceph_manager
 from teuthology import misc as teuthology
@@ -8,6 +11,8 @@ log = logging.getLogger(__name__)
 def task(ctx, config):
     """
     Test handling of lost objects.
+
+    A pretty rigid cluster is brought up and tested by this task
     """
     if config is None:
         config = {}
diff --git a/teuthology/task/manypools.py b/teuthology/task/manypools.py
index cf9d5b54da56c..32b9d562bf46a 100644
--- a/teuthology/task/manypools.py
+++ b/teuthology/task/manypools.py
@@ -1,3 +1,6 @@
+"""
+Force pg creation on all osds
+"""
 from teuthology import misc as teuthology
 from ..orchestra import run
 import logging
diff --git a/teuthology/task/mds_thrash.py b/teuthology/task/mds_thrash.py
index 71b2207abba55..c878648c41137 100644
--- a/teuthology/task/mds_thrash.py
+++ b/teuthology/task/mds_thrash.py
@@ -1,3 +1,6 @@
+"""
+Thrash mds by simulating failures
+"""
 import logging
 import contextlib
 import ceph_manager
@@ -100,13 +103,18 @@ class MDSThrasher:
         self.weight = weight
 
     def log(self, x):
+        """Write data to the logger assigned to this MDSThrasher"""
         self.logger.info(x)
 
     def do_join(self):
+        """Stop the thrasher and wait for its thread to finish"""
         self.stopping = True
         self.thread.get()
 
     def do_thrash(self):
+        """
+        Perform the random thrashing action
+        """
         self.log('starting mds_do_thrash for failure group: ' + ', '.join(['mds.{_id}'.format(_id=_f) for _f in self.failure_group]))
         while not self.stopping:
             delay = self.max_thrash_delay
diff --git a/teuthology/task/mon_recovery.py b/teuthology/task/mon_recovery.py
index 514ea47e53dcd..bfa2cdf78f15d 100644
--- a/teuthology/task/mon_recovery.py
+++ b/teuthology/task/mon_recovery.py
@@ -1,3 +1,6 @@
+"""
+Monitor recovery
+"""
 import logging
 import ceph_manager
 from teuthology import misc as teuthology
diff --git a/teuthology/task/mpi.py b/teuthology/task/mpi.py
index 773ab7645ad20..6d2381ee58e09 100644
--- a/teuthology/task/mpi.py
+++ b/teuthology/task/mpi.py
@@ -1,3 +1,6 @@
+"""
+Start mpi processes (and allow commands to be run inside process)
+"""
 import logging
 
 from teuthology import misc as teuthology
@@ -52,6 +55,8 @@ def task(ctx, config):
           clients:
           - rm -f {testdir}/gmnt
 
+    :param ctx: Context
+    :param config: Configuration
     """
     assert isinstance(config, dict), 'task mpi got invalid config'
     assert 'exec' in config, 'task mpi got invalid config, missing exec'
diff --git a/teuthology/task/multibench.py b/teuthology/task/multibench.py
index 7d0199a44ece0..bc22b470593d4 100644
--- a/teuthology/task/multibench.py
+++ b/teuthology/task/multibench.py
@@ -1,3 +1,6 @@
+"""
+Multibench testing
+"""
 import contextlib
 import logging
 import radosbench
@@ -33,6 +36,7 @@ def task(ctx, config):
         "please list clients to run on"
 
     def run_one(num):
+        """Run one test spawned from gevent"""
         start = time.time()
         benchcontext = copy.copy(config.get('radosbench'))
         iterations = 0
diff --git a/teuthology/task/nfs.py b/teuthology/task/nfs.py
index 52123f577a7d4..72a2981097349 100644
--- a/teuthology/task/nfs.py
+++ b/teuthology/task/nfs.py
@@ -1,3 +1,6 @@
+"""
+Nfs client tester
+"""
 import contextlib
 import logging
 import os
diff --git a/teuthology/task/nop.py b/teuthology/task/nop.py
index caa9deec2ff14..c7b181403f406 100644
--- a/teuthology/task/nop.py
+++ b/teuthology/task/nop.py
@@ -1,3 +1,6 @@
+"""
+Null task
+"""
 def task(ctx, config):
     """
     This task does nothing.
diff --git a/teuthology/task/object_source_down.py b/teuthology/task/object_source_down.py
index 544b88640281a..1696c55214aac 100644
--- a/teuthology/task/object_source_down.py
+++ b/teuthology/task/object_source_down.py
@@ -1,3 +1,6 @@
+"""
+Test Object locations going down
+"""
 import logging
 import ceph_manager
 from teuthology import misc as teuthology
diff --git a/teuthology/task/omapbench.py b/teuthology/task/omapbench.py
index 0e8ca6a4bb00a..7d2535453231a 100644
--- a/teuthology/task/omapbench.py
+++ b/teuthology/task/omapbench.py
@@ -1,3 +1,6 @@
+"""
+Run omapbench executable within teuthology
+"""
 import contextlib
 import logging
 
diff --git a/teuthology/task/osd_backfill.py b/teuthology/task/osd_backfill.py
index e69884876cb97..d80ea22ef2248 100644
--- a/teuthology/task/osd_backfill.py
+++ b/teuthology/task/osd_backfill.py
@@ -1,3 +1,6 @@
+"""
+Osd backfill test
+"""
 import logging
 import ceph_manager
 import time
@@ -8,6 +11,9 @@ log = logging.getLogger(__name__)
 
 
 def rados_start(ctx, remote, cmd):
+    """
+    Run a remote rados command (currently used only to write data)
+    """
     log.info("rados %s" % ' '.join(cmd))
     testdir = teuthology.get_testdir(ctx)
     pre = [
diff --git a/teuthology/task/osd_failsafe_enospc.py b/teuthology/task/osd_failsafe_enospc.py
index 15ae7b5425251..39b5b5c530096 100644
--- a/teuthology/task/osd_failsafe_enospc.py
+++ b/teuthology/task/osd_failsafe_enospc.py
@@ -1,3 +1,6 @@
+"""
+Handle osdfailsafe configuration settings (nearfull ratio and full ratio)
+"""
 from cStringIO import StringIO
 import logging
 import time
diff --git a/teuthology/task/osd_recovery.py b/teuthology/task/osd_recovery.py
index f6d4225da2fb6..1ff17335b15e7 100644
--- a/teuthology/task/osd_recovery.py
+++ b/teuthology/task/osd_recovery.py
@@ -1,3 +1,6 @@
+"""
+osd recovery
+"""
 import logging
 import ceph_manager
 import time
@@ -8,6 +11,9 @@ log = logging.getLogger(__name__)
 
 
 def rados_start(testdir, remote, cmd):
+    """
+    Run a remote rados command (currently used only to write data)
+    """
     log.info("rados %s" % ' '.join(cmd))
     pre = [
         'adjust-ulimits',
diff --git a/teuthology/task/parallel.py b/teuthology/task/parallel.py
index e441e9e010da9..4cfb67879e90f 100644
--- a/teuthology/task/parallel.py
+++ b/teuthology/task/parallel.py
@@ -1,3 +1,6 @@
+"""
+Task to group parallel running tasks
+"""
 import sys
 import logging
 
@@ -28,7 +31,7 @@ def task(ctx, config):
     That is, if the entry is not a dict, we will look it up in the top-level
     config.
 
-    Sequential task and Parallel tasks can be nested.
+    Sequential tasks and Parallel tasks can be nested.
""" log.info('starting parallel...') @@ -40,6 +43,7 @@ def task(ctx, config): p.spawn(_run_spawned, ctx, confg, taskname) def _run_spawned(ctx,config,taskname): + """Run one of the tasks (this runs in parallel with others)""" mgr = {} try: log.info('In parallel, running task %s...' % taskname) diff --git a/teuthology/task/parallel_example.py b/teuthology/task/parallel_example.py index 43fb187cb4257..04babfca9b24e 100644 --- a/teuthology/task/parallel_example.py +++ b/teuthology/task/parallel_example.py @@ -1,3 +1,6 @@ +""" +Parallel contextmanager test +""" import contextlib import logging diff --git a/teuthology/task/peer.py b/teuthology/task/peer.py index 3fb8b4b6f077d..8006c3812ad7a 100644 --- a/teuthology/task/peer.py +++ b/teuthology/task/peer.py @@ -1,3 +1,6 @@ +""" +Peer test (Single test, not much configurable here) +""" import logging import json diff --git a/teuthology/task/pexec.py b/teuthology/task/pexec.py index 9069ef74fa2d9..742ac0010bc22 100644 --- a/teuthology/task/pexec.py +++ b/teuthology/task/pexec.py @@ -1,3 +1,6 @@ +""" +Handle parallel execution on remote hosts +""" import logging from teuthology import misc as teuthology @@ -10,10 +13,11 @@ from gevent import queue as queue from gevent import event as event def _init_barrier(barrier_queue, remote): + """current just queues a remote host""" barrier_queue.put(remote) def _do_barrier(barrier, barrier_queue, remote): - # special case for barrier + """special case for barrier""" barrier_queue.get() if barrier_queue.empty(): barrier.set() @@ -29,6 +33,7 @@ def _do_barrier(barrier, barrier_queue, remote): barrier.wait() def _exec_host(barrier, barrier_queue, remote, sudo, testdir, ls): + """Execute command remotely""" log.info('Running commands on host %s', remote.name) args = [ 'TESTDIR={tdir}'.format(tdir=testdir), @@ -55,6 +60,7 @@ def _exec_host(barrier, barrier_queue, remote, sudo, testdir, ls): tor.wait([r]) def _generate_remotes(ctx, config): + """Return remote roles and the type of role specified in config""" if 'all' in config and len(config) == 1: ls = config['all'] for remote in ctx.cluster.remotes.iterkeys(): diff --git a/teuthology/task/qemu.py b/teuthology/task/qemu.py index 45685015288d4..db93107a9871a 100644 --- a/teuthology/task/qemu.py +++ b/teuthology/task/qemu.py @@ -1,3 +1,6 @@ +""" +Qemu task +""" from cStringIO import StringIO import contextlib @@ -17,6 +20,9 @@ DEFAULT_MEM = 4096 # in megabytes @contextlib.contextmanager def create_dirs(ctx, config): + """ + Handle directory creation and cleanup + """ testdir = teuthology.get_testdir(ctx) for client, client_config in config.iteritems(): assert 'test' in client_config, 'You must specify a test to run' @@ -42,6 +48,7 @@ def create_dirs(ctx, config): @contextlib.contextmanager def generate_iso(ctx, config): + """Execute system commands to generate iso""" log.info('generating iso...') testdir = teuthology.get_testdir(ctx) for client, client_config in config.iteritems(): @@ -120,6 +127,7 @@ def generate_iso(ctx, config): @contextlib.contextmanager def download_image(ctx, config): + """Downland base image, remove image file when done""" log.info('downloading base image') testdir = teuthology.get_testdir(ctx) for client, client_config in config.iteritems(): @@ -148,6 +156,7 @@ def download_image(ctx, config): @contextlib.contextmanager def run_qemu(ctx, config): + """Setup kvm environment and start qemu""" procs = [] testdir = teuthology.get_testdir(ctx) for client, client_config in config.iteritems(): diff --git a/teuthology/task/rados.py 
index 2082e1771778c..8e4f019a52e83 100644
--- a/teuthology/task/rados.py
+++ b/teuthology/task/rados.py
@@ -1,3 +1,6 @@
+"""
+Rados model-based integration tests
+"""
 import contextlib
 import logging
 import gevent
@@ -84,6 +87,7 @@ def task(ctx, config):
             ])
 
     def thread():
+        """Thread spawned by gevent"""
         if not hasattr(ctx, 'manager'):
             first_mon = teuthology.get_first_mon(ctx, config)
             (mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
diff --git a/teuthology/task/radosbench.py b/teuthology/task/radosbench.py
index ca5ace2c7b97d..85e85aaafe8a8 100644
--- a/teuthology/task/radosbench.py
+++ b/teuthology/task/radosbench.py
@@ -1,3 +1,6 @@
+"""
+Rados benchmarking
+"""
 import contextlib
 import logging
 
diff --git a/teuthology/task/radosgw-admin.py b/teuthology/task/radosgw-admin.py
index f91747305a6e6..e5945bf422e5d 100644
--- a/teuthology/task/radosgw-admin.py
+++ b/teuthology/task/radosgw-admin.py
@@ -1,3 +1,6 @@
+"""
+Rgw admin testing against a running instance
+"""
 # The test cases in this file have been annotated for inventory.
 # To extract the inventory (in csv format) use the command:
 #
@@ -24,6 +27,7 @@ log = logging.getLogger(__name__)
 
 
 def successful_ops(out):
+    """Extract total from the first summary entry (presumed to be only one)"""
     summary = out['summary']
     if len(summary) == 0:
         return 0
diff --git a/teuthology/task/radosgw-agent.py b/teuthology/task/radosgw-agent.py
index 014a1b1441349..5b7dcdca1a75b 100644
--- a/teuthology/task/radosgw-agent.py
+++ b/teuthology/task/radosgw-agent.py
@@ -1,3 +1,6 @@
+"""
+Run rados gateway agent in test mode
+"""
 import contextlib
 import logging
 import argparse
diff --git a/teuthology/task/rbd.py b/teuthology/task/rbd.py
index 6164a229a8f73..6e8112732cbb0 100644
--- a/teuthology/task/rbd.py
+++ b/teuthology/task/rbd.py
@@ -1,3 +1,6 @@
+"""
+Rbd testing task
+"""
 import contextlib
 import logging
 import os
@@ -11,6 +14,9 @@ from teuthology.parallel import parallel
 log = logging.getLogger(__name__)
 
 def default_image_name(role):
+    """
+    Currently just appends the role to the 'testimage.' string
+    """
     return 'testimage.{role}'.format(role=role)
 
 @contextlib.contextmanager
@@ -272,6 +278,7 @@ def mount(ctx, config):
     role_images = [(role, None) for role in config]
 
     def strip_client_prefix(role):
+        """Currently just removes 'client.' from start of role name"""
         PREFIX = 'client.'
         assert role.startswith(PREFIX)
         id_ = role[len(PREFIX):]
@@ -325,10 +332,12 @@ def mount(ctx, config):
             ]
         )
 
-# Determine the canonical path for a given path on the host
-# representing the given role. A canonical path contains no
-# . or .. components, and includes no symbolic links.
 def canonical_path(ctx, role, path):
+    """
+    Determine the canonical path for a given path on the host
+    representing the given role. A canonical path contains no
+    . or .. components, and includes no symbolic links.
+ """ version_fp = StringIO() ctx.cluster.only(role).run( args=[ 'readlink', '-f', path ], @@ -374,6 +383,9 @@ def run_xfstests(ctx, config): yield def run_xfstests_one_client(ctx, role, properties): + """ + Spawned routine to handle xfs tests for a single client + """ testdir = teuthology.get_testdir(ctx) try: count = properties.get('count') diff --git a/teuthology/task/rbd_fsx.py b/teuthology/task/rbd_fsx.py index 97d2850877290..f9e05ca7f6a16 100644 --- a/teuthology/task/rbd_fsx.py +++ b/teuthology/task/rbd_fsx.py @@ -1,3 +1,6 @@ +""" +Run fsx on an rbd image +""" import contextlib import logging @@ -38,6 +41,7 @@ def task(ctx, config): yield def _run_one_client(ctx, config, role): + """Spawned task that runs the client""" testdir = teuthology.get_testdir(ctx) (remote,) = ctx.cluster.only(role).remotes.iterkeys() remote.run( diff --git a/teuthology/task/rest-api.py b/teuthology/task/rest-api.py index c7ba628b52a2b..d34d31af6b335 100644 --- a/teuthology/task/rest-api.py +++ b/teuthology/task/rest-api.py @@ -1,3 +1,6 @@ +""" +Rest Api +""" import logging import contextlib import time @@ -12,6 +15,9 @@ log = logging.getLogger(__name__) @contextlib.contextmanager def run_rest_api_daemon(ctx, api_clients): + """ + Wrapper starts the rest api daemons + """ if not hasattr(ctx, 'daemons'): ctx.daemons = CephState() remotes = ctx.cluster.only(teuthology.is_type('client')).remotes diff --git a/teuthology/task/restart.py b/teuthology/task/restart.py index ee90fb20c16f2..87ca2b099e322 100644 --- a/teuthology/task/restart.py +++ b/teuthology/task/restart.py @@ -1,3 +1,6 @@ +""" +Daemon restart +""" import logging import pipes @@ -8,6 +11,9 @@ from ..orchestra import run log = logging.getLogger(__name__) def restart_daemon(ctx, config, role, id_, *args): + """ + Handle restart (including the execution of the command parameters passed) + """ log.info('Restarting {r}.{i} daemon...'.format(r=role, i=id_)) daemon = ctx.daemons.get_daemon(role, id_) log.debug('Waiting for exit of {r}.{i} daemon...'.format(r=role, i=id_)) @@ -24,6 +30,7 @@ def restart_daemon(ctx, config, role, id_, *args): daemon.restart() def get_tests(ctx, config, role, remote, testdir): + """Download restart tests""" srcdir = '{tdir}/restart.{role}'.format(tdir=testdir, role=role) refspec = config.get('branch') diff --git a/teuthology/task/samba.py b/teuthology/task/samba.py index b6281b9617c11..c2e6e6a21747b 100644 --- a/teuthology/task/samba.py +++ b/teuthology/task/samba.py @@ -1,3 +1,6 @@ +""" +Samba +""" import contextlib import logging import sys @@ -8,6 +11,13 @@ from ..orchestra import run log = logging.getLogger(__name__) def get_sambas(ctx, roles): + """ + Scan for roles that are samba. Yield the id of the the samba role + (samba.0, samba.1...) and the associated remote site + + :param ctx: Context + :param roles: roles for this test (extracted from yaml files) + """ for role in roles: assert isinstance(role, basestring) PREFIX = 'samba.' @@ -65,6 +75,8 @@ def task(ctx, config): role, the default behavior is to enable the ceph UNC //localhost/ceph and use the ceph vfs module as the smbd backend. 
+    :param ctx: Context
+    :param config: Configuration
     """
     log.info("Setting up smbd with ceph vfs...")
     assert config is None or isinstance(config, list) or isinstance(config, dict), \
diff --git a/teuthology/task/scrub.py b/teuthology/task/scrub.py
index c1401de6ed063..7a25300a677dc 100644
--- a/teuthology/task/scrub.py
+++ b/teuthology/task/scrub.py
@@ -1,3 +1,6 @@
+"""
+Scrub osds
+"""
 import contextlib
 import gevent
 import logging
 
@@ -59,7 +62,13 @@ def task(ctx, config):
         scrub_proc.do_join()
 
 class Scrubber:
+    """
+    Scrubbing is actually performed during initialization
+    """
     def __init__(self, manager, config):
+        """
+        Spawn the scrubbing thread once the cluster is clean.
+        """
         self.ceph_manager = manager
         self.ceph_manager.wait_for_clean()
 
@@ -72,6 +81,7 @@ class Scrubber:
         else:
 
             def tmp(x):
+                """Local display"""
                 print x
 
             self.log = tmp
@@ -82,10 +92,12 @@ class Scrubber:
         self.thread = gevent.spawn(self.do_scrub)
 
     def do_join(self):
+        """Stop the scrubbing thread and wait for it to finish"""
         self.stopping = True
         self.thread.get()
 
     def do_scrub(self):
+        """Perform the scrub operation"""
         frequency = self.config.get("frequency", 30)
         deep = self.config.get("deep", 0)
 
diff --git a/teuthology/task/scrub_test.py b/teuthology/task/scrub_test.py
index 36967a86e939d..3443ae9f45e92 100644
--- a/teuthology/task/scrub_test.py
+++ b/teuthology/task/scrub_test.py
@@ -1,3 +1,4 @@
+"""Scrub testing"""
 from cStringIO import StringIO
 
 import logging
diff --git a/teuthology/task/sequential.py b/teuthology/task/sequential.py
index 505bb65d3e7e9..690d60f118514 100644
--- a/teuthology/task/sequential.py
+++ b/teuthology/task/sequential.py
@@ -1,3 +1,6 @@
+"""
+Task sequencer
+"""
 import sys
 import logging
 
@@ -28,7 +31,10 @@ def task(ctx, config):
     That is, if the entry is not a dict, we will look it up in the top-level
     config.
 
-    Sequential task and Parallel tasks can be nested.
+    Sequential tasks and Parallel tasks can be nested.
+
+    :param ctx: Context
+    :param config: Configuration
     """
     stack = []
     try:
diff --git a/teuthology/task/sleep.py b/teuthology/task/sleep.py
index cb07a3f35d907..4e36d599b6df5 100644
--- a/teuthology/task/sleep.py
+++ b/teuthology/task/sleep.py
@@ -1,3 +1,6 @@
+"""
+Sleep task
+"""
 import logging
 import time
 
@@ -18,6 +21,8 @@ def task(ctx, config):
             duration: 10
         - interactive:
 
+    :param ctx: Context
+    :param config: Configuration
     """
     if not config:
         config = {}
diff --git a/teuthology/task/tasktest.py b/teuthology/task/tasktest.py
index 4b4155832bcd4..74a12c2f8f7eb 100644
--- a/teuthology/task/tasktest.py
+++ b/teuthology/task/tasktest.py
@@ -1,3 +1,7 @@
+"""
+Parallel and sequential task tester. Not used by any ceph tests, but used to
+unit test the parallel and sequential tasks
+"""
 import logging
 import contextlib
 import time
diff --git a/teuthology/task/thrashosds.py b/teuthology/task/thrashosds.py
index 1702dbdfd5321..075e1f83c75cb 100644
--- a/teuthology/task/thrashosds.py
+++ b/teuthology/task/thrashosds.py
@@ -1,3 +1,6 @@
+"""
+Thrash -- Simulate random osd failures.
+""" import contextlib import logging import ceph_manager diff --git a/teuthology/task/timer.py b/teuthology/task/timer.py index 2a78bba0c0822..d47830f44e531 100644 --- a/teuthology/task/timer.py +++ b/teuthology/task/timer.py @@ -1,3 +1,6 @@ +""" +Timer task +""" import logging import contextlib import datetime diff --git a/teuthology/task/watch_notify_stress.py b/teuthology/task/watch_notify_stress.py index da52ba010fd87..ab611c3dd4a70 100644 --- a/teuthology/task/watch_notify_stress.py +++ b/teuthology/task/watch_notify_stress.py @@ -1,3 +1,6 @@ +""" +test_stress_watch task +""" import contextlib import logging import proc_thrasher diff --git a/teuthology/task/workunit.py b/teuthology/task/workunit.py index e84efb22215d3..3749d4716504e 100644 --- a/teuthology/task/workunit.py +++ b/teuthology/task/workunit.py @@ -1,3 +1,6 @@ +""" +Workunit task -- Run ceph on sets of specific clients +""" import logging import pipes import os @@ -10,7 +13,7 @@ log = logging.getLogger(__name__) def task(ctx, config): """ - Run ceph all workunits found under the specified path. + Run ceph on all workunits found under the specified path. For example:: @@ -48,6 +51,9 @@ def task(ctx, config): env: FOO: bar BAZ: quux + + :param ctx: Context + :param config: Configuration """ assert isinstance(config, dict) assert isinstance(config.get('clients'), dict), \ @@ -100,14 +106,22 @@ def task(ctx, config): PREFIX = 'client.' assert role.startswith(PREFIX) if created_dir_dict[role]: - _delete_dir(ctx, role, config.get('subdir')) + _delete_dir(ctx, role) + +def _delete_dir(ctx, role): + """ + Delete file used by this role, and delete the directory that this + role appeared in. -def _delete_dir(ctx, role, subdir): + :param ctx: Context + :param role: "role.#" where # is used for the role id. + """ PREFIX = 'client.' testdir = teuthology.get_testdir(ctx) id_ = role[len(PREFIX):] (remote,) = ctx.cluster.only(role).remotes.iterkeys() mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) + # Is there any reason why this is not: join(mnt, role) ? client = os.path.join(mnt, 'client.{id}'.format(id=id_)) try: remote.run( @@ -135,6 +149,14 @@ def _delete_dir(ctx, role, subdir): log.exception("Caught an execption deleting dir {dir}".format(dir=mnt)) def _make_scratch_dir(ctx, role, subdir): + """ + Make scratch directories for this role. This also makes the mount + point if that directory does not exist. + + :param ctx: Context + :param role: "role.#" where # is used for the role id. + :param subdir: use this subdir (False if not used) + """ retVal = False PREFIX = 'client.' id_ = role[len(PREFIX):] @@ -200,6 +222,17 @@ def _make_scratch_dir(ctx, role, subdir): return retVal def _spawn_on_all_clients(ctx, refspec, tests, env, subdir): + """ + Make a scratch directory for each client in the cluster, and then for each + test spawn _run_tests for each role. + + :param ctx: Context + :param refspec: branch, sha1, or version tag used to identify this + build + :param tests: specific tests specified. + :param env: evnironment set in yaml file. Could be None. + :param subdir: subdirectory set in yaml file. 
+    """
     client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client')
     client_remotes = list()
     for client in client_generator:
@@ -215,9 +248,21 @@ def _spawn_on_all_clients(ctx, refspec, tests, env, subdir):
     # cleanup the generated client directories
     client_generator = teuthology.all_roles_of_type(ctx.cluster, 'client')
     for client in client_generator:
-        _delete_dir(ctx, 'client.{id}'.format(id=client), subdir)
+        _delete_dir(ctx, 'client.{id}'.format(id=client))
 
 def _run_tests(ctx, refspec, role, tests, env, subdir=None):
+    """
+    Run the individual test. Create a scratch directory and then extract
+    the workunits from the git repository. Make the executables, and then
+    run the tests. Clean up (remove files created) after the tests are
+    finished.
+
+    :param ctx: Context
+    :param refspec: branch, sha1, or version tag used to identify this build
+    :param tests: specific tests specified.
+    :param env: environment set in yaml file. Could be None.
+    :param subdir: subdirectory set in yaml file. Could be None
+    """
     testdir = teuthology.get_testdir(ctx)
     assert isinstance(role, basestring)
     PREFIX = 'client.'