subtask = 'task'
if '.' in taskname:
(submod, subtask) = taskname.rsplit('.', 1)
+
+ # Teuthology configs may refer to modules like ceph_deploy as ceph-deploy
+ submod = submod.replace('-', '_')
+
parent = __import__('teuthology.task', globals(), locals(), [submod], 0)
mod = getattr(parent, submod)
fn = getattr(mod, subtask)
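For illustration only: the hunk above feeds a lookup that maps a task name from the YAML config to a module under teuthology.task and a callable inside it. A minimal sketch of that resolution with the hyphen normalization applied (the resolve_task helper and the importlib form are illustrative, not part of the patch):

    import importlib

    def resolve_task(taskname):
        # After normalization, 'ceph-deploy' resolves to
        # teuthology.task.ceph_deploy and its task() entry point.
        submod, subtask = taskname, 'task'
        if '.' in taskname:
            submod, subtask = taskname.rsplit('.', 1)
        submod = submod.replace('-', '_')
        mod = importlib.import_module('teuthology.task.' + submod)
        return getattr(mod, subtask)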
+++ /dev/null
-from cStringIO import StringIO
-
-import contextlib
-import os
-import time
-import logging
-
-from teuthology import misc as teuthology
-from teuthology import contextutil
-import ceph as ceph_fn
-from ..orchestra import run
-
-log = logging.getLogger(__name__)
-
-
-@contextlib.contextmanager
-def download_ceph_deploy(ctx, config):
- """
- Downloads ceph-deploy from the ceph.com git mirror and (by default)
- switches to the master branch. If the `ceph-deploy-branch` is specified, it
- will use that instead.
- """
- log.info('Downloading ceph-deploy...')
- testdir = teuthology.get_testdir(ctx)
- ceph_admin = teuthology.get_first_mon(ctx, config)
- default_cd_branch = {'ceph-deploy-branch': 'master'}
- ceph_deploy_branch = config.get(
- 'ceph-deploy',
- default_cd_branch).get('ceph-deploy-branch')
-
- ctx.cluster.only(ceph_admin).run(
- args=[
- 'git', 'clone', '-b', ceph_deploy_branch,
- 'git://ceph.com/ceph-deploy.git',
- '{tdir}/ceph-deploy'.format(tdir=testdir),
- ],
- )
- ctx.cluster.only(ceph_admin).run(
- args=[
- 'cd',
- '{tdir}/ceph-deploy'.format(tdir=testdir),
- run.Raw('&&'),
- './bootstrap',
- ],
- )
-
- try:
- yield
- finally:
- log.info('Removing ceph-deploy ...')
- ctx.cluster.only(ceph_admin).run(
- args=[
- 'rm',
- '-rf',
- '{tdir}/ceph-deploy'.format(tdir=testdir),
- ],
- )
-
-
-def is_healthy(ctx, config):
- """Wait until a Ceph cluster is healthy."""
- testdir = teuthology.get_testdir(ctx)
- ceph_admin = teuthology.get_first_mon(ctx, config)
- (remote,) = ctx.cluster.only(ceph_admin).remotes.keys()
- while True:
- r = remote.run(
- args=[
- 'cd',
- '{tdir}'.format(tdir=testdir),
- run.Raw('&&'),
- 'sudo', 'ceph',
- 'health',
- ],
- stdout=StringIO(),
- logger=log.getChild('health'),
- )
- out = r.stdout.getvalue()
- log.debug('Ceph health: %s', out.rstrip('\n'))
- if out.split(None, 1)[0] == 'HEALTH_OK':
- break
- time.sleep(1)
-
-def get_nodes_using_roles(ctx, config, role):
- newl = []
- for _remote, roles_for_host in ctx.cluster.remotes.iteritems():
- for id_ in teuthology.roles_of_type(roles_for_host, role):
- rem = _remote
- if role == 'mon':
- req1 = str(rem).split('@')[-1]
- else:
- req = str(rem).split('.')[0]
- req1 = str(req).split('@')[1]
- newl.append(req1)
- return newl
-
-def get_dev_for_osd(ctx, config):
- osd_devs = []
- for remote, roles_for_host in ctx.cluster.remotes.iteritems():
- host = remote.name.split('@')[-1]
- shortname = host.split('.')[0]
- devs = teuthology.get_scratch_devices(remote)
- num_osd_per_host = list(teuthology.roles_of_type(roles_for_host, 'osd'))
- num_osds = len(num_osd_per_host)
- assert num_osds <= len(devs), 'fewer disks than osds on ' + shortname
- for dev in devs[:num_osds]:
- dev_short = dev.split('/')[-1]
- osd_devs.append('{host}:{dev}'.format(host=shortname, dev=dev_short))
- return osd_devs
-
-def get_all_nodes(ctx, config):
- nodelist = []
- for t, k in ctx.config['targets'].iteritems():
- host = t.split('@')[-1]
- simple_host = host.split('.')[0]
- nodelist.append(simple_host)
- nodelist = " ".join(nodelist)
- return nodelist
-
-def execute_ceph_deploy(ctx, config, cmd):
- testdir = teuthology.get_testdir(ctx)
- ceph_admin = teuthology.get_first_mon(ctx, config)
- exec_cmd = cmd
- (remote,) = ctx.cluster.only(ceph_admin).remotes.iterkeys()
- proc = remote.run(
- args = [
- 'cd',
- '{tdir}/ceph-deploy'.format(tdir=testdir),
- run.Raw('&&'),
- run.Raw(exec_cmd),
- ],
- check_status=False,
- )
- exitstatus = proc.exitstatus
- return exitstatus
-
-@contextlib.contextmanager
-def build_ceph_cluster(ctx, config):
- log.info('Building ceph cluster using ceph-deploy...')
- testdir = teuthology.get_testdir(ctx)
- ceph_branch = None
- if config.get('branch') is not None:
- cbranch = config.get('branch')
- for var, val in cbranch.iteritems():
- if var == 'testing':
- ceph_branch = '--{var}'.format(var=var)
- ceph_branch = '--{var}={val}'.format(var=var, val=val)
- node_dev_list = []
- all_nodes = get_all_nodes(ctx, config)
- mds_nodes = get_nodes_using_roles(ctx, config, 'mds')
- mds_nodes = " ".join(mds_nodes)
- mon_node = get_nodes_using_roles(ctx, config, 'mon')
- mon_nodes = " ".join(mon_node)
- new_mon = './ceph-deploy new'+" "+mon_nodes
- install_nodes = './ceph-deploy install '+ceph_branch+" "+all_nodes
- purge_nodes = './ceph-deploy purge'+" "+all_nodes
- purgedata_nodes = './ceph-deploy purgedata'+" "+all_nodes
- mon_hostname = mon_nodes.split(' ')[0]
- mon_hostname = str(mon_hostname)
- gather_keys = './ceph-deploy gatherkeys'+" "+mon_hostname
- deploy_mds = './ceph-deploy mds create'+" "+mds_nodes
- no_of_osds = 0
-
- if mon_nodes is None:
- raise RuntimeError("no monitor nodes in the config file")
-
- estatus_new = execute_ceph_deploy(ctx, config, new_mon)
- if estatus_new != 0:
- raise RuntimeError("ceph-deploy: new command failed")
-
- log.info('adding config inputs...')
- testdir = teuthology.get_testdir(ctx)
- conf_path = '{tdir}/ceph-deploy/ceph.conf'.format(tdir=testdir)
- first_mon = teuthology.get_first_mon(ctx, config)
- (remote,) = ctx.cluster.only(first_mon).remotes.keys()
-
- lines = None
- if config.get('conf') is not None:
- confp = config.get('conf')
- for section, keys in confp.iteritems():
- lines = '[{section}]\n'.format(section=section)
- teuthology.append_lines_to_file(remote, conf_path, lines, sudo=True)
- for key, value in keys.iteritems():
- log.info("[%s] %s = %s" % (section, key, value))
- lines = '{key} = {value}\n'.format(key=key, value=value)
- teuthology.append_lines_to_file(remote, conf_path, lines, sudo=True)
-
- estatus_install = execute_ceph_deploy(ctx, config, install_nodes)
- if estatus_install != 0:
- raise RuntimeError("ceph-deploy: Failed to install ceph")
-
- mon_no = None
- mon_no = config.get('mon_initial_members')
- if mon_no is not None:
- i = 0
- mon1 = []
- while(i < mon_no):
- mon1.append(mon_node[i])
- i = i + 1
- initial_mons = " ".join(mon1)
- for k in range(mon_no, len(mon_node)):
- mon_create_nodes = './ceph-deploy mon create'+" "+initial_mons+" "+mon_node[k]
- estatus_mon = execute_ceph_deploy(ctx, config, mon_create_nodes)
- if estatus_mon != 0:
- raise RuntimeError("ceph-deploy: Failed to create monitor")
- else:
- mon_create_nodes = './ceph-deploy mon create'+" "+mon_nodes
- estatus_mon = execute_ceph_deploy(ctx, config, mon_create_nodes)
- if estatus_mon != 0:
- raise RuntimeError("ceph-deploy: Failed to create monitors")
-
- estatus_gather = execute_ceph_deploy(ctx, config, gather_keys)
- while (estatus_gather != 0):
- #mon_create_nodes = './ceph-deploy mon create'+" "+mon_node[0]
- #execute_ceph_deploy(ctx, config, mon_create_nodes)
- estatus_gather = execute_ceph_deploy(ctx, config, gather_keys)
-
- if mds_nodes:
- estatus_mds = execute_ceph_deploy(ctx, config, deploy_mds)
- if estatus_mds != 0:
- raise RuntimeError("ceph-deploy: Failed to deploy mds")
-
- if config.get('test_mon_destroy') is not None:
- for d in range(1, len(mon_node)):
- mon_destroy_nodes = './ceph-deploy mon destroy'+" "+mon_node[d]
- estatus_mon_d = execute_ceph_deploy(ctx, config, mon_destroy_nodes)
- if estatus_mon_d != 0:
- raise RuntimeError("ceph-deploy: Failed to delete monitor")
-
- node_dev_list = get_dev_for_osd(ctx, config)
- for d in node_dev_list:
- osd_create_cmds = './ceph-deploy osd create --zap-disk'+" "+d
- estatus_osd = execute_ceph_deploy(ctx, config, osd_create_cmds)
- if estatus_osd == 0:
- log.info('successfully created osd')
- no_of_osds += 1
- else:
- zap_disk = './ceph-deploy disk zap'+" "+d
- execute_ceph_deploy(ctx, config, zap_disk)
- estatus_osd = execute_ceph_deploy(ctx, config, osd_create_cmds)
- if estatus_osd == 0:
- log.info('successfully created osd')
- no_of_osds += 1
- else:
- raise RuntimeError("ceph-deploy: Failed to create osds")
-
- if config.get('wait-for-healthy', True) and no_of_osds >= 2:
- is_healthy(ctx=ctx, config=None)
-
- log.info('Setting up client nodes...')
- conf_path = '/etc/ceph/ceph.conf'
- admin_keyring_path = '/etc/ceph/ceph.client.admin.keyring'
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon0_remote,) = ctx.cluster.only(first_mon).remotes.keys()
- conf_data = teuthology.get_file(
- remote=mon0_remote,
- path=conf_path,
- sudo=True,
- )
- admin_keyring = teuthology.get_file(
- remote=mon0_remote,
- path=admin_keyring_path,
- sudo=True,
- )
-
- clients = ctx.cluster.only(teuthology.is_type('client'))
- for remot, roles_for_host in clients.remotes.iteritems():
- for id_ in teuthology.roles_of_type(roles_for_host, 'client'):
- client_keyring = '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
- mon0_remote.run(
- args=[
- 'cd',
- '{tdir}'.format(tdir=testdir),
- run.Raw('&&'),
- 'sudo','bash','-c',
- run.Raw('"'),'ceph',
- 'auth',
- 'get-or-create',
- 'client.{id}'.format(id=id_),
- 'mds', 'allow',
- 'mon', 'allow *',
- 'osd', 'allow *',
- run.Raw('>'),
- client_keyring,
- run.Raw('"'),
- ],
- )
- key_data = teuthology.get_file(
- remote=mon0_remote,
- path=client_keyring,
- sudo=True,
- )
- teuthology.sudo_write_file(
- remote=remot,
- path=client_keyring,
- data=key_data,
- perms='0644'
- )
- teuthology.sudo_write_file(
- remote=remot,
- path=admin_keyring_path,
- data=admin_keyring,
- perms='0644'
- )
- teuthology.sudo_write_file(
- remote=remot,
- path=conf_path,
- data=conf_data,
- perms='0644'
- )
- else:
- raise RuntimeError("The cluster is NOT operational due to insufficient OSDs")
-
- try:
- yield
-
- finally:
- log.info('Stopping ceph...')
- ctx.cluster.run(args=[
- 'sudo', 'stop', 'ceph-all',
- run.Raw('||'),
- 'sudo', 'service', 'ceph', 'stop'
- ])
-
- if ctx.archive is not None:
- # archive mon data, too
- log.info('Archiving mon data...')
- path = os.path.join(ctx.archive, 'data')
- os.makedirs(path)
- mons = ctx.cluster.only(teuthology.is_type('mon'))
- for remote, roles in mons.remotes.iteritems():
- for role in roles:
- if role.startswith('mon.'):
- teuthology.pull_directory_tarball(
- remote,
- '/var/lib/ceph/mon',
- path + '/' + role + '.tgz')
-
- log.info('Compressing logs...')
- run.wait(
- ctx.cluster.run(
- args=[
- 'sudo',
- 'find',
- '/var/log/ceph',
- '-name',
- '*.log',
- '-print0',
- run.Raw('|'),
- 'sudo',
- 'xargs',
- '-0',
- '--no-run-if-empty',
- '--',
- 'gzip',
- '--',
- ],
- wait=False,
- ),
- )
-
- log.info('Archiving logs...')
- path = os.path.join(ctx.archive, 'remote')
- os.makedirs(path)
- for remote in ctx.cluster.remotes.iterkeys():
- sub = os.path.join(path, remote.shortname)
- os.makedirs(sub)
- teuthology.pull_directory(remote, '/var/log/ceph',
- os.path.join(sub, 'log'))
-
- log.info('Purging package...')
- execute_ceph_deploy(ctx, config, purge_nodes)
- log.info('Purging data...')
- execute_ceph_deploy(ctx, config, purgedata_nodes)
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Set up and tear down a Ceph cluster.
-
- For example::
-
- tasks:
- - install:
- extras: yes
- - ssh_keys:
- - ceph-deploy:
- branch:
- stable: bobtail
- mon_initial_members: 1
-
- tasks:
- - install:
- extras: yes
- - ssh_keys:
- - ceph-deploy:
- branch:
- dev: master
- conf:
- mon:
- debug mon = 20
-
- tasks:
- - install:
- extras: yes
- - ssh_keys:
- - ceph-deploy:
- branch:
- testing:
- """
- if config is None:
- config = {}
-
- overrides = ctx.config.get('overrides', {})
- teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))
-
- assert isinstance(config, dict), \
- "task ceph-deploy only supports a dictionary for configuration"
-
- overrides = ctx.config.get('overrides', {})
- teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))
-
- if config.get('branch') is not None:
- assert isinstance(config['branch'], dict), 'branch must be a dictionary'
-
- with contextutil.nested(
- lambda: ceph_fn.ship_utilities(ctx=ctx, config=None),
- lambda: download_ceph_deploy(ctx=ctx, config=config),
- lambda: build_ceph_cluster(ctx=ctx, config=dict(
- conf=config.get('conf', {}),
- branch=config.get('branch',{}),
- mon_initial_members=config.get('mon_initial_members', None),
- test_mon_destroy=config.get('test_mon_destroy', None),
- )),
- ):
- yield
+++ /dev/null
-import contextlib
-import logging
-import os
-
-from teuthology import misc as teuthology
-from ..orchestra import run
-
-log = logging.getLogger(__name__)
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Mount/unmount a ``ceph-fuse`` client.
-
- The config is optional and defaults to mounting on all clients. If
- a config is given, it is expected to be a list of clients to do
- this operation on. This lets you e.g. set up one client with
- ``ceph-fuse`` and another with ``kclient``.
-
- Example that mounts all clients::
-
- tasks:
- - ceph:
- - ceph-fuse:
- - interactive:
-
- Example that uses both ``kclient` and ``ceph-fuse``::
-
- tasks:
- - ceph:
- - ceph-fuse: [client.0]
- - kclient: [client.1]
- - interactive:
-
- Example that enables valgrind:
-
- tasks:
- - ceph:
- - ceph-fuse:
- client.0:
- valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
- - interactive:
-
- """
- log.info('Mounting ceph-fuse clients...')
- fuse_daemons = {}
-
- testdir = teuthology.get_testdir(ctx)
-
- if config is None:
- config = dict(('client.{id}'.format(id=id_), None)
- for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
- elif isinstance(config, list):
- config = dict((name, None) for name in config)
-
- overrides = ctx.config.get('overrides', {})
- teuthology.deep_merge(config, overrides.get('ceph-fuse', {}))
-
- clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))
-
- for id_, remote in clients:
- client_config = config.get("client.%s" % id_)
- if client_config is None:
- client_config = {}
- log.info("Client client.%s config is %s" % (id_, client_config))
-
- daemon_signal = 'kill'
- if client_config.get('coverage') or client_config.get('valgrind') is not None:
- daemon_signal = 'term'
-
- mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
- log.info('Mounting ceph-fuse client.{id} at {remote} {mnt}...'.format(
- id=id_, remote=remote,mnt=mnt))
-
- remote.run(
- args=[
- 'mkdir',
- '--',
- mnt,
- ],
- )
-
- run_cmd=[
- 'sudo',
- '{tdir}/adjust-ulimits'.format(tdir=testdir),
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- '{tdir}/daemon-helper'.format(tdir=testdir),
- daemon_signal,
- ]
- run_cmd_tail=[
- 'ceph-fuse',
- '-f',
- '--name', 'client.{id}'.format(id=id_),
- # TODO ceph-fuse doesn't understand dash dash '--',
- mnt,
- ]
-
- if client_config.get('valgrind') is not None:
- run_cmd.extend(
- teuthology.get_valgrind_args(
- testdir,
- 'client.{id}'.format(id=id_),
- client_config.get('valgrind'),
- )
- )
-
- run_cmd.extend(run_cmd_tail)
-
- proc = remote.run(
- args=run_cmd,
- logger=log.getChild('ceph-fuse.{id}'.format(id=id_)),
- stdin=run.PIPE,
- wait=False,
- )
- fuse_daemons[id_] = proc
-
- for id_, remote in clients:
- mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
- teuthology.wait_until_fuse_mounted(
- remote=remote,
- fuse=fuse_daemons[id_],
- mountpoint=mnt,
- )
- remote.run(args=['sudo', 'chmod', '1777', '{tdir}/mnt.{id}'.format(tdir=testdir, id=id_)],)
-
- try:
- yield
- finally:
- log.info('Unmounting ceph-fuse clients...')
- for id_, remote in clients:
- mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
- try:
- remote.run(
- args=[
- 'sudo',
- 'fusermount',
- '-u',
- mnt,
- ],
- )
- except run.CommandFailedError:
- log.info('Failed to unmount ceph-fuse on {name}, aborting...'.format(name=remote.name))
- # abort the fuse mount, killing all hung processes
- remote.run(
- args=[
- 'if', 'test', '-e', '/sys/fs/fuse/connections/*/abort',
- run.Raw(';'), 'then',
- 'echo',
- '1',
- run.Raw('>'),
- run.Raw('/sys/fs/fuse/connections/*/abort'),
- run.Raw(';'), 'fi',
- ],
- )
- # make sure its unmounted
- remote.run(
- args=[
- 'sudo',
- 'umount',
- '-l',
- '-f',
- mnt,
- ],
- )
-
- run.wait(fuse_daemons.itervalues())
-
- for id_, remote in clients:
- mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
- remote.run(
- args=[
- 'rmdir',
- '--',
- mnt,
- ],
- )
--- /dev/null
+from cStringIO import StringIO
+
+import contextlib
+import os
+import time
+import logging
+
+from teuthology import misc as teuthology
+from teuthology import contextutil
+import ceph as ceph_fn
+from ..orchestra import run
+
+log = logging.getLogger(__name__)
+
+
+@contextlib.contextmanager
+def download_ceph_deploy(ctx, config):
+ """
+ Downloads ceph-deploy from the ceph.com git mirror and (by default)
+ checks out the master branch. If ``ceph-deploy-branch`` is specified
+ in the config, that branch is used instead.
+ """
+ log.info('Downloading ceph-deploy...')
+ testdir = teuthology.get_testdir(ctx)
+ ceph_admin = teuthology.get_first_mon(ctx, config)
+ default_cd_branch = {'ceph-deploy-branch': 'master'}
+ ceph_deploy_branch = config.get(
+ 'ceph-deploy',
+ default_cd_branch).get('ceph-deploy-branch')
+
+ ctx.cluster.only(ceph_admin).run(
+ args=[
+ 'git', 'clone', '-b', ceph_deploy_branch,
+ 'git://ceph.com/ceph-deploy.git',
+ '{tdir}/ceph-deploy'.format(tdir=testdir),
+ ],
+ )
+ ctx.cluster.only(ceph_admin).run(
+ args=[
+ 'cd',
+ '{tdir}/ceph-deploy'.format(tdir=testdir),
+ run.Raw('&&'),
+ './bootstrap',
+ ],
+ )
+
+ try:
+ yield
+ finally:
+ log.info('Removing ceph-deploy ...')
+ ctx.cluster.only(ceph_admin).run(
+ args=[
+ 'rm',
+ '-rf',
+ '{tdir}/ceph-deploy'.format(tdir=testdir),
+ ],
+ )
+
+
+def is_healthy(ctx, config):
+ """Wait until a Ceph cluster is healthy."""
+ testdir = teuthology.get_testdir(ctx)
+ ceph_admin = teuthology.get_first_mon(ctx, config)
+ (remote,) = ctx.cluster.only(ceph_admin).remotes.keys()
+ while True:
+ r = remote.run(
+ args=[
+ 'cd',
+ '{tdir}'.format(tdir=testdir),
+ run.Raw('&&'),
+ 'sudo', 'ceph',
+ 'health',
+ ],
+ stdout=StringIO(),
+ logger=log.getChild('health'),
+ )
+ out = r.stdout.getvalue()
+ log.debug('Ceph health: %s', out.rstrip('\n'))
+ if out.split(None, 1)[0] == 'HEALTH_OK':
+ break
+ time.sleep(1)
+
+def get_nodes_using_roles(ctx, config, role):
+ newl = []
+ for _remote, roles_for_host in ctx.cluster.remotes.iteritems():
+ for id_ in teuthology.roles_of_type(roles_for_host, role):
+ rem = _remote
+ if role == 'mon':
+ req1 = str(rem).split('@')[-1]
+ else:
+ req = str(rem).split('.')[0]
+ req1 = str(req).split('@')[1]
+ newl.append(req1)
+ return newl
+
+def get_dev_for_osd(ctx, config):
+ osd_devs = []
+ for remote, roles_for_host in ctx.cluster.remotes.iteritems():
+ host = remote.name.split('@')[-1]
+ shortname = host.split('.')[0]
+ devs = teuthology.get_scratch_devices(remote)
+ num_osd_per_host = list(teuthology.roles_of_type(roles_for_host, 'osd'))
+ num_osds = len(num_osd_per_host)
+ assert num_osds <= len(devs), 'fewer disks than osds on ' + shortname
+ for dev in devs[:num_osds]:
+ dev_short = dev.split('/')[-1]
+ osd_devs.append('{host}:{dev}'.format(host=shortname, dev=dev_short))
+ return osd_devs
+
+def get_all_nodes(ctx, config):
+ nodelist = []
+ for t, k in ctx.config['targets'].iteritems():
+ host = t.split('@')[-1]
+ simple_host = host.split('.')[0]
+ nodelist.append(simple_host)
+ nodelist = " ".join(nodelist)
+ return nodelist
+
+def execute_ceph_deploy(ctx, config, cmd):
+ testdir = teuthology.get_testdir(ctx)
+ ceph_admin = teuthology.get_first_mon(ctx, config)
+ exec_cmd = cmd
+ (remote,) = ctx.cluster.only(ceph_admin).remotes.iterkeys()
+ proc = remote.run(
+ args = [
+ 'cd',
+ '{tdir}/ceph-deploy'.format(tdir=testdir),
+ run.Raw('&&'),
+ run.Raw(exec_cmd),
+ ],
+ check_status=False,
+ )
+ exitstatus = proc.exitstatus
+ return exitstatus
+
+@contextlib.contextmanager
+def build_ceph_cluster(ctx, config):
+ log.info('Building ceph cluster using ceph-deploy...')
+ testdir = teuthology.get_testdir(ctx)
+ ceph_branch = None
+ if config.get('branch') is not None:
+ cbranch = config.get('branch')
+ for var, val in cbranch.iteritems():
+ if var == 'testing':
+ ceph_branch = '--{var}'.format(var=var)
+ else:
+ ceph_branch = '--{var}={val}'.format(var=var, val=val)
+ node_dev_list = []
+ all_nodes = get_all_nodes(ctx, config)
+ mds_nodes = get_nodes_using_roles(ctx, config, 'mds')
+ mds_nodes = " ".join(mds_nodes)
+ mon_node = get_nodes_using_roles(ctx, config, 'mon')
+ mon_nodes = " ".join(mon_node)
+ new_mon = './ceph-deploy new'+" "+mon_nodes
+ install_nodes = './ceph-deploy install '+ceph_branch+" "+all_nodes
+ purge_nodes = './ceph-deploy purge'+" "+all_nodes
+ purgedata_nodes = './ceph-deploy purgedata'+" "+all_nodes
+ mon_hostname = mon_nodes.split(' ')[0]
+ mon_hostname = str(mon_hostname)
+ gather_keys = './ceph-deploy gatherkeys'+" "+mon_hostname
+ deploy_mds = './ceph-deploy mds create'+" "+mds_nodes
+ no_of_osds = 0
+
+ if not mon_nodes:
+ raise RuntimeError("no monitor nodes in the config file")
+
+ estatus_new = execute_ceph_deploy(ctx, config, new_mon)
+ if estatus_new != 0:
+ raise RuntimeError("ceph-deploy: new command failed")
+
+ log.info('adding config inputs...')
+ testdir = teuthology.get_testdir(ctx)
+ conf_path = '{tdir}/ceph-deploy/ceph.conf'.format(tdir=testdir)
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (remote,) = ctx.cluster.only(first_mon).remotes.keys()
+
+ lines = None
+ if config.get('conf') is not None:
+ confp = config.get('conf')
+ for section, keys in confp.iteritems():
+ lines = '[{section}]\n'.format(section=section)
+ teuthology.append_lines_to_file(remote, conf_path, lines, sudo=True)
+ for key, value in keys.iteritems():
+ log.info("[%s] %s = %s" % (section, key, value))
+ lines = '{key} = {value}\n'.format(key=key, value=value)
+ teuthology.append_lines_to_file(remote, conf_path, lines, sudo=True)
+
+ estatus_install = execute_ceph_deploy(ctx, config, install_nodes)
+ if estatus_install != 0:
+ raise RuntimeError("ceph-deploy: Failed to install ceph")
+
+ mon_no = config.get('mon_initial_members')
+ if mon_no is not None:
+ initial_mons = " ".join(mon_node[:mon_no])
+ for k in range(mon_no, len(mon_node)):
+ mon_create_nodes = './ceph-deploy mon create'+" "+initial_mons+" "+mon_node[k]
+ estatus_mon = execute_ceph_deploy(ctx, config, mon_create_nodes)
+ if estatus_mon != 0:
+ raise RuntimeError("ceph-deploy: Failed to create monitor")
+ else:
+ mon_create_nodes = './ceph-deploy mon create'+" "+mon_nodes
+ estatus_mon = execute_ceph_deploy(ctx, config, mon_create_nodes)
+ if estatus_mon != 0:
+ raise RuntimeError("ceph-deploy: Failed to create monitors")
+
+ estatus_gather = execute_ceph_deploy(ctx, config, gather_keys)
+ while estatus_gather != 0:
+ estatus_gather = execute_ceph_deploy(ctx, config, gather_keys)
+
+ if mds_nodes:
+ estatus_mds = execute_ceph_deploy(ctx, config, deploy_mds)
+ if estatus_mds != 0:
+ raise RuntimeError("ceph-deploy: Failed to deploy mds")
+
+ if config.get('test_mon_destroy') is not None:
+ for d in range(1, len(mon_node)):
+ mon_destroy_nodes = './ceph-deploy mon destroy'+" "+mon_node[d]
+ estatus_mon_d = execute_ceph_deploy(ctx, config, mon_destroy_nodes)
+ if estatus_mon_d != 0:
+ raise RuntimeError("ceph-deploy: Failed to delete monitor")
+
+ node_dev_list = get_dev_for_osd(ctx, config)
+ for d in node_dev_list:
+ osd_create_cmds = './ceph-deploy osd create --zap-disk'+" "+d
+ estatus_osd = execute_ceph_deploy(ctx, config, osd_create_cmds)
+ if estatus_osd == 0:
+ log.info('successfully created osd')
+ no_of_osds += 1
+ else:
+ zap_disk = './ceph-deploy disk zap'+" "+d
+ execute_ceph_deploy(ctx, config, zap_disk)
+ estatus_osd = execute_ceph_deploy(ctx, config, osd_create_cmds)
+ if estatus_osd == 0:
+ log.info('successfully created osd')
+ no_of_osds += 1
+ else:
+ raise RuntimeError("ceph-deploy: Failed to create osds")
+
+ if config.get('wait-for-healthy', True) and no_of_osds >= 2:
+ is_healthy(ctx=ctx, config=None)
+
+ log.info('Setting up client nodes...')
+ conf_path = '/etc/ceph/ceph.conf'
+ admin_keyring_path = '/etc/ceph/ceph.client.admin.keyring'
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon0_remote,) = ctx.cluster.only(first_mon).remotes.keys()
+ conf_data = teuthology.get_file(
+ remote=mon0_remote,
+ path=conf_path,
+ sudo=True,
+ )
+ admin_keyring = teuthology.get_file(
+ remote=mon0_remote,
+ path=admin_keyring_path,
+ sudo=True,
+ )
+
+ clients = ctx.cluster.only(teuthology.is_type('client'))
+ for remot, roles_for_host in clients.remotes.iteritems():
+ for id_ in teuthology.roles_of_type(roles_for_host, 'client'):
+ client_keyring = '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
+ mon0_remote.run(
+ args=[
+ 'cd',
+ '{tdir}'.format(tdir=testdir),
+ run.Raw('&&'),
+ 'sudo','bash','-c',
+ run.Raw('"'),'ceph',
+ 'auth',
+ 'get-or-create',
+ 'client.{id}'.format(id=id_),
+ 'mds', 'allow',
+ 'mon', 'allow *',
+ 'osd', 'allow *',
+ run.Raw('>'),
+ client_keyring,
+ run.Raw('"'),
+ ],
+ )
+ key_data = teuthology.get_file(
+ remote=mon0_remote,
+ path=client_keyring,
+ sudo=True,
+ )
+ teuthology.sudo_write_file(
+ remote=remot,
+ path=client_keyring,
+ data=key_data,
+ perms='0644'
+ )
+ teuthology.sudo_write_file(
+ remote=remot,
+ path=admin_keyring_path,
+ data=admin_keyring,
+ perms='0644'
+ )
+ teuthology.sudo_write_file(
+ remote=remot,
+ path=conf_path,
+ data=conf_data,
+ perms='0644'
+ )
+ else:
+ raise RuntimeError("The cluster is NOT operational due to insufficient OSDs")
+
+ try:
+ yield
+
+ finally:
+ log.info('Stopping ceph...')
+ ctx.cluster.run(args=[
+ 'sudo', 'stop', 'ceph-all',
+ run.Raw('||'),
+ 'sudo', 'service', 'ceph', 'stop'
+ ])
+
+ if ctx.archive is not None:
+ # archive mon data, too
+ log.info('Archiving mon data...')
+ path = os.path.join(ctx.archive, 'data')
+ os.makedirs(path)
+ mons = ctx.cluster.only(teuthology.is_type('mon'))
+ for remote, roles in mons.remotes.iteritems():
+ for role in roles:
+ if role.startswith('mon.'):
+ teuthology.pull_directory_tarball(
+ remote,
+ '/var/lib/ceph/mon',
+ path + '/' + role + '.tgz')
+
+ log.info('Compressing logs...')
+ run.wait(
+ ctx.cluster.run(
+ args=[
+ 'sudo',
+ 'find',
+ '/var/log/ceph',
+ '-name',
+ '*.log',
+ '-print0',
+ run.Raw('|'),
+ 'sudo',
+ 'xargs',
+ '-0',
+ '--no-run-if-empty',
+ '--',
+ 'gzip',
+ '--',
+ ],
+ wait=False,
+ ),
+ )
+
+ log.info('Archiving logs...')
+ path = os.path.join(ctx.archive, 'remote')
+ os.makedirs(path)
+ for remote in ctx.cluster.remotes.iterkeys():
+ sub = os.path.join(path, remote.shortname)
+ os.makedirs(sub)
+ teuthology.pull_directory(remote, '/var/log/ceph',
+ os.path.join(sub, 'log'))
+
+ log.info('Purging package...')
+ execute_ceph_deploy(ctx, config, purge_nodes)
+ log.info('Purging data...')
+ execute_ceph_deploy(ctx, config, purgedata_nodes)
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Set up and tear down a Ceph cluster.
+
+ For example::
+
+ tasks:
+ - install:
+ extras: yes
+ - ssh_keys:
+ - ceph-deploy:
+ branch:
+ stable: bobtail
+ mon_initial_members: 1
+
+ tasks:
+ - install:
+ extras: yes
+ - ssh_keys:
+ - ceph-deploy:
+ branch:
+ dev: master
+ conf:
+ mon:
+ debug mon = 20
+
+ tasks:
+ - install:
+ extras: yes
+ - ssh_keys:
+ - ceph-deploy:
+ branch:
+ testing:
+ """
+ if config is None:
+ config = {}
+
+ assert isinstance(config, dict), \
+ "task ceph-deploy only supports a dictionary for configuration"
+
+ overrides = ctx.config.get('overrides', {})
+ teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))
+
+ if config.get('branch') is not None:
+ assert isinstance(config['branch'], dict), 'branch must be a dictionary'
+
+ with contextutil.nested(
+ lambda: ceph_fn.ship_utilities(ctx=ctx, config=None),
+ lambda: download_ceph_deploy(ctx=ctx, config=config),
+ lambda: build_ceph_cluster(ctx=ctx, config=dict(
+ conf=config.get('conf', {}),
+ branch=config.get('branch',{}),
+ mon_initial_members=config.get('mon_initial_members', None),
+ test_mon_destroy=config.get('test_mon_destroy', None),
+ )),
+ ):
+ yield
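A note on the ceph_deploy module added above: is_healthy() polls 'sudo ceph health' once per second with no upper bound, and the gatherkeys retry loop has the same shape, so a cluster that never settles stalls the run. A bounded variant might look like the sketch below; the helper name, the 15-minute default, and the absolute teuthology.orchestra import path are assumptions for the sketch, not something this patch adds:

    from cStringIO import StringIO
    import time

    from teuthology.orchestra import run

    def wait_until_healthy(remote, testdir, timeout=900, interval=1):
        # Same check as is_healthy(), but give up after `timeout` seconds.
        deadline = time.time() + timeout
        while time.time() < deadline:
            r = remote.run(
                args=['cd', testdir, run.Raw('&&'), 'sudo', 'ceph', 'health'],
                stdout=StringIO(),
            )
            if r.stdout.getvalue().startswith('HEALTH_OK'):
                return
            time.sleep(interval)
        raise RuntimeError('cluster not HEALTH_OK after %d seconds' % timeout)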
--- /dev/null
+import contextlib
+import logging
+import os
+
+from teuthology import misc as teuthology
+from ..orchestra import run
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Mount/unmount a ``ceph-fuse`` client.
+
+ The config is optional and defaults to mounting on all clients. If
+ a config is given, it is expected to be a list of clients to do
+ this operation on. This lets you e.g. set up one client with
+ ``ceph-fuse`` and another with ``kclient``.
+
+ Example that mounts all clients::
+
+ tasks:
+ - ceph:
+ - ceph-fuse:
+ - interactive:
+
+ Example that uses both ``kclient`` and ``ceph-fuse``::
+
+ tasks:
+ - ceph:
+ - ceph-fuse: [client.0]
+ - kclient: [client.1]
+ - interactive:
+
+ Example that enables valgrind::
+
+ tasks:
+ - ceph:
+ - ceph-fuse:
+ client.0:
+ valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
+ - interactive:
+
+ """
+ log.info('Mounting ceph-fuse clients...')
+ fuse_daemons = {}
+
+ testdir = teuthology.get_testdir(ctx)
+
+ if config is None:
+ config = dict(('client.{id}'.format(id=id_), None)
+ for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
+ elif isinstance(config, list):
+ config = dict((name, None) for name in config)
+
+ overrides = ctx.config.get('overrides', {})
+ teuthology.deep_merge(config, overrides.get('ceph-fuse', {}))
+
+ clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))
+
+ for id_, remote in clients:
+ client_config = config.get("client.%s" % id_)
+ if client_config is None:
+ client_config = {}
+ log.info("Client client.%s config is %s" % (id_, client_config))
+
+ daemon_signal = 'kill'
+ if client_config.get('coverage') or client_config.get('valgrind') is not None:
+ daemon_signal = 'term'
+
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
+ log.info('Mounting ceph-fuse client.{id} at {remote} {mnt}...'.format(
+ id=id_, remote=remote,mnt=mnt))
+
+ remote.run(
+ args=[
+ 'mkdir',
+ '--',
+ mnt,
+ ],
+ )
+
+ run_cmd=[
+ 'sudo',
+ '{tdir}/adjust-ulimits'.format(tdir=testdir),
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ '{tdir}/daemon-helper'.format(tdir=testdir),
+ daemon_signal,
+ ]
+ run_cmd_tail=[
+ 'ceph-fuse',
+ '-f',
+ '--name', 'client.{id}'.format(id=id_),
+ # TODO: ceph-fuse doesn't understand '--' (end of options), so the mountpoint is passed without it
+ mnt,
+ ]
+
+ if client_config.get('valgrind') is not None:
+ run_cmd.extend(
+ teuthology.get_valgrind_args(
+ testdir,
+ 'client.{id}'.format(id=id_),
+ client_config.get('valgrind'),
+ )
+ )
+
+ run_cmd.extend(run_cmd_tail)
+
+ proc = remote.run(
+ args=run_cmd,
+ logger=log.getChild('ceph-fuse.{id}'.format(id=id_)),
+ stdin=run.PIPE,
+ wait=False,
+ )
+ fuse_daemons[id_] = proc
+
+ for id_, remote in clients:
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
+ teuthology.wait_until_fuse_mounted(
+ remote=remote,
+ fuse=fuse_daemons[id_],
+ mountpoint=mnt,
+ )
+ remote.run(args=['sudo', 'chmod', '1777', '{tdir}/mnt.{id}'.format(tdir=testdir, id=id_)],)
+
+ try:
+ yield
+ finally:
+ log.info('Unmounting ceph-fuse clients...')
+ for id_, remote in clients:
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
+ try:
+ remote.run(
+ args=[
+ 'sudo',
+ 'fusermount',
+ '-u',
+ mnt,
+ ],
+ )
+ except run.CommandFailedError:
+ log.info('Failed to unmount ceph-fuse on {name}, aborting...'.format(name=remote.name))
+ # abort the fuse mount, killing all hung processes
+ remote.run(
+ args=[
+ 'if', 'test', '-e', '/sys/fs/fuse/connections/*/abort',
+ run.Raw(';'), 'then',
+ 'echo',
+ '1',
+ run.Raw('>'),
+ run.Raw('/sys/fs/fuse/connections/*/abort'),
+ run.Raw(';'), 'fi',
+ ],
+ )
+ # make sure it's unmounted
+ remote.run(
+ args=[
+ 'sudo',
+ 'umount',
+ '-l',
+ '-f',
+ mnt,
+ ],
+ )
+
+ run.wait(fuse_daemons.itervalues())
+
+ for id_, remote in clients:
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
+ remote.run(
+ args=[
+ 'rmdir',
+ '--',
+ mnt,
+ ],
+ )
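A small observation on the ceph_fuse module above: the per-client mountpoint path os.path.join(testdir, 'mnt.<id>') is rebuilt at several call sites. If this file is revisited, a helper along these lines (the name client_mountpoint is hypothetical) would keep those call sites consistent:

    import os

    def client_mountpoint(testdir, id_):
        # e.g. client_mountpoint('/tmp/cephtest', '0') -> '/tmp/cephtest/mnt.0'
        return os.path.join(testdir, 'mnt.{id}'.format(id=id_))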
+++ /dev/null
-import contextlib
-import logging
-import os
-
-from teuthology import misc as teuthology
-from ..orchestra import run
-
-log = logging.getLogger(__name__)
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Mount/unmount a cifs client.
-
- The config is optional and defaults to mounting on all clients. If
- a config is given, it is expected to be a list of clients to do
- this operation on.
-
- Example that starts smbd and mounts cifs on all nodes::
-
- tasks:
- - ceph:
- - samba:
- - cifs-mount:
- - interactive:
-
- Example that splits smbd and cifs:
-
- tasks:
- - ceph:
- - samba: [samba.0]
- - cifs-mount: [client.0]
- - ceph-fuse: [client.1]
- - interactive:
-
- Example that specifies the share name:
-
- tasks:
- - ceph:
- - ceph-fuse:
- - samba:
- samba.0:
- cephfuse: "{testdir}/mnt.0"
- - cifs-mount:
- client.0:
- share: cephfuse
- """
- log.info('Mounting cifs clients...')
-
- if config is None:
- config = dict(('client.{id}'.format(id=id_), None)
- for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
- elif isinstance(config, list):
- config = dict((name, None) for name in config)
-
- clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))
-
- from teuthology.task.samba import get_sambas
- samba_roles = ['samba.{id_}'.format(id_=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'samba')]
- sambas = list(get_sambas(ctx=ctx, roles=samba_roles))
- (ip, port) = sambas[0][1].ssh.get_transport().getpeername()
- log.info('samba ip: {ip}'.format(ip=ip))
-
- for id_, remote in clients:
- mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
- log.info('Mounting cifs client.{id} at {remote} {mnt}...'.format(
- id=id_, remote=remote,mnt=mnt))
-
- remote.run(
- args=[
- 'mkdir',
- '--',
- mnt,
- ],
- )
-
- rolestr = 'client.{id_}'.format(id_=id_)
- unc = "ceph"
- log.info("config: {c}".format(c=config))
- if config[rolestr] is not None and 'share' in config[rolestr]:
- unc = config[rolestr]['share']
-
- remote.run(
- args=[
- 'sudo',
- 'mount',
- '-t',
- 'cifs',
- '//{sambaip}/{unc}'.format(sambaip=ip, unc=unc),
- '-o',
- 'username=ubuntu,password=ubuntu',
- mnt,
- ],
- )
-
- remote.run(
- args=[
- 'sudo',
- 'chown',
- 'ubuntu:ubuntu',
- '{m}/'.format(m=mnt),
- ],
- )
-
- try:
- yield
- finally:
- log.info('Unmounting cifs clients...')
- for id_, remote in clients:
- remote.run(
- args=[
- 'sudo',
- 'umount',
- mnt,
- ],
- )
- for id_, remote in clients:
- while True:
- try:
- remote.run(
- args=[
- 'rmdir', '--', mnt,
- run.Raw('2>&1'),
- run.Raw('|'),
- 'grep', 'Device or resource busy',
- ],
- )
- import time
- time.sleep(1)
- except:
- break
--- /dev/null
+import contextlib
+import logging
+import os
+
+from teuthology import misc as teuthology
+from ..orchestra import run
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Mount/unmount a cifs client.
+
+ The config is optional and defaults to mounting on all clients. If
+ a config is given, it is expected to be a list of clients to do
+ this operation on.
+
+ Example that starts smbd and mounts cifs on all nodes::
+
+ tasks:
+ - ceph:
+ - samba:
+ - cifs-mount:
+ - interactive:
+
+ Example that splits smbd and cifs::
+
+ tasks:
+ - ceph:
+ - samba: [samba.0]
+ - cifs-mount: [client.0]
+ - ceph-fuse: [client.1]
+ - interactive:
+
+ Example that specifies the share name::
+
+ tasks:
+ - ceph:
+ - ceph-fuse:
+ - samba:
+ samba.0:
+ cephfuse: "{testdir}/mnt.0"
+ - cifs-mount:
+ client.0:
+ share: cephfuse
+ """
+ log.info('Mounting cifs clients...')
+
+ if config is None:
+ config = dict(('client.{id}'.format(id=id_), None)
+ for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
+ elif isinstance(config, list):
+ config = dict((name, None) for name in config)
+
+ clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))
+
+ from teuthology.task.samba import get_sambas
+ samba_roles = ['samba.{id_}'.format(id_=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'samba')]
+ sambas = list(get_sambas(ctx=ctx, roles=samba_roles))
+ (ip, port) = sambas[0][1].ssh.get_transport().getpeername()
+ log.info('samba ip: {ip}'.format(ip=ip))
+
+ for id_, remote in clients:
+ mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
+ log.info('Mounting cifs client.{id} at {remote} {mnt}...'.format(
+ id=id_, remote=remote,mnt=mnt))
+
+ remote.run(
+ args=[
+ 'mkdir',
+ '--',
+ mnt,
+ ],
+ )
+
+ rolestr = 'client.{id_}'.format(id_=id_)
+ unc = "ceph"
+ log.info("config: {c}".format(c=config))
+ if config[rolestr] is not None and 'share' in config[rolestr]:
+ unc = config[rolestr]['share']
+
+ remote.run(
+ args=[
+ 'sudo',
+ 'mount',
+ '-t',
+ 'cifs',
+ '//{sambaip}/{unc}'.format(sambaip=ip, unc=unc),
+ '-o',
+ 'username=ubuntu,password=ubuntu',
+ mnt,
+ ],
+ )
+
+ remote.run(
+ args=[
+ 'sudo',
+ 'chown',
+ 'ubuntu:ubuntu',
+ '{m}/'.format(m=mnt),
+ ],
+ )
+
+ try:
+ yield
+ finally:
+ log.info('Unmounting cifs clients...')
+ for id_, remote in clients:
+ mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
+ remote.run(
+ args=[
+ 'sudo',
+ 'umount',
+ mnt,
+ ],
+ )
+ import time
+ for id_, remote in clients:
+ mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
+ # rmdir fails with 'Device or resource busy' while the share is still
+ # in use; the grep keeps the pipeline succeeding only in that case,
+ # so retry until it stops matching.
+ while True:
+ try:
+ remote.run(
+ args=[
+ 'rmdir', '--', mnt,
+ run.Raw('2>&1'),
+ run.Raw('|'),
+ 'grep', 'Device or resource busy',
+ ],
+ )
+ time.sleep(1)
+ except run.CommandFailedError:
+ break
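The rmdir loop above retries for as long as the mountpoint reports 'Device or resource busy'. A bounded, standalone version of the same idea is sketched below; the helper name, the attempt count, and the absolute teuthology.orchestra import path are assumptions, and it retries rmdir on failure (typically 'Device or resource busy' here) rather than grepping the error text:

    import time

    from teuthology.orchestra import run

    def rmdir_with_retry(remote, mnt, attempts=30, interval=1):
        # Retry while rmdir keeps failing (the mountpoint is usually still
        # busy); re-raise the last failure once the attempts run out.
        for i in range(attempts):
            try:
                remote.run(args=['rmdir', '--', mnt])
                return
            except run.CommandFailedError:
                if i == attempts - 1:
                    raise
                time.sleep(interval)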
+++ /dev/null
-# The test cases in this file have been annotated for inventory.
-# To extract the inventory (in csv format) use the command:
-#
-# grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //'
-#
-
-from cStringIO import StringIO
-import logging
-import json
-
-import boto.exception
-import boto.s3.connection
-import boto.s3.acl
-
-import requests
-import time
-
-from boto.connection import AWSAuthConnection
-from teuthology import misc as teuthology
-
-log = logging.getLogger(__name__)
-
-def successful_ops(out):
- summary = out['summary']
- if len(summary) == 0:
- return 0
- entry = summary[0]
- return entry['total']['successful_ops']
-
-def rgwadmin(ctx, client, cmd):
- log.info('radosgw-admin: %s' % cmd)
- testdir = teuthology.get_testdir(ctx)
- pre = [
- '{tdir}/adjust-ulimits'.format(tdir=testdir),
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'radosgw-admin',
- '--log-to-stderr',
- '--format', 'json',
- ]
- pre.extend(cmd)
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
- proc = remote.run(
- args=pre,
- check_status=False,
- stdout=StringIO(),
- stderr=StringIO(),
- )
- r = proc.exitstatus
- out = proc.stdout.getvalue()
- j = None
- if not r and out != '':
- try:
- j = json.loads(out)
- log.info(' json result: %s' % j)
- except ValueError:
- j = out
- log.info(' raw result: %s' % j)
- return (r, j)
-
-
-def rgwadmin_rest(connection, cmd, params=None, headers=None, raw=False):
- log.info('radosgw-admin-rest: %s %s' % (cmd, params))
- put_cmds = ['create', 'link', 'add']
- post_cmds = ['unlink', 'modify']
- delete_cmds = ['trim', 'rm', 'process']
- get_cmds = ['check', 'info', 'show', 'list']
-
- bucket_sub_resources = ['object', 'policy', 'index']
- user_sub_resources = ['subuser', 'key', 'caps']
- zone_sub_resources = ['pool', 'log', 'garbage']
-
- def get_cmd_method_and_handler(cmd):
- if cmd[1] in put_cmds:
- return 'PUT', requests.put
- elif cmd[1] in delete_cmds:
- return 'DELETE', requests.delete
- elif cmd[1] in post_cmds:
- return 'POST', requests.post
- elif cmd[1] in get_cmds:
- return 'GET', requests.get
-
- def get_resource(cmd):
- if cmd[0] == 'bucket' or cmd[0] in bucket_sub_resources:
- if cmd[0] == 'bucket':
- return 'bucket', ''
- else:
- return 'bucket', cmd[0]
- elif cmd[0] == 'user' or cmd[0] in user_sub_resources:
- if cmd[0] == 'user':
- return 'user', ''
- else:
- return 'user', cmd[0]
- elif cmd[0] == 'usage':
- return 'usage', ''
- elif cmd[0] == 'zone' or cmd[0] in zone_sub_resources:
- if cmd[0] == 'zone':
- return 'zone', ''
- else:
- return 'zone', cmd[0]
-
- """
- Adapted from the build_request() method of boto.connection
- """
- def build_admin_request(conn, method, resource = '', headers=None, data='',
- query_args=None, params=None):
-
- path = conn.calling_format.build_path_base('admin', resource)
- auth_path = conn.calling_format.build_auth_path('admin', resource)
- host = conn.calling_format.build_host(conn.server_name(), 'admin')
- if query_args:
- path += '?' + query_args
- boto.log.debug('path=%s' % path)
- auth_path += '?' + query_args
- boto.log.debug('auth_path=%s' % auth_path)
- return AWSAuthConnection.build_base_http_request(conn, method, path,
- auth_path, params, headers, data, host)
-
- method, handler = get_cmd_method_and_handler(cmd)
- resource, query_args = get_resource(cmd)
- request = build_admin_request(connection, method, resource,
- query_args=query_args, headers=headers)
-
- url = '{protocol}://{host}{path}'.format(protocol=request.protocol,
- host=request.host, path=request.path)
-
- request.authorize(connection=connection)
- result = handler(url, params=params, headers=request.headers)
-
- if raw:
- log.info(' text result: %s' % result.txt)
- return result.status_code, result.txt
- else:
- log.info(' json result: %s' % result.json)
- return result.status_code, result.json
-
-
-def task(ctx, config):
- """
- Test radosgw-admin functionality through the RESTful interface
- """
- assert config is None or isinstance(config, list) \
- or isinstance(config, dict), \
- "task s3tests only supports a list or dictionary for configuration"
- all_clients = ['client.{id}'.format(id=id_)
- for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
- if config is None:
- config = all_clients
- if isinstance(config, list):
- config = dict.fromkeys(config)
- clients = config.keys()
-
- # just use the first client...
- client = clients[0]
-
- ##
- admin_user = 'ada'
- admin_display_name = 'Ms. Admin User'
- admin_access_key = 'MH1WC2XQ1S8UISFDZC8W'
- admin_secret_key = 'dQyrTPA0s248YeN5bBv4ukvKU0kh54LWWywkrpoG'
- admin_caps = 'users=read, write; usage=read, write; buckets=read, write; zone=read, write'
-
- user1 = 'foo'
- user2 = 'fud'
- subuser1 = 'foo:foo1'
- subuser2 = 'foo:foo2'
- display_name1 = 'Foo'
- display_name2 = 'Fud'
- email = 'foo@foo.com'
- access_key = '9te6NH5mcdcq0Tc5i8i1'
- secret_key = 'Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu'
- access_key2 = 'p5YnriCv1nAtykxBrupQ'
- secret_key2 = 'Q8Tk6Q/27hfbFSYdSkPtUqhqx1GgzvpXa4WARozh'
- swift_secret1 = 'gpS2G9RREMrnbqlp29PP2D36kgPR1tm72n5fPYfL'
- swift_secret2 = 'ri2VJQcKSYATOY6uaDUX7pxgkW+W1YmC6OCxPHwy'
-
- bucket_name = 'myfoo'
-
- # legend (test cases can be easily grep-ed out)
- # TESTCASE 'testname','object','method','operation','assertion'
- # TESTCASE 'create-admin-user','user','create','administrative user','succeeds'
- (err, out) = rgwadmin(ctx, client, [
- 'user', 'create',
- '--uid', admin_user,
- '--display-name', admin_display_name,
- '--access-key', admin_access_key,
- '--secret', admin_secret_key,
- '--max-buckets', '0',
- '--caps', admin_caps
- ])
- logging.error(out)
- logging.error(err)
- assert not err
-
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
- remote_host = remote.name.split('@')[1]
- admin_conn = boto.s3.connection.S3Connection(
- aws_access_key_id=admin_access_key,
- aws_secret_access_key=admin_secret_key,
- is_secure=False,
- port=7280,
- host=remote_host,
- calling_format=boto.s3.connection.OrdinaryCallingFormat(),
- )
-
- # TESTCASE 'info-nosuch','user','info','non-existent user','fails'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {"uid": user1})
- assert ret == 404
-
- # TESTCASE 'create-ok','user','create','w/all valid info','succeeds'
- (ret, out) = rgwadmin_rest(admin_conn,
- ['user', 'create'],
- {'uid' : user1,
- 'display-name' : display_name1,
- 'email' : email,
- 'access-key' : access_key,
- 'secret-key' : secret_key,
- 'max-buckets' : '4'
- })
-
- assert ret == 200
-
- # TESTCASE 'info-existing','user','info','existing user','returns correct info'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
-
- assert out['user_id'] == user1
- assert out['email'] == email
- assert out['display_name'] == display_name1
- assert len(out['keys']) == 1
- assert out['keys'][0]['access_key'] == access_key
- assert out['keys'][0]['secret_key'] == secret_key
- assert not out['suspended']
-
- # TESTCASE 'suspend-ok','user','suspend','active user','succeeds'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : True})
- assert ret == 200
-
- # TESTCASE 'suspend-suspended','user','suspend','suspended user','succeeds w/advisory'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
- assert ret == 200
- assert out['suspended']
-
- # TESTCASE 're-enable','user','enable','suspended user','succeeds'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : 'false'})
- assert not err
-
- # TESTCASE 'info-re-enabled','user','info','re-enabled user','no longer suspended'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
- assert ret == 200
- assert not out['suspended']
-
- # TESTCASE 'add-keys','key','create','w/valid info','succeeds'
- (ret, out) = rgwadmin_rest(admin_conn,
- ['key', 'create'],
- {'uid' : user1,
- 'access-key' : access_key2,
- 'secret-key' : secret_key2
- })
-
-
- assert ret == 200
-
- # TESTCASE 'info-new-key','user','info','after key addition','returns all keys'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
- assert ret == 200
- assert len(out['keys']) == 2
- assert out['keys'][0]['access_key'] == access_key2 or out['keys'][1]['access_key'] == access_key2
- assert out['keys'][0]['secret_key'] == secret_key2 or out['keys'][1]['secret_key'] == secret_key2
-
- # TESTCASE 'rm-key','key','rm','newly added key','succeeds, key is removed'
- (ret, out) = rgwadmin_rest(admin_conn,
- ['key', 'rm'],
- {'uid' : user1,
- 'access-key' : access_key2
- })
-
- assert ret == 200
-
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
-
- assert len(out['keys']) == 1
- assert out['keys'][0]['access_key'] == access_key
- assert out['keys'][0]['secret_key'] == secret_key
-
- # TESTCASE 'add-swift-key','key','create','swift key','succeeds'
- (ret, out) = rgwadmin_rest(admin_conn,
- ['subuser', 'create'],
- {'subuser' : subuser1,
- 'secret-key' : swift_secret1,
- 'key-type' : 'swift'
- })
-
- assert ret == 200
-
- # TESTCASE 'info-swift-key','user','info','after key addition','returns all keys'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
- assert ret == 200
- assert len(out['swift_keys']) == 1
- assert out['swift_keys'][0]['user'] == subuser1
- assert out['swift_keys'][0]['secret_key'] == swift_secret1
-
- # TESTCASE 'add-swift-subuser','key','create','swift sub-user key','succeeds'
- (ret, out) = rgwadmin_rest(admin_conn,
- ['subuser', 'create'],
- {'subuser' : subuser2,
- 'secret-key' : swift_secret2,
- 'key-type' : 'swift'
- })
-
- assert ret == 200
-
- # TESTCASE 'info-swift-subuser','user','info','after key addition','returns all sub-users/keys'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
- assert ret == 200
- assert len(out['swift_keys']) == 2
- assert out['swift_keys'][0]['user'] == subuser2 or out['swift_keys'][1]['user'] == subuser2
- assert out['swift_keys'][0]['secret_key'] == swift_secret2 or out['swift_keys'][1]['secret_key'] == swift_secret2
-
- # TESTCASE 'rm-swift-key1','key','rm','subuser','succeeds, one key is removed'
- (ret, out) = rgwadmin_rest(admin_conn,
- ['key', 'rm'],
- {'subuser' : subuser1,
- 'key-type' :'swift'
- })
-
- assert ret == 200
-
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
- assert len(out['swift_keys']) == 1
-
- # TESTCASE 'rm-subuser','subuser','rm','subuser','success, subuser is removed'
- (ret, out) = rgwadmin_rest(admin_conn,
- ['subuser', 'rm'],
- {'subuser' : subuser1
- })
-
- assert ret == 200
-
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
- assert len(out['subusers']) == 1
-
- # TESTCASE 'rm-subuser-with-keys','subuser','rm','subuser','succeeds, second subser and key is removed'
- (ret, out) = rgwadmin_rest(admin_conn,
- ['subuser', 'rm'],
- {'subuser' : subuser2,
- 'key-type' : 'swift',
- '{purge-keys' :True
- })
-
- assert ret == 200
-
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
- assert len(out['swift_keys']) == 0
- assert len(out['subusers']) == 0
-
- # TESTCASE 'bucket-stats','bucket','info','no session/buckets','succeeds, empty list'
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1})
- assert ret == 200
- assert len(out) == 0
-
- # connect to rgw
- connection = boto.s3.connection.S3Connection(
- aws_access_key_id=access_key,
- aws_secret_access_key=secret_key,
- is_secure=False,
- port=7280,
- host=remote_host,
- calling_format=boto.s3.connection.OrdinaryCallingFormat(),
- )
-
- # TESTCASE 'bucket-stats2','bucket','stats','no buckets','succeeds, empty list'
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1, 'stats' : True})
- assert ret == 200
- assert len(out) == 0
-
- # create a first bucket
- bucket = connection.create_bucket(bucket_name)
-
- # TESTCASE 'bucket-list','bucket','list','one bucket','succeeds, expected list'
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1})
- assert ret == 200
- assert len(out) == 1
- assert out[0] == bucket_name
-
- # TESTCASE 'bucket-stats3','bucket','stats','new empty bucket','succeeds, empty list'
- (ret, out) = rgwadmin_rest(admin_conn,
- ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True})
-
- assert ret == 200
- assert out['owner'] == user1
- bucket_id = out['id']
-
- # TESTCASE 'bucket-stats4','bucket','stats','new empty bucket','succeeds, expected bucket ID'
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1, 'stats' : True})
- assert ret == 200
- assert len(out) == 1
- assert out[0]['id'] == bucket_id # does it return the same ID twice in a row?
-
- # use some space
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('one')
-
- # TESTCASE 'bucket-stats5','bucket','stats','after creating key','succeeds, lists one non-empty object'
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True})
- assert ret == 200
- assert out['id'] == bucket_id
- assert out['usage']['rgw.main']['num_objects'] == 1
- assert out['usage']['rgw.main']['size_kb'] > 0
-
- # reclaim it
- key.delete()
-
- # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'fails', 'access denied error'
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'unlink'], {'uid' : user1, 'bucket' : bucket_name})
-
- assert ret == 200
-
- # create a second user to link the bucket to
- (ret, out) = rgwadmin_rest(admin_conn,
- ['user', 'create'],
- {'uid' : user2,
- 'display-name' : display_name2,
- 'access-key' : access_key2,
- 'secret-key' : secret_key2,
- 'max-buckets' : '1',
- })
-
- assert ret == 200
-
- # try creating an object with the first user before the bucket is relinked
- denied = False
- key = boto.s3.key.Key(bucket)
-
- try:
- key.set_contents_from_string('two')
- except boto.exception.S3ResponseError:
- denied = True
-
- assert not denied
-
- # delete the object
- key.delete()
-
- # link the bucket to another user
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'link'], {'uid' : user2, 'bucket' : bucket_name})
-
- assert ret == 200
-
- # try creating an object with the first user which should cause an error
- key = boto.s3.key.Key(bucket)
-
- try:
- key.set_contents_from_string('three')
- except boto.exception.S3ResponseError:
- denied = True
-
- assert denied
-
- # relink the bucket to the first user and delete the second user
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'link'], {'uid' : user1, 'bucket' : bucket_name})
- assert ret == 200
-
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user2})
- assert ret == 200
-
- # TESTCASE 'object-rm', 'object', 'rm', 'remove object', 'succeeds, object is removed'
-
- # upload an object
- object_name = 'four'
- key = boto.s3.key.Key(bucket, object_name)
- key.set_contents_from_string(object_name)
-
- # now delete it
- (ret, out) = rgwadmin_rest(admin_conn, ['object', 'rm'], {'bucket' : bucket_name, 'object' : object_name})
- assert ret == 200
-
- # TESTCASE 'bucket-stats6','bucket','stats','after deleting key','succeeds, lists one no objects'
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True})
- assert ret == 200
- assert out['id'] == bucket_id
- assert out['usage']['rgw.main']['num_objects'] == 0
-
- # create a bucket for deletion stats
- useless_bucket = connection.create_bucket('useless_bucket')
- useless_key = useless_bucket.new_key('useless_key')
- useless_key.set_contents_from_string('useless string')
-
- # delete it
- useless_key.delete()
- useless_bucket.delete()
-
- # wait for the statistics to flush
- time.sleep(60)
-
- # need to wait for all usage data to get flushed, should take up to 30 seconds
- timestamp = time.time()
- while time.time() - timestamp <= (20 * 60): # wait up to 20 minutes
- (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'categories' : 'delete_obj'}) # last operation we did is delete obj, wait for it to flush
-
- if successful_ops(out) > 0:
- break
- time.sleep(1)
-
- assert time.time() - timestamp <= (20 * 60)
-
- # TESTCASE 'usage-show' 'usage' 'show' 'all usage' 'succeeds'
- (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'])
- assert ret == 200
- assert len(out['entries']) > 0
- assert len(out['summary']) > 0
- user_summary = out['summary'][0]
- total = user_summary['total']
- assert total['successful_ops'] > 0
-
- # TESTCASE 'usage-show2' 'usage' 'show' 'user usage' 'succeeds'
- (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1})
- assert ret == 200
- assert len(out['entries']) > 0
- assert len(out['summary']) > 0
- user_summary = out['summary'][0]
- for entry in user_summary['categories']:
- assert entry['successful_ops'] > 0
- assert user_summary['user'] == user1
-
- # TESTCASE 'usage-show3' 'usage' 'show' 'user usage categories' 'succeeds'
- test_categories = ['create_bucket', 'put_obj', 'delete_obj', 'delete_bucket']
- for cat in test_categories:
- (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1, 'categories' : cat})
- assert ret == 200
- assert len(out['summary']) > 0
- user_summary = out['summary'][0]
- assert user_summary['user'] == user1
- assert len(user_summary['categories']) == 1
- entry = user_summary['categories'][0]
- assert entry['category'] == cat
- assert entry['successful_ops'] > 0
-
- # TESTCASE 'usage-trim' 'usage' 'trim' 'user usage' 'succeeds, usage removed'
- (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'trim'], {'uid' : user1})
- assert ret == 200
- (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1})
- assert ret == 200
- assert len(out['entries']) == 0
- assert len(out['summary']) == 0
-
- # TESTCASE 'user-suspend2','user','suspend','existing user','succeeds'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : True})
- assert ret == 200
-
- # TESTCASE 'user-suspend3','user','suspend','suspended user','cannot write objects'
- try:
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('five')
- except boto.exception.S3ResponseError as e:
- assert e.status == 403
-
- # TESTCASE 'user-renable2','user','enable','suspended user','succeeds'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : 'false'})
- assert ret == 200
-
- # TESTCASE 'user-renable3','user','enable','reenabled user','can write objects'
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('six')
-
- # TESTCASE 'garbage-list', 'garbage', 'list', 'get list of objects ready for garbage collection'
-
- # create an object large enough to be split into multiple parts
- test_string = 'foo'*10000000
-
- big_key = boto.s3.key.Key(bucket)
- big_key.set_contents_from_string(test_string)
-
- # now delete the head
- big_key.delete()
-
- # TESTCASE 'rm-user-buckets','user','rm','existing user','fails, still has buckets'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1})
- assert ret == 409
-
- # delete should fail because ``key`` still exists
- try:
- bucket.delete()
- except boto.exception.S3ResponseError as e:
- assert e.status == 409
-
- key.delete()
- bucket.delete()
-
- # TESTCASE 'policy', 'bucket', 'policy', 'get bucket policy', 'returns S3 policy'
- bucket = connection.create_bucket(bucket_name)
-
- # create an object
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('seven')
-
- # should be private already but guarantee it
- key.set_acl('private')
-
- (ret, out) = rgwadmin_rest(admin_conn, ['policy', 'show'], {'bucket' : bucket.name, 'object' : key.key})
- assert ret == 200
-
- acl = key.get_xml_acl()
- assert acl == out.strip('\n')
-
- # add another grantee by making the object public read
- key.set_acl('public-read')
-
- (ret, out) = rgwadmin_rest(admin_conn, ['policy', 'show'], {'bucket' : bucket.name, 'object' : key.key})
- assert ret == 200
-
- acl = key.get_xml_acl()
- assert acl == out.strip('\n')
-
- # TESTCASE 'rm-bucket', 'bucket', 'rm', 'bucket with objects', 'succeeds'
- bucket = connection.create_bucket(bucket_name)
- key_name = ['eight', 'nine', 'ten', 'eleven']
- for i in range(4):
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string(key_name[i])
-
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'rm'], {'bucket' : bucket_name, 'purge-objects' : True})
- assert ret == 200
-
- # TESTCASE 'caps-add', 'caps', 'add', 'add user cap', 'succeeds'
- caps = 'usage=read'
- (ret, out) = rgwadmin_rest(admin_conn, ['caps', 'add'], {'uid' : user1, 'user-caps' : caps})
- assert ret == 200
- assert out[0]['perm'] == 'read'
-
- # TESTCASE 'caps-rm', 'caps', 'rm', 'remove existing cap from user', 'succeeds'
- (ret, out) = rgwadmin_rest(admin_conn, ['caps', 'rm'], {'uid' : user1, 'user-caps' : caps})
- assert ret == 200
- assert not out
-
- # TESTCASE 'rm-user','user','rm','existing user','fails, still has buckets'
- bucket = connection.create_bucket(bucket_name)
- key = boto.s3.key.Key(bucket)
-
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1})
- assert ret == 409
-
- # TESTCASE 'rm-user2', 'user', 'rm', 'user with data', 'succeeds'
- bucket = connection.create_bucket(bucket_name)
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('twelve')
-
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1, 'purge-data' : True})
- assert ret == 200
-
- # TESTCASE 'rm-user3','user','info','deleted user','fails'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
- assert ret == 404
-
+++ /dev/null
-# The test cases in this file have been annotated for inventory.
-# To extract the inventory (in csv format) use the command:
-#
-# grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //'
-#
-
-import contextlib
-import json
-import logging
-import time
-
-from cStringIO import StringIO
-
-import boto.exception
-import boto.s3.connection
-import boto.s3.acl
-
-import teuthology.task_util.rgw as rgw_utils
-
-from teuthology import misc as teuthology
-from teuthology import contextutil
-from teuthology.task_util.rgw import rgwadmin
-
-log = logging.getLogger(__name__)
-
-def successful_ops(out):
- summary = out['summary']
- if len(summary) == 0:
- return 0
- entry = summary[0]
- return entry['total']['successful_ops']
-
-# simple test to indicate if multi-region testing should occur
-def multi_region_enabled(ctx):
- # this is populated by the radosgw-agent task, seems reasonable to
- # use that as an indicator that we're testing multi-region sync
- return 'radosgw_agent' in ctx
-
-def task(ctx, config):
- """
- Test radosgw-admin functionality against a running rgw instance.
- """
- assert config is None or isinstance(config, list) \
- or isinstance(config, dict), \
- "task s3tests only supports a list or dictionary for configuration"
- all_clients = ['client.{id}'.format(id=id_)
- for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
- if config is None:
- config = all_clients
- if isinstance(config, list):
- config = dict.fromkeys(config)
- clients = config.keys()
-
- multi_region_run = multi_region_enabled(ctx)
-
- client = clients[0]  # default choice; multi-region code may overwrite this
- if multi_region_run:
- client = rgw_utils.get_master_client(ctx, clients)
-
- ##
- user1='foo'
- user2='fud'
- subuser1='foo:foo1'
- subuser2='foo:foo2'
- display_name1='Foo'
- display_name2='Fud'
- email='foo@foo.com'
- access_key='9te6NH5mcdcq0Tc5i8i1'
- secret_key='Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu'
- access_key2='p5YnriCv1nAtykxBrupQ'
- secret_key2='Q8Tk6Q/27hfbFSYdSkPtUqhqx1GgzvpXa4WARozh'
- swift_secret1='gpS2G9RREMrnbqlp29PP2D36kgPR1tm72n5fPYfL'
- swift_secret2='ri2VJQcKSYATOY6uaDUX7pxgkW+W1YmC6OCxPHwy'
-
- bucket_name='myfoo'
-
- # legend (test cases can be easily grep-ed out)
- # TESTCASE 'testname','object','method','operation','assertion'
- # TESTCASE 'info-nosuch','user','info','non-existent user','fails'
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
- assert err
-
- # TESTCASE 'create-ok','user','create','w/all valid info','succeeds'
- (err, out) = rgwadmin(ctx, client, [
- 'user', 'create',
- '--uid', user1,
- '--display-name', display_name1,
- '--email', email,
- '--access-key', access_key,
- '--secret', secret_key,
- '--max-buckets', '4'
- ])
- assert not err
-
- # TESTCASE 'duplicate email','user','create','existing user email','fails'
- (err, out) = rgwadmin(ctx, client, [
- 'user', 'create',
- '--uid', user2,
- '--display-name', display_name2,
- '--email', email,
- ])
- assert err
-
- # TESTCASE 'info-existing','user','info','existing user','returns correct info'
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
- assert not err
- assert out['user_id'] == user1
- assert out['email'] == email
- assert out['display_name'] == display_name1
- assert len(out['keys']) == 1
- assert out['keys'][0]['access_key'] == access_key
- assert out['keys'][0]['secret_key'] == secret_key
- assert not out['suspended']
-
- # this whole block should only be run if regions have been configured
- if multi_region_run:
- rgw_utils.radosgw_agent_sync_all(ctx)
- # post-sync, validate that user1 exists on the sync destination host
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- dest_client = c_config['dest']
- (err, out) = rgwadmin(ctx, dest_client, ['metadata', 'list', 'user'])
- (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1])
- assert not err
- assert out['user_id'] == user1
- assert out['email'] == email
- assert out['display_name'] == display_name1
- assert len(out['keys']) == 1
- assert out['keys'][0]['access_key'] == access_key
- assert out['keys'][0]['secret_key'] == secret_key
- assert not out['suspended']
-
- # compare the metadata between different regions, make sure it matches
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- source_client = c_config['src']
- dest_client = c_config['dest']
- (err1, out1) = rgwadmin(ctx, source_client, ['metadata', 'get', 'user:{uid}'.format(uid=user1)])
- (err2, out2) = rgwadmin(ctx, dest_client, ['metadata', 'get', 'user:{uid}'.format(uid=user1)])
- assert not err1
- assert not err2
- assert out1 == out2
-
- # suspend a user on the master, then check the status on the destination
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- source_client = c_config['src']
- dest_client = c_config['dest']
- (err, out) = rgwadmin(ctx, source_client, ['user', 'suspend', '--uid', user1])
- rgw_utils.radosgw_agent_sync_all(ctx)
- (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1])
- assert not err
- assert out['suspended']
-
- # delete a user on the master, then check that it's gone on the destination
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- source_client = c_config['src']
- dest_client = c_config['dest']
- (err, out) = rgwadmin(ctx, source_client, ['user', 'rm', '--uid', user1])
- assert not err
- rgw_utils.radosgw_agent_sync_all(ctx)
- (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1])
- assert out is None
-
- # then recreate it so later tests pass
- (err, out) = rgwadmin(ctx, client, [
- 'user', 'create',
- '--uid', user1,
- '--display-name', display_name1,
- '--email', email,
- '--access-key', access_key,
- '--secret', secret_key,
- '--max-buckets', '4'
- ])
- assert not err
- # end of 'if multi_region_run:'
-
- # TESTCASE 'suspend-ok','user','suspend','active user','succeeds'
- (err, out) = rgwadmin(ctx, client, ['user', 'suspend', '--uid', user1])
- assert not err
-
- # TESTCASE 'suspend-suspended','user','suspend','suspended user','succeeds w/advisory'
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
- assert not err
- assert out['suspended']
-
- # TESTCASE 're-enable','user','enable','suspended user','succeeds'
- (err, out) = rgwadmin(ctx, client, ['user', 'enable', '--uid', user1])
- assert not err
-
- # TESTCASE 'info-re-enabled','user','info','re-enabled user','no longer suspended'
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
- assert not err
- assert not out['suspended']
-
- # TESTCASE 'add-keys','key','create','w/valid info','succeeds'
- (err, out) = rgwadmin(ctx, client, [
- 'key', 'create', '--uid', user1,
- '--access-key', access_key2, '--secret', secret_key2,
- ])
- assert not err
-
- # TESTCASE 'info-new-key','user','info','after key addition','returns all keys'
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
- assert not err
- assert len(out['keys']) == 2
- assert out['keys'][0]['access_key'] == access_key2 or out['keys'][1]['access_key'] == access_key2
- assert out['keys'][0]['secret_key'] == secret_key2 or out['keys'][1]['secret_key'] == secret_key2
-
- # TESTCASE 'rm-key','key','rm','newly added key','succeeds, key is removed'
- (err, out) = rgwadmin(ctx, client, [
- 'key', 'rm', '--uid', user1,
- '--access-key', access_key2,
- ])
- assert not err
- assert len(out['keys']) == 1
- assert out['keys'][0]['access_key'] == access_key
- assert out['keys'][0]['secret_key'] == secret_key
-
- # TESTCASE 'add-swift-key','key','create','swift key','succeeds'
- subuser_access = 'full'
- subuser_perm = 'full-control'
-
- (err, out) = rgwadmin(ctx, client, [
- 'subuser', 'create', '--subuser', subuser1,
- '--access', subuser_access
- ])
- assert not err
-
- # TESTCASE 'add-swift-key','key','create','swift key','succeeds'
- (err, out) = rgwadmin(ctx, client, [
- 'subuser', 'modify', '--subuser', subuser1,
- '--secret', swift_secret1,
- '--key-type', 'swift',
- ])
- assert not err
-
- # TESTCASE 'subuser-perm-mask', 'subuser', 'info', 'test subuser perm mask durability', 'succeeds'
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
-
- assert out['subusers'][0]['permissions'] == subuser_perm
-
- # TESTCASE 'info-swift-key','user','info','after key addition','returns all keys'
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
- assert not err
- assert len(out['swift_keys']) == 1
- assert out['swift_keys'][0]['user'] == subuser1
- assert out['swift_keys'][0]['secret_key'] == swift_secret1
-
- # TESTCASE 'add-swift-subuser','key','create','swift sub-user key','succeeds'
- (err, out) = rgwadmin(ctx, client, [
- 'subuser', 'create', '--subuser', subuser2,
- '--secret', swift_secret2,
- '--key-type', 'swift',
- ])
- assert not err
-
- # TESTCASE 'info-swift-subuser','user','info','after key addition','returns all sub-users/keys'
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
- assert not err
- assert len(out['swift_keys']) == 2
- assert out['swift_keys'][0]['user'] == subuser2 or out['swift_keys'][1]['user'] == subuser2
- assert out['swift_keys'][0]['secret_key'] == swift_secret2 or out['swift_keys'][1]['secret_key'] == swift_secret2
-
- # TESTCASE 'rm-swift-key1','key','rm','subuser','succeeds, one key is removed'
- (err, out) = rgwadmin(ctx, client, [
- 'key', 'rm', '--subuser', subuser1,
- '--key-type', 'swift',
- ])
- assert not err
- assert len(out['swift_keys']) == 1
-
- # TESTCASE 'rm-subuser','subuser','rm','subuser','success, subuser is removed'
- (err, out) = rgwadmin(ctx, client, [
- 'subuser', 'rm', '--subuser', subuser1,
- ])
- assert not err
- assert len(out['subusers']) == 1
-
- # TESTCASE 'rm-subuser-with-keys','subuser','rm','subuser','succeeds, second subuser and its key are removed'
- (err, out) = rgwadmin(ctx, client, [
- 'subuser', 'rm', '--subuser', subuser2,
- '--key-type', 'swift', '--purge-keys',
- ])
- assert not err
- assert len(out['swift_keys']) == 0
- assert len(out['subusers']) == 0
-
- # TESTCASE 'bucket-stats','bucket','stats','no session/buckets','succeeds, empty list'
- (err, out) = rgwadmin(ctx, client, ['bucket', 'stats', '--uid', user1])
- assert not err
- assert len(out) == 0
-
- if multi_region_run:
- rgw_utils.radosgw_agent_sync_all(ctx)
-
- # connect to rgw
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
- (remote_user, remote_host) = remote.name.split('@')
- connection = boto.s3.connection.S3Connection(
- aws_access_key_id=access_key,
- aws_secret_access_key=secret_key,
- is_secure=False,
- port=7280,
- host=remote_host,
- calling_format=boto.s3.connection.OrdinaryCallingFormat(),
- )
-
- # TESTCASE 'bucket-stats2','bucket','stats','no buckets','succeeds, empty list'
- (err, out) = rgwadmin(ctx, client, ['bucket', 'list', '--uid', user1])
- assert not err
- assert len(out) == 0
-
- # create a first bucket
- bucket = connection.create_bucket(bucket_name)
-
- # TESTCASE 'bucket-list','bucket','list','one bucket','succeeds, expected list'
- (err, out) = rgwadmin(ctx, client, ['bucket', 'list', '--uid', user1])
- assert not err
- assert len(out) == 1
- assert out[0] == bucket_name
-
- # TESTCASE 'bucket-list-all','bucket','list','all buckets','succeeds, expected list'
- (err, out) = rgwadmin(ctx, client, ['bucket', 'list'])
- assert not err
- assert len(out) >= 1
- assert bucket_name in out
-
- # TESTCASE 'max-bucket-limit','bucket','create','4 buckets','5th bucket fails due to max buckets == 4'
- bucket2 = connection.create_bucket(bucket_name + '2')
- bucket3 = connection.create_bucket(bucket_name + '3')
- bucket4 = connection.create_bucket(bucket_name + '4')
- # the 5th should fail.
- failed = False
- try:
- connection.create_bucket(bucket_name + '5')
- except:
- failed = True
- assert failed
-
- # delete the buckets
- bucket2.delete()
- bucket3.delete()
- bucket4.delete()
-
- # TESTCASE 'bucket-stats3','bucket','stats','new empty bucket','succeeds, empty list'
- (err, out) = rgwadmin(ctx, client, [
- 'bucket', 'stats', '--bucket', bucket_name])
- assert not err
- assert out['owner'] == user1
- bucket_id = out['id']
-
- # TESTCASE 'bucket-stats4','bucket','stats','new empty bucket','succeeds, expected bucket ID'
- (err, out) = rgwadmin(ctx, client, ['bucket', 'stats', '--uid', user1])
- assert not err
- assert len(out) == 1
- assert out[0]['id'] == bucket_id # does it return the same ID twice in a row?
-
- # use some space
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('one')
-
- # TESTCASE 'bucket-stats5','bucket','stats','after creating key','succeeds, lists one non-empty object'
- (err, out) = rgwadmin(ctx, client, [
- 'bucket', 'stats', '--bucket', bucket_name])
- assert not err
- assert out['id'] == bucket_id
- assert out['usage']['rgw.main']['num_objects'] == 1
- assert out['usage']['rgw.main']['size_kb'] > 0
-
- # reclaim it
- key.delete()
-
- # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'fails', 'access denied error'
- (err, out) = rgwadmin(ctx, client, ['bucket', 'unlink', '--uid', user1, '--bucket', bucket_name])
- assert not err
-
- # create a second user to link the bucket to
- (err, out) = rgwadmin(ctx, client, [
- 'user', 'create',
- '--uid', user2,
- '--display-name', display_name2,
- '--access-key', access_key2,
- '--secret', secret_key2,
- '--max-buckets', '1',
- ])
- assert not err
-
- # try creating an object with the first user before the bucket is relinked
- denied = False
- key = boto.s3.key.Key(bucket)
-
- try:
- key.set_contents_from_string('two')
- except boto.exception.S3ResponseError:
- denied = True
-
- assert not denied
-
- # delete the object
- key.delete()
-
- # link the bucket to another user
- (err, out) = rgwadmin(ctx, client, ['bucket', 'link', '--uid', user2, '--bucket', bucket_name])
- assert not err
-
- # try to remove user, should fail (has a linked bucket)
- (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user2])
- assert err
-
- # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'succeeds, bucket unlinked'
- (err, out) = rgwadmin(ctx, client, ['bucket', 'unlink', '--uid', user2, '--bucket', bucket_name])
- assert not err
-
- # relink the bucket to the first user and delete the second user
- (err, out) = rgwadmin(ctx, client, ['bucket', 'link', '--uid', user1, '--bucket', bucket_name])
- assert not err
-
- (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user2])
- assert not err
-
- # TESTCASE 'object-rm', 'object', 'rm', 'remove object', 'succeeds, object is removed'
-
- # upload an object
- object_name = 'four'
- key = boto.s3.key.Key(bucket, object_name)
- key.set_contents_from_string(object_name)
-
- # now delete it
- (err, out) = rgwadmin(ctx, client, ['object', 'rm', '--bucket', bucket_name, '--object', object_name])
-
- assert not err
-
- # TESTCASE 'bucket-stats6','bucket','stats','after deleting key','succeeds, lists no objects'
- (err, out) = rgwadmin(ctx, client, [
- 'bucket', 'stats', '--bucket', bucket_name])
- assert not err
- assert out['id'] == bucket_id
- assert out['usage']['rgw.main']['num_objects'] == 0
-
- # list log objects
- # TESTCASE 'log-list','log','list','after activity','succeeds, lists at least one object'
- (err, out) = rgwadmin(ctx, client, ['log', 'list'])
- assert not err
- assert len(out) > 0
-
- for obj in out:
- # TESTCASE 'log-show','log','show','after activity','returns expected info'
- if obj[:4] == 'meta' or obj[:4] == 'data':
- continue
-
- (err, log) = rgwadmin(ctx, client, ['log', 'show', '--object', obj])
- assert not err
- assert len(log) > 0
-
- assert log['bucket'].find(bucket_name) == 0
- assert log['bucket'] != bucket_name or log['bucket_id'] == bucket_id
- assert log['bucket_owner'] == user1 or log['bucket'] == bucket_name + '5'
- for entry in log['log_entries']:
- assert entry['bucket'] == log['bucket']
- assert entry['user'] == user1 or log['bucket'] == bucket_name + '5'
-
- # TESTCASE 'log-rm','log','rm','delete log objects','succeeds'
- (err, out) = rgwadmin(ctx, client, ['log', 'rm', '--object', obj])
- assert not err
-
- # TODO: show log by bucket+date
-
- # wait for all usage data to get flushed; this normally takes up to 30 seconds, but allow up to 20 minutes
- timestamp = time.time()
- while time.time() - timestamp <= (20 * 60): # wait up to 20 minutes
- (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--categories', 'delete_obj']) # last operation we did is delete obj, wait for it to flush
- if successful_ops(out) > 0:
- break
- time.sleep(1)
-
- assert time.time() - timestamp <= (20 * 60)
-
- # TESTCASE 'usage-show' 'usage' 'show' 'all usage' 'succeeds'
- (err, out) = rgwadmin(ctx, client, ['usage', 'show'])
- assert not err
- assert len(out['entries']) > 0
- assert len(out['summary']) > 0
- user_summary = out['summary'][0]
- total = user_summary['total']
- assert total['successful_ops'] > 0
-
- # TESTCASE 'usage-show2' 'usage' 'show' 'user usage' 'succeeds'
- (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1])
- assert not err
- assert len(out['entries']) > 0
- assert len(out['summary']) > 0
- user_summary = out['summary'][0]
- for entry in user_summary['categories']:
- assert entry['successful_ops'] > 0
- assert user_summary['user'] == user1
-
- # TESTCASE 'usage-show3' 'usage' 'show' 'user usage categories' 'succeeds'
- test_categories = ['create_bucket', 'put_obj', 'delete_obj', 'delete_bucket']
- for cat in test_categories:
- (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1, '--categories', cat])
- assert not err
- assert len(out['summary']) > 0
- user_summary = out['summary'][0]
- assert user_summary['user'] == user1
- assert len(user_summary['categories']) == 1
- entry = user_summary['categories'][0]
- assert entry['category'] == cat
- assert entry['successful_ops'] > 0
-
- # TESTCASE 'usage-trim' 'usage' 'trim' 'user usage' 'succeeds, usage removed'
- (err, out) = rgwadmin(ctx, client, ['usage', 'trim', '--uid', user1])
- assert not err
- (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1])
- assert not err
- assert len(out['entries']) == 0
- assert len(out['summary']) == 0
-
- # TESTCASE 'user-suspend2','user','suspend','existing user','succeeds'
- (err, out) = rgwadmin(ctx, client, ['user', 'suspend', '--uid', user1])
- assert not err
-
- # TESTCASE 'user-suspend3','user','suspend','suspended user','cannot write objects'
- try:
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('five')
- except boto.exception.S3ResponseError as e:
- assert e.status == 403
-
- # TESTCASE 'user-renable2','user','enable','suspended user','succeeds'
- (err, out) = rgwadmin(ctx, client, ['user', 'enable', '--uid', user1])
- assert not err
-
- # TESTCASE 'user-renable3','user','enable','reenabled user','can write objects'
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('six')
-
- # TESTCASE 'gc-list', 'gc', 'list', 'get list of objects ready for garbage collection'
-
- # create an object large enough to be split into multiple parts
- test_string = 'foo'*10000000
-
- big_key = boto.s3.key.Key(bucket)
- big_key.set_contents_from_string(test_string)
-
- # now delete the head
- big_key.delete()
-
- # wait a bit to give the garbage collector time to cycle
- time.sleep(15)
-
- (err, out) = rgwadmin(ctx, client, ['gc', 'list'])
-
- assert len(out) > 0
-
- # TESTCASE 'gc-process', 'gc', 'process', 'manually collect garbage'
- (err, out) = rgwadmin(ctx, client, ['gc', 'process'])
-
- assert not err
-
- #confirm
- (err, out) = rgwadmin(ctx, client, ['gc', 'list'])
-
- assert len(out) == 0
-
- # TESTCASE 'rm-user-buckets','user','rm','existing user','fails, still has buckets'
- (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user1])
- assert err
-
- # delete should fail because ``key`` still exists
- try:
- bucket.delete()
- except boto.exception.S3ResponseError as e:
- assert e.status == 409
-
- key.delete()
- bucket.delete()
-
- # TESTCASE 'policy', 'bucket', 'policy', 'get bucket policy', 'returns S3 policy'
- bucket = connection.create_bucket(bucket_name)
-
- # create an object
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('seven')
-
- # should be private already but guarantee it
- key.set_acl('private')
-
- (err, out) = rgwadmin(ctx, client, ['policy', '--bucket', bucket.name, '--object', key.key])
-
- assert not err
-
- acl = key.get_xml_acl()
-
- assert acl == out.strip('\n')
-
- # add another grantee by making the object public read
- key.set_acl('public-read')
-
- (err, out) = rgwadmin(ctx, client, ['policy', '--bucket', bucket.name, '--object', key.key])
-
- assert not err
-
- acl = key.get_xml_acl()
- assert acl == out.strip('\n')
-
- # TESTCASE 'rm-bucket', 'bucket', 'rm', 'bucket with objects', 'succeeds'
- bucket = connection.create_bucket(bucket_name)
- key_name = ['eight', 'nine', 'ten', 'eleven']
- for i in range(4):
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string(key_name[i])
-
- (err, out) = rgwadmin(ctx, client, ['bucket', 'rm', '--bucket', bucket_name, '--purge-objects'])
- assert not err
-
- # TESTCASE 'caps-add', 'caps', 'add', 'add user cap', 'succeeds'
- caps='user=read'
- (err, out) = rgwadmin(ctx, client, ['caps', 'add', '--uid', user1, '--caps', caps])
-
- assert out['caps'][0]['perm'] == 'read'
-
- # TESTCASE 'caps-rm', 'caps', 'rm', 'remove existing cap from user', 'succeeds'
- (err, out) = rgwadmin(ctx, client, ['caps', 'rm', '--uid', user1, '--caps', caps])
-
- assert not out['caps']
-
- # TESTCASE 'rm-user','user','rm','existing user','fails, still has buckets'
- bucket = connection.create_bucket(bucket_name)
- key = boto.s3.key.Key(bucket)
-
- (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user1])
- assert err
-
- # TESTCASE 'rm-user2', 'user', 'rm', 'user with data', 'succeeds'
- bucket = connection.create_bucket(bucket_name)
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('twelve')
-
- (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user1, '--purge-data' ])
- assert not err
-
- # TESTCASE 'rm-user3','user','info','deleted user','fails'
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
- assert err
-
- # TESTCASE 'zone-info', 'zone', 'get', 'get zone info', 'succeeds, has default placement rule'
- #
-
- (err, out) = rgwadmin(ctx, client, ['zone', 'get'])
- orig_placement_pools = len(out['placement_pools'])
-
- # removed this test, it is not correct to assume that zone has default placement, it really
- # depends on how we set it up before
- #
- # assert len(out) > 0
- # assert len(out['placement_pools']) == 1
-
- # default_rule = out['placement_pools'][0]
- # assert default_rule['key'] == 'default-placement'
-
- rule={'key': 'new-placement', 'val': {'data_pool': '.rgw.buckets.2', 'index_pool': '.rgw.buckets.index.2'}}
-
- out['placement_pools'].append(rule)
-
- (err, out) = rgwadmin(ctx, client, ['zone', 'set'], stdin=StringIO(json.dumps(out)))
- assert not err
-
- (err, out) = rgwadmin(ctx, client, ['zone', 'get'])
- assert len(out) > 0
- assert len(out['placement_pools']) == orig_placement_pools + 1
+++ /dev/null
-import contextlib
-import logging
-import argparse
-
-from ..orchestra import run
-from teuthology import misc as teuthology
-import teuthology.task_util.rgw as rgw_utils
-
-log = logging.getLogger(__name__)
-
-def run_radosgw_agent(ctx, config):
- """
- Run a single radosgw-agent. See task() for config format.
- """
- return_list = list()
- for (client, cconf) in config.items():
- # don't process entries that are not clients
- if not client.startswith('client.'):
- log.debug('key {data} does not start with \'client.\', moving on'.format(
- data=client))
- continue
-
- src_client = cconf['src']
- dest_client = cconf['dest']
-
- src_zone = rgw_utils.zone_for_client(ctx, src_client)
- dest_zone = rgw_utils.zone_for_client(ctx, dest_client)
-
- log.info("source is %s", src_zone)
- log.info("dest is %s", dest_zone)
-
- testdir = teuthology.get_testdir(ctx)
- (remote,) = ctx.cluster.only(client).remotes.keys()
- # figure out which branch to pull from
- branch = cconf.get('force-branch', None)
- if not branch:
- branch = cconf.get('branch', 'master')
- sha1 = cconf.get('sha1')
- remote.run(
- args=[
- 'cd', testdir, run.Raw('&&'),
- 'git', 'clone',
- '-b', branch,
- 'https://github.com/ceph/radosgw-agent.git',
- 'radosgw-agent.{client}'.format(client=client),
- ]
- )
- if sha1 is not None:
- remote.run(
- args=[
- 'cd', testdir, run.Raw('&&'),
- 'cd', 'radosgw-agent.{client}'.format(client=client), run.Raw('&&'),
- 'git', 'reset', '--hard', sha1,
- ]
- )
- remote.run(
- args=[
- 'cd', testdir, run.Raw('&&'),
- 'cd', 'radosgw-agent.{client}'.format(client=client),
- run.Raw('&&'),
- './bootstrap',
- ]
- )
-
- src_host, src_port = rgw_utils.get_zone_host_and_port(ctx, src_client,
- src_zone)
- dest_host, dest_port = rgw_utils.get_zone_host_and_port(ctx, dest_client,
- dest_zone)
- src_access, src_secret = rgw_utils.get_zone_system_keys(ctx, src_client,
- src_zone)
- dest_access, dest_secret = rgw_utils.get_zone_system_keys(ctx, dest_client,
- dest_zone)
- sync_scope = cconf.get('sync-scope', None)
- port = cconf.get('port', 8000)
- daemon_name = '{host}.{port}.syncdaemon'.format(host=remote.name, port=port)
- in_args=[
- '{tdir}/daemon-helper'.format(tdir=testdir), 'kill',
- '{tdir}/radosgw-agent.{client}/radosgw-agent'.format(tdir=testdir,
- client=client),
- '-v',
- '--src-access-key', src_access,
- '--src-secret-key', src_secret,
- '--src-host', src_host,
- '--src-port', str(src_port),
- '--src-zone', src_zone,
- '--dest-access-key', dest_access,
- '--dest-secret-key', dest_secret,
- '--dest-host', dest_host,
- '--dest-port', str(dest_port),
- '--dest-zone', dest_zone,
- '--daemon-id', daemon_name,
- '--log-file', '{tdir}/archive/rgw_sync_agent.{client}.log'.format(
- tdir=testdir,
- client=client),
- ]
- # the test server and full/incremental flags are mutually exclusive
- if sync_scope is None:
- in_args.append('--test-server-host')
- in_args.append('0.0.0.0')
- in_args.append('--test-server-port')
- in_args.append(str(port))
- log.debug('Starting a sync test server on {client}'.format(client=client))
- else:
- in_args.append('--sync-scope')
- in_args.append(sync_scope)
- log.debug('Starting a {scope} sync on {client}'.format(scope=sync_scope,client=client))
-
- return_list.append((client, remote.run(
- args=in_args,
- wait=False,
- stdin=run.PIPE,
- logger=log.getChild(daemon_name),
- )))
- return return_list
-
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Run radosgw-agents in test mode.
-
- Configuration is clients to run the agents on, with settings for
- source client, destination client, and port to listen on. Binds
- to 0.0.0.0. Port defaults to 8000. This must be run on clients
- that have the correct zone root pools and rgw zone set in
- ceph.conf, or the task cannot read the region information from the
- cluster.
-
- By default, this task will start an HTTP server that will trigger full
- or incremental syncs based on requests made to it.
- Alternatively, a single full sync can be triggered by
- specifying 'sync-scope: full' or a loop of incremental syncs can be triggered
- by specifying 'sync-scope: incremental' (the loop will sleep
- '--incremental-sync-delay' seconds between each sync, default is 20 seconds).
-
- An example::
-
- tasks:
- - ceph:
- conf:
- client.0:
- rgw zone = foo
- rgw zone root pool = .root.pool
- client.1:
- rgw zone = bar
- rgw zone root pool = .root.pool2
- - rgw: # region configuration omitted for brevity
- - radosgw-agent:
- client.0:
- branch: wip-next-feature-branch
- src: client.0
- dest: client.1
- sync-scope: full
- # port: 8000 (default)
- client.1:
- src: client.1
- dest: client.0
- port: 8001
- """
- assert isinstance(config, dict), 'rgw_sync_agent requires a dictionary config'
- log.debug("config is %s", config)
-
- overrides = ctx.config.get('overrides', {})
- # merge each client section, but only if it exists in config since there isn't
- # a sensible default action for this task
- for client in config.iterkeys():
- if config[client]:
- log.debug('config[{client}]: {data}'.format(client=client, data=config[client]))
- teuthology.deep_merge(config[client], overrides.get('radosgw-agent', {}))
-
- ctx.radosgw_agent = argparse.Namespace()
- ctx.radosgw_agent.config = config
-
- procs = run_radosgw_agent(ctx, config)
-
- ctx.radosgw_agent.procs = procs
-
- try:
- yield
- finally:
- testdir = teuthology.get_testdir(ctx)
- try:
- for client, proc in procs:
- log.info("shutting down sync agent on %s", client)
- proc.stdin.close()
- proc.exitstatus.get()
- finally:
- for client, proc in procs:
- ctx.cluster.only(client).run(
- args=[
- 'rm', '-rf',
- '{tdir}/radosgw-agent.{client}'.format(tdir=testdir,
- client=client)
- ]
- )
--- /dev/null
+# The test cases in this file have been annotated for inventory.
+# To extract the inventory (in csv format) use the command:
+#
+# grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //'
+#
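The grep/sed pipeline above reads this file on stdin. A stand-alone Python equivalent is sketched below; it is illustrative only, and the default input path is an assumption since the file's final location is not shown here::

    import re
    import sys

    def extract_inventory(path):
        """Yield the CSV rows embedded in '# TESTCASE ...' annotations."""
        with open(path) as f:
            for line in f:
                m = re.match(r'^ *# TESTCASE (.*)$', line)
                if m:
                    yield m.group(1)

    if __name__ == '__main__':
        # Path is hypothetical; pass whatever this task file is saved as.
        for row in extract_inventory(sys.argv[1] if len(sys.argv) > 1 else 'radosgw_admin.py'):
            print(row)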
+
+import contextlib
+import json
+import logging
+import time
+
+from cStringIO import StringIO
+
+import boto.exception
+import boto.s3.connection
+import boto.s3.acl
+
+import teuthology.task_util.rgw as rgw_utils
+
+from teuthology import misc as teuthology
+from teuthology import contextutil
+from teuthology.task_util.rgw import rgwadmin
+
+log = logging.getLogger(__name__)
+
+def successful_ops(out):
+ summary = out['summary']
+ if len(summary) == 0:
+ return 0
+ entry = summary[0]
+ return entry['total']['successful_ops']
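successful_ops() only inspects the first summary entry of the parsed `radosgw-admin usage show` output. A minimal sketch of the structure it expects, with made-up values for illustration::

    sample = {
        'entries': [],
        'summary': [{
            'user': 'foo',
            'categories': [{'category': 'put_obj', 'successful_ops': 3}],
            'total': {'ops': 3, 'successful_ops': 3},
        }],
    }
    assert successful_ops(sample) == 3
    assert successful_ops({'summary': []}) == 0   # nothing flushed yet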
+
+# simple test to indicate if multi-region testing should occur
+def multi_region_enabled(ctx):
+ # this is populated by the radosgw-agent task, seems reasonable to
+ # use that as an indicator that we're testing multi-region sync
+ return 'radosgw_agent' in ctx
+
+def task(ctx, config):
+ """
+ Test radosgw-admin functionality against a running rgw instance.
+ """
+ assert config is None or isinstance(config, list) \
+ or isinstance(config, dict), \
+ "task s3tests only supports a list or dictionary for configuration"
+ all_clients = ['client.{id}'.format(id=id_)
+ for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
+ if config is None:
+ config = all_clients
+ if isinstance(config, list):
+ config = dict.fromkeys(config)
+ clients = config.keys()
+
+ multi_region_run = multi_region_enabled(ctx)
+
+ client = clients[0]  # default choice; multi-region code may overwrite this
+ if multi_region_run:
+ client = rgw_utils.get_master_client(ctx, clients)
+
+ ##
+ user1='foo'
+ user2='fud'
+ subuser1='foo:foo1'
+ subuser2='foo:foo2'
+ display_name1='Foo'
+ display_name2='Fud'
+ email='foo@foo.com'
+ access_key='9te6NH5mcdcq0Tc5i8i1'
+ secret_key='Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu'
+ access_key2='p5YnriCv1nAtykxBrupQ'
+ secret_key2='Q8Tk6Q/27hfbFSYdSkPtUqhqx1GgzvpXa4WARozh'
+ swift_secret1='gpS2G9RREMrnbqlp29PP2D36kgPR1tm72n5fPYfL'
+ swift_secret2='ri2VJQcKSYATOY6uaDUX7pxgkW+W1YmC6OCxPHwy'
+
+ bucket_name='myfoo'
+
+ # legend (test cases can be easily grep-ed out)
+ # TESTCASE 'testname','object','method','operation','assertion'
+ # TESTCASE 'info-nosuch','user','info','non-existent user','fails'
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
+ assert err
+
+ # TESTCASE 'create-ok','user','create','w/all valid info','succeeds'
+ (err, out) = rgwadmin(ctx, client, [
+ 'user', 'create',
+ '--uid', user1,
+ '--display-name', display_name1,
+ '--email', email,
+ '--access-key', access_key,
+ '--secret', secret_key,
+ '--max-buckets', '4'
+ ])
+ assert not err
+
+ # TESTCASE 'duplicate email','user','create','existing user email','fails'
+ (err, out) = rgwadmin(ctx, client, [
+ 'user', 'create',
+ '--uid', user2,
+ '--display-name', display_name2,
+ '--email', email,
+ ])
+ assert err
+
+ # TESTCASE 'info-existing','user','info','existing user','returns correct info'
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
+ assert not err
+ assert out['user_id'] == user1
+ assert out['email'] == email
+ assert out['display_name'] == display_name1
+ assert len(out['keys']) == 1
+ assert out['keys'][0]['access_key'] == access_key
+ assert out['keys'][0]['secret_key'] == secret_key
+ assert not out['suspended']
+
+ # this whole block should only be run if regions have been configured
+ if multi_region_run:
+ rgw_utils.radosgw_agent_sync_all(ctx)
+ # post-sync, validate that user1 exists on the sync destination host
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ dest_client = c_config['dest']
+ (err, out) = rgwadmin(ctx, dest_client, ['metadata', 'list', 'user'])
+ (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1])
+ assert not err
+ assert out['user_id'] == user1
+ assert out['email'] == email
+ assert out['display_name'] == display_name1
+ assert len(out['keys']) == 1
+ assert out['keys'][0]['access_key'] == access_key
+ assert out['keys'][0]['secret_key'] == secret_key
+ assert not out['suspended']
+
+ # compare the metadata between different regions, make sure it matches
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ source_client = c_config['src']
+ dest_client = c_config['dest']
+ (err1, out1) = rgwadmin(ctx, source_client, ['metadata', 'get', 'user:{uid}'.format(uid=user1)])
+ (err2, out2) = rgwadmin(ctx, dest_client, ['metadata', 'get', 'user:{uid}'.format(uid=user1)])
+ assert not err1
+ assert not err2
+ assert out1 == out2
+
+ # suspend a user on the master, then check the status on the destination
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ source_client = c_config['src']
+ dest_client = c_config['dest']
+ (err, out) = rgwadmin(ctx, source_client, ['user', 'suspend', '--uid', user1])
+ rgw_utils.radosgw_agent_sync_all(ctx)
+ (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1])
+ assert not err
+ assert out['suspended']
+
+ # delete a user on the master, then check that it's gone on the destination
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ source_client = c_config['src']
+ dest_client = c_config['dest']
+ (err, out) = rgwadmin(ctx, source_client, ['user', 'rm', '--uid', user1])
+ assert not err
+ rgw_utils.radosgw_agent_sync_all(ctx)
+ (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1])
+ assert out is None
+
+ # then recreate it so later tests pass
+ (err, out) = rgwadmin(ctx, client, [
+ 'user', 'create',
+ '--uid', user1,
+ '--display-name', display_name1,
+ '--email', email,
+ '--access-key', access_key,
+ '--secret', secret_key,
+ '--max-buckets', '4'
+ ])
+ assert not err
+ # end of 'if multi_region_run:'
+
+ # TESTCASE 'suspend-ok','user','suspend','active user','succeeds'
+ (err, out) = rgwadmin(ctx, client, ['user', 'suspend', '--uid', user1])
+ assert not err
+
+ # TESTCASE 'suspend-suspended','user','suspend','suspended user','succeeds w/advisory'
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
+ assert not err
+ assert out['suspended']
+
+ # TESTCASE 're-enable','user','enable','suspended user','succeeds'
+ (err, out) = rgwadmin(ctx, client, ['user', 'enable', '--uid', user1])
+ assert not err
+
+ # TESTCASE 'info-re-enabled','user','info','re-enabled user','no longer suspended'
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
+ assert not err
+ assert not out['suspended']
+
+ # TESTCASE 'add-keys','key','create','w/valid info','succeeds'
+ (err, out) = rgwadmin(ctx, client, [
+ 'key', 'create', '--uid', user1,
+ '--access-key', access_key2, '--secret', secret_key2,
+ ])
+ assert not err
+
+ # TESTCASE 'info-new-key','user','info','after key addition','returns all keys'
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
+ assert not err
+ assert len(out['keys']) == 2
+ assert out['keys'][0]['access_key'] == access_key2 or out['keys'][1]['access_key'] == access_key2
+ assert out['keys'][0]['secret_key'] == secret_key2 or out['keys'][1]['secret_key'] == secret_key2
+
+ # TESTCASE 'rm-key','key','rm','newly added key','succeeds, key is removed'
+ (err, out) = rgwadmin(ctx, client, [
+ 'key', 'rm', '--uid', user1,
+ '--access-key', access_key2,
+ ])
+ assert not err
+ assert len(out['keys']) == 1
+ assert out['keys'][0]['access_key'] == access_key
+ assert out['keys'][0]['secret_key'] == secret_key
+
+ # TESTCASE 'add-swift-key','key','create','swift key','succeeds'
+ subuser_access = 'full'
+ subuser_perm = 'full-control'
+
+ (err, out) = rgwadmin(ctx, client, [
+ 'subuser', 'create', '--subuser', subuser1,
+ '--access', subuser_access
+ ])
+ assert not err
+
+ # TESTCASE 'add-swift-key','key','create','swift key','succeeds'
+ (err, out) = rgwadmin(ctx, client, [
+ 'subuser', 'modify', '--subuser', subuser1,
+ '--secret', swift_secret1,
+ '--key-type', 'swift',
+ ])
+ assert not err
+
+ # TESTCASE 'subuser-perm-mask', 'subuser', 'info', 'test subuser perm mask durability', 'succeeds'
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
+
+ assert out['subusers'][0]['permissions'] == subuser_perm
+
+ # TESTCASE 'info-swift-key','user','info','after key addition','returns all keys'
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
+ assert not err
+ assert len(out['swift_keys']) == 1
+ assert out['swift_keys'][0]['user'] == subuser1
+ assert out['swift_keys'][0]['secret_key'] == swift_secret1
+
+ # TESTCASE 'add-swift-subuser','key','create','swift sub-user key','succeeds'
+ (err, out) = rgwadmin(ctx, client, [
+ 'subuser', 'create', '--subuser', subuser2,
+ '--secret', swift_secret2,
+ '--key-type', 'swift',
+ ])
+ assert not err
+
+ # TESTCASE 'info-swift-subuser','user','info','after key addition','returns all sub-users/keys'
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
+ assert not err
+ assert len(out['swift_keys']) == 2
+ assert out['swift_keys'][0]['user'] == subuser2 or out['swift_keys'][1]['user'] == subuser2
+ assert out['swift_keys'][0]['secret_key'] == swift_secret2 or out['swift_keys'][1]['secret_key'] == swift_secret2
+
+ # TESTCASE 'rm-swift-key1','key','rm','subuser','succeeds, one key is removed'
+ (err, out) = rgwadmin(ctx, client, [
+ 'key', 'rm', '--subuser', subuser1,
+ '--key-type', 'swift',
+ ])
+ assert not err
+ assert len(out['swift_keys']) == 1
+
+ # TESTCASE 'rm-subuser','subuser','rm','subuser','success, subuser is removed'
+ (err, out) = rgwadmin(ctx, client, [
+ 'subuser', 'rm', '--subuser', subuser1,
+ ])
+ assert not err
+ assert len(out['subusers']) == 1
+
+ # TESTCASE 'rm-subuser-with-keys','subuser','rm','subuser','succeeds, second subuser and its key are removed'
+ (err, out) = rgwadmin(ctx, client, [
+ 'subuser', 'rm', '--subuser', subuser2,
+ '--key-type', 'swift', '--purge-keys',
+ ])
+ assert not err
+ assert len(out['swift_keys']) == 0
+ assert len(out['subusers']) == 0
+
+ # TESTCASE 'bucket-stats','bucket','stats','no session/buckets','succeeds, empty list'
+ (err, out) = rgwadmin(ctx, client, ['bucket', 'stats', '--uid', user1])
+ assert not err
+ assert len(out) == 0
+
+ if multi_region_run:
+ rgw_utils.radosgw_agent_sync_all(ctx)
+
+ # connect to rgw
+ (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ (remote_user, remote_host) = remote.name.split('@')
+ connection = boto.s3.connection.S3Connection(
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ is_secure=False,
+ port=7280,
+ host=remote_host,
+ calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+ )
+
+ # TESTCASE 'bucket-stats2','bucket','stats','no buckets','succeeds, empty list'
+ (err, out) = rgwadmin(ctx, client, ['bucket', 'list', '--uid', user1])
+ assert not err
+ assert len(out) == 0
+
+ # create a first bucket
+ bucket = connection.create_bucket(bucket_name)
+
+ # TESTCASE 'bucket-list','bucket','list','one bucket','succeeds, expected list'
+ (err, out) = rgwadmin(ctx, client, ['bucket', 'list', '--uid', user1])
+ assert not err
+ assert len(out) == 1
+ assert out[0] == bucket_name
+
+ # TESTCASE 'bucket-list-all','bucket','list','all buckets','succeeds, expected list'
+ (err, out) = rgwadmin(ctx, client, ['bucket', 'list'])
+ assert not err
+ assert len(out) >= 1
+ assert bucket_name in out
+
+ # TESTCASE 'max-bucket-limit','bucket','create','4 buckets','5th bucket fails due to max buckets == 4'
+ bucket2 = connection.create_bucket(bucket_name + '2')
+ bucket3 = connection.create_bucket(bucket_name + '3')
+ bucket4 = connection.create_bucket(bucket_name + '4')
+ # the 5th should fail.
+ failed = False
+ try:
+ connection.create_bucket(bucket_name + '5')
+ except:
+ failed = True
+ assert failed
+
+ # delete the buckets
+ bucket2.delete()
+ bucket3.delete()
+ bucket4.delete()
+
+ # TESTCASE 'bucket-stats3','bucket','stats','new empty bucket','succeeds, empty list'
+ (err, out) = rgwadmin(ctx, client, [
+ 'bucket', 'stats', '--bucket', bucket_name])
+ assert not err
+ assert out['owner'] == user1
+ bucket_id = out['id']
+
+ # TESTCASE 'bucket-stats4','bucket','stats','new empty bucket','succeeds, expected bucket ID'
+ (err, out) = rgwadmin(ctx, client, ['bucket', 'stats', '--uid', user1])
+ assert not err
+ assert len(out) == 1
+ assert out[0]['id'] == bucket_id # does it return the same ID twice in a row?
+
+ # use some space
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('one')
+
+ # TESTCASE 'bucket-stats5','bucket','stats','after creating key','succeeds, lists one non-empty object'
+ (err, out) = rgwadmin(ctx, client, [
+ 'bucket', 'stats', '--bucket', bucket_name])
+ assert not err
+ assert out['id'] == bucket_id
+ assert out['usage']['rgw.main']['num_objects'] == 1
+ assert out['usage']['rgw.main']['size_kb'] > 0
+
+ # reclaim it
+ key.delete()
+
+ # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'fails', 'access denied error'
+ (err, out) = rgwadmin(ctx, client, ['bucket', 'unlink', '--uid', user1, '--bucket', bucket_name])
+ assert not err
+
+ # create a second user to link the bucket to
+ (err, out) = rgwadmin(ctx, client, [
+ 'user', 'create',
+ '--uid', user2,
+ '--display-name', display_name2,
+ '--access-key', access_key2,
+ '--secret', secret_key2,
+ '--max-buckets', '1',
+ ])
+ assert not err
+
+ # try creating an object with the first user before the bucket is relinked
+ denied = False
+ key = boto.s3.key.Key(bucket)
+
+ try:
+ key.set_contents_from_string('two')
+ except boto.exception.S3ResponseError:
+ denied = True
+
+ assert not denied
+
+ # delete the object
+ key.delete()
+
+ # link the bucket to another user
+ (err, out) = rgwadmin(ctx, client, ['bucket', 'link', '--uid', user2, '--bucket', bucket_name])
+ assert not err
+
+ # try to remove user, should fail (has a linked bucket)
+ (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user2])
+ assert err
+
+ # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'succeeds, bucket unlinked'
+ (err, out) = rgwadmin(ctx, client, ['bucket', 'unlink', '--uid', user2, '--bucket', bucket_name])
+ assert not err
+
+ # relink the bucket to the first user and delete the second user
+ (err, out) = rgwadmin(ctx, client, ['bucket', 'link', '--uid', user1, '--bucket', bucket_name])
+ assert not err
+
+ (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user2])
+ assert not err
+
+ # TESTCASE 'object-rm', 'object', 'rm', 'remove object', 'succeeds, object is removed'
+
+ # upload an object
+ object_name = 'four'
+ key = boto.s3.key.Key(bucket, object_name)
+ key.set_contents_from_string(object_name)
+
+ # now delete it
+ (err, out) = rgwadmin(ctx, client, ['object', 'rm', '--bucket', bucket_name, '--object', object_name])
+
+ assert not err
+
+ # TESTCASE 'bucket-stats6','bucket','stats','after deleting key','succeeds, lists no objects'
+ (err, out) = rgwadmin(ctx, client, [
+ 'bucket', 'stats', '--bucket', bucket_name])
+ assert not err
+ assert out['id'] == bucket_id
+ assert out['usage']['rgw.main']['num_objects'] == 0
+
+ # list log objects
+ # TESTCASE 'log-list','log','list','after activity','succeeds, lists at least one object'
+ (err, out) = rgwadmin(ctx, client, ['log', 'list'])
+ assert not err
+ assert len(out) > 0
+
+ for obj in out:
+ # TESTCASE 'log-show','log','show','after activity','returns expected info'
+ if obj[:4] == 'meta' or obj[:4] == 'data':
+ continue
+
+ (err, log_data) = rgwadmin(ctx, client, ['log', 'show', '--object', obj])
+ assert not err
+ assert len(log_data) > 0
+
+ assert log_data['bucket'].find(bucket_name) == 0
+ assert log_data['bucket'] != bucket_name or log_data['bucket_id'] == bucket_id
+ assert log_data['bucket_owner'] == user1 or log_data['bucket'] == bucket_name + '5'
+ for entry in log_data['log_entries']:
+ assert entry['bucket'] == log_data['bucket']
+ assert entry['user'] == user1 or log_data['bucket'] == bucket_name + '5'
+
+ # TESTCASE 'log-rm','log','rm','delete log objects','succeeds'
+ (err, out) = rgwadmin(ctx, client, ['log', 'rm', '--object', obj])
+ assert not err
+
+ # TODO: show log by bucket+date
+
+ # wait for all usage data to get flushed; this normally takes up to 30 seconds, but allow up to 20 minutes
+ timestamp = time.time()
+ while time.time() - timestamp <= (20 * 60): # wait up to 20 minutes
+ (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--categories', 'delete_obj']) # last operation we did is delete obj, wait for it to flush
+ if successful_ops(out) > 0:
+ break
+ time.sleep(1)
+
+ assert time.time() - timestamp <= (20 * 60)
+
+ # TESTCASE 'usage-show' 'usage' 'show' 'all usage' 'succeeds'
+ (err, out) = rgwadmin(ctx, client, ['usage', 'show'])
+ assert not err
+ assert len(out['entries']) > 0
+ assert len(out['summary']) > 0
+ user_summary = out['summary'][0]
+ total = user_summary['total']
+ assert total['successful_ops'] > 0
+
+ # TESTCASE 'usage-show2' 'usage' 'show' 'user usage' 'succeeds'
+ (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1])
+ assert not err
+ assert len(out['entries']) > 0
+ assert len(out['summary']) > 0
+ user_summary = out['summary'][0]
+ for entry in user_summary['categories']:
+ assert entry['successful_ops'] > 0
+ assert user_summary['user'] == user1
+
+ # TESTCASE 'usage-show3' 'usage' 'show' 'user usage categories' 'succeeds'
+ test_categories = ['create_bucket', 'put_obj', 'delete_obj', 'delete_bucket']
+ for cat in test_categories:
+ (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1, '--categories', cat])
+ assert not err
+ assert len(out['summary']) > 0
+ user_summary = out['summary'][0]
+ assert user_summary['user'] == user1
+ assert len(user_summary['categories']) == 1
+ entry = user_summary['categories'][0]
+ assert entry['category'] == cat
+ assert entry['successful_ops'] > 0
+
+ # TESTCASE 'usage-trim' 'usage' 'trim' 'user usage' 'succeeds, usage removed'
+ (err, out) = rgwadmin(ctx, client, ['usage', 'trim', '--uid', user1])
+ assert not err
+ (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1])
+ assert not err
+ assert len(out['entries']) == 0
+ assert len(out['summary']) == 0
+
+ # TESTCASE 'user-suspend2','user','suspend','existing user','succeeds'
+ (err, out) = rgwadmin(ctx, client, ['user', 'suspend', '--uid', user1])
+ assert not err
+
+ # TESTCASE 'user-suspend3','user','suspend','suspended user','cannot write objects'
+ try:
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('five')
+ except boto.exception.S3ResponseError as e:
+ assert e.status == 403
+
+ # TESTCASE 'user-renable2','user','enable','suspended user','succeeds'
+ (err, out) = rgwadmin(ctx, client, ['user', 'enable', '--uid', user1])
+ assert not err
+
+ # TESTCASE 'user-renable3','user','enable','reenabled user','can write objects'
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('six')
+
+ # TESTCASE 'gc-list', 'gc', 'list', 'get list of objects ready for garbage collection'
+
+ # create an object large enough to be split into multiple parts
+ test_string = 'foo'*10000000
+
+ big_key = boto.s3.key.Key(bucket)
+ big_key.set_contents_from_string(test_string)
+
+ # now delete the head
+ big_key.delete()
+
+ # wait a bit to give the garbage collector time to cycle
+ time.sleep(15)
+
+ (err, out) = rgwadmin(ctx, client, ['gc', 'list'])
+
+ assert len(out) > 0
+
+ # TESTCASE 'gc-process', 'gc', 'process', 'manually collect garbage'
+ (err, out) = rgwadmin(ctx, client, ['gc', 'process'])
+
+ assert not err
+
+ #confirm
+ (err, out) = rgwadmin(ctx, client, ['gc', 'list'])
+
+ assert len(out) == 0
+
+ # TESTCASE 'rm-user-buckets','user','rm','existing user','fails, still has buckets'
+ (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user1])
+ assert err
+
+ # delete should fail because ``key`` still exists
+ try:
+ bucket.delete()
+ except boto.exception.S3ResponseError as e:
+ assert e.status == 409
+
+ key.delete()
+ bucket.delete()
+
+ # TESTCASE 'policy', 'bucket', 'policy', 'get bucket policy', 'returns S3 policy'
+ bucket = connection.create_bucket(bucket_name)
+
+ # create an object
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('seven')
+
+ # should be private already but guarantee it
+ key.set_acl('private')
+
+ (err, out) = rgwadmin(ctx, client, ['policy', '--bucket', bucket.name, '--object', key.key])
+
+ assert not err
+
+ acl = key.get_xml_acl()
+
+ assert acl == out.strip('\n')
+
+ # add another grantee by making the object public read
+ key.set_acl('public-read')
+
+ (err, out) = rgwadmin(ctx, client, ['policy', '--bucket', bucket.name, '--object', key.key])
+
+ assert not err
+
+ acl = key.get_xml_acl()
+ assert acl == out.strip('\n')
+
+ # TESTCASE 'rm-bucket', 'bucket', 'rm', 'bucket with objects', 'succeeds'
+ bucket = connection.create_bucket(bucket_name)
+ key_name = ['eight', 'nine', 'ten', 'eleven']
+ for i in range(4):
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string(key_name[i])
+
+ (err, out) = rgwadmin(ctx, client, ['bucket', 'rm', '--bucket', bucket_name, '--purge-objects'])
+ assert not err
+
+ # TESTCASE 'caps-add', 'caps', 'add', 'add user cap', 'succeeds'
+ caps='user=read'
+ (err, out) = rgwadmin(ctx, client, ['caps', 'add', '--uid', user1, '--caps', caps])
+
+ assert out['caps'][0]['perm'] == 'read'
+
+ # TESTCASE 'caps-rm', 'caps', 'rm', 'remove existing cap from user', 'succeeds'
+ (err, out) = rgwadmin(ctx, client, ['caps', 'rm', '--uid', user1, '--caps', caps])
+
+ assert not out['caps']
+
+ # TESTCASE 'rm-user','user','rm','existing user','fails, still has buckets'
+ bucket = connection.create_bucket(bucket_name)
+ key = boto.s3.key.Key(bucket)
+
+ (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user1])
+ assert err
+
+ # TESTCASE 'rm-user2', 'user', 'rm', 'user with data', 'succeeds'
+ bucket = connection.create_bucket(bucket_name)
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('twelve')
+
+ (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user1, '--purge-data' ])
+ assert not err
+
+ # TESTCASE 'rm-user3','user','info','deleted user','fails'
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
+ assert err
+
+ # TESTCASE 'zone-info', 'zone', 'get', 'get zone info', 'succeeds, has default placement rule'
+ #
+
+ (err, out) = rgwadmin(ctx, client, ['zone', 'get'])
+ orig_placement_pools = len(out['placement_pools'])
+
+ # removed this test, it is not correct to assume that zone has default placement, it really
+ # depends on how we set it up before
+ #
+ # assert len(out) > 0
+ # assert len(out['placement_pools']) == 1
+
+ # default_rule = out['placement_pools'][0]
+ # assert default_rule['key'] == 'default-placement'
+
+ rule={'key': 'new-placement', 'val': {'data_pool': '.rgw.buckets.2', 'index_pool': '.rgw.buckets.index.2'}}
+
+ out['placement_pools'].append(rule)
+
+ (err, out) = rgwadmin(ctx, client, ['zone', 'set'], stdin=StringIO(json.dumps(out)))
+ assert not err
+
+ (err, out) = rgwadmin(ctx, client, ['zone', 'get'])
+ assert len(out) > 0
+ assert len(out['placement_pools']) == orig_placement_pools + 1
--- /dev/null
+# The test cases in this file have been annotated for inventory.
+# To extract the inventory (in csv format) use the command:
+#
+# grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //'
+#
+
+from cStringIO import StringIO
+import logging
+import json
+
+import boto.exception
+import boto.s3.connection
+import boto.s3.acl
+
+import requests
+import time
+
+from boto.connection import AWSAuthConnection
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+def successful_ops(out):
+ summary = out['summary']
+ if len(summary) == 0:
+ return 0
+ entry = summary[0]
+ return entry['total']['successful_ops']
+
+def rgwadmin(ctx, client, cmd):
+ log.info('radosgw-admin: %s' % cmd)
+ testdir = teuthology.get_testdir(ctx)
+ pre = [
+ '{tdir}/adjust-ulimits'.format(tdir=testdir),
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'radosgw-admin',
+ '--log-to-stderr',
+ '--format', 'json',
+ ]
+ pre.extend(cmd)
+ (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ proc = remote.run(
+ args=pre,
+ check_status=False,
+ stdout=StringIO(),
+ stderr=StringIO(),
+ )
+ r = proc.exitstatus
+ out = proc.stdout.getvalue()
+ j = None
+ if not r and out != '':
+ try:
+ j = json.loads(out)
+ log.info(' json result: %s' % j)
+ except ValueError:
+ j = out
+ log.info(' raw result: %s' % j)
+ return (r, j)
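This helper returns the command's exit status together with its parsed JSON output, so the tests below treat a falsy first element as success. An illustrative call pattern only; a real invocation needs the teuthology ctx and a client role from the running job::

    (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', 'foo'])
    assert not err                  # radosgw-admin exited 0
    assert out['user_id'] == 'foo'  # parsed from the JSON on stdout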
+
+
+def rgwadmin_rest(connection, cmd, params=None, headers=None, raw=False):
+ log.info('radosgw-admin-rest: %s %s' % (cmd, params))
+ put_cmds = ['create', 'link', 'add']
+ post_cmds = ['unlink', 'modify']
+ delete_cmds = ['trim', 'rm', 'process']
+ get_cmds = ['check', 'info', 'show', 'list']
+
+ bucket_sub_resources = ['object', 'policy', 'index']
+ user_sub_resources = ['subuser', 'key', 'caps']
+ zone_sub_resources = ['pool', 'log', 'garbage']
+
+ def get_cmd_method_and_handler(cmd):
+ if cmd[1] in put_cmds:
+ return 'PUT', requests.put
+ elif cmd[1] in delete_cmds:
+ return 'DELETE', requests.delete
+ elif cmd[1] in post_cmds:
+ return 'POST', requests.post
+ elif cmd[1] in get_cmds:
+ return 'GET', requests.get
+
+ def get_resource(cmd):
+ if cmd[0] == 'bucket' or cmd[0] in bucket_sub_resources:
+ if cmd[0] == 'bucket':
+ return 'bucket', ''
+ else:
+ return 'bucket', cmd[0]
+ elif cmd[0] == 'user' or cmd[0] in user_sub_resources:
+ if cmd[0] == 'user':
+ return 'user', ''
+ else:
+ return 'user', cmd[0]
+ elif cmd[0] == 'usage':
+ return 'usage', ''
+ elif cmd[0] == 'zone' or cmd[0] in zone_sub_resources:
+ if cmd[0] == 'zone':
+ return 'zone', ''
+ else:
+ return 'zone', cmd[0]
+
+ """
+ Adapted from the build_request() method of boto.connection
+ """
+ def build_admin_request(conn, method, resource = '', headers=None, data='',
+ query_args=None, params=None):
+
+ path = conn.calling_format.build_path_base('admin', resource)
+ auth_path = conn.calling_format.build_auth_path('admin', resource)
+ host = conn.calling_format.build_host(conn.server_name(), 'admin')
+ if query_args:
+ path += '?' + query_args
+ boto.log.debug('path=%s' % path)
+ auth_path += '?' + query_args
+ boto.log.debug('auth_path=%s' % auth_path)
+ return AWSAuthConnection.build_base_http_request(conn, method, path,
+ auth_path, params, headers, data, host)
+
+ method, handler = get_cmd_method_and_handler(cmd)
+ resource, query_args = get_resource(cmd)
+ request = build_admin_request(connection, method, resource,
+ query_args=query_args, headers=headers)
+
+ url = '{protocol}://{host}{path}'.format(protocol=request.protocol,
+ host=request.host, path=request.path)
+
+ request.authorize(connection=connection)
+ result = handler(url, params=params, headers=request.headers)
+
+ if raw:
+        log.info(' text result: %s' % result.text)
+        return result.status_code, result.text
+ else:
+ log.info(' json result: %s' % result.json)
+ return result.status_code, result.json
+
+
+def task(ctx, config):
+ """
+ Test radosgw-admin functionality through the RESTful interface
+ """
+ assert config is None or isinstance(config, list) \
+ or isinstance(config, dict), \
+        "this task only supports a list or dictionary for configuration"
+ all_clients = ['client.{id}'.format(id=id_)
+ for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
+ if config is None:
+ config = all_clients
+ if isinstance(config, list):
+ config = dict.fromkeys(config)
+ clients = config.keys()
+
+ # just use the first client...
+ client = clients[0]
+
+ ##
+ admin_user = 'ada'
+ admin_display_name = 'Ms. Admin User'
+ admin_access_key = 'MH1WC2XQ1S8UISFDZC8W'
+ admin_secret_key = 'dQyrTPA0s248YeN5bBv4ukvKU0kh54LWWywkrpoG'
+ admin_caps = 'users=read, write; usage=read, write; buckets=read, write; zone=read, write'
+
+ user1 = 'foo'
+ user2 = 'fud'
+ subuser1 = 'foo:foo1'
+ subuser2 = 'foo:foo2'
+ display_name1 = 'Foo'
+ display_name2 = 'Fud'
+ email = 'foo@foo.com'
+ access_key = '9te6NH5mcdcq0Tc5i8i1'
+ secret_key = 'Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu'
+ access_key2 = 'p5YnriCv1nAtykxBrupQ'
+ secret_key2 = 'Q8Tk6Q/27hfbFSYdSkPtUqhqx1GgzvpXa4WARozh'
+ swift_secret1 = 'gpS2G9RREMrnbqlp29PP2D36kgPR1tm72n5fPYfL'
+ swift_secret2 = 'ri2VJQcKSYATOY6uaDUX7pxgkW+W1YmC6OCxPHwy'
+
+ bucket_name = 'myfoo'
+
+ # legend (test cases can be easily grep-ed out)
+ # TESTCASE 'testname','object','method','operation','assertion'
+ # TESTCASE 'create-admin-user','user','create','administrative user','succeeds'
+ (err, out) = rgwadmin(ctx, client, [
+ 'user', 'create',
+ '--uid', admin_user,
+ '--display-name', admin_display_name,
+ '--access-key', admin_access_key,
+ '--secret', admin_secret_key,
+ '--max-buckets', '0',
+ '--caps', admin_caps
+ ])
+    log.error(out)
+    log.error(err)
+ assert not err
+
+ (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ remote_host = remote.name.split('@')[1]
+ admin_conn = boto.s3.connection.S3Connection(
+ aws_access_key_id=admin_access_key,
+ aws_secret_access_key=admin_secret_key,
+ is_secure=False,
+ port=7280,
+ host=remote_host,
+ calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+ )
+
+ # TESTCASE 'info-nosuch','user','info','non-existent user','fails'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {"uid": user1})
+ assert ret == 404
+
+ # TESTCASE 'create-ok','user','create','w/all valid info','succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['user', 'create'],
+ {'uid' : user1,
+ 'display-name' : display_name1,
+ 'email' : email,
+ 'access-key' : access_key,
+ 'secret-key' : secret_key,
+ 'max-buckets' : '4'
+ })
+
+ assert ret == 200
+
+ # TESTCASE 'info-existing','user','info','existing user','returns correct info'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+
+ assert out['user_id'] == user1
+ assert out['email'] == email
+ assert out['display_name'] == display_name1
+ assert len(out['keys']) == 1
+ assert out['keys'][0]['access_key'] == access_key
+ assert out['keys'][0]['secret_key'] == secret_key
+ assert not out['suspended']
+
+ # TESTCASE 'suspend-ok','user','suspend','active user','succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : True})
+ assert ret == 200
+
+ # TESTCASE 'suspend-suspended','user','suspend','suspended user','succeeds w/advisory'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+ assert ret == 200
+ assert out['suspended']
+
+ # TESTCASE 're-enable','user','enable','suspended user','succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : 'false'})
+    assert ret == 200
+
+ # TESTCASE 'info-re-enabled','user','info','re-enabled user','no longer suspended'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+ assert ret == 200
+ assert not out['suspended']
+
+ # TESTCASE 'add-keys','key','create','w/valid info','succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['key', 'create'],
+ {'uid' : user1,
+ 'access-key' : access_key2,
+ 'secret-key' : secret_key2
+ })
+
+
+ assert ret == 200
+
+ # TESTCASE 'info-new-key','user','info','after key addition','returns all keys'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+ assert ret == 200
+ assert len(out['keys']) == 2
+ assert out['keys'][0]['access_key'] == access_key2 or out['keys'][1]['access_key'] == access_key2
+ assert out['keys'][0]['secret_key'] == secret_key2 or out['keys'][1]['secret_key'] == secret_key2
+
+ # TESTCASE 'rm-key','key','rm','newly added key','succeeds, key is removed'
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['key', 'rm'],
+ {'uid' : user1,
+ 'access-key' : access_key2
+ })
+
+ assert ret == 200
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+
+ assert len(out['keys']) == 1
+ assert out['keys'][0]['access_key'] == access_key
+ assert out['keys'][0]['secret_key'] == secret_key
+
+ # TESTCASE 'add-swift-key','key','create','swift key','succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['subuser', 'create'],
+ {'subuser' : subuser1,
+ 'secret-key' : swift_secret1,
+ 'key-type' : 'swift'
+ })
+
+ assert ret == 200
+
+ # TESTCASE 'info-swift-key','user','info','after key addition','returns all keys'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+ assert ret == 200
+ assert len(out['swift_keys']) == 1
+ assert out['swift_keys'][0]['user'] == subuser1
+ assert out['swift_keys'][0]['secret_key'] == swift_secret1
+
+ # TESTCASE 'add-swift-subuser','key','create','swift sub-user key','succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['subuser', 'create'],
+ {'subuser' : subuser2,
+ 'secret-key' : swift_secret2,
+ 'key-type' : 'swift'
+ })
+
+ assert ret == 200
+
+ # TESTCASE 'info-swift-subuser','user','info','after key addition','returns all sub-users/keys'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+ assert ret == 200
+ assert len(out['swift_keys']) == 2
+ assert out['swift_keys'][0]['user'] == subuser2 or out['swift_keys'][1]['user'] == subuser2
+ assert out['swift_keys'][0]['secret_key'] == swift_secret2 or out['swift_keys'][1]['secret_key'] == swift_secret2
+
+ # TESTCASE 'rm-swift-key1','key','rm','subuser','succeeds, one key is removed'
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['key', 'rm'],
+ {'subuser' : subuser1,
+ 'key-type' :'swift'
+ })
+
+ assert ret == 200
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+ assert len(out['swift_keys']) == 1
+
+ # TESTCASE 'rm-subuser','subuser','rm','subuser','success, subuser is removed'
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['subuser', 'rm'],
+ {'subuser' : subuser1
+ })
+
+ assert ret == 200
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+ assert len(out['subusers']) == 1
+
+    # TESTCASE 'rm-subuser-with-keys','subuser','rm','subuser','succeeds, second subuser and key is removed'
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['subuser', 'rm'],
+ {'subuser' : subuser2,
+ 'key-type' : 'swift',
+             'purge-keys' : True
+ })
+
+ assert ret == 200
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+ assert len(out['swift_keys']) == 0
+ assert len(out['subusers']) == 0
+
+ # TESTCASE 'bucket-stats','bucket','info','no session/buckets','succeeds, empty list'
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1})
+ assert ret == 200
+ assert len(out) == 0
+
+ # connect to rgw
+ connection = boto.s3.connection.S3Connection(
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ is_secure=False,
+ port=7280,
+ host=remote_host,
+ calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+ )
+
+ # TESTCASE 'bucket-stats2','bucket','stats','no buckets','succeeds, empty list'
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1, 'stats' : True})
+ assert ret == 200
+ assert len(out) == 0
+
+ # create a first bucket
+ bucket = connection.create_bucket(bucket_name)
+
+ # TESTCASE 'bucket-list','bucket','list','one bucket','succeeds, expected list'
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1})
+ assert ret == 200
+ assert len(out) == 1
+ assert out[0] == bucket_name
+
+ # TESTCASE 'bucket-stats3','bucket','stats','new empty bucket','succeeds, empty list'
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True})
+
+ assert ret == 200
+ assert out['owner'] == user1
+ bucket_id = out['id']
+
+ # TESTCASE 'bucket-stats4','bucket','stats','new empty bucket','succeeds, expected bucket ID'
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1, 'stats' : True})
+ assert ret == 200
+ assert len(out) == 1
+ assert out[0]['id'] == bucket_id # does it return the same ID twice in a row?
+
+ # use some space
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('one')
+
+ # TESTCASE 'bucket-stats5','bucket','stats','after creating key','succeeds, lists one non-empty object'
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True})
+ assert ret == 200
+ assert out['id'] == bucket_id
+ assert out['usage']['rgw.main']['num_objects'] == 1
+ assert out['usage']['rgw.main']['size_kb'] > 0
+
+ # reclaim it
+ key.delete()
+
+    # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'succeeds, bucket unlinked'
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'unlink'], {'uid' : user1, 'bucket' : bucket_name})
+
+ assert ret == 200
+
+ # create a second user to link the bucket to
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['user', 'create'],
+ {'uid' : user2,
+ 'display-name' : display_name2,
+ 'access-key' : access_key2,
+ 'secret-key' : secret_key2,
+ 'max-buckets' : '1',
+ })
+
+ assert ret == 200
+
+ # try creating an object with the first user before the bucket is relinked
+ denied = False
+ key = boto.s3.key.Key(bucket)
+
+ try:
+ key.set_contents_from_string('two')
+ except boto.exception.S3ResponseError:
+ denied = True
+
+ assert not denied
+
+ # delete the object
+ key.delete()
+
+ # link the bucket to another user
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'link'], {'uid' : user2, 'bucket' : bucket_name})
+
+ assert ret == 200
+
+ # try creating an object with the first user which should cause an error
+ key = boto.s3.key.Key(bucket)
+
+ try:
+ key.set_contents_from_string('three')
+ except boto.exception.S3ResponseError:
+ denied = True
+
+ assert denied
+
+ # relink the bucket to the first user and delete the second user
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'link'], {'uid' : user1, 'bucket' : bucket_name})
+ assert ret == 200
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user2})
+ assert ret == 200
+
+ # TESTCASE 'object-rm', 'object', 'rm', 'remove object', 'succeeds, object is removed'
+
+ # upload an object
+ object_name = 'four'
+ key = boto.s3.key.Key(bucket, object_name)
+ key.set_contents_from_string(object_name)
+
+ # now delete it
+ (ret, out) = rgwadmin_rest(admin_conn, ['object', 'rm'], {'bucket' : bucket_name, 'object' : object_name})
+ assert ret == 200
+
+    # TESTCASE 'bucket-stats6','bucket','stats','after deleting key','succeeds, lists no objects'
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True})
+ assert ret == 200
+ assert out['id'] == bucket_id
+ assert out['usage']['rgw.main']['num_objects'] == 0
+
+ # create a bucket for deletion stats
+ useless_bucket = connection.create_bucket('useless_bucket')
+ useless_key = useless_bucket.new_key('useless_key')
+ useless_key.set_contents_from_string('useless string')
+
+ # delete it
+ useless_key.delete()
+ useless_bucket.delete()
+
+ # wait for the statistics to flush
+ time.sleep(60)
+
+    # poll until the delete_obj usage data has been flushed (normally well under a minute)
+ timestamp = time.time()
+ while time.time() - timestamp <= (20 * 60): # wait up to 20 minutes
+ (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'categories' : 'delete_obj'}) # last operation we did is delete obj, wait for it to flush
+
+ if successful_ops(out) > 0:
+ break
+ time.sleep(1)
+
+ assert time.time() - timestamp <= (20 * 60)
+
+ # TESTCASE 'usage-show' 'usage' 'show' 'all usage' 'succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'])
+ assert ret == 200
+ assert len(out['entries']) > 0
+ assert len(out['summary']) > 0
+ user_summary = out['summary'][0]
+ total = user_summary['total']
+ assert total['successful_ops'] > 0
+
+ # TESTCASE 'usage-show2' 'usage' 'show' 'user usage' 'succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1})
+ assert ret == 200
+ assert len(out['entries']) > 0
+ assert len(out['summary']) > 0
+ user_summary = out['summary'][0]
+ for entry in user_summary['categories']:
+ assert entry['successful_ops'] > 0
+ assert user_summary['user'] == user1
+
+ # TESTCASE 'usage-show3' 'usage' 'show' 'user usage categories' 'succeeds'
+ test_categories = ['create_bucket', 'put_obj', 'delete_obj', 'delete_bucket']
+ for cat in test_categories:
+ (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1, 'categories' : cat})
+ assert ret == 200
+ assert len(out['summary']) > 0
+ user_summary = out['summary'][0]
+ assert user_summary['user'] == user1
+ assert len(user_summary['categories']) == 1
+ entry = user_summary['categories'][0]
+ assert entry['category'] == cat
+ assert entry['successful_ops'] > 0
+
+ # TESTCASE 'usage-trim' 'usage' 'trim' 'user usage' 'succeeds, usage removed'
+ (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'trim'], {'uid' : user1})
+ assert ret == 200
+ (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1})
+ assert ret == 200
+ assert len(out['entries']) == 0
+ assert len(out['summary']) == 0
+
+ # TESTCASE 'user-suspend2','user','suspend','existing user','succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : True})
+ assert ret == 200
+
+ # TESTCASE 'user-suspend3','user','suspend','suspended user','cannot write objects'
+    denied = False
+    try:
+        key = boto.s3.key.Key(bucket)
+        key.set_contents_from_string('five')
+    except boto.exception.S3ResponseError as e:
+        denied = True
+        assert e.status == 403
+    assert denied
+
+ # TESTCASE 'user-renable2','user','enable','suspended user','succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : 'false'})
+ assert ret == 200
+
+ # TESTCASE 'user-renable3','user','enable','reenabled user','can write objects'
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('six')
+
+ # TESTCASE 'garbage-list', 'garbage', 'list', 'get list of objects ready for garbage collection'
+
+ # create an object large enough to be split into multiple parts
+ test_string = 'foo'*10000000
+
+ big_key = boto.s3.key.Key(bucket)
+ big_key.set_contents_from_string(test_string)
+
+ # now delete the head
+ big_key.delete()
+
+ # TESTCASE 'rm-user-buckets','user','rm','existing user','fails, still has buckets'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1})
+ assert ret == 409
+
+ # delete should fail because ``key`` still exists
+ try:
+ bucket.delete()
+ except boto.exception.S3ResponseError as e:
+ assert e.status == 409
+
+ key.delete()
+ bucket.delete()
+
+ # TESTCASE 'policy', 'bucket', 'policy', 'get bucket policy', 'returns S3 policy'
+ bucket = connection.create_bucket(bucket_name)
+
+ # create an object
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('seven')
+
+ # should be private already but guarantee it
+ key.set_acl('private')
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['policy', 'show'], {'bucket' : bucket.name, 'object' : key.key})
+ assert ret == 200
+
+ acl = key.get_xml_acl()
+ assert acl == out.strip('\n')
+
+ # add another grantee by making the object public read
+ key.set_acl('public-read')
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['policy', 'show'], {'bucket' : bucket.name, 'object' : key.key})
+ assert ret == 200
+
+ acl = key.get_xml_acl()
+ assert acl == out.strip('\n')
+
+ # TESTCASE 'rm-bucket', 'bucket', 'rm', 'bucket with objects', 'succeeds'
+ bucket = connection.create_bucket(bucket_name)
+ key_name = ['eight', 'nine', 'ten', 'eleven']
+ for i in range(4):
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string(key_name[i])
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'rm'], {'bucket' : bucket_name, 'purge-objects' : True})
+ assert ret == 200
+
+ # TESTCASE 'caps-add', 'caps', 'add', 'add user cap', 'succeeds'
+ caps = 'usage=read'
+ (ret, out) = rgwadmin_rest(admin_conn, ['caps', 'add'], {'uid' : user1, 'user-caps' : caps})
+ assert ret == 200
+ assert out[0]['perm'] == 'read'
+
+ # TESTCASE 'caps-rm', 'caps', 'rm', 'remove existing cap from user', 'succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn, ['caps', 'rm'], {'uid' : user1, 'user-caps' : caps})
+ assert ret == 200
+ assert not out
+
+ # TESTCASE 'rm-user','user','rm','existing user','fails, still has buckets'
+ bucket = connection.create_bucket(bucket_name)
+ key = boto.s3.key.Key(bucket)
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1})
+ assert ret == 409
+
+    # TESTCASE 'rm-user2', 'user', 'rm', 'user with data', 'succeeds'
+ bucket = connection.create_bucket(bucket_name)
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('twelve')
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1, 'purge-data' : True})
+ assert ret == 200
+
+ # TESTCASE 'rm-user3','user','info','deleted user','fails'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+ assert ret == 404
+
--- /dev/null
+import contextlib
+import logging
+import argparse
+
+from ..orchestra import run
+from teuthology import misc as teuthology
+import teuthology.task_util.rgw as rgw_utils
+
+log = logging.getLogger(__name__)
+
+def run_radosgw_agent(ctx, config):
+ """
+ Run a single radosgw-agent. See task() for config format.
+ """
+ return_list = list()
+ for (client, cconf) in config.items():
+ # don't process entries that are not clients
+ if not client.startswith('client.'):
+ log.debug('key {data} does not start with \'client.\', moving on'.format(
+ data=client))
+ continue
+
+ src_client = cconf['src']
+ dest_client = cconf['dest']
+
+ src_zone = rgw_utils.zone_for_client(ctx, src_client)
+ dest_zone = rgw_utils.zone_for_client(ctx, dest_client)
+
+ log.info("source is %s", src_zone)
+ log.info("dest is %s", dest_zone)
+
+ testdir = teuthology.get_testdir(ctx)
+ (remote,) = ctx.cluster.only(client).remotes.keys()
+ # figure out which branch to pull from
+ branch = cconf.get('force-branch', None)
+ if not branch:
+ branch = cconf.get('branch', 'master')
+ sha1 = cconf.get('sha1')
+ remote.run(
+ args=[
+ 'cd', testdir, run.Raw('&&'),
+ 'git', 'clone',
+ '-b', branch,
+ 'https://github.com/ceph/radosgw-agent.git',
+ 'radosgw-agent.{client}'.format(client=client),
+ ]
+ )
+ if sha1 is not None:
+            remote.run(
+                args=[
+                    'cd', testdir, run.Raw('&&'),
+                    'cd', 'radosgw-agent.{client}'.format(client=client),
+                    run.Raw('&&'),
+                    'git', 'reset', '--hard', sha1,
+                ]
+            )
+ remote.run(
+ args=[
+ 'cd', testdir, run.Raw('&&'),
+ 'cd', 'radosgw-agent.{client}'.format(client=client),
+ run.Raw('&&'),
+ './bootstrap',
+ ]
+ )
+
+ src_host, src_port = rgw_utils.get_zone_host_and_port(ctx, src_client,
+ src_zone)
+ dest_host, dest_port = rgw_utils.get_zone_host_and_port(ctx, dest_client,
+ dest_zone)
+ src_access, src_secret = rgw_utils.get_zone_system_keys(ctx, src_client,
+ src_zone)
+ dest_access, dest_secret = rgw_utils.get_zone_system_keys(ctx, dest_client,
+ dest_zone)
+ sync_scope = cconf.get('sync-scope', None)
+ port = cconf.get('port', 8000)
+ daemon_name = '{host}.{port}.syncdaemon'.format(host=remote.name, port=port)
+ in_args=[
+ '{tdir}/daemon-helper'.format(tdir=testdir), 'kill',
+ '{tdir}/radosgw-agent.{client}/radosgw-agent'.format(tdir=testdir,
+ client=client),
+ '-v',
+ '--src-access-key', src_access,
+ '--src-secret-key', src_secret,
+ '--src-host', src_host,
+ '--src-port', str(src_port),
+ '--src-zone', src_zone,
+ '--dest-access-key', dest_access,
+ '--dest-secret-key', dest_secret,
+ '--dest-host', dest_host,
+ '--dest-port', str(dest_port),
+ '--dest-zone', dest_zone,
+ '--daemon-id', daemon_name,
+ '--log-file', '{tdir}/archive/rgw_sync_agent.{client}.log'.format(
+ tdir=testdir,
+ client=client),
+ ]
+ # the test server and full/incremental flags are mutually exclusive
+ if sync_scope is None:
+ in_args.append('--test-server-host')
+ in_args.append('0.0.0.0')
+ in_args.append('--test-server-port')
+ in_args.append(str(port))
+ log.debug('Starting a sync test server on {client}'.format(client=client))
+ else:
+ in_args.append('--sync-scope')
+ in_args.append(sync_scope)
+ log.debug('Starting a {scope} sync on {client}'.format(scope=sync_scope,client=client))
+
+ return_list.append((client, remote.run(
+ args=in_args,
+ wait=False,
+ stdin=run.PIPE,
+ logger=log.getChild(daemon_name),
+ )))
+ return return_list
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Run radosgw-agents in test mode.
+
+ Configuration is clients to run the agents on, with settings for
+ source client, destination client, and port to listen on. Binds
+ to 0.0.0.0. Port defaults to 8000. This must be run on clients
+ that have the correct zone root pools and rgw zone set in
+ ceph.conf, or the task cannot read the region information from the
+ cluster.
+
+ By default, this task will start an HTTP server that will trigger full
+ or incremental syncs based on requests made to it.
+ Alternatively, a single full sync can be triggered by
+ specifying 'sync-scope: full' or a loop of incremental syncs can be triggered
+ by specifying 'sync-scope: incremental' (the loop will sleep
+ '--incremental-sync-delay' seconds between each sync, default is 20 seconds).
+
+ An example::
+
+ tasks:
+ - ceph:
+ conf:
+ client.0:
+ rgw zone = foo
+ rgw zone root pool = .root.pool
+ client.1:
+ rgw zone = bar
+ rgw zone root pool = .root.pool2
+ - rgw: # region configuration omitted for brevity
+ - radosgw-agent:
+ client.0:
+ branch: wip-next-feature-branch
+ src: client.0
+ dest: client.1
+ sync-scope: full
+ # port: 8000 (default)
+ client.1:
+ src: client.1
+ dest: client.0
+ port: 8001
+ """
+ assert isinstance(config, dict), 'rgw_sync_agent requires a dictionary config'
+ log.debug("config is %s", config)
+
+ overrides = ctx.config.get('overrides', {})
+ # merge each client section, but only if it exists in config since there isn't
+ # a sensible default action for this task
+ for client in config.iterkeys():
+ if config[client]:
+ log.debug('config[{client}]: {data}'.format(client=client, data=config[client]))
+ teuthology.deep_merge(config[client], overrides.get('radosgw-agent', {}))
+
+ ctx.radosgw_agent = argparse.Namespace()
+ ctx.radosgw_agent.config = config
+
+ procs = run_radosgw_agent(ctx, config)
+
+ ctx.radosgw_agent.procs = procs
+
+ try:
+ yield
+ finally:
+ testdir = teuthology.get_testdir(ctx)
+ try:
+ for client, proc in procs:
+ log.info("shutting down sync agent on %s", client)
+ proc.stdin.close()
+ proc.exitstatus.get()
+ finally:
+ for client, proc in procs:
+ ctx.cluster.only(client).run(
+ args=[
+ 'rm', '-rf',
+ '{tdir}/radosgw-agent.{client}'.format(tdir=testdir,
+ client=client)
+ ]
+ )
+++ /dev/null
-import logging
-import contextlib
-
-from teuthology import misc as teuthology
-from teuthology import contextutil
-from ..orchestra import run
-from teuthology.task.ceph import CephState
-
-log = logging.getLogger(__name__)
-
-@contextlib.contextmanager
-def run_rest_api_daemon(ctx, api_clients):
- if not hasattr(ctx, 'daemons'):
- ctx.daemons = CephState()
- remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
- testdir = teuthology.get_testdir(ctx)
- coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
- for rems, roles in remotes.iteritems():
- for whole_id_ in roles:
- if whole_id_ in api_clients:
- id_ = whole_id_[len('clients'):]
- run_cmd = [
- 'sudo',
- '{tdir}/daemon-helper'.format(tdir=testdir),
- 'kill',
- 'ceph-rest-api',
- '-n',
- 'client.rest{id}'.format(id=id_), ]
- cl_rest_id = 'client.rest{id}'.format(id=id_)
- ctx.daemons.add_daemon(rems, 'restapi',
- cl_rest_id,
- args=run_cmd,
- logger=log.getChild(cl_rest_id),
- stdin=run.PIPE,
- wait=False,
- )
- try:
- yield
-
- finally:
- """
- TO DO: destroy daemons started -- modify iter_daemons_of_role
- """
- teuthology.stop_daemons_of_type(ctx, 'restapi')
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Start up rest-api.
-
- To start on on all clients::
-
- tasks:
- - ceph:
- - rest-api:
-
- To only run on certain clients::
-
- tasks:
- - ceph:
- - rest-api: [client.0, client.3]
-
- or
-
- tasks:
- - ceph:
- - rest-api:
- client.0:
- client.3:
-
- The general flow of things here is:
- 1. Find clients on which rest-api is supposed to run (api_clients)
- 2. Generate keyring values
- 3. Start up ceph-rest-api daemons
- On cleanup:
- 4. Stop the daemons
- 5. Delete keyring value files.
- """
- api_clients = []
- remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
- log.info(remotes)
- if config == None:
- for _, role_v in remotes.iteritems():
- for node in role_v:
- api_clients.append(node)
- else:
- for role_v in config:
- api_clients.append(role_v)
- log.info(api_clients)
- testdir = teuthology.get_testdir(ctx)
- coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
- for rems, roles in remotes.iteritems():
- for whole_id_ in roles:
- if whole_id_ in api_clients:
- id_ = whole_id_[len('clients'):]
- keyring = '/etc/ceph/ceph.client.rest{id}.keyring'.format(
- id=id_)
- rems.run(
- args=[
- 'sudo',
- '{tdir}/adjust-ulimits'.format(tdir=testdir),
- 'ceph-coverage',
- coverage_dir,
- 'ceph-authtool',
- '--create-keyring',
- '--gen-key',
- '--name=client.rest{id}'.format(id=id_),
- '--set-uid=0',
- '--cap', 'mon', 'allow *',
- '--cap', 'osd', 'allow *',
- '--cap', 'mds', 'allow',
- keyring,
- run.Raw('&&'),
- 'sudo',
- 'chmod',
- '0644',
- keyring,
- ],
- )
- rems.run(
- args=[
- 'sudo',
- 'sh',
- '-c',
- run.Raw("'"),
- "echo",
- '[client.rest{id}]'.format(id=id_),
- run.Raw('>>'),
- "/etc/ceph/ceph.conf",
- run.Raw("'")
- ]
- )
- rems.run(
- args=[
- 'sudo',
- 'sh',
- '-c',
- run.Raw("'"),
- 'echo',
- 'restapi',
- 'keyring',
- '=',
- '/etc/ceph/ceph.client.rest{id}.keyring'.format(id=id_),
- run.Raw('>>'),
- '/etc/ceph/ceph.conf',
- run.Raw("'"),
- ]
- )
- rems.run(
- args=[
- 'ceph',
- 'auth',
- 'import',
- '-i',
- '/etc/ceph/ceph.client.rest{id}.keyring'.format(id=id_),
- ]
- )
- with contextutil.nested(
- lambda: run_rest_api_daemon(ctx=ctx, api_clients=api_clients),):
- yield
-
--- /dev/null
+import logging
+import contextlib
+
+from teuthology import misc as teuthology
+from teuthology import contextutil
+from ..orchestra import run
+from teuthology.task.ceph import CephState
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def run_rest_api_daemon(ctx, api_clients):
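+    """
+    Start a ceph-rest-api daemon (via daemon-helper) on each client listed in
+    api_clients, and stop all of the restapi daemons again on teardown.
+    """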
+ if not hasattr(ctx, 'daemons'):
+ ctx.daemons = CephState()
+ remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
+ testdir = teuthology.get_testdir(ctx)
+ coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
+ for rems, roles in remotes.iteritems():
+ for whole_id_ in roles:
+ if whole_id_ in api_clients:
+                id_ = whole_id_[len('client.'):]
+ run_cmd = [
+ 'sudo',
+ '{tdir}/daemon-helper'.format(tdir=testdir),
+ 'kill',
+ 'ceph-rest-api',
+ '-n',
+ 'client.rest{id}'.format(id=id_), ]
+ cl_rest_id = 'client.rest{id}'.format(id=id_)
+ ctx.daemons.add_daemon(rems, 'restapi',
+ cl_rest_id,
+ args=run_cmd,
+ logger=log.getChild(cl_rest_id),
+ stdin=run.PIPE,
+ wait=False,
+ )
+ try:
+ yield
+
+ finally:
+        # TODO: destroy the daemons that were started -- modify iter_daemons_of_role
+ teuthology.stop_daemons_of_type(ctx, 'restapi')
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Start up rest-api.
+
+    To start on all clients::
+
+ tasks:
+ - ceph:
+ - rest-api:
+
+ To only run on certain clients::
+
+ tasks:
+ - ceph:
+ - rest-api: [client.0, client.3]
+
+ or
+
+ tasks:
+ - ceph:
+ - rest-api:
+ client.0:
+ client.3:
+
+ The general flow of things here is:
+ 1. Find clients on which rest-api is supposed to run (api_clients)
+ 2. Generate keyring values
+ 3. Start up ceph-rest-api daemons
+ On cleanup:
+ 4. Stop the daemons
+ 5. Delete keyring value files.
+ """
+ api_clients = []
+ remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
+ log.info(remotes)
+    if config is None:
+ for _, role_v in remotes.iteritems():
+ for node in role_v:
+ api_clients.append(node)
+ else:
+ for role_v in config:
+ api_clients.append(role_v)
+ log.info(api_clients)
+ testdir = teuthology.get_testdir(ctx)
+ coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
+ for rems, roles in remotes.iteritems():
+ for whole_id_ in roles:
+ if whole_id_ in api_clients:
+                id_ = whole_id_[len('client.'):]
+ keyring = '/etc/ceph/ceph.client.rest{id}.keyring'.format(
+ id=id_)
+ rems.run(
+ args=[
+ 'sudo',
+ '{tdir}/adjust-ulimits'.format(tdir=testdir),
+ 'ceph-coverage',
+ coverage_dir,
+ 'ceph-authtool',
+ '--create-keyring',
+ '--gen-key',
+ '--name=client.rest{id}'.format(id=id_),
+ '--set-uid=0',
+ '--cap', 'mon', 'allow *',
+ '--cap', 'osd', 'allow *',
+ '--cap', 'mds', 'allow',
+ keyring,
+ run.Raw('&&'),
+ 'sudo',
+ 'chmod',
+ '0644',
+ keyring,
+ ],
+ )
+ rems.run(
+ args=[
+ 'sudo',
+ 'sh',
+ '-c',
+ run.Raw("'"),
+ "echo",
+ '[client.rest{id}]'.format(id=id_),
+ run.Raw('>>'),
+ "/etc/ceph/ceph.conf",
+ run.Raw("'")
+ ]
+ )
+ rems.run(
+ args=[
+ 'sudo',
+ 'sh',
+ '-c',
+ run.Raw("'"),
+ 'echo',
+ 'restapi',
+ 'keyring',
+ '=',
+ '/etc/ceph/ceph.client.rest{id}.keyring'.format(id=id_),
+ run.Raw('>>'),
+ '/etc/ceph/ceph.conf',
+ run.Raw("'"),
+ ]
+ )
+ rems.run(
+ args=[
+ 'ceph',
+ 'auth',
+ 'import',
+ '-i',
+ '/etc/ceph/ceph.client.rest{id}.keyring'.format(id=id_),
+ ]
+ )
+ with contextutil.nested(
+ lambda: run_rest_api_daemon(ctx=ctx, api_clients=api_clients),):
+ yield
+
+++ /dev/null
-from cStringIO import StringIO
-from configobj import ConfigObj
-import contextlib
-import logging
-import s3tests
-
-from teuthology import misc as teuthology
-from teuthology import contextutil
-
-log = logging.getLogger(__name__)
-
-
-@contextlib.contextmanager
-def download(ctx, config):
- return s3tests.do_download(ctx, config)
-
-def _config_user(s3tests_conf, section, user):
- return s3tests._config_user(s3tests_conf, section, user)
-
-@contextlib.contextmanager
-def create_users(ctx, config):
- return s3tests.do_create_users(ctx, config)
-
-@contextlib.contextmanager
-def configure(ctx, config):
- return s3tests.do_configure(ctx, config)
-
-@contextlib.contextmanager
-def run_tests(ctx, config):
- assert isinstance(config, dict)
- testdir = teuthology.get_testdir(ctx)
- for client, client_config in config.iteritems():
- client_config['extra_args'] = [
- 's3tests.functional.test_s3:test_bucket_list_return_data',
- ]
-
-# args = [
-# 'S3TEST_CONF={tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client),
-# '{tdir}/s3-tests/virtualenv/bin/nosetests'.format(tdir=testdir),
-# '-w',
-# '{tdir}/s3-tests'.format(tdir=testdir),
-# '-v',
-# 's3tests.functional.test_s3:test_bucket_list_return_data',
-# ]
-# if client_config is not None and 'extra_args' in client_config:
-# args.extend(client_config['extra_args'])
-#
-# ctx.cluster.only(client).run(
-# args=args,
-# )
-
- s3tests.do_run_tests(ctx, config)
-
- netcat_out = StringIO()
-
- for client, client_config in config.iteritems():
- ctx.cluster.only(client).run(
- args = [
- 'netcat',
- '-w', '5',
- '-U', '{tdir}/rgw.opslog.sock'.format(tdir=testdir),
- ],
- stdout = netcat_out,
- )
-
- out = netcat_out.getvalue()
-
- assert len(out) > 100
-
- log.info('Received', out)
-
- yield
-
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Run some s3-tests suite against rgw, verify opslog socket returns data
-
- Must restrict testing to a particular client::
-
- tasks:
- - ceph:
- - rgw: [client.0]
- - s3tests: [client.0]
-
- To pass extra arguments to nose (e.g. to run a certain test)::
-
- tasks:
- - ceph:
- - rgw: [client.0]
- - s3tests:
- client.0:
- extra_args: ['test_s3:test_object_acl_grand_public_read']
- client.1:
- extra_args: ['--exclude', 'test_100_continue']
- """
- assert config is None or isinstance(config, list) \
- or isinstance(config, dict), \
- "task s3tests only supports a list or dictionary for configuration"
- all_clients = ['client.{id}'.format(id=id_)
- for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
- if config is None:
- config = all_clients
- if isinstance(config, list):
- config = dict.fromkeys(config)
- clients = config.keys()
-
- overrides = ctx.config.get('overrides', {})
- # merge each client section, not the top level.
- for (client, cconf) in config.iteritems():
- teuthology.deep_merge(cconf, overrides.get('rgw-logsocket', {}))
-
- log.debug('config is %s', config)
-
- s3tests_conf = {}
- for client in clients:
- s3tests_conf[client] = ConfigObj(
- indent_type='',
- infile={
- 'DEFAULT':
- {
- 'port' : 7280,
- 'is_secure' : 'no',
- },
- 'fixtures' : {},
- 's3 main' : {},
- 's3 alt' : {},
- }
- )
-
- with contextutil.nested(
- lambda: download(ctx=ctx, config=config),
- lambda: create_users(ctx=ctx, config=dict(
- clients=clients,
- s3tests_conf=s3tests_conf,
- )),
- lambda: configure(ctx=ctx, config=dict(
- clients=config,
- s3tests_conf=s3tests_conf,
- )),
- lambda: run_tests(ctx=ctx, config=config),
- ):
- yield
--- /dev/null
+from cStringIO import StringIO
+from configobj import ConfigObj
+import contextlib
+import logging
+import s3tests
+
+from teuthology import misc as teuthology
+from teuthology import contextutil
+
+log = logging.getLogger(__name__)
+
+
+@contextlib.contextmanager
+def download(ctx, config):
+ return s3tests.do_download(ctx, config)
+
+def _config_user(s3tests_conf, section, user):
+ return s3tests._config_user(s3tests_conf, section, user)
+
+@contextlib.contextmanager
+def create_users(ctx, config):
+ return s3tests.do_create_users(ctx, config)
+
+@contextlib.contextmanager
+def configure(ctx, config):
+ return s3tests.do_configure(ctx, config)
+
+@contextlib.contextmanager
+def run_tests(ctx, config):
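+    """
+    Run a single s3-tests case for each configured client, then read the rgw
+    opslog socket with netcat and verify that it returned data.
+    """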
+ assert isinstance(config, dict)
+ testdir = teuthology.get_testdir(ctx)
+ for client, client_config in config.iteritems():
+ client_config['extra_args'] = [
+ 's3tests.functional.test_s3:test_bucket_list_return_data',
+ ]
+
+# args = [
+# 'S3TEST_CONF={tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client),
+# '{tdir}/s3-tests/virtualenv/bin/nosetests'.format(tdir=testdir),
+# '-w',
+# '{tdir}/s3-tests'.format(tdir=testdir),
+# '-v',
+# 's3tests.functional.test_s3:test_bucket_list_return_data',
+# ]
+# if client_config is not None and 'extra_args' in client_config:
+# args.extend(client_config['extra_args'])
+#
+# ctx.cluster.only(client).run(
+# args=args,
+# )
+
+ s3tests.do_run_tests(ctx, config)
+
+ netcat_out = StringIO()
+
+ for client, client_config in config.iteritems():
+ ctx.cluster.only(client).run(
+ args = [
+ 'netcat',
+ '-w', '5',
+ '-U', '{tdir}/rgw.opslog.sock'.format(tdir=testdir),
+ ],
+ stdout = netcat_out,
+ )
+
+ out = netcat_out.getvalue()
+
+ assert len(out) > 100
+
+    log.info('Received %s', out)
+
+ yield
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Run some s3-tests suite against rgw, verify opslog socket returns data
+
+ Must restrict testing to a particular client::
+
+ tasks:
+ - ceph:
+ - rgw: [client.0]
+ - s3tests: [client.0]
+
+ To pass extra arguments to nose (e.g. to run a certain test)::
+
+ tasks:
+ - ceph:
+ - rgw: [client.0]
+ - s3tests:
+ client.0:
+ extra_args: ['test_s3:test_object_acl_grand_public_read']
+ client.1:
+ extra_args: ['--exclude', 'test_100_continue']
+ """
+ assert config is None or isinstance(config, list) \
+ or isinstance(config, dict), \
+        "task rgw-logsocket only supports a list or dictionary for configuration"
+ all_clients = ['client.{id}'.format(id=id_)
+ for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
+ if config is None:
+ config = all_clients
+ if isinstance(config, list):
+ config = dict.fromkeys(config)
+ clients = config.keys()
+
+ overrides = ctx.config.get('overrides', {})
+ # merge each client section, not the top level.
+ for (client, cconf) in config.iteritems():
+ teuthology.deep_merge(cconf, overrides.get('rgw-logsocket', {}))
+
+ log.debug('config is %s', config)
+
+ s3tests_conf = {}
+ for client in clients:
+ s3tests_conf[client] = ConfigObj(
+ indent_type='',
+ infile={
+ 'DEFAULT':
+ {
+ 'port' : 7280,
+ 'is_secure' : 'no',
+ },
+ 'fixtures' : {},
+ 's3 main' : {},
+ 's3 alt' : {},
+ }
+ )
+
+ with contextutil.nested(
+ lambda: download(ctx=ctx, config=config),
+ lambda: create_users(ctx=ctx, config=dict(
+ clients=clients,
+ s3tests_conf=s3tests_conf,
+ )),
+ lambda: configure(ctx=ctx, config=dict(
+ clients=config,
+ s3tests_conf=s3tests_conf,
+ )),
+ lambda: run_tests(ctx=ctx, config=config),
+ ):
+ yield