subtask = 'task'
if '.' in taskname:
(submod, subtask) = taskname.rsplit('.', 1)
+
+ # Teuthology configs may refer to modules like ceph_deploy as ceph-deploy
+ submod = submod.replace('-', '_')
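+    # e.g. a job config entry 'ceph-deploy' now resolves to teuthology.task.ceph_deploy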
+
parent = __import__('teuthology.task', globals(), locals(), [submod], 0)
try:
mod = getattr(parent, submod)
+++ /dev/null
-"""
-Execute ceph-deploy as a task
-"""
-from cStringIO import StringIO
-
-import contextlib
-import os
-import time
-import logging
-
-from teuthology import misc as teuthology
-from teuthology import contextutil
-from ..config import config as teuth_config
-import ceph as ceph_fn
-from ..orchestra import run
-
-log = logging.getLogger(__name__)
-
-
-@contextlib.contextmanager
-def download_ceph_deploy(ctx, config):
- """
- Downloads ceph-deploy from the ceph.com git mirror and (by default)
- switches to the master branch. If the `ceph-deploy-branch` is specified, it
- will use that instead.
- """
- log.info('Downloading ceph-deploy...')
- testdir = teuthology.get_testdir(ctx)
- ceph_admin = teuthology.get_first_mon(ctx, config)
- default_cd_branch = {'ceph-deploy-branch': 'master'}
- ceph_deploy_branch = config.get(
- 'ceph-deploy',
- default_cd_branch).get('ceph-deploy-branch')
-
- ctx.cluster.only(ceph_admin).run(
- args=[
- 'git', 'clone', '-b', ceph_deploy_branch,
- teuth_config.ceph_git_base_url + 'ceph-deploy.git',
- '{tdir}/ceph-deploy'.format(tdir=testdir),
- ],
- )
- ctx.cluster.only(ceph_admin).run(
- args=[
- 'cd',
- '{tdir}/ceph-deploy'.format(tdir=testdir),
- run.Raw('&&'),
- './bootstrap',
- ],
- )
-
- try:
- yield
- finally:
- log.info('Removing ceph-deploy ...')
- ctx.cluster.only(ceph_admin).run(
- args=[
- 'rm',
- '-rf',
- '{tdir}/ceph-deploy'.format(tdir=testdir),
- ],
- )
-
-
-def is_healthy(ctx, config):
- """Wait until a Ceph cluster is healthy."""
- testdir = teuthology.get_testdir(ctx)
- ceph_admin = teuthology.get_first_mon(ctx, config)
- (remote,) = ctx.cluster.only(ceph_admin).remotes.keys()
- max_tries = 90 # 90 tries * 10 secs --> 15 minutes
- tries = 0
- while True:
- tries += 1
- if tries >= max_tries:
- msg = "ceph health was unable to get 'HEALTH_OK' after waiting 15 minutes"
- raise RuntimeError(msg)
-
- r = remote.run(
- args=[
- 'cd',
- '{tdir}'.format(tdir=testdir),
- run.Raw('&&'),
- 'sudo', 'ceph',
- 'health',
- ],
- stdout=StringIO(),
- logger=log.getChild('health'),
- )
- out = r.stdout.getvalue()
- log.debug('Ceph health: %s', out.rstrip('\n'))
- if out.split(None, 1)[0] == 'HEALTH_OK':
- break
- time.sleep(10)
-
-def get_nodes_using_roles(ctx, config, role):
- """Extract the names of nodes that match a given role from a cluster"""
- newl = []
- for _remote, roles_for_host in ctx.cluster.remotes.iteritems():
- for id_ in teuthology.roles_of_type(roles_for_host, role):
- rem = _remote
- if role == 'mon':
- req1 = str(rem).split('@')[-1]
- else:
- req = str(rem).split('.')[0]
- req1 = str(req).split('@')[1]
- newl.append(req1)
- return newl
-
-def get_dev_for_osd(ctx, config):
- """Get a list of all osd device names."""
- osd_devs = []
- for remote, roles_for_host in ctx.cluster.remotes.iteritems():
- host = remote.name.split('@')[-1]
- shortname = host.split('.')[0]
- devs = teuthology.get_scratch_devices(remote)
- num_osd_per_host = list(teuthology.roles_of_type(roles_for_host, 'osd'))
- num_osds = len(num_osd_per_host)
- assert num_osds <= len(devs), 'fewer disks than osds on ' + shortname
- for dev in devs[:num_osds]:
- dev_short = dev.split('/')[-1]
- osd_devs.append('{host}:{dev}'.format(host=shortname, dev=dev_short))
- return osd_devs
-
-def get_all_nodes(ctx, config):
- """Return a string of node names separated by blanks"""
- nodelist = []
- for t, k in ctx.config['targets'].iteritems():
- host = t.split('@')[-1]
- simple_host = host.split('.')[0]
- nodelist.append(simple_host)
- nodelist = " ".join(nodelist)
- return nodelist
-
-def execute_ceph_deploy(ctx, config, cmd):
- """Remotely execute a ceph_deploy command"""
- testdir = teuthology.get_testdir(ctx)
- ceph_admin = teuthology.get_first_mon(ctx, config)
- exec_cmd = cmd
- (remote,) = ctx.cluster.only(ceph_admin).remotes.iterkeys()
- proc = remote.run(
- args = [
- 'cd',
- '{tdir}/ceph-deploy'.format(tdir=testdir),
- run.Raw('&&'),
- run.Raw(exec_cmd),
- ],
- check_status=False,
- )
- exitstatus = proc.exitstatus
- return exitstatus
-
-
-@contextlib.contextmanager
-def build_ceph_cluster(ctx, config):
- """Build a ceph cluster"""
-
- try:
- log.info('Building ceph cluster using ceph-deploy...')
- testdir = teuthology.get_testdir(ctx)
- ceph_branch = None
- if config.get('branch') is not None:
- cbranch = config.get('branch')
- for var, val in cbranch.iteritems():
- if var == 'testing':
- ceph_branch = '--{var}'.format(var=var)
- ceph_branch = '--{var}={val}'.format(var=var, val=val)
- node_dev_list = []
- all_nodes = get_all_nodes(ctx, config)
- mds_nodes = get_nodes_using_roles(ctx, config, 'mds')
- mds_nodes = " ".join(mds_nodes)
- mon_node = get_nodes_using_roles(ctx, config, 'mon')
- mon_nodes = " ".join(mon_node)
- new_mon = './ceph-deploy new'+" "+mon_nodes
- install_nodes = './ceph-deploy install '+ceph_branch+" "+all_nodes
- purge_nodes = './ceph-deploy purge'+" "+all_nodes
- purgedata_nodes = './ceph-deploy purgedata'+" "+all_nodes
- mon_hostname = mon_nodes.split(' ')[0]
- mon_hostname = str(mon_hostname)
- gather_keys = './ceph-deploy gatherkeys'+" "+mon_hostname
- deploy_mds = './ceph-deploy mds create'+" "+mds_nodes
- no_of_osds = 0
-
- if mon_nodes is None:
- raise RuntimeError("no monitor nodes in the config file")
-
- estatus_new = execute_ceph_deploy(ctx, config, new_mon)
- if estatus_new != 0:
- raise RuntimeError("ceph-deploy: new command failed")
-
- log.info('adding config inputs...')
- testdir = teuthology.get_testdir(ctx)
- conf_path = '{tdir}/ceph-deploy/ceph.conf'.format(tdir=testdir)
- first_mon = teuthology.get_first_mon(ctx, config)
- (remote,) = ctx.cluster.only(first_mon).remotes.keys()
-
- lines = None
- if config.get('conf') is not None:
- confp = config.get('conf')
- for section, keys in confp.iteritems():
- lines = '[{section}]\n'.format(section=section)
- teuthology.append_lines_to_file(remote, conf_path, lines,
- sudo=True)
- for key, value in keys.iteritems():
- log.info("[%s] %s = %s" % (section, key, value))
- lines = '{key} = {value}\n'.format(key=key, value=value)
- teuthology.append_lines_to_file(remote, conf_path, lines,
- sudo=True)
-
- estatus_install = execute_ceph_deploy(ctx, config, install_nodes)
- if estatus_install != 0:
- raise RuntimeError("ceph-deploy: Failed to install ceph")
-
- mon_no = None
- mon_no = config.get('mon_initial_members')
- if mon_no is not None:
- i = 0
- mon1 = []
- while(i < mon_no):
- mon1.append(mon_node[i])
- i = i + 1
- initial_mons = " ".join(mon1)
- for k in range(mon_no, len(mon_node)):
- mon_create_nodes = './ceph-deploy mon create' + " " + \
- initial_mons + " " + mon_node[k]
- estatus_mon = execute_ceph_deploy(ctx, config,
- mon_create_nodes)
- if estatus_mon != 0:
- raise RuntimeError("ceph-deploy: Failed to create monitor")
- else:
- mon_create_nodes = './ceph-deploy mon create-initial'
- estatus_mon = execute_ceph_deploy(ctx, config, mon_create_nodes)
- if estatus_mon != 0:
- raise RuntimeError("ceph-deploy: Failed to create monitors")
-
- estatus_gather = execute_ceph_deploy(ctx, config, gather_keys)
- max_gather_tries = 90
- gather_tries = 0
- while (estatus_gather != 0):
- gather_tries += 1
- if gather_tries >= max_gather_tries:
- msg = 'ceph-deploy was not able to gatherkeys after 15 minutes'
- raise RuntimeError(msg)
- estatus_gather = execute_ceph_deploy(ctx, config, gather_keys)
- time.sleep(10)
-
- if mds_nodes:
- estatus_mds = execute_ceph_deploy(ctx, config, deploy_mds)
- if estatus_mds != 0:
- raise RuntimeError("ceph-deploy: Failed to deploy mds")
-
- if config.get('test_mon_destroy') is not None:
- for d in range(1, len(mon_node)):
- mon_destroy_nodes = './ceph-deploy mon destroy'+" "+mon_node[d]
- estatus_mon_d = execute_ceph_deploy(ctx, config,
- mon_destroy_nodes)
- if estatus_mon_d != 0:
- raise RuntimeError("ceph-deploy: Failed to delete monitor")
-
- node_dev_list = get_dev_for_osd(ctx, config)
- for d in node_dev_list:
- osd_create_cmds = './ceph-deploy osd create --zap-disk'+" "+d
- estatus_osd = execute_ceph_deploy(ctx, config, osd_create_cmds)
- if estatus_osd == 0:
- log.info('successfully created osd')
- no_of_osds += 1
- else:
- zap_disk = './ceph-deploy disk zap'+" "+d
- execute_ceph_deploy(ctx, config, zap_disk)
- estatus_osd = execute_ceph_deploy(ctx, config, osd_create_cmds)
- if estatus_osd == 0:
- log.info('successfully created osd')
- no_of_osds += 1
- else:
- raise RuntimeError("ceph-deploy: Failed to create osds")
-
- if config.get('wait-for-healthy', True) and no_of_osds >= 2:
- is_healthy(ctx=ctx, config=None)
-
- log.info('Setting up client nodes...')
- conf_path = '/etc/ceph/ceph.conf'
- admin_keyring_path = '/etc/ceph/ceph.client.admin.keyring'
- first_mon = teuthology.get_first_mon(ctx, config)
- (mon0_remote,) = ctx.cluster.only(first_mon).remotes.keys()
- conf_data = teuthology.get_file(
- remote=mon0_remote,
- path=conf_path,
- sudo=True,
- )
- admin_keyring = teuthology.get_file(
- remote=mon0_remote,
- path=admin_keyring_path,
- sudo=True,
- )
-
- clients = ctx.cluster.only(teuthology.is_type('client'))
- for remot, roles_for_host in clients.remotes.iteritems():
- for id_ in teuthology.roles_of_type(roles_for_host, 'client'):
- client_keyring = \
- '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
- mon0_remote.run(
- args=[
- 'cd',
- '{tdir}'.format(tdir=testdir),
- run.Raw('&&'),
- 'sudo', 'bash', '-c',
- run.Raw('"'), 'ceph',
- 'auth',
- 'get-or-create',
- 'client.{id}'.format(id=id_),
- 'mds', 'allow',
- 'mon', 'allow *',
- 'osd', 'allow *',
- run.Raw('>'),
- client_keyring,
- run.Raw('"'),
- ],
- )
- key_data = teuthology.get_file(
- remote=mon0_remote,
- path=client_keyring,
- sudo=True,
- )
- teuthology.sudo_write_file(
- remote=remot,
- path=client_keyring,
- data=key_data,
- perms='0644'
- )
- teuthology.sudo_write_file(
- remote=remot,
- path=admin_keyring_path,
- data=admin_keyring,
- perms='0644'
- )
- teuthology.sudo_write_file(
- remote=remot,
- path=conf_path,
- data=conf_data,
- perms='0644'
- )
- else:
- raise RuntimeError(
- "The cluster is NOT operational due to insufficient OSDs")
- yield
-
- finally:
- log.info('Stopping ceph...')
- ctx.cluster.run(args=['sudo', 'stop', 'ceph-all', run.Raw('||'),
- 'sudo', 'service', 'ceph', 'stop' ])
-
- if ctx.archive is not None:
- # archive mon data, too
- log.info('Archiving mon data...')
- path = os.path.join(ctx.archive, 'data')
- os.makedirs(path)
- mons = ctx.cluster.only(teuthology.is_type('mon'))
- for remote, roles in mons.remotes.iteritems():
- for role in roles:
- if role.startswith('mon.'):
- teuthology.pull_directory_tarball(
- remote,
- '/var/lib/ceph/mon',
- path + '/' + role + '.tgz')
-
- log.info('Compressing logs...')
- run.wait(
- ctx.cluster.run(
- args=[
- 'sudo',
- 'find',
- '/var/log/ceph',
- '-name',
- '*.log',
- '-print0',
- run.Raw('|'),
- 'sudo',
- 'xargs',
- '-0',
- '--no-run-if-empty',
- '--',
- 'gzip',
- '--',
- ],
- wait=False,
- ),
- )
-
- log.info('Archiving logs...')
- path = os.path.join(ctx.archive, 'remote')
- os.makedirs(path)
- for remote in ctx.cluster.remotes.iterkeys():
- sub = os.path.join(path, remote.shortname)
- os.makedirs(sub)
- teuthology.pull_directory(remote, '/var/log/ceph',
- os.path.join(sub, 'log'))
-
- # Prevent these from being undefined if the try block fails
- all_nodes = get_all_nodes(ctx, config)
- purge_nodes = './ceph-deploy purge'+" "+all_nodes
- purgedata_nodes = './ceph-deploy purgedata'+" "+all_nodes
-
- log.info('Purging package...')
- execute_ceph_deploy(ctx, config, purge_nodes)
- log.info('Purging data...')
- execute_ceph_deploy(ctx, config, purgedata_nodes)
-
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Set up and tear down a Ceph cluster.
-
- For example::
-
- tasks:
- - install:
- extras: yes
- - ssh_keys:
- - ceph-deploy:
- branch:
- stable: bobtail
- mon_initial_members: 1
-
- tasks:
- - install:
- extras: yes
- - ssh_keys:
- - ceph-deploy:
- branch:
- dev: master
- conf:
- mon:
- debug mon = 20
-
- tasks:
- - install:
- extras: yes
- - ssh_keys:
- - ceph-deploy:
- branch:
- testing:
- """
- if config is None:
- config = {}
-
- overrides = ctx.config.get('overrides', {})
- teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))
-
- assert isinstance(config, dict), \
- "task ceph-deploy only supports a dictionary for configuration"
-
- overrides = ctx.config.get('overrides', {})
- teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))
-
- if config.get('branch') is not None:
- assert isinstance(config['branch'], dict), 'branch must be a dictionary'
-
- with contextutil.nested(
- lambda: ceph_fn.ship_utilities(ctx=ctx, config=None),
- lambda: download_ceph_deploy(ctx=ctx, config=config),
- lambda: build_ceph_cluster(ctx=ctx, config=dict(
- conf=config.get('conf', {}),
- branch=config.get('branch',{}),
- mon_initial_members=config.get('mon_initial_members', None),
- test_mon_destroy=config.get('test_mon_destroy', None),
- )),
- ):
- yield
+++ /dev/null
-"""
-Ceph FUSE client task
-"""
-import contextlib
-import logging
-import os
-
-from teuthology import misc as teuthology
-from ..orchestra import run
-
-log = logging.getLogger(__name__)
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Mount/unmount a ``ceph-fuse`` client.
-
- The config is optional and defaults to mounting on all clients. If
- a config is given, it is expected to be a list of clients to do
- this operation on. This lets you e.g. set up one client with
- ``ceph-fuse`` and another with ``kclient``.
-
- Example that mounts all clients::
-
- tasks:
- - ceph:
- - ceph-fuse:
- - interactive:
-
-    Example that uses both ``kclient`` and ``ceph-fuse``::
-
- tasks:
- - ceph:
- - ceph-fuse: [client.0]
- - kclient: [client.1]
- - interactive:
-
- Example that enables valgrind:
-
- tasks:
- - ceph:
- - ceph-fuse:
- client.0:
- valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
- - interactive:
-
- :param ctx: Context
- :param config: Configuration
- """
- log.info('Mounting ceph-fuse clients...')
- fuse_daemons = {}
-
- testdir = teuthology.get_testdir(ctx)
-
- if config is None:
- config = dict(('client.{id}'.format(id=id_), None)
- for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
- elif isinstance(config, list):
- config = dict((name, None) for name in config)
-
- overrides = ctx.config.get('overrides', {})
- teuthology.deep_merge(config, overrides.get('ceph-fuse', {}))
-
- clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))
-
- for id_, remote in clients:
- client_config = config.get("client.%s" % id_)
- if client_config is None:
- client_config = {}
- log.info("Client client.%s config is %s" % (id_, client_config))
-
- daemon_signal = 'kill'
- if client_config.get('coverage') or client_config.get('valgrind') is not None:
- daemon_signal = 'term'
-
- mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
- log.info('Mounting ceph-fuse client.{id} at {remote} {mnt}...'.format(
- id=id_, remote=remote,mnt=mnt))
-
- remote.run(
- args=[
- 'mkdir',
- '--',
- mnt,
- ],
- )
-
- run_cmd=[
- 'sudo',
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'daemon-helper',
- daemon_signal,
- ]
- run_cmd_tail=[
- 'ceph-fuse',
- '-f',
- '--name', 'client.{id}'.format(id=id_),
- # TODO ceph-fuse doesn't understand dash dash '--',
- mnt,
- ]
-
- if client_config.get('valgrind') is not None:
- run_cmd = teuthology.get_valgrind_args(
- testdir,
- 'client.{id}'.format(id=id_),
- run_cmd,
- client_config.get('valgrind'),
- )
-
- run_cmd.extend(run_cmd_tail)
-
- proc = remote.run(
- args=run_cmd,
- logger=log.getChild('ceph-fuse.{id}'.format(id=id_)),
- stdin=run.PIPE,
- wait=False,
- )
- fuse_daemons[id_] = proc
-
- for id_, remote in clients:
- mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
- teuthology.wait_until_fuse_mounted(
- remote=remote,
- fuse=fuse_daemons[id_],
- mountpoint=mnt,
- )
- remote.run(args=['sudo', 'chmod', '1777', '{tdir}/mnt.{id}'.format(tdir=testdir, id=id_)],)
-
- try:
- yield
- finally:
- log.info('Unmounting ceph-fuse clients...')
- for id_, remote in clients:
- mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
- try:
- remote.run(
- args=[
- 'sudo',
- 'fusermount',
- '-u',
- mnt,
- ],
- )
- except run.CommandFailedError:
- log.info('Failed to unmount ceph-fuse on {name}, aborting...'.format(name=remote.name))
- # abort the fuse mount, killing all hung processes
- remote.run(
- args=[
- 'if', 'test', '-e', '/sys/fs/fuse/connections/*/abort',
- run.Raw(';'), 'then',
- 'echo',
- '1',
- run.Raw('>'),
- run.Raw('/sys/fs/fuse/connections/*/abort'),
- run.Raw(';'), 'fi',
- ],
- )
-                # make sure it's unmounted
- remote.run(
- args=[
- 'sudo',
- 'umount',
- '-l',
- '-f',
- mnt,
- ],
- )
-
- run.wait(fuse_daemons.itervalues())
-
- for id_, remote in clients:
- mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
- remote.run(
- args=[
- 'rmdir',
- '--',
- mnt,
- ],
- )
--- /dev/null
+"""
+Execute ceph-deploy as a task
+"""
+from cStringIO import StringIO
+
+import contextlib
+import os
+import time
+import logging
+
+from teuthology import misc as teuthology
+from teuthology import contextutil
+from ..config import config as teuth_config
+import ceph as ceph_fn
+from ..orchestra import run
+
+log = logging.getLogger(__name__)
+
+
+@contextlib.contextmanager
+def download_ceph_deploy(ctx, config):
+ """
+    Downloads ceph-deploy from the ceph.com git mirror and (by default)
+    checks out the master branch. If ``ceph-deploy-branch`` is specified in
+    the config, that branch is used instead.
+ """
+ log.info('Downloading ceph-deploy...')
+ testdir = teuthology.get_testdir(ctx)
+ ceph_admin = teuthology.get_first_mon(ctx, config)
+ default_cd_branch = {'ceph-deploy-branch': 'master'}
+ ceph_deploy_branch = config.get(
+ 'ceph-deploy',
+ default_cd_branch).get('ceph-deploy-branch')
+
+ ctx.cluster.only(ceph_admin).run(
+ args=[
+ 'git', 'clone', '-b', ceph_deploy_branch,
+ teuth_config.ceph_git_base_url + 'ceph-deploy.git',
+ '{tdir}/ceph-deploy'.format(tdir=testdir),
+ ],
+ )
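+    # prepare the checkout with ceph-deploy's bundled bootstrap script before
+    # it is run in place below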
+ ctx.cluster.only(ceph_admin).run(
+ args=[
+ 'cd',
+ '{tdir}/ceph-deploy'.format(tdir=testdir),
+ run.Raw('&&'),
+ './bootstrap',
+ ],
+ )
+
+ try:
+ yield
+ finally:
+ log.info('Removing ceph-deploy ...')
+ ctx.cluster.only(ceph_admin).run(
+ args=[
+ 'rm',
+ '-rf',
+ '{tdir}/ceph-deploy'.format(tdir=testdir),
+ ],
+ )
+
+
+def is_healthy(ctx, config):
+ """Wait until a Ceph cluster is healthy."""
+ testdir = teuthology.get_testdir(ctx)
+ ceph_admin = teuthology.get_first_mon(ctx, config)
+ (remote,) = ctx.cluster.only(ceph_admin).remotes.keys()
+ max_tries = 90 # 90 tries * 10 secs --> 15 minutes
+ tries = 0
+ while True:
+ tries += 1
+ if tries >= max_tries:
+ msg = "ceph health was unable to get 'HEALTH_OK' after waiting 15 minutes"
+ raise RuntimeError(msg)
+
+ r = remote.run(
+ args=[
+ 'cd',
+ '{tdir}'.format(tdir=testdir),
+ run.Raw('&&'),
+ 'sudo', 'ceph',
+ 'health',
+ ],
+ stdout=StringIO(),
+ logger=log.getChild('health'),
+ )
+ out = r.stdout.getvalue()
+ log.debug('Ceph health: %s', out.rstrip('\n'))
+ if out.split(None, 1)[0] == 'HEALTH_OK':
+ break
+ time.sleep(10)
+
+def get_nodes_using_roles(ctx, config, role):
+ """Extract the names of nodes that match a given role from a cluster"""
+ newl = []
+ for _remote, roles_for_host in ctx.cluster.remotes.iteritems():
+ for id_ in teuthology.roles_of_type(roles_for_host, role):
+ rem = _remote
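+            # remotes stringify as 'user@host'; keep the full hostname for
+            # mon nodes and only the short hostname for everything else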
+ if role == 'mon':
+ req1 = str(rem).split('@')[-1]
+ else:
+ req = str(rem).split('.')[0]
+ req1 = str(req).split('@')[1]
+ newl.append(req1)
+ return newl
+
+def get_dev_for_osd(ctx, config):
+ """Get a list of all osd device names."""
+ osd_devs = []
+ for remote, roles_for_host in ctx.cluster.remotes.iteritems():
+ host = remote.name.split('@')[-1]
+ shortname = host.split('.')[0]
+ devs = teuthology.get_scratch_devices(remote)
+ num_osd_per_host = list(teuthology.roles_of_type(roles_for_host, 'osd'))
+ num_osds = len(num_osd_per_host)
+ assert num_osds <= len(devs), 'fewer disks than osds on ' + shortname
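+        # ceph-deploy expects OSD targets in host:device form, e.g. 'somehost:sdb'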
+ for dev in devs[:num_osds]:
+ dev_short = dev.split('/')[-1]
+ osd_devs.append('{host}:{dev}'.format(host=shortname, dev=dev_short))
+ return osd_devs
+
+def get_all_nodes(ctx, config):
+ """Return a string of node names separated by blanks"""
+ nodelist = []
+ for t, k in ctx.config['targets'].iteritems():
+ host = t.split('@')[-1]
+ simple_host = host.split('.')[0]
+ nodelist.append(simple_host)
+ nodelist = " ".join(nodelist)
+ return nodelist
+
+def execute_ceph_deploy(ctx, config, cmd):
+    """Remotely execute a ceph-deploy command and return its exit status."""
+ testdir = teuthology.get_testdir(ctx)
+ ceph_admin = teuthology.get_first_mon(ctx, config)
+ exec_cmd = cmd
+ (remote,) = ctx.cluster.only(ceph_admin).remotes.iterkeys()
+ proc = remote.run(
+ args = [
+ 'cd',
+ '{tdir}/ceph-deploy'.format(tdir=testdir),
+ run.Raw('&&'),
+ run.Raw(exec_cmd),
+ ],
+ check_status=False,
+ )
+ exitstatus = proc.exitstatus
+ return exitstatus
+
+
+@contextlib.contextmanager
+def build_ceph_cluster(ctx, config):
+ """Build a ceph cluster"""
+
+ try:
+ log.info('Building ceph cluster using ceph-deploy...')
+ testdir = teuthology.get_testdir(ctx)
+        ceph_branch = ''
+ if config.get('branch') is not None:
+ cbranch = config.get('branch')
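+            # build the install flag ceph-deploy expects, e.g.
+            # '--stable=bobtail', '--dev=master', or plain '--testing'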
+ for var, val in cbranch.iteritems():
+ if var == 'testing':
+ ceph_branch = '--{var}'.format(var=var)
+                else:
+                    ceph_branch = '--{var}={val}'.format(var=var, val=val)
+ node_dev_list = []
+ all_nodes = get_all_nodes(ctx, config)
+ mds_nodes = get_nodes_using_roles(ctx, config, 'mds')
+ mds_nodes = " ".join(mds_nodes)
+ mon_node = get_nodes_using_roles(ctx, config, 'mon')
+ mon_nodes = " ".join(mon_node)
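+        # assemble the ceph-deploy command lines up front; they are executed
+        # from the ceph-deploy checkout, e.g. './ceph-deploy new mon-a mon-b'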
+ new_mon = './ceph-deploy new'+" "+mon_nodes
+ install_nodes = './ceph-deploy install '+ceph_branch+" "+all_nodes
+ purge_nodes = './ceph-deploy purge'+" "+all_nodes
+ purgedata_nodes = './ceph-deploy purgedata'+" "+all_nodes
+ mon_hostname = mon_nodes.split(' ')[0]
+ mon_hostname = str(mon_hostname)
+ gather_keys = './ceph-deploy gatherkeys'+" "+mon_hostname
+ deploy_mds = './ceph-deploy mds create'+" "+mds_nodes
+ no_of_osds = 0
+
+        if not mon_nodes:
+ raise RuntimeError("no monitor nodes in the config file")
+
+ estatus_new = execute_ceph_deploy(ctx, config, new_mon)
+ if estatus_new != 0:
+ raise RuntimeError("ceph-deploy: new command failed")
+
+ log.info('adding config inputs...')
+ testdir = teuthology.get_testdir(ctx)
+ conf_path = '{tdir}/ceph-deploy/ceph.conf'.format(tdir=testdir)
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (remote,) = ctx.cluster.only(first_mon).remotes.keys()
+
+ lines = None
+ if config.get('conf') is not None:
+ confp = config.get('conf')
+ for section, keys in confp.iteritems():
+ lines = '[{section}]\n'.format(section=section)
+ teuthology.append_lines_to_file(remote, conf_path, lines,
+ sudo=True)
+ for key, value in keys.iteritems():
+ log.info("[%s] %s = %s" % (section, key, value))
+ lines = '{key} = {value}\n'.format(key=key, value=value)
+ teuthology.append_lines_to_file(remote, conf_path, lines,
+ sudo=True)
+
+ estatus_install = execute_ceph_deploy(ctx, config, install_nodes)
+ if estatus_install != 0:
+ raise RuntimeError("ceph-deploy: Failed to install ceph")
+
+ mon_no = None
+ mon_no = config.get('mon_initial_members')
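+        # with mon_initial_members set, bring up that many monitors first and
+        # then add the remaining ones one at a time; otherwise let
+        # 'mon create-initial' handle all of them in one shot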
+ if mon_no is not None:
+ i = 0
+ mon1 = []
+ while(i < mon_no):
+ mon1.append(mon_node[i])
+ i = i + 1
+ initial_mons = " ".join(mon1)
+ for k in range(mon_no, len(mon_node)):
+ mon_create_nodes = './ceph-deploy mon create' + " " + \
+ initial_mons + " " + mon_node[k]
+ estatus_mon = execute_ceph_deploy(ctx, config,
+ mon_create_nodes)
+ if estatus_mon != 0:
+ raise RuntimeError("ceph-deploy: Failed to create monitor")
+ else:
+ mon_create_nodes = './ceph-deploy mon create-initial'
+ estatus_mon = execute_ceph_deploy(ctx, config, mon_create_nodes)
+ if estatus_mon != 0:
+ raise RuntimeError("ceph-deploy: Failed to create monitors")
+
+ estatus_gather = execute_ceph_deploy(ctx, config, gather_keys)
+ max_gather_tries = 90
+ gather_tries = 0
+ while (estatus_gather != 0):
+ gather_tries += 1
+ if gather_tries >= max_gather_tries:
+ msg = 'ceph-deploy was not able to gatherkeys after 15 minutes'
+ raise RuntimeError(msg)
+ estatus_gather = execute_ceph_deploy(ctx, config, gather_keys)
+ time.sleep(10)
+
+ if mds_nodes:
+ estatus_mds = execute_ceph_deploy(ctx, config, deploy_mds)
+ if estatus_mds != 0:
+ raise RuntimeError("ceph-deploy: Failed to deploy mds")
+
+ if config.get('test_mon_destroy') is not None:
+ for d in range(1, len(mon_node)):
+ mon_destroy_nodes = './ceph-deploy mon destroy'+" "+mon_node[d]
+ estatus_mon_d = execute_ceph_deploy(ctx, config,
+ mon_destroy_nodes)
+ if estatus_mon_d != 0:
+ raise RuntimeError("ceph-deploy: Failed to delete monitor")
+
+ node_dev_list = get_dev_for_osd(ctx, config)
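+        # create one OSD per host:device pair; if the first attempt fails,
+        # zap the disk explicitly and retry once before giving up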
+ for d in node_dev_list:
+ osd_create_cmds = './ceph-deploy osd create --zap-disk'+" "+d
+ estatus_osd = execute_ceph_deploy(ctx, config, osd_create_cmds)
+ if estatus_osd == 0:
+ log.info('successfully created osd')
+ no_of_osds += 1
+ else:
+ zap_disk = './ceph-deploy disk zap'+" "+d
+ execute_ceph_deploy(ctx, config, zap_disk)
+ estatus_osd = execute_ceph_deploy(ctx, config, osd_create_cmds)
+ if estatus_osd == 0:
+ log.info('successfully created osd')
+ no_of_osds += 1
+ else:
+ raise RuntimeError("ceph-deploy: Failed to create osds")
+
+ if config.get('wait-for-healthy', True) and no_of_osds >= 2:
+ is_healthy(ctx=ctx, config=None)
+
+ log.info('Setting up client nodes...')
+ conf_path = '/etc/ceph/ceph.conf'
+ admin_keyring_path = '/etc/ceph/ceph.client.admin.keyring'
+ first_mon = teuthology.get_first_mon(ctx, config)
+ (mon0_remote,) = ctx.cluster.only(first_mon).remotes.keys()
+ conf_data = teuthology.get_file(
+ remote=mon0_remote,
+ path=conf_path,
+ sudo=True,
+ )
+ admin_keyring = teuthology.get_file(
+ remote=mon0_remote,
+ path=admin_keyring_path,
+ sudo=True,
+ )
+
+ clients = ctx.cluster.only(teuthology.is_type('client'))
+ for remot, roles_for_host in clients.remotes.iteritems():
+ for id_ in teuthology.roles_of_type(roles_for_host, 'client'):
+ client_keyring = \
+ '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_)
+ mon0_remote.run(
+ args=[
+ 'cd',
+ '{tdir}'.format(tdir=testdir),
+ run.Raw('&&'),
+ 'sudo', 'bash', '-c',
+ run.Raw('"'), 'ceph',
+ 'auth',
+ 'get-or-create',
+ 'client.{id}'.format(id=id_),
+ 'mds', 'allow',
+ 'mon', 'allow *',
+ 'osd', 'allow *',
+ run.Raw('>'),
+ client_keyring,
+ run.Raw('"'),
+ ],
+ )
+ key_data = teuthology.get_file(
+ remote=mon0_remote,
+ path=client_keyring,
+ sudo=True,
+ )
+ teuthology.sudo_write_file(
+ remote=remot,
+ path=client_keyring,
+ data=key_data,
+ perms='0644'
+ )
+ teuthology.sudo_write_file(
+ remote=remot,
+ path=admin_keyring_path,
+ data=admin_keyring,
+ perms='0644'
+ )
+ teuthology.sudo_write_file(
+ remote=remot,
+ path=conf_path,
+ data=conf_data,
+ perms='0644'
+ )
+ else:
+ raise RuntimeError(
+ "The cluster is NOT operational due to insufficient OSDs")
+ yield
+
+ finally:
+ log.info('Stopping ceph...')
+ ctx.cluster.run(args=['sudo', 'stop', 'ceph-all', run.Raw('||'),
+ 'sudo', 'service', 'ceph', 'stop' ])
+
+ if ctx.archive is not None:
+ # archive mon data, too
+ log.info('Archiving mon data...')
+ path = os.path.join(ctx.archive, 'data')
+ os.makedirs(path)
+ mons = ctx.cluster.only(teuthology.is_type('mon'))
+ for remote, roles in mons.remotes.iteritems():
+ for role in roles:
+ if role.startswith('mon.'):
+ teuthology.pull_directory_tarball(
+ remote,
+ '/var/lib/ceph/mon',
+ path + '/' + role + '.tgz')
+
+ log.info('Compressing logs...')
+ run.wait(
+ ctx.cluster.run(
+ args=[
+ 'sudo',
+ 'find',
+ '/var/log/ceph',
+ '-name',
+ '*.log',
+ '-print0',
+ run.Raw('|'),
+ 'sudo',
+ 'xargs',
+ '-0',
+ '--no-run-if-empty',
+ '--',
+ 'gzip',
+ '--',
+ ],
+ wait=False,
+ ),
+ )
+
+ log.info('Archiving logs...')
+ path = os.path.join(ctx.archive, 'remote')
+ os.makedirs(path)
+ for remote in ctx.cluster.remotes.iterkeys():
+ sub = os.path.join(path, remote.shortname)
+ os.makedirs(sub)
+ teuthology.pull_directory(remote, '/var/log/ceph',
+ os.path.join(sub, 'log'))
+
+ # Prevent these from being undefined if the try block fails
+ all_nodes = get_all_nodes(ctx, config)
+ purge_nodes = './ceph-deploy purge'+" "+all_nodes
+ purgedata_nodes = './ceph-deploy purgedata'+" "+all_nodes
+
+ log.info('Purging package...')
+ execute_ceph_deploy(ctx, config, purge_nodes)
+ log.info('Purging data...')
+ execute_ceph_deploy(ctx, config, purgedata_nodes)
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Set up and tear down a Ceph cluster.
+
+ For example::
+
+ tasks:
+ - install:
+ extras: yes
+ - ssh_keys:
+ - ceph-deploy:
+ branch:
+ stable: bobtail
+ mon_initial_members: 1
+
+ tasks:
+ - install:
+ extras: yes
+ - ssh_keys:
+ - ceph-deploy:
+ branch:
+ dev: master
+ conf:
+ mon:
+ debug mon = 20
+
+ tasks:
+ - install:
+ extras: yes
+ - ssh_keys:
+ - ceph-deploy:
+ branch:
+ testing:
+ """
+ if config is None:
+ config = {}
+
+    assert isinstance(config, dict), \
+        "task ceph-deploy only supports a dictionary for configuration"
+
+    overrides = ctx.config.get('overrides', {})
+    teuthology.deep_merge(config, overrides.get('ceph-deploy', {}))
+
+ if config.get('branch') is not None:
+ assert isinstance(config['branch'], dict), 'branch must be a dictionary'
+
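+    # ship the helper utilities, fetch ceph-deploy, then stand up (and later
+    # tear down) the cluster, unwinding in reverse order on exit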
+ with contextutil.nested(
+ lambda: ceph_fn.ship_utilities(ctx=ctx, config=None),
+ lambda: download_ceph_deploy(ctx=ctx, config=config),
+ lambda: build_ceph_cluster(ctx=ctx, config=dict(
+ conf=config.get('conf', {}),
+ branch=config.get('branch',{}),
+ mon_initial_members=config.get('mon_initial_members', None),
+ test_mon_destroy=config.get('test_mon_destroy', None),
+ )),
+ ):
+ yield
--- /dev/null
+"""
+Ceph FUSE client task
+"""
+import contextlib
+import logging
+import os
+
+from teuthology import misc as teuthology
+from ..orchestra import run
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Mount/unmount a ``ceph-fuse`` client.
+
+ The config is optional and defaults to mounting on all clients. If
+ a config is given, it is expected to be a list of clients to do
+ this operation on. This lets you e.g. set up one client with
+ ``ceph-fuse`` and another with ``kclient``.
+
+ Example that mounts all clients::
+
+ tasks:
+ - ceph:
+ - ceph-fuse:
+ - interactive:
+
+    Example that uses both ``kclient`` and ``ceph-fuse``::
+
+ tasks:
+ - ceph:
+ - ceph-fuse: [client.0]
+ - kclient: [client.1]
+ - interactive:
+
+    Example that enables valgrind::
+
+ tasks:
+ - ceph:
+ - ceph-fuse:
+ client.0:
+ valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
+ - interactive:
+
+ :param ctx: Context
+ :param config: Configuration
+ """
+ log.info('Mounting ceph-fuse clients...')
+ fuse_daemons = {}
+
+ testdir = teuthology.get_testdir(ctx)
+
+ if config is None:
+ config = dict(('client.{id}'.format(id=id_), None)
+ for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
+ elif isinstance(config, list):
+ config = dict((name, None) for name in config)
+
+ overrides = ctx.config.get('overrides', {})
+ teuthology.deep_merge(config, overrides.get('ceph-fuse', {}))
+
+ clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))
+
+ for id_, remote in clients:
+ client_config = config.get("client.%s" % id_)
+ if client_config is None:
+ client_config = {}
+ log.info("Client client.%s config is %s" % (id_, client_config))
+
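+        # coverage and valgrind runs are shut down with SIGTERM (rather than
+        # SIGKILL) so they can exit cleanly and flush their output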
+ daemon_signal = 'kill'
+ if client_config.get('coverage') or client_config.get('valgrind') is not None:
+ daemon_signal = 'term'
+
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
+ log.info('Mounting ceph-fuse client.{id} at {remote} {mnt}...'.format(
+ id=id_, remote=remote,mnt=mnt))
+
+ remote.run(
+ args=[
+ 'mkdir',
+ '--',
+ mnt,
+ ],
+ )
+
+ run_cmd=[
+ 'sudo',
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'daemon-helper',
+ daemon_signal,
+ ]
+ run_cmd_tail=[
+ 'ceph-fuse',
+ '-f',
+ '--name', 'client.{id}'.format(id=id_),
+ # TODO ceph-fuse doesn't understand dash dash '--',
+ mnt,
+ ]
+
+ if client_config.get('valgrind') is not None:
+ run_cmd = teuthology.get_valgrind_args(
+ testdir,
+ 'client.{id}'.format(id=id_),
+ run_cmd,
+ client_config.get('valgrind'),
+ )
+
+ run_cmd.extend(run_cmd_tail)
+
+ proc = remote.run(
+ args=run_cmd,
+ logger=log.getChild('ceph-fuse.{id}'.format(id=id_)),
+ stdin=run.PIPE,
+ wait=False,
+ )
+ fuse_daemons[id_] = proc
+
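+    # wait for each mount to actually show up, then open the mountpoints up
+    # (sticky, world-writable) so test workloads can write to them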
+ for id_, remote in clients:
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
+ teuthology.wait_until_fuse_mounted(
+ remote=remote,
+ fuse=fuse_daemons[id_],
+ mountpoint=mnt,
+ )
+ remote.run(args=['sudo', 'chmod', '1777', '{tdir}/mnt.{id}'.format(tdir=testdir, id=id_)],)
+
+ try:
+ yield
+ finally:
+ log.info('Unmounting ceph-fuse clients...')
+ for id_, remote in clients:
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
+ try:
+ remote.run(
+ args=[
+ 'sudo',
+ 'fusermount',
+ '-u',
+ mnt,
+ ],
+ )
+ except run.CommandFailedError:
+ log.info('Failed to unmount ceph-fuse on {name}, aborting...'.format(name=remote.name))
+ # abort the fuse mount, killing all hung processes
+ remote.run(
+ args=[
+ 'if', 'test', '-e', '/sys/fs/fuse/connections/*/abort',
+ run.Raw(';'), 'then',
+ 'echo',
+ '1',
+ run.Raw('>'),
+ run.Raw('/sys/fs/fuse/connections/*/abort'),
+ run.Raw(';'), 'fi',
+ ],
+ )
+                # make sure it's unmounted
+ remote.run(
+ args=[
+ 'sudo',
+ 'umount',
+ '-l',
+ '-f',
+ mnt,
+ ],
+ )
+
+ run.wait(fuse_daemons.itervalues())
+
+ for id_, remote in clients:
+ mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_))
+ remote.run(
+ args=[
+ 'rmdir',
+ '--',
+ mnt,
+ ],
+ )
+++ /dev/null
-"""
-Mount cifs clients. Unmount when finished.
-"""
-import contextlib
-import logging
-import os
-
-from teuthology import misc as teuthology
-from ..orchestra import run
-
-log = logging.getLogger(__name__)
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Mount/unmount a cifs client.
-
- The config is optional and defaults to mounting on all clients. If
- a config is given, it is expected to be a list of clients to do
- this operation on.
-
- Example that starts smbd and mounts cifs on all nodes::
-
- tasks:
- - ceph:
- - samba:
- - cifs-mount:
- - interactive:
-
- Example that splits smbd and cifs:
-
- tasks:
- - ceph:
- - samba: [samba.0]
- - cifs-mount: [client.0]
- - ceph-fuse: [client.1]
- - interactive:
-
- Example that specifies the share name:
-
- tasks:
- - ceph:
- - ceph-fuse:
- - samba:
- samba.0:
- cephfuse: "{testdir}/mnt.0"
- - cifs-mount:
- client.0:
- share: cephfuse
-
- :param ctx: Context
- :param config: Configuration
- """
- log.info('Mounting cifs clients...')
-
- if config is None:
- config = dict(('client.{id}'.format(id=id_), None)
- for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
- elif isinstance(config, list):
- config = dict((name, None) for name in config)
-
- clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))
-
- from teuthology.task.samba import get_sambas
- samba_roles = ['samba.{id_}'.format(id_=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'samba')]
- sambas = list(get_sambas(ctx=ctx, roles=samba_roles))
- (ip, _) = sambas[0][1].ssh.get_transport().getpeername()
- log.info('samba ip: {ip}'.format(ip=ip))
-
- for id_, remote in clients:
- mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
- log.info('Mounting cifs client.{id} at {remote} {mnt}...'.format(
- id=id_, remote=remote,mnt=mnt))
-
- remote.run(
- args=[
- 'mkdir',
- '--',
- mnt,
- ],
- )
-
- rolestr = 'client.{id_}'.format(id_=id_)
- unc = "ceph"
- log.info("config: {c}".format(c=config))
- if config[rolestr] is not None and 'share' in config[rolestr]:
- unc = config[rolestr]['share']
-
- remote.run(
- args=[
- 'sudo',
- 'mount',
- '-t',
- 'cifs',
- '//{sambaip}/{unc}'.format(sambaip=ip, unc=unc),
- '-o',
- 'username=ubuntu,password=ubuntu',
- mnt,
- ],
- )
-
- remote.run(
- args=[
- 'sudo',
- 'chown',
- 'ubuntu:ubuntu',
- '{m}/'.format(m=mnt),
- ],
- )
-
- try:
- yield
- finally:
- log.info('Unmounting cifs clients...')
- for id_, remote in clients:
- remote.run(
- args=[
- 'sudo',
- 'umount',
- mnt,
- ],
- )
- for id_, remote in clients:
- while True:
- try:
- remote.run(
- args=[
- 'rmdir', '--', mnt,
- run.Raw('2>&1'),
- run.Raw('|'),
- 'grep', 'Device or resource busy',
- ],
- )
- import time
- time.sleep(1)
- except Exception:
- break
--- /dev/null
+"""
+Mount cifs clients. Unmount when finished.
+"""
+import contextlib
+import logging
+import os
+
+from teuthology import misc as teuthology
+from ..orchestra import run
+
+log = logging.getLogger(__name__)
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Mount/unmount a cifs client.
+
+ The config is optional and defaults to mounting on all clients. If
+ a config is given, it is expected to be a list of clients to do
+ this operation on.
+
+ Example that starts smbd and mounts cifs on all nodes::
+
+ tasks:
+ - ceph:
+ - samba:
+ - cifs-mount:
+ - interactive:
+
+    Example that splits smbd and cifs::
+
+ tasks:
+ - ceph:
+ - samba: [samba.0]
+ - cifs-mount: [client.0]
+ - ceph-fuse: [client.1]
+ - interactive:
+
+    Example that specifies the share name::
+
+ tasks:
+ - ceph:
+ - ceph-fuse:
+ - samba:
+ samba.0:
+ cephfuse: "{testdir}/mnt.0"
+ - cifs-mount:
+ client.0:
+ share: cephfuse
+
+ :param ctx: Context
+ :param config: Configuration
+ """
+ log.info('Mounting cifs clients...')
+
+ if config is None:
+ config = dict(('client.{id}'.format(id=id_), None)
+ for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client'))
+ elif isinstance(config, list):
+ config = dict((name, None) for name in config)
+
+ clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys()))
+
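+    # find the first samba server in the job and mount the share from its IP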
+ from teuthology.task.samba import get_sambas
+ samba_roles = ['samba.{id_}'.format(id_=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'samba')]
+ sambas = list(get_sambas(ctx=ctx, roles=samba_roles))
+ (ip, _) = sambas[0][1].ssh.get_transport().getpeername()
+ log.info('samba ip: {ip}'.format(ip=ip))
+
+ for id_, remote in clients:
+ mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
+ log.info('Mounting cifs client.{id} at {remote} {mnt}...'.format(
+ id=id_, remote=remote,mnt=mnt))
+
+ remote.run(
+ args=[
+ 'mkdir',
+ '--',
+ mnt,
+ ],
+ )
+
+ rolestr = 'client.{id_}'.format(id_=id_)
+ unc = "ceph"
+ log.info("config: {c}".format(c=config))
+ if config[rolestr] is not None and 'share' in config[rolestr]:
+ unc = config[rolestr]['share']
+
+ remote.run(
+ args=[
+ 'sudo',
+ 'mount',
+ '-t',
+ 'cifs',
+ '//{sambaip}/{unc}'.format(sambaip=ip, unc=unc),
+ '-o',
+ 'username=ubuntu,password=ubuntu',
+ mnt,
+ ],
+ )
+
+ remote.run(
+ args=[
+ 'sudo',
+ 'chown',
+ 'ubuntu:ubuntu',
+ '{m}/'.format(m=mnt),
+ ],
+ )
+
+ try:
+ yield
+ finally:
+ log.info('Unmounting cifs clients...')
+        for id_, remote in clients:
+            mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
+ remote.run(
+ args=[
+ 'sudo',
+ 'umount',
+ mnt,
+ ],
+ )
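+        # rmdir will keep failing with 'Device or resource busy' until the
+        # kernel releases the mountpoint; retry until grep no longer matches
+        # (remote.run then raises and we break out of the loop)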
+        for id_, remote in clients:
+            mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_))
+ while True:
+ try:
+ remote.run(
+ args=[
+ 'rmdir', '--', mnt,
+ run.Raw('2>&1'),
+ run.Raw('|'),
+ 'grep', 'Device or resource busy',
+ ],
+ )
+ import time
+ time.sleep(1)
+ except Exception:
+ break
+++ /dev/null
-"""
-Run a series of rgw admin commands through the rest interface.
-
-The test cases in this file have been annotated for inventory.
-To extract the inventory (in csv format) use the command:
-
- grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //'
-
-"""
-from cStringIO import StringIO
-import logging
-import json
-
-import boto.exception
-import boto.s3.connection
-import boto.s3.acl
-
-import requests
-import time
-
-from boto.connection import AWSAuthConnection
-from teuthology import misc as teuthology
-
-log = logging.getLogger(__name__)
-
-def successful_ops(out):
- """
- Extract successful operations
- :param out: list
- """
- summary = out['summary']
- if len(summary) == 0:
- return 0
- entry = summary[0]
- return entry['total']['successful_ops']
-
-def rgwadmin(ctx, client, cmd):
- """
- Perform rgw admin command
-
- :param client: client
- :param cmd: command to execute.
- :return: command exit status, json result.
- """
- log.info('radosgw-admin: %s' % cmd)
- testdir = teuthology.get_testdir(ctx)
- pre = [
- 'adjust-ulimits',
- 'ceph-coverage',
- '{tdir}/archive/coverage'.format(tdir=testdir),
- 'radosgw-admin',
- '--log-to-stderr',
- '--format', 'json',
- ]
- pre.extend(cmd)
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
- proc = remote.run(
- args=pre,
- check_status=False,
- stdout=StringIO(),
- stderr=StringIO(),
- )
- r = proc.exitstatus
- out = proc.stdout.getvalue()
- j = None
- if not r and out != '':
- try:
- j = json.loads(out)
- log.info(' json result: %s' % j)
- except ValueError:
- j = out
- log.info(' raw result: %s' % j)
- return (r, j)
-
-
-def rgwadmin_rest(connection, cmd, params=None, headers=None, raw=False):
- """
- perform a rest command
- """
- log.info('radosgw-admin-rest: %s %s' % (cmd, params))
- put_cmds = ['create', 'link', 'add']
- post_cmds = ['unlink', 'modify']
- delete_cmds = ['trim', 'rm', 'process']
- get_cmds = ['check', 'info', 'show', 'list']
-
- bucket_sub_resources = ['object', 'policy', 'index']
- user_sub_resources = ['subuser', 'key', 'caps']
- zone_sub_resources = ['pool', 'log', 'garbage']
-
- def get_cmd_method_and_handler(cmd):
- """
- Get the rest command and handler from information in cmd and
- from the imported requests object.
- """
- if cmd[1] in put_cmds:
- return 'PUT', requests.put
- elif cmd[1] in delete_cmds:
- return 'DELETE', requests.delete
- elif cmd[1] in post_cmds:
- return 'POST', requests.post
- elif cmd[1] in get_cmds:
- return 'GET', requests.get
-
- def get_resource(cmd):
- """
- Get the name of the resource from information in cmd.
- """
- if cmd[0] == 'bucket' or cmd[0] in bucket_sub_resources:
- if cmd[0] == 'bucket':
- return 'bucket', ''
- else:
- return 'bucket', cmd[0]
- elif cmd[0] == 'user' or cmd[0] in user_sub_resources:
- if cmd[0] == 'user':
- return 'user', ''
- else:
- return 'user', cmd[0]
- elif cmd[0] == 'usage':
- return 'usage', ''
- elif cmd[0] == 'zone' or cmd[0] in zone_sub_resources:
- if cmd[0] == 'zone':
- return 'zone', ''
- else:
- return 'zone', cmd[0]
-
- def build_admin_request(conn, method, resource = '', headers=None, data='',
- query_args=None, params=None):
- """
- Build an administative request adapted from the build_request()
- method of boto.connection
- """
-
- path = conn.calling_format.build_path_base('admin', resource)
- auth_path = conn.calling_format.build_auth_path('admin', resource)
- host = conn.calling_format.build_host(conn.server_name(), 'admin')
- if query_args:
- path += '?' + query_args
- boto.log.debug('path=%s' % path)
- auth_path += '?' + query_args
- boto.log.debug('auth_path=%s' % auth_path)
- return AWSAuthConnection.build_base_http_request(conn, method, path,
- auth_path, params, headers, data, host)
-
- method, handler = get_cmd_method_and_handler(cmd)
- resource, query_args = get_resource(cmd)
- request = build_admin_request(connection, method, resource,
- query_args=query_args, headers=headers)
-
- url = '{protocol}://{host}{path}'.format(protocol=request.protocol,
- host=request.host, path=request.path)
-
- request.authorize(connection=connection)
- result = handler(url, params=params, headers=request.headers)
-
- if raw:
-        log.info(' text result: %s' % result.text)
-        return result.status_code, result.text
- else:
- log.info(' json result: %s' % result.json())
- return result.status_code, result.json()
-
-
-def task(ctx, config):
- """
- Test radosgw-admin functionality through the RESTful interface
- """
- assert config is None or isinstance(config, list) \
- or isinstance(config, dict), \
- "task s3tests only supports a list or dictionary for configuration"
- all_clients = ['client.{id}'.format(id=id_)
- for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
- if config is None:
- config = all_clients
- if isinstance(config, list):
- config = dict.fromkeys(config)
- clients = config.keys()
-
- # just use the first client...
- client = clients[0]
-
- ##
- admin_user = 'ada'
- admin_display_name = 'Ms. Admin User'
- admin_access_key = 'MH1WC2XQ1S8UISFDZC8W'
- admin_secret_key = 'dQyrTPA0s248YeN5bBv4ukvKU0kh54LWWywkrpoG'
- admin_caps = 'users=read, write; usage=read, write; buckets=read, write; zone=read, write'
-
- user1 = 'foo'
- user2 = 'fud'
- subuser1 = 'foo:foo1'
- subuser2 = 'foo:foo2'
- display_name1 = 'Foo'
- display_name2 = 'Fud'
- email = 'foo@foo.com'
- access_key = '9te6NH5mcdcq0Tc5i8i1'
- secret_key = 'Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu'
- access_key2 = 'p5YnriCv1nAtykxBrupQ'
- secret_key2 = 'Q8Tk6Q/27hfbFSYdSkPtUqhqx1GgzvpXa4WARozh'
- swift_secret1 = 'gpS2G9RREMrnbqlp29PP2D36kgPR1tm72n5fPYfL'
- swift_secret2 = 'ri2VJQcKSYATOY6uaDUX7pxgkW+W1YmC6OCxPHwy'
-
- bucket_name = 'myfoo'
-
- # legend (test cases can be easily grep-ed out)
- # TESTCASE 'testname','object','method','operation','assertion'
- # TESTCASE 'create-admin-user','user','create','administrative user','succeeds'
- (err, out) = rgwadmin(ctx, client, [
- 'user', 'create',
- '--uid', admin_user,
- '--display-name', admin_display_name,
- '--access-key', admin_access_key,
- '--secret', admin_secret_key,
- '--max-buckets', '0',
- '--caps', admin_caps
- ])
- logging.error(out)
- logging.error(err)
- assert not err
-
- (remote,) = ctx.cluster.only(client).remotes.iterkeys()
- remote_host = remote.name.split('@')[1]
- admin_conn = boto.s3.connection.S3Connection(
- aws_access_key_id=admin_access_key,
- aws_secret_access_key=admin_secret_key,
- is_secure=False,
- port=7280,
- host=remote_host,
- calling_format=boto.s3.connection.OrdinaryCallingFormat(),
- )
-
- # TESTCASE 'info-nosuch','user','info','non-existent user','fails'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {"uid": user1})
- assert ret == 404
-
- # TESTCASE 'create-ok','user','create','w/all valid info','succeeds'
- (ret, out) = rgwadmin_rest(admin_conn,
- ['user', 'create'],
- {'uid' : user1,
- 'display-name' : display_name1,
- 'email' : email,
- 'access-key' : access_key,
- 'secret-key' : secret_key,
- 'max-buckets' : '4'
- })
-
- assert ret == 200
-
- # TESTCASE 'info-existing','user','info','existing user','returns correct info'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
-
- assert out['user_id'] == user1
- assert out['email'] == email
- assert out['display_name'] == display_name1
- assert len(out['keys']) == 1
- assert out['keys'][0]['access_key'] == access_key
- assert out['keys'][0]['secret_key'] == secret_key
- assert not out['suspended']
-
- # TESTCASE 'suspend-ok','user','suspend','active user','succeeds'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : True})
- assert ret == 200
-
- # TESTCASE 'suspend-suspended','user','suspend','suspended user','succeeds w/advisory'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
- assert ret == 200
- assert out['suspended']
-
- # TESTCASE 're-enable','user','enable','suspended user','succeeds'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : 'false'})
- assert not err
-
- # TESTCASE 'info-re-enabled','user','info','re-enabled user','no longer suspended'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
- assert ret == 200
- assert not out['suspended']
-
- # TESTCASE 'add-keys','key','create','w/valid info','succeeds'
- (ret, out) = rgwadmin_rest(admin_conn,
- ['key', 'create'],
- {'uid' : user1,
- 'access-key' : access_key2,
- 'secret-key' : secret_key2
- })
-
-
- assert ret == 200
-
- # TESTCASE 'info-new-key','user','info','after key addition','returns all keys'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
- assert ret == 200
- assert len(out['keys']) == 2
- assert out['keys'][0]['access_key'] == access_key2 or out['keys'][1]['access_key'] == access_key2
- assert out['keys'][0]['secret_key'] == secret_key2 or out['keys'][1]['secret_key'] == secret_key2
-
- # TESTCASE 'rm-key','key','rm','newly added key','succeeds, key is removed'
- (ret, out) = rgwadmin_rest(admin_conn,
- ['key', 'rm'],
- {'uid' : user1,
- 'access-key' : access_key2
- })
-
- assert ret == 200
-
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
-
- assert len(out['keys']) == 1
- assert out['keys'][0]['access_key'] == access_key
- assert out['keys'][0]['secret_key'] == secret_key
-
- # TESTCASE 'add-swift-key','key','create','swift key','succeeds'
- (ret, out) = rgwadmin_rest(admin_conn,
- ['subuser', 'create'],
- {'subuser' : subuser1,
- 'secret-key' : swift_secret1,
- 'key-type' : 'swift'
- })
-
- assert ret == 200
-
- # TESTCASE 'info-swift-key','user','info','after key addition','returns all keys'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
- assert ret == 200
- assert len(out['swift_keys']) == 1
- assert out['swift_keys'][0]['user'] == subuser1
- assert out['swift_keys'][0]['secret_key'] == swift_secret1
-
- # TESTCASE 'add-swift-subuser','key','create','swift sub-user key','succeeds'
- (ret, out) = rgwadmin_rest(admin_conn,
- ['subuser', 'create'],
- {'subuser' : subuser2,
- 'secret-key' : swift_secret2,
- 'key-type' : 'swift'
- })
-
- assert ret == 200
-
- # TESTCASE 'info-swift-subuser','user','info','after key addition','returns all sub-users/keys'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
- assert ret == 200
- assert len(out['swift_keys']) == 2
- assert out['swift_keys'][0]['user'] == subuser2 or out['swift_keys'][1]['user'] == subuser2
- assert out['swift_keys'][0]['secret_key'] == swift_secret2 or out['swift_keys'][1]['secret_key'] == swift_secret2
-
- # TESTCASE 'rm-swift-key1','key','rm','subuser','succeeds, one key is removed'
- (ret, out) = rgwadmin_rest(admin_conn,
- ['key', 'rm'],
- {'subuser' : subuser1,
- 'key-type' :'swift'
- })
-
- assert ret == 200
-
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
- assert len(out['swift_keys']) == 1
-
- # TESTCASE 'rm-subuser','subuser','rm','subuser','success, subuser is removed'
- (ret, out) = rgwadmin_rest(admin_conn,
- ['subuser', 'rm'],
- {'subuser' : subuser1
- })
-
- assert ret == 200
-
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
- assert len(out['subusers']) == 1
-
-    # TESTCASE 'rm-subuser-with-keys','subuser','rm','subuser','succeeds, second subuser and key is removed'
- (ret, out) = rgwadmin_rest(admin_conn,
- ['subuser', 'rm'],
- {'subuser' : subuser2,
- 'key-type' : 'swift',
-             'purge-keys' : True
- })
-
- assert ret == 200
-
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
- assert len(out['swift_keys']) == 0
- assert len(out['subusers']) == 0
-
- # TESTCASE 'bucket-stats','bucket','info','no session/buckets','succeeds, empty list'
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1})
- assert ret == 200
- assert len(out) == 0
-
- # connect to rgw
- connection = boto.s3.connection.S3Connection(
- aws_access_key_id=access_key,
- aws_secret_access_key=secret_key,
- is_secure=False,
- port=7280,
- host=remote_host,
- calling_format=boto.s3.connection.OrdinaryCallingFormat(),
- )
-
- # TESTCASE 'bucket-stats2','bucket','stats','no buckets','succeeds, empty list'
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1, 'stats' : True})
- assert ret == 200
- assert len(out) == 0
-
- # create a first bucket
- bucket = connection.create_bucket(bucket_name)
-
- # TESTCASE 'bucket-list','bucket','list','one bucket','succeeds, expected list'
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1})
- assert ret == 200
- assert len(out) == 1
- assert out[0] == bucket_name
-
- # TESTCASE 'bucket-stats3','bucket','stats','new empty bucket','succeeds, empty list'
- (ret, out) = rgwadmin_rest(admin_conn,
- ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True})
-
- assert ret == 200
- assert out['owner'] == user1
- bucket_id = out['id']
-
- # TESTCASE 'bucket-stats4','bucket','stats','new empty bucket','succeeds, expected bucket ID'
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1, 'stats' : True})
- assert ret == 200
- assert len(out) == 1
- assert out[0]['id'] == bucket_id # does it return the same ID twice in a row?
-
- # use some space
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('one')
-
- # TESTCASE 'bucket-stats5','bucket','stats','after creating key','succeeds, lists one non-empty object'
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True})
- assert ret == 200
- assert out['id'] == bucket_id
- assert out['usage']['rgw.main']['num_objects'] == 1
- assert out['usage']['rgw.main']['size_kb'] > 0
-
- # reclaim it
- key.delete()
-
- # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'fails', 'access denied error'
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'unlink'], {'uid' : user1, 'bucket' : bucket_name})
-
- assert ret == 200
-
- # create a second user to link the bucket to
- (ret, out) = rgwadmin_rest(admin_conn,
- ['user', 'create'],
- {'uid' : user2,
- 'display-name' : display_name2,
- 'access-key' : access_key2,
- 'secret-key' : secret_key2,
- 'max-buckets' : '1',
- })
-
- assert ret == 200
-
- # try creating an object with the first user before the bucket is relinked
- denied = False
- key = boto.s3.key.Key(bucket)
-
- try:
- key.set_contents_from_string('two')
- except boto.exception.S3ResponseError:
- denied = True
-
- assert not denied
-
- # delete the object
- key.delete()
-
- # link the bucket to another user
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'link'], {'uid' : user2, 'bucket' : bucket_name})
-
- assert ret == 200
-
- # try creating an object with the first user which should cause an error
- key = boto.s3.key.Key(bucket)
-
- try:
- key.set_contents_from_string('three')
- except boto.exception.S3ResponseError:
- denied = True
-
- assert denied
-
- # relink the bucket to the first user and delete the second user
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'link'], {'uid' : user1, 'bucket' : bucket_name})
- assert ret == 200
-
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user2})
- assert ret == 200
-
- # TESTCASE 'object-rm', 'object', 'rm', 'remove object', 'succeeds, object is removed'
-
- # upload an object
- object_name = 'four'
- key = boto.s3.key.Key(bucket, object_name)
- key.set_contents_from_string(object_name)
-
- # now delete it
- (ret, out) = rgwadmin_rest(admin_conn, ['object', 'rm'], {'bucket' : bucket_name, 'object' : object_name})
- assert ret == 200
-
- # TESTCASE 'bucket-stats6','bucket','stats','after deleting key','succeeds, lists one no objects'
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True})
- assert ret == 200
- assert out['id'] == bucket_id
- assert out['usage']['rgw.main']['num_objects'] == 0
-
- # create a bucket for deletion stats
- useless_bucket = connection.create_bucket('useless_bucket')
- useless_key = useless_bucket.new_key('useless_key')
- useless_key.set_contents_from_string('useless string')
-
- # delete it
- useless_key.delete()
- useless_bucket.delete()
-
- # wait for the statistics to flush
- time.sleep(60)
-
- # need to wait for all usage data to get flushed, should take up to 30 seconds
- timestamp = time.time()
- while time.time() - timestamp <= (20 * 60): # wait up to 20 minutes
- (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'categories' : 'delete_obj'}) # last operation we did is delete obj, wait for it to flush
-
- if successful_ops(out) > 0:
- break
- time.sleep(1)
-
- assert time.time() - timestamp <= (20 * 60)
-
- # TESTCASE 'usage-show' 'usage' 'show' 'all usage' 'succeeds'
- (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'])
- assert ret == 200
- assert len(out['entries']) > 0
- assert len(out['summary']) > 0
- user_summary = out['summary'][0]
- total = user_summary['total']
- assert total['successful_ops'] > 0
-
- # TESTCASE 'usage-show2' 'usage' 'show' 'user usage' 'succeeds'
- (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1})
- assert ret == 200
- assert len(out['entries']) > 0
- assert len(out['summary']) > 0
- user_summary = out['summary'][0]
- for entry in user_summary['categories']:
- assert entry['successful_ops'] > 0
- assert user_summary['user'] == user1
-
- # TESTCASE 'usage-show3' 'usage' 'show' 'user usage categories' 'succeeds'
- test_categories = ['create_bucket', 'put_obj', 'delete_obj', 'delete_bucket']
- for cat in test_categories:
- (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1, 'categories' : cat})
- assert ret == 200
- assert len(out['summary']) > 0
- user_summary = out['summary'][0]
- assert user_summary['user'] == user1
- assert len(user_summary['categories']) == 1
- entry = user_summary['categories'][0]
- assert entry['category'] == cat
- assert entry['successful_ops'] > 0
-
- # TESTCASE 'usage-trim' 'usage' 'trim' 'user usage' 'succeeds, usage removed'
- (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'trim'], {'uid' : user1})
- assert ret == 200
- (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1})
- assert ret == 200
- assert len(out['entries']) == 0
- assert len(out['summary']) == 0
-
- # TESTCASE 'user-suspend2','user','suspend','existing user','succeeds'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : True})
- assert ret == 200
-
- # TESTCASE 'user-suspend3','user','suspend','suspended user','cannot write objects'
- try:
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('five')
- except boto.exception.S3ResponseError as e:
- assert e.status == 403
-
- # TESTCASE 'user-renable2','user','enable','suspended user','succeeds'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : 'false'})
- assert ret == 200
-
- # TESTCASE 'user-renable3','user','enable','reenabled user','can write objects'
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('six')
-
- # TESTCASE 'garbage-list', 'garbage', 'list', 'get list of objects ready for garbage collection'
-
- # create an object large enough to be split into multiple parts
- test_string = 'foo'*10000000
-
- big_key = boto.s3.key.Key(bucket)
- big_key.set_contents_from_string(test_string)
-
- # now delete the head
- big_key.delete()
-
- # TESTCASE 'rm-user-buckets','user','rm','existing user','fails, still has buckets'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1})
- assert ret == 409
-
- # delete should fail because ``key`` still exists
- try:
- bucket.delete()
- except boto.exception.S3ResponseError as e:
- assert e.status == 409
-
- key.delete()
- bucket.delete()
-
- # TESTCASE 'policy', 'bucket', 'policy', 'get bucket policy', 'returns S3 policy'
- bucket = connection.create_bucket(bucket_name)
-
- # create an object
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('seven')
-
- # should be private already but guarantee it
- key.set_acl('private')
-
- (ret, out) = rgwadmin_rest(admin_conn, ['policy', 'show'], {'bucket' : bucket.name, 'object' : key.key})
- assert ret == 200
-
- acl = key.get_xml_acl()
- assert acl == out.strip('\n')
-
- # add another grantee by making the object public read
- key.set_acl('public-read')
-
- (ret, out) = rgwadmin_rest(admin_conn, ['policy', 'show'], {'bucket' : bucket.name, 'object' : key.key})
- assert ret == 200
-
- acl = key.get_xml_acl()
- assert acl == out.strip('\n')
-
- # TESTCASE 'rm-bucket', 'bucket', 'rm', 'bucket with objects', 'succeeds'
- bucket = connection.create_bucket(bucket_name)
- key_name = ['eight', 'nine', 'ten', 'eleven']
- for i in range(4):
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string(key_name[i])
-
- (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'rm'], {'bucket' : bucket_name, 'purge-objects' : True})
- assert ret == 200
-
- # TESTCASE 'caps-add', 'caps', 'add', 'add user cap', 'succeeds'
- caps = 'usage=read'
- (ret, out) = rgwadmin_rest(admin_conn, ['caps', 'add'], {'uid' : user1, 'user-caps' : caps})
- assert ret == 200
- assert out[0]['perm'] == 'read'
-
- # TESTCASE 'caps-rm', 'caps', 'rm', 'remove existing cap from user', 'succeeds'
- (ret, out) = rgwadmin_rest(admin_conn, ['caps', 'rm'], {'uid' : user1, 'user-caps' : caps})
- assert ret == 200
- assert not out
-
- # TESTCASE 'rm-user','user','rm','existing user','fails, still has buckets'
- bucket = connection.create_bucket(bucket_name)
- key = boto.s3.key.Key(bucket)
-
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1})
- assert ret == 409
-
- # TESTCASE 'rm-user2', 'user', 'rm', 'user with data', 'succeeds'
- bucket = connection.create_bucket(bucket_name)
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('twelve')
-
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1, 'purge-data' : True})
- assert ret == 200
-
- # TESTCASE 'rm-user3','user','info','deleted user','fails'
- (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
- assert ret == 404
-
+++ /dev/null
-"""
-Rgw admin testing against a running instance
-"""
-# The test cases in this file have been annotated for inventory.
-# To extract the inventory (in csv format) use the command:
-#
-# grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //'
-#
-
-import copy
-import json
-import logging
-import time
-
-from cStringIO import StringIO
-
-import boto.exception
-import boto.s3.connection
-import boto.s3.acl
-
-import teuthology.task_util.rgw as rgw_utils
-
-from teuthology import misc as teuthology
-from teuthology.task_util.rgw import rgwadmin
-
-log = logging.getLogger(__name__)
-
-
-def successful_ops(out):
- """Extract total from the first summary entry (presumed to be only one)"""
- summary = out['summary']
- if len(summary) == 0:
- return 0
- entry = summary[0]
- return entry['total']['successful_ops']
-
-
-def task(ctx, config):
- """
- Test radosgw-admin functionality against a running rgw instance.
- """
- global log
- assert config is None or isinstance(config, list) \
- or isinstance(config, dict), \
- "task s3tests only supports a list or dictionary for configuration"
- all_clients = ['client.{id}'.format(id=id_)
- for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
- if config is None:
- config = all_clients
- if isinstance(config, list):
- config = dict.fromkeys(config)
- clients = config.keys()
-
- multi_region_run = rgw_utils.multi_region_enabled(ctx)
-
- client = clients[0]  # default choice, multi-region code may overwrite this
- if multi_region_run:
- client = rgw_utils.get_master_client(ctx, clients)
-
- # once the client is chosen, pull the host name and assigned port out of
- # the role_endpoints that were assigned by the rgw task
- (remote_host, remote_port) = ctx.rgw.role_endpoints[client]
-
- ##
- user1='foo'
- user2='fud'
- subuser1='foo:foo1'
- subuser2='foo:foo2'
- display_name1='Foo'
- display_name2='Fud'
- email='foo@foo.com'
- email2='bar@bar.com'
- access_key='9te6NH5mcdcq0Tc5i8i1'
- secret_key='Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu'
- access_key2='p5YnriCv1nAtykxBrupQ'
- secret_key2='Q8Tk6Q/27hfbFSYdSkPtUqhqx1GgzvpXa4WARozh'
- swift_secret1='gpS2G9RREMrnbqlp29PP2D36kgPR1tm72n5fPYfL'
- swift_secret2='ri2VJQcKSYATOY6uaDUX7pxgkW+W1YmC6OCxPHwy'
-
- bucket_name='myfoo'
- bucket_name2='mybar'
-
- # connect to rgw
- connection = boto.s3.connection.S3Connection(
- aws_access_key_id=access_key,
- aws_secret_access_key=secret_key,
- is_secure=False,
- port=remote_port,
- host=remote_host,
- calling_format=boto.s3.connection.OrdinaryCallingFormat(),
- )
- connection2 = boto.s3.connection.S3Connection(
- aws_access_key_id=access_key2,
- aws_secret_access_key=secret_key2,
- is_secure=False,
- port=remote_port,
- host=remote_host,
- calling_format=boto.s3.connection.OrdinaryCallingFormat(),
- )
-
- # legend (test cases can be easily grep-ed out)
- # TESTCASE 'testname','object','method','operation','assertion'
- # TESTCASE 'info-nosuch','user','info','non-existent user','fails'
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
- assert err
-
- # TESTCASE 'create-ok','user','create','w/all valid info','succeeds'
- (err, out) = rgwadmin(ctx, client, [
- 'user', 'create',
- '--uid', user1,
- '--display-name', display_name1,
- '--email', email,
- '--access-key', access_key,
- '--secret', secret_key,
- '--max-buckets', '4'
- ],
- check_status=True)
-
- # TESTCASE 'duplicate email','user','create','existing user email','fails'
- (err, out) = rgwadmin(ctx, client, [
- 'user', 'create',
- '--uid', user2,
- '--display-name', display_name2,
- '--email', email,
- ])
- assert err
-
- # TESTCASE 'info-existing','user','info','existing user','returns correct info'
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
- assert out['user_id'] == user1
- assert out['email'] == email
- assert out['display_name'] == display_name1
- assert len(out['keys']) == 1
- assert out['keys'][0]['access_key'] == access_key
- assert out['keys'][0]['secret_key'] == secret_key
- assert not out['suspended']
-
- # this whole block should only be run if regions have been configured
- if multi_region_run:
- rgw_utils.radosgw_agent_sync_all(ctx)
- # post-sync, validate that user1 exists on the sync destination host
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- dest_client = c_config['dest']
- (err, out) = rgwadmin(ctx, dest_client, ['metadata', 'list', 'user'])
- (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1], check_status=True)
- assert out['user_id'] == user1
- assert out['email'] == email
- assert out['display_name'] == display_name1
- assert len(out['keys']) == 1
- assert out['keys'][0]['access_key'] == access_key
- assert out['keys'][0]['secret_key'] == secret_key
- assert not out['suspended']
-
- # compare the metadata between different regions, make sure it matches
- log.debug('compare the metadata between different regions, make sure it matches')
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- source_client = c_config['src']
- dest_client = c_config['dest']
- (err1, out1) = rgwadmin(ctx, source_client,
- ['metadata', 'get', 'user:{uid}'.format(uid=user1)], check_status=True)
- (err2, out2) = rgwadmin(ctx, dest_client,
- ['metadata', 'get', 'user:{uid}'.format(uid=user1)], check_status=True)
- assert out1 == out2
-
- # suspend a user on the master, then check the status on the destination
- log.debug('suspend a user on the master, then check the status on the destination')
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- source_client = c_config['src']
- dest_client = c_config['dest']
- (err, out) = rgwadmin(ctx, source_client, ['user', 'suspend', '--uid', user1])
- rgw_utils.radosgw_agent_sync_all(ctx)
- (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1], check_status=True)
- assert out['suspended']
-
- # delete a user on the master, then check that it's gone on the destination
- log.debug('delete a user on the master, then check that it\'s gone on the destination')
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- source_client = c_config['src']
- dest_client = c_config['dest']
- (err, out) = rgwadmin(ctx, source_client, ['user', 'rm', '--uid', user1], check_status=True)
- rgw_utils.radosgw_agent_sync_all(ctx)
- (err, out) = rgwadmin(ctx, source_client, ['user', 'info', '--uid', user1])
- assert out is None
- (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1])
- assert out is None
-
- # then recreate it so later tests pass
- (err, out) = rgwadmin(ctx, client, [
- 'user', 'create',
- '--uid', user1,
- '--display-name', display_name1,
- '--email', email,
- '--access-key', access_key,
- '--secret', secret_key,
- '--max-buckets', '4'
- ],
- check_status=True)
-
- # now do the multi-region bucket tests
- log.debug('now do the multi-region bucket tests')
-
- # Create a second user for the following tests
- log.debug('Create a second user for the following tests')
- (err, out) = rgwadmin(ctx, client, [
- 'user', 'create',
- '--uid', user2,
- '--display-name', display_name2,
- '--email', email2,
- '--access-key', access_key2,
- '--secret', secret_key2,
- '--max-buckets', '4'
- ],
- check_status=True)
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user2], check_status=True)
- assert out is not None
-
- # create a bucket and do a sync
- log.debug('create a bucket and do a sync')
- bucket = connection.create_bucket(bucket_name2)
- rgw_utils.radosgw_agent_sync_all(ctx)
-
- # compare the metadata for the bucket between different regions, make sure it matches
- log.debug('compare the metadata for the bucket between different regions, make sure it matches')
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- source_client = c_config['src']
- dest_client = c_config['dest']
- (err1, out1) = rgwadmin(ctx, source_client,
- ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
- check_status=True)
- (err2, out2) = rgwadmin(ctx, dest_client,
- ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
- check_status=True)
- assert out1 == out2
-
- # get the bucket.instance info and compare that
- src_bucket_id = out1['data']['bucket']['bucket_id']
- dest_bucket_id = out2['data']['bucket']['bucket_id']
- (err1, out1) = rgwadmin(ctx, source_client, ['metadata', 'get',
- 'bucket.instance:{bucket_name}:{bucket_instance}'.format(
- bucket_name=bucket_name2,bucket_instance=src_bucket_id)],
- check_status=True)
- (err2, out2) = rgwadmin(ctx, dest_client, ['metadata', 'get',
- 'bucket.instance:{bucket_name}:{bucket_instance}'.format(
- bucket_name=bucket_name2,bucket_instance=dest_bucket_id)],
- check_status=True)
- del out1['data']['bucket_info']['bucket']['pool']
- del out1['data']['bucket_info']['bucket']['index_pool']
- del out2['data']['bucket_info']['bucket']['pool']
- del out2['data']['bucket_info']['bucket']['index_pool']
- assert out1 == out2
-
- same_region = 0
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- source_client = c_config['src']
- dest_client = c_config['dest']
-
- source_region = rgw_utils.region_for_client(ctx, source_client)
- dest_region = rgw_utils.region_for_client(ctx, dest_client)
-
- # 301 is only returned for requests to something in a different region
- if source_region == dest_region:
- log.debug('301 is only returned for requests to something in a different region')
- same_region += 1
- continue
-
- # Attempt to create a new connection with user1 to the destination RGW
- log.debug('Attempt to create a new connection with user1 to the destination RGW')
- # and use that to attempt a delete (that should fail)
- exception_encountered = False
- try:
- (dest_remote_host, dest_remote_port) = ctx.rgw.role_endpoints[dest_client]
- connection_dest = boto.s3.connection.S3Connection(
- aws_access_key_id=access_key,
- aws_secret_access_key=secret_key,
- is_secure=False,
- port=dest_remote_port,
- host=dest_remote_host,
- calling_format=boto.s3.connection.OrdinaryCallingFormat(),
- )
-
- # this should fail
- connection_dest.delete_bucket(bucket_name2)
- except boto.exception.S3ResponseError as e:
- assert e.status == 301
- exception_encountered = True
-
- # confirm that the expected exception was seen
- assert exception_encountered
-
- # now delete the bucket on the source RGW and do another sync
- log.debug('now delete the bucket on the source RGW and do another sync')
- bucket.delete()
- rgw_utils.radosgw_agent_sync_all(ctx)
-
- if same_region == len(ctx.radosgw_agent.config):
- bucket.delete()
- rgw_utils.radosgw_agent_sync_all(ctx)
-
- # make sure that the bucket no longer exists in either region
- log.debug('make sure that the bucket no longer exists in either region')
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- source_client = c_config['src']
- dest_client = c_config['dest']
- (err1, out1) = rgwadmin(ctx, source_client, ['metadata', 'get',
- 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)])
- (err2, out2) = rgwadmin(ctx, dest_client, ['metadata', 'get',
- 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)])
- # Both of the previous calls should have errors due to requesting
- # metadata for non-existent buckets
- assert err1
- assert err2
-
- # create a bucket and then sync it
- log.debug('create a bucket and then sync it')
- bucket = connection.create_bucket(bucket_name2)
- rgw_utils.radosgw_agent_sync_all(ctx)
-
- # compare the metadata for the bucket between different regions, make sure it matches
- log.debug('compare the metadata for the bucket between different regions, make sure it matches')
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- source_client = c_config['src']
- dest_client = c_config['dest']
- (err1, out1) = rgwadmin(ctx, source_client,
- ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
- check_status=True)
- (err2, out2) = rgwadmin(ctx, dest_client,
- ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
- check_status=True)
- assert out1 == out2
-
- # Now delete the bucket and recreate it with a different user
- log.debug('Now delete the bucket and recreate it with a different user')
- # within the same window of time and then sync.
- bucket.delete()
- bucket = connection2.create_bucket(bucket_name2)
- rgw_utils.radosgw_agent_sync_all(ctx)
-
- # compare the metadata for the bucket between different regions, make sure it matches
- log.debug('compare the metadata for the bucket between different regions, make sure it matches')
- # user2 should own the bucket in both regions
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- source_client = c_config['src']
- dest_client = c_config['dest']
- (err1, out1) = rgwadmin(ctx, source_client,
- ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
- check_status=True)
- (err2, out2) = rgwadmin(ctx, dest_client,
- ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
- check_status=True)
- assert out1 == out2
- assert out1['data']['owner'] == user2
- assert out1['data']['owner'] != user1
-
- # now we're going to use this bucket to test meta-data update propagation
- log.debug('now we\'re going to use this bucket to test meta-data update propagation')
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- source_client = c_config['src']
- dest_client = c_config['dest']
-
- # get the metadata so we can tweak it
- log.debug('get the metadata so we can tweak it')
- (err, orig_data) = rgwadmin(ctx, source_client,
- ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
- check_status=True)
-
- # manually edit mtime for this bucket to be 300 seconds in the past
- log.debug('manually edit mtime for this bucket to be 300 seconds in the past')
- new_data = copy.deepcopy(orig_data)
- new_data['mtime'] = orig_data['mtime'] - 300
- assert new_data != orig_data
- (err, out) = rgwadmin(ctx, source_client,
- ['metadata', 'put', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
- stdin=StringIO(json.dumps(new_data)),
- check_status=True)
-
- # get the metadata and make sure that the 'put' worked
- log.debug('get the metadata and make sure that the \'put\' worked')
- (err, out) = rgwadmin(ctx, source_client,
- ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
- check_status=True)
- assert out == new_data
-
- # sync to propagate the new metadata
- log.debug('sync to propagate the new metadata')
- rgw_utils.radosgw_agent_sync_all(ctx)
-
- # get the metadata from the dest and compare it to what we just set
- log.debug('get the metadata from the dest and compare it to what we just set')
- # and what the source region has.
- (err1, out1) = rgwadmin(ctx, source_client,
- ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
- check_status=True)
- (err2, out2) = rgwadmin(ctx, dest_client,
- ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
- check_status=True)
- # yay for the transitive property
- assert out1 == out2
- assert out1 == new_data
-
- # now we delete the bucket
- log.debug('now we delete the bucket')
- bucket.delete()
-
- log.debug('sync to propagate the deleted bucket')
- rgw_utils.radosgw_agent_sync_all(ctx)
-
- # Delete user2 as later tests do not expect it to exist.
- # Verify that it is gone on both regions
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- source_client = c_config['src']
- dest_client = c_config['dest']
- (err, out) = rgwadmin(ctx, source_client,
- ['user', 'rm', '--uid', user2], check_status=True)
- rgw_utils.radosgw_agent_sync_all(ctx)
- # The two 'user info' calls should fail and not return any data
- # since we just deleted this user.
- (err, out) = rgwadmin(ctx, source_client, ['user', 'info', '--uid', user2])
- assert out is None
- (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user2])
- assert out is None
-
- # Test data sync
-
- # First create a bucket for data sync test purpose
- bucket = connection.create_bucket(bucket_name + 'data')
-
- # Create a tiny file and check if in sync
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- if c_config.get('metadata-only'):
- continue
-
- source_client = c_config['src']
- dest_client = c_config['dest']
- k = boto.s3.key.Key(bucket)
- k.key = 'tiny_file'
- k.set_contents_from_string("123456789")
- time.sleep(rgw_utils.radosgw_data_log_window(ctx, source_client))
- rgw_utils.radosgw_agent_sync_all(ctx, data=True)
- (dest_host, dest_port) = ctx.rgw.role_endpoints[dest_client]
- dest_connection = boto.s3.connection.S3Connection(
- aws_access_key_id=access_key,
- aws_secret_access_key=secret_key,
- is_secure=False,
- port=dest_port,
- host=dest_host,
- calling_format=boto.s3.connection.OrdinaryCallingFormat(),
- )
- dest_k = dest_connection.get_bucket(bucket_name + 'data').get_key('tiny_file')
- assert k.get_contents_as_string() == dest_k.get_contents_as_string()
-
- # check that deleting it removes it from the dest zone
- k.delete()
- time.sleep(rgw_utils.radosgw_data_log_window(ctx, source_client))
- rgw_utils.radosgw_agent_sync_all(ctx, data=True)
-
- dest_bucket = dest_connection.get_bucket(bucket_name + 'data')
- dest_k = dest_bucket.get_key('tiny_file')
- assert dest_k is None, 'object not deleted from destination zone'
-
- # finally we delete the bucket
- bucket.delete()
-
- bucket = connection.create_bucket(bucket_name + 'data2')
- for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
- if c_config.get('metadata-only'):
- continue
-
- source_client = c_config['src']
- dest_client = c_config['dest']
- (dest_host, dest_port) = ctx.rgw.role_endpoints[dest_client]
- dest_connection = boto.s3.connection.S3Connection(
- aws_access_key_id=access_key,
- aws_secret_access_key=secret_key,
- is_secure=False,
- port=dest_port,
- host=dest_host,
- calling_format=boto.s3.connection.OrdinaryCallingFormat(),
- )
- for i in range(20):
- k = boto.s3.key.Key(bucket)
- k.key = 'tiny_file_' + str(i)
- k.set_contents_from_string(str(i) * 100)
-
- time.sleep(rgw_utils.radosgw_data_log_window(ctx, source_client))
- rgw_utils.radosgw_agent_sync_all(ctx, data=True)
-
- for i in range(20):
- dest_k = dest_connection.get_bucket(bucket_name + 'data2').get_key('tiny_file_' + str(i))
- assert (str(i) * 100) == dest_k.get_contents_as_string()
- k = boto.s3.key.Key(bucket)
- k.key = 'tiny_file_' + str(i)
- k.delete()
-
- # check that deleting removes the objects from the dest zone
- time.sleep(rgw_utils.radosgw_data_log_window(ctx, source_client))
- rgw_utils.radosgw_agent_sync_all(ctx, data=True)
-
- for i in range(20):
- dest_bucket = dest_connection.get_bucket(bucket_name + 'data2')
- dest_k = dest_bucket.get_key('tiny_file_' + str(i))
- assert dest_k is None, 'object %d not deleted from destination zone' % i
- bucket.delete()
-
- # end of 'if multi_region_run:'
-
- # TESTCASE 'suspend-ok','user','suspend','active user','succeeds'
- (err, out) = rgwadmin(ctx, client, ['user', 'suspend', '--uid', user1],
- check_status=True)
-
- # TESTCASE 'suspend-suspended','user','suspend','suspended user','succeeds w/advisory'
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
- assert out['suspended']
-
- # TESTCASE 're-enable','user','enable','suspended user','succeeds'
- (err, out) = rgwadmin(ctx, client, ['user', 'enable', '--uid', user1], check_status=True)
-
- # TESTCASE 'info-re-enabled','user','info','re-enabled user','no longer suspended'
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
- assert not out['suspended']
-
- # TESTCASE 'add-keys','key','create','w/valid info','succeeds'
- (err, out) = rgwadmin(ctx, client, [
- 'key', 'create', '--uid', user1,
- '--access-key', access_key2, '--secret', secret_key2,
- ], check_status=True)
-
- # TESTCASE 'info-new-key','user','info','after key addition','returns all keys'
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1],
- check_status=True)
- assert len(out['keys']) == 2
- assert out['keys'][0]['access_key'] == access_key2 or out['keys'][1]['access_key'] == access_key2
- assert out['keys'][0]['secret_key'] == secret_key2 or out['keys'][1]['secret_key'] == secret_key2
-
- # TESTCASE 'rm-key','key','rm','newly added key','succeeds, key is removed'
- (err, out) = rgwadmin(ctx, client, [
- 'key', 'rm', '--uid', user1,
- '--access-key', access_key2,
- ], check_status=True)
- assert len(out['keys']) == 1
- assert out['keys'][0]['access_key'] == access_key
- assert out['keys'][0]['secret_key'] == secret_key
-
- # TESTCASE 'add-swift-key','key','create','swift key','succeeds'
- subuser_access = 'full'
- subuser_perm = 'full-control'
-
- (err, out) = rgwadmin(ctx, client, [
- 'subuser', 'create', '--subuser', subuser1,
- '--access', subuser_access
- ], check_status=True)
-
- # TESTCASE 'add-swift-key','key','create','swift key','succeeds'
- (err, out) = rgwadmin(ctx, client, [
- 'subuser', 'modify', '--subuser', subuser1,
- '--secret', swift_secret1,
- '--key-type', 'swift',
- ], check_status=True)
-
- # TESTCASE 'subuser-perm-mask', 'subuser', 'info', 'test subuser perm mask durability', 'succeeds'
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
-
- assert out['subusers'][0]['permissions'] == subuser_perm
-
- # TESTCASE 'info-swift-key','user','info','after key addition','returns all keys'
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
- assert len(out['swift_keys']) == 1
- assert out['swift_keys'][0]['user'] == subuser1
- assert out['swift_keys'][0]['secret_key'] == swift_secret1
-
- # TESTCASE 'add-swift-subuser','key','create','swift sub-user key','succeeds'
- (err, out) = rgwadmin(ctx, client, [
- 'subuser', 'create', '--subuser', subuser2,
- '--secret', swift_secret2,
- '--key-type', 'swift',
- ], check_status=True)
-
- # TESTCASE 'info-swift-subuser','user','info','after key addition','returns all sub-users/keys'
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
- assert len(out['swift_keys']) == 2
- assert out['swift_keys'][0]['user'] == subuser2 or out['swift_keys'][1]['user'] == subuser2
- assert out['swift_keys'][0]['secret_key'] == swift_secret2 or out['swift_keys'][1]['secret_key'] == swift_secret2
-
- # TESTCASE 'rm-swift-key1','key','rm','subuser','succeeds, one key is removed'
- (err, out) = rgwadmin(ctx, client, [
- 'key', 'rm', '--subuser', subuser1,
- '--key-type', 'swift',
- ], check_status=True)
- assert len(out['swift_keys']) == 1
-
- # TESTCASE 'rm-subuser','subuser','rm','subuser','success, subuser is removed'
- (err, out) = rgwadmin(ctx, client, [
- 'subuser', 'rm', '--subuser', subuser1,
- ], check_status=True)
- assert len(out['subusers']) == 1
-
- # TESTCASE 'rm-subuser-with-keys','subuser','rm','subuser','succeeds, second subuser and key is removed'
- (err, out) = rgwadmin(ctx, client, [
- 'subuser', 'rm', '--subuser', subuser2,
- '--key-type', 'swift', '--purge-keys',
- ], check_status=True)
- assert len(out['swift_keys']) == 0
- assert len(out['subusers']) == 0
-
- # TESTCASE 'bucket-stats','bucket','stats','no session/buckets','succeeds, empty list'
- (err, out) = rgwadmin(ctx, client, ['bucket', 'stats', '--uid', user1],
- check_status=True)
- assert len(out) == 0
-
- if multi_region_run:
- rgw_utils.radosgw_agent_sync_all(ctx)
-
- # TESTCASE 'bucket-stats2','bucket','stats','no buckets','succeeds, empty list'
- (err, out) = rgwadmin(ctx, client, ['bucket', 'list', '--uid', user1], check_status=True)
- assert len(out) == 0
-
- # create a first bucket
- bucket = connection.create_bucket(bucket_name)
-
- # TESTCASE 'bucket-list','bucket','list','one bucket','succeeds, expected list'
- (err, out) = rgwadmin(ctx, client, ['bucket', 'list', '--uid', user1], check_status=True)
- assert len(out) == 1
- assert out[0] == bucket_name
-
- # TESTCASE 'bucket-list-all','bucket','list','all buckets','succeeds, expected list'
- (err, out) = rgwadmin(ctx, client, ['bucket', 'list'], check_status=True)
- assert len(out) >= 1
- assert bucket_name in out
-
- # TESTCASE 'max-bucket-limit','bucket','create','4 buckets','5th bucket fails due to max buckets == 4'
- bucket2 = connection.create_bucket(bucket_name + '2')
- bucket3 = connection.create_bucket(bucket_name + '3')
- bucket4 = connection.create_bucket(bucket_name + '4')
- # the 5th should fail.
- failed = False
- try:
- connection.create_bucket(bucket_name + '5')
- except Exception:
- failed = True
- assert failed
-
- # delete the buckets
- bucket2.delete()
- bucket3.delete()
- bucket4.delete()
-
- # TESTCASE 'bucket-stats3','bucket','stats','new empty bucket','succeeds, empty list'
- (err, out) = rgwadmin(ctx, client, [
- 'bucket', 'stats', '--bucket', bucket_name], check_status=True)
- assert out['owner'] == user1
- bucket_id = out['id']
-
- # TESTCASE 'bucket-stats4','bucket','stats','new empty bucket','succeeds, expected bucket ID'
- (err, out) = rgwadmin(ctx, client, ['bucket', 'stats', '--uid', user1], check_status=True)
- assert len(out) == 1
- assert out[0]['id'] == bucket_id # does it return the same ID twice in a row?
-
- # use some space
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('one')
-
- # TESTCASE 'bucket-stats5','bucket','stats','after creating key','succeeds, lists one non-empty object'
- (err, out) = rgwadmin(ctx, client, [
- 'bucket', 'stats', '--bucket', bucket_name], check_status=True)
- assert out['id'] == bucket_id
- assert out['usage']['rgw.main']['num_objects'] == 1
- assert out['usage']['rgw.main']['size_kb'] > 0
-
- # reclaim it
- key.delete()
-
- # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'fails', 'access denied error'
- (err, out) = rgwadmin(ctx, client,
- ['bucket', 'unlink', '--uid', user1, '--bucket', bucket_name],
- check_status=True)
-
- # create a second user to link the bucket to
- (err, out) = rgwadmin(ctx, client, [
- 'user', 'create',
- '--uid', user2,
- '--display-name', display_name2,
- '--access-key', access_key2,
- '--secret', secret_key2,
- '--max-buckets', '1',
- ],
- check_status=True)
-
- # try creating an object with the first user before the bucket is relinked
- denied = False
- key = boto.s3.key.Key(bucket)
-
- try:
- key.set_contents_from_string('two')
- except boto.exception.S3ResponseError:
- denied = True
-
- assert not denied
-
- # delete the object
- key.delete()
-
- # link the bucket to another user
- (err, out) = rgwadmin(ctx, client, ['bucket', 'link', '--uid', user2, '--bucket', bucket_name],
- check_status=True)
-
- # try to remove user, should fail (has a linked bucket)
- (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user2])
- assert err
-
- # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'succeeds, bucket unlinked'
- (err, out) = rgwadmin(ctx, client, ['bucket', 'unlink', '--uid', user2, '--bucket', bucket_name],
- check_status=True)
-
- # relink the bucket to the first user and delete the second user
- (err, out) = rgwadmin(ctx, client,
- ['bucket', 'link', '--uid', user1, '--bucket', bucket_name],
- check_status=True)
-
- (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user2],
- check_status=True)
-
- # TESTCASE 'object-rm', 'object', 'rm', 'remove object', 'succeeds, object is removed'
-
- # upload an object
- object_name = 'four'
- key = boto.s3.key.Key(bucket, object_name)
- key.set_contents_from_string(object_name)
-
- # now delete it
- (err, out) = rgwadmin(ctx, client,
- ['object', 'rm', '--bucket', bucket_name, '--object', object_name],
- check_status=True)
-
- # TESTCASE 'bucket-stats6','bucket','stats','after deleting key','succeeds, lists no objects'
- (err, out) = rgwadmin(ctx, client, [
- 'bucket', 'stats', '--bucket', bucket_name],
- check_status=True)
- assert out['id'] == bucket_id
- assert out['usage']['rgw.main']['num_objects'] == 0
-
- # list log objects
- # TESTCASE 'log-list','log','list','after activity','succeeds, lists log objects'
- (err, out) = rgwadmin(ctx, client, ['log', 'list'], check_status=True)
- assert len(out) > 0
-
- for obj in out:
- # TESTCASE 'log-show','log','show','after activity','returns expected info'
- if obj[:4] == 'meta' or obj[:4] == 'data':
- continue
-
- (err, rgwlog) = rgwadmin(ctx, client, ['log', 'show', '--object', obj],
- check_status=True)
- assert len(rgwlog) > 0
-
- # exempt bucket_name2 from checking as it was only used for multi-region tests
- assert rgwlog['bucket'].find(bucket_name) == 0 or rgwlog['bucket'].find(bucket_name2) == 0
- assert rgwlog['bucket'] != bucket_name or rgwlog['bucket_id'] == bucket_id
- assert rgwlog['bucket_owner'] == user1 or rgwlog['bucket'] == bucket_name + '5' or rgwlog['bucket'] == bucket_name2
- for entry in rgwlog['log_entries']:
- log.debug('checking log entry: %s', entry)
- assert entry['bucket'] == rgwlog['bucket']
- possible_buckets = [bucket_name + '5', bucket_name2]
- user = entry['user']
- assert user == user1 or user.endswith('system-user') or \
- rgwlog['bucket'] in possible_buckets
-
- # TESTCASE 'log-rm','log','rm','delete log objects','succeeds'
- (err, out) = rgwadmin(ctx, client, ['log', 'rm', '--object', obj],
- check_status=True)
-
- # TODO: show log by bucket+date
-
- # wait for all usage data to be flushed; this typically takes up to 30 seconds, but allow up to 20 minutes before giving up
- timestamp = time.time()
- while time.time() - timestamp <= (20 * 60): # wait up to 20 minutes
- (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--categories', 'delete_obj']) # last operation we did is delete obj, wait for it to flush
- if successful_ops(out) > 0:
- break
- time.sleep(1)
-
- assert time.time() - timestamp <= (20 * 60)
-
- # TESTCASE 'usage-show' 'usage' 'show' 'all usage' 'succeeds'
- (err, out) = rgwadmin(ctx, client, ['usage', 'show'], check_status=True)
- assert len(out['entries']) > 0
- assert len(out['summary']) > 0
- user_summary = out['summary'][0]
- total = user_summary['total']
- assert total['successful_ops'] > 0
-
- # TESTCASE 'usage-show2' 'usage' 'show' 'user usage' 'succeeds'
- (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1],
- check_status=True)
- assert len(out['entries']) > 0
- assert len(out['summary']) > 0
- user_summary = out['summary'][0]
- for entry in user_summary['categories']:
- assert entry['successful_ops'] > 0
- assert user_summary['user'] == user1
-
- # TESTCASE 'usage-show3' 'usage' 'show' 'user usage categories' 'succeeds'
- test_categories = ['create_bucket', 'put_obj', 'delete_obj', 'delete_bucket']
- for cat in test_categories:
- (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1, '--categories', cat],
- check_status=True)
- assert len(out['summary']) > 0
- user_summary = out['summary'][0]
- assert user_summary['user'] == user1
- assert len(user_summary['categories']) == 1
- entry = user_summary['categories'][0]
- assert entry['category'] == cat
- assert entry['successful_ops'] > 0
-
- # TESTCASE 'usage-trim' 'usage' 'trim' 'user usage' 'succeeds, usage removed'
- (err, out) = rgwadmin(ctx, client, ['usage', 'trim', '--uid', user1],
- check_status=True)
- (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1],
- check_status=True)
- assert len(out['entries']) == 0
- assert len(out['summary']) == 0
-
- # TESTCASE 'user-suspend2','user','suspend','existing user','succeeds'
- (err, out) = rgwadmin(ctx, client, ['user', 'suspend', '--uid', user1],
- check_status=True)
-
- # TESTCASE 'user-suspend3','user','suspend','suspended user','cannot write objects'
- try:
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('five')
- except boto.exception.S3ResponseError as e:
- assert e.status == 403
-
- # TESTCASE 'user-renable2','user','enable','suspended user','succeeds'
- (err, out) = rgwadmin(ctx, client, ['user', 'enable', '--uid', user1],
- check_status=True)
-
- # TESTCASE 'user-renable3','user','enable','reenabled user','can write objects'
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('six')
-
- # TESTCASE 'gc-list', 'gc', 'list', 'get list of objects ready for garbage collection'
-
- # create an object large enough to be split into multiple parts
- test_string = 'foo'*10000000
-
- big_key = boto.s3.key.Key(bucket)
- big_key.set_contents_from_string(test_string)
-
- # now delete the head
- big_key.delete()
-
- # wait a bit to give the garbage collector time to cycle
- time.sleep(15)
-
- (err, out) = rgwadmin(ctx, client, ['gc', 'list'])
-
- assert len(out) > 0
-
- # TESTCASE 'gc-process', 'gc', 'process', 'manually collect garbage'
- (err, out) = rgwadmin(ctx, client, ['gc', 'process'], check_status=True)
-
- #confirm
- (err, out) = rgwadmin(ctx, client, ['gc', 'list'])
-
- assert len(out) == 0
-
- # TESTCASE 'rm-user-buckets','user','rm','existing user','fails, still has buckets'
- (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user1])
- assert err
-
- # delete should fail because ``key`` still exists
- try:
- bucket.delete()
- except boto.exception.S3ResponseError as e:
- assert e.status == 409
-
- key.delete()
- bucket.delete()
-
- # TESTCASE 'policy', 'bucket', 'policy', 'get bucket policy', 'returns S3 policy'
- bucket = connection.create_bucket(bucket_name)
-
- # create an object
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('seven')
-
- # should be private already but guarantee it
- key.set_acl('private')
-
- (err, out) = rgwadmin(ctx, client,
- ['policy', '--bucket', bucket.name, '--object', key.key],
- check_status=True)
-
- acl = key.get_xml_acl()
-
- assert acl == out.strip('\n')
-
- # add another grantee by making the object public read
- key.set_acl('public-read')
-
- (err, out) = rgwadmin(ctx, client,
- ['policy', '--bucket', bucket.name, '--object', key.key],
- check_status=True)
-
- acl = key.get_xml_acl()
- assert acl == out.strip('\n')
-
- # TESTCASE 'rm-bucket', 'bucket', 'rm', 'bucket with objects', 'succeeds'
- bucket = connection.create_bucket(bucket_name)
- key_name = ['eight', 'nine', 'ten', 'eleven']
- for i in range(4):
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string(key_name[i])
-
- (err, out) = rgwadmin(ctx, client,
- ['bucket', 'rm', '--bucket', bucket_name, '--purge-objects'],
- check_status=True)
-
- # TESTCASE 'caps-add', 'caps', 'add', 'add user cap', 'succeeds'
- caps='user=read'
- (err, out) = rgwadmin(ctx, client, ['caps', 'add', '--uid', user1, '--caps', caps])
-
- assert out['caps'][0]['perm'] == 'read'
-
- # TESTCASE 'caps-rm', 'caps', 'rm', 'remove existing cap from user', 'succeeds'
- (err, out) = rgwadmin(ctx, client, ['caps', 'rm', '--uid', user1, '--caps', caps])
-
- assert not out['caps']
-
- # TESTCASE 'rm-user','user','rm','existing user','fails, still has buckets'
- bucket = connection.create_bucket(bucket_name)
- key = boto.s3.key.Key(bucket)
-
- (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user1])
- assert err
-
- # TESTCASE 'rm-user2', 'user', 'rm', 'user with data', 'succeeds'
- bucket = connection.create_bucket(bucket_name)
- key = boto.s3.key.Key(bucket)
- key.set_contents_from_string('twelve')
-
- (err, out) = rgwadmin(ctx, client,
- ['user', 'rm', '--uid', user1, '--purge-data' ],
- check_status=True)
-
- # TESTCASE 'rm-user3','user','info','deleted user','fails'
- (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
- assert err
-
- # TESTCASE 'zone-info', 'zone', 'get', 'get zone info', 'succeeds, has default placement rule'
- #
-
- (err, out) = rgwadmin(ctx, client, ['zone', 'get'])
- orig_placement_pools = len(out['placement_pools'])
-
- # removed this test, it is not correct to assume that zone has default placement, it really
- # depends on how we set it up before
- #
- # assert len(out) > 0
- # assert len(out['placement_pools']) == 1
-
- # default_rule = out['placement_pools'][0]
- # assert default_rule['key'] == 'default-placement'
-
- rule={'key': 'new-placement', 'val': {'data_pool': '.rgw.buckets.2', 'index_pool': '.rgw.buckets.index.2'}}
-
- out['placement_pools'].append(rule)
-
- (err, out) = rgwadmin(ctx, client, ['zone', 'set'],
- stdin=StringIO(json.dumps(out)),
- check_status=True)
-
- (err, out) = rgwadmin(ctx, client, ['zone', 'get'])
- assert len(out) > 0
- assert len(out['placement_pools']) == orig_placement_pools + 1
+++ /dev/null
-"""
-Run rados gateway agent in test mode
-"""
-import contextlib
-import logging
-import argparse
-
-from ..orchestra import run
-from teuthology import misc as teuthology
-import teuthology.task_util.rgw as rgw_utils
-
-log = logging.getLogger(__name__)
-
-def run_radosgw_agent(ctx, config):
- """
- Run a single radosgw-agent. See task() for config format.
- """
- return_list = list()
- for (client, cconf) in config.items():
- # don't process entries that are not clients
- if not client.startswith('client.'):
- log.debug('key {data} does not start with \'client.\', moving on'.format(
- data=client))
- continue
-
- src_client = cconf['src']
- dest_client = cconf['dest']
-
- src_zone = rgw_utils.zone_for_client(ctx, src_client)
- dest_zone = rgw_utils.zone_for_client(ctx, dest_client)
-
- log.info("source is %s", src_zone)
- log.info("dest is %s", dest_zone)
-
- testdir = teuthology.get_testdir(ctx)
- (remote,) = ctx.cluster.only(client).remotes.keys()
- # figure out which branch to pull from
- branch = cconf.get('force-branch', None)
- if not branch:
- branch = cconf.get('branch', 'master')
- sha1 = cconf.get('sha1')
- remote.run(
- args=[
- 'cd', testdir, run.Raw('&&'),
- 'git', 'clone',
- '-b', branch,
-# 'https://github.com/ceph/radosgw-agent.git',
- 'git://ceph.com/git/radosgw-agent.git',
- 'radosgw-agent.{client}'.format(client=client),
- ]
- )
- if sha1 is not None:
- remote.run(
- args=[
- 'cd', testdir, run.Raw('&&'),
- 'cd', 'radosgw-agent.{client}'.format(client=client),
- run.Raw('&&'),
- 'git', 'reset', '--hard', sha1,
- ]
- )
- remote.run(
- args=[
- 'cd', testdir, run.Raw('&&'),
- 'cd', 'radosgw-agent.{client}'.format(client=client),
- run.Raw('&&'),
- './bootstrap',
- ]
- )
-
- src_host, src_port = rgw_utils.get_zone_host_and_port(ctx, src_client,
- src_zone)
- dest_host, dest_port = rgw_utils.get_zone_host_and_port(ctx, dest_client,
- dest_zone)
- src_access, src_secret = rgw_utils.get_zone_system_keys(ctx, src_client,
- src_zone)
- dest_access, dest_secret = rgw_utils.get_zone_system_keys(ctx, dest_client,
- dest_zone)
- sync_scope = cconf.get('sync-scope', None)
- port = cconf.get('port', 8000)
- daemon_name = '{host}.{port}.syncdaemon'.format(host=remote.name, port=port)
- in_args=[
- 'daemon-helper',
- 'kill',
- '{tdir}/radosgw-agent.{client}/radosgw-agent'.format(tdir=testdir,
- client=client),
- '-v',
- '--src-access-key', src_access,
- '--src-secret-key', src_secret,
- '--source', "http://{addr}:{port}".format(addr=src_host, port=src_port),
- '--dest-access-key', dest_access,
- '--dest-secret-key', dest_secret,
- '--max-entries', str(cconf.get('max-entries', 1000)),
- '--log-file', '{tdir}/archive/rgw_sync_agent.{client}.log'.format(
- tdir=testdir,
- client=client),
- '--object-sync-timeout', '30',
- ]
-
- if cconf.get('metadata-only', False):
- in_args.append('--metadata-only')
-
- # the test server and full/incremental flags are mutually exclusive
- if sync_scope is None:
- in_args.append('--test-server-host')
- in_args.append('0.0.0.0')
- in_args.append('--test-server-port')
- in_args.append(str(port))
- log.debug('Starting a sync test server on {client}'.format(client=client))
- # Stash the radosgw-agent server / port # for use by subsequent tasks
- ctx.radosgw_agent.endpoint = (client, str(port))
- else:
- in_args.append('--sync-scope')
- in_args.append(sync_scope)
- log.debug('Starting a {scope} sync on {client}'.format(scope=sync_scope,client=client))
-
- # positional arg for destination must come last
- in_args.append("http://{addr}:{port}".format(addr=dest_host,
- port=dest_port))
-
- return_list.append((client, remote.run(
- args=in_args,
- wait=False,
- stdin=run.PIPE,
- logger=log.getChild(daemon_name),
- )))
- return return_list
-
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Run radosgw-agents in test mode.
-
- Configuration is clients to run the agents on, with settings for
- source client, destination client, and port to listen on. Binds
- to 0.0.0.0. Port defaults to 8000. This must be run on clients
- that have the correct zone root pools and rgw zone set in
- ceph.conf, or the task cannot read the region information from the
- cluster.
-
- By default, this task will start an HTTP server that will trigger full
- or incremental syncs based on requests made to it.
- Alternatively, a single full sync can be triggered by
- specifying 'sync-scope: full' or a loop of incremental syncs can be triggered
- by specifying 'sync-scope: incremental' (the loop will sleep
- '--incremental-sync-delay' seconds between each sync, default is 30 seconds).
-
- By default, both data and metadata are synced. To only sync
- metadata, for example because you want to sync between regions,
- set metadata-only: true.
-
- An example::
-
- tasks:
- - ceph:
- conf:
- client.0:
- rgw zone = foo
- rgw zone root pool = .root.pool
- client.1:
- rgw zone = bar
- rgw zone root pool = .root.pool2
- - rgw: # region configuration omitted for brevity
- - radosgw-agent:
- client.0:
- branch: wip-next-feature-branch
- src: client.0
- dest: client.1
- sync-scope: full
- metadata-only: true
- # port: 8000 (default)
- client.1:
- src: client.1
- dest: client.0
- port: 8001
- """
- assert isinstance(config, dict), 'rgw_sync_agent requires a dictionary config'
- log.debug("config is %s", config)
-
- overrides = ctx.config.get('overrides', {})
- # merge each client section, but only if it exists in config since there isn't
- # a sensible default action for this task
- for client in config.iterkeys():
- if config[client]:
- log.debug('config[{client}]: {data}'.format(client=client, data=config[client]))
- teuthology.deep_merge(config[client], overrides.get('radosgw-agent', {}))
-
- ctx.radosgw_agent = argparse.Namespace()
- ctx.radosgw_agent.config = config
-
- procs = run_radosgw_agent(ctx, config)
-
- ctx.radosgw_agent.procs = procs
-
- try:
- yield
- finally:
- testdir = teuthology.get_testdir(ctx)
- try:
- for client, proc in procs:
- log.info("shutting down sync agent on %s", client)
- proc.stdin.close()
- proc.exitstatus.get()
- finally:
- for client, proc in procs:
- ctx.cluster.only(client).run(
- args=[
- 'rm', '-rf',
- '{tdir}/radosgw-agent.{client}'.format(tdir=testdir,
- client=client)
- ]
- )
--- /dev/null
+"""
+Rgw admin testing against a running instance
+"""
+# The test cases in this file have been annotated for inventory.
+# To extract the inventory (in csv format) use the command:
+#
+# grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //'
+#
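+# For example (the module file name and output path below are only
+# illustrative), the inventory could be written to a csv file with:
+#
+# grep '^ *# TESTCASE' radosgw_admin.py | sed 's/^ *# TESTCASE //' > testcases.csv
+#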
+
+import copy
+import json
+import logging
+import time
+
+from cStringIO import StringIO
+
+import boto.exception
+import boto.s3.connection
+import boto.s3.acl
+
+import teuthology.task_util.rgw as rgw_utils
+
+from teuthology import misc as teuthology
+from teuthology.task_util.rgw import rgwadmin
+
+log = logging.getLogger(__name__)
+
+
+def successful_ops(out):
+ """Extract total from the first summary entry (presumed to be only one)"""
+ summary = out['summary']
+ if len(summary) == 0:
+ return 0
+ entry = summary[0]
+ return entry['total']['successful_ops']
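+
+# A minimal sketch (field values are illustrative) of the 'usage show' output
+# shape that successful_ops() and the usage assertions below assume:
+#
+# {'entries': [...],
+#  'summary': [{'user': 'foo',
+#               'categories': [{'category': 'put_obj', 'successful_ops': 3}],
+#               'total': {'successful_ops': 12}}]}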
+
+
+def task(ctx, config):
+ """
+ Test radosgw-admin functionality against a running rgw instance.
+ """
+ global log
+ assert config is None or isinstance(config, list) \
+ or isinstance(config, dict), \
+ "task s3tests only supports a list or dictionary for configuration"
+ all_clients = ['client.{id}'.format(id=id_)
+ for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
+ if config is None:
+ config = all_clients
+ if isinstance(config, list):
+ config = dict.fromkeys(config)
+ clients = config.keys()
+
+ multi_region_run = rgw_utils.multi_region_enabled(ctx)
+
+ client = clients[0]  # default choice, multi-region code may overwrite this
+ if multi_region_run:
+ client = rgw_utils.get_master_client(ctx, clients)
+
+ # once the client is chosen, pull the host name and assigned port out of
+ # the role_endpoints that were assigned by the rgw task
+ (remote_host, remote_port) = ctx.rgw.role_endpoints[client]
+
+ ##
+ user1='foo'
+ user2='fud'
+ subuser1='foo:foo1'
+ subuser2='foo:foo2'
+ display_name1='Foo'
+ display_name2='Fud'
+ email='foo@foo.com'
+ email2='bar@bar.com'
+ access_key='9te6NH5mcdcq0Tc5i8i1'
+ secret_key='Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu'
+ access_key2='p5YnriCv1nAtykxBrupQ'
+ secret_key2='Q8Tk6Q/27hfbFSYdSkPtUqhqx1GgzvpXa4WARozh'
+ swift_secret1='gpS2G9RREMrnbqlp29PP2D36kgPR1tm72n5fPYfL'
+ swift_secret2='ri2VJQcKSYATOY6uaDUX7pxgkW+W1YmC6OCxPHwy'
+
+ bucket_name='myfoo'
+ bucket_name2='mybar'
+
+ # connect to rgw
+ connection = boto.s3.connection.S3Connection(
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ is_secure=False,
+ port=remote_port,
+ host=remote_host,
+ calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+ )
+ connection2 = boto.s3.connection.S3Connection(
+ aws_access_key_id=access_key2,
+ aws_secret_access_key=secret_key2,
+ is_secure=False,
+ port=remote_port,
+ host=remote_host,
+ calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+ )
+
+ # legend (test cases can be easily grep-ed out)
+ # TESTCASE 'testname','object','method','operation','assertion'
+ # TESTCASE 'info-nosuch','user','info','non-existent user','fails'
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
+ assert err
+
+ # TESTCASE 'create-ok','user','create','w/all valid info','succeeds'
+ (err, out) = rgwadmin(ctx, client, [
+ 'user', 'create',
+ '--uid', user1,
+ '--display-name', display_name1,
+ '--email', email,
+ '--access-key', access_key,
+ '--secret', secret_key,
+ '--max-buckets', '4'
+ ],
+ check_status=True)
+
+ # TESTCASE 'duplicate email','user','create','existing user email','fails'
+ (err, out) = rgwadmin(ctx, client, [
+ 'user', 'create',
+ '--uid', user2,
+ '--display-name', display_name2,
+ '--email', email,
+ ])
+ assert err
+
+ # TESTCASE 'info-existing','user','info','existing user','returns correct info'
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
+ assert out['user_id'] == user1
+ assert out['email'] == email
+ assert out['display_name'] == display_name1
+ assert len(out['keys']) == 1
+ assert out['keys'][0]['access_key'] == access_key
+ assert out['keys'][0]['secret_key'] == secret_key
+ assert not out['suspended']
+
+ # this whole block should only be run if regions have been configured
+ if multi_region_run:
+ rgw_utils.radosgw_agent_sync_all(ctx)
+ # post-sync, validate that user1 exists on the sync destination host
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ dest_client = c_config['dest']
+ (err, out) = rgwadmin(ctx, dest_client, ['metadata', 'list', 'user'])
+ (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1], check_status=True)
+ assert out['user_id'] == user1
+ assert out['email'] == email
+ assert out['display_name'] == display_name1
+ assert len(out['keys']) == 1
+ assert out['keys'][0]['access_key'] == access_key
+ assert out['keys'][0]['secret_key'] == secret_key
+ assert not out['suspended']
+
+ # compare the metadata between different regions, make sure it matches
+ log.debug('compare the metadata between different regions, make sure it matches')
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ source_client = c_config['src']
+ dest_client = c_config['dest']
+ (err1, out1) = rgwadmin(ctx, source_client,
+ ['metadata', 'get', 'user:{uid}'.format(uid=user1)], check_status=True)
+ (err2, out2) = rgwadmin(ctx, dest_client,
+ ['metadata', 'get', 'user:{uid}'.format(uid=user1)], check_status=True)
+ assert out1 == out2
+
+ # suspend a user on the master, then check the status on the destination
+ log.debug('suspend a user on the master, then check the status on the destination')
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ source_client = c_config['src']
+ dest_client = c_config['dest']
+ (err, out) = rgwadmin(ctx, source_client, ['user', 'suspend', '--uid', user1])
+ rgw_utils.radosgw_agent_sync_all(ctx)
+ (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1], check_status=True)
+ assert out['suspended']
+
+ # delete a user on the master, then check that it's gone on the destination
+ log.debug('delete a user on the master, then check that it\'s gone on the destination')
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ source_client = c_config['src']
+ dest_client = c_config['dest']
+ (err, out) = rgwadmin(ctx, source_client, ['user', 'rm', '--uid', user1], check_status=True)
+ rgw_utils.radosgw_agent_sync_all(ctx)
+ (err, out) = rgwadmin(ctx, source_client, ['user', 'info', '--uid', user1])
+ assert out is None
+ (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user1])
+ assert out is None
+
+ # then recreate it so later tests pass
+ (err, out) = rgwadmin(ctx, client, [
+ 'user', 'create',
+ '--uid', user1,
+ '--display-name', display_name1,
+ '--email', email,
+ '--access-key', access_key,
+ '--secret', secret_key,
+ '--max-buckets', '4'
+ ],
+ check_status=True)
+
+ # now do the multi-region bucket tests
+ log.debug('now do the multi-region bucket tests')
+
+ # Create a second user for the following tests
+ log.debug('Create a second user for the following tests')
+ (err, out) = rgwadmin(ctx, client, [
+ 'user', 'create',
+ '--uid', user2,
+ '--display-name', display_name2,
+ '--email', email2,
+ '--access-key', access_key2,
+ '--secret', secret_key2,
+ '--max-buckets', '4'
+ ],
+ check_status=True)
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user2], check_status=True)
+ assert out is not None
+
+ # create a bucket and do a sync
+ log.debug('create a bucket and do a sync')
+ bucket = connection.create_bucket(bucket_name2)
+ rgw_utils.radosgw_agent_sync_all(ctx)
+
+ # compare the metadata for the bucket between different regions, make sure it matches
+ log.debug('compare the metadata for the bucket between different regions, make sure it matches')
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ source_client = c_config['src']
+ dest_client = c_config['dest']
+ (err1, out1) = rgwadmin(ctx, source_client,
+ ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+ check_status=True)
+ (err2, out2) = rgwadmin(ctx, dest_client,
+ ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+ check_status=True)
+ assert out1 == out2
+
+ # get the bucket.instance info and compare that
+ src_bucket_id = out1['data']['bucket']['bucket_id']
+ dest_bucket_id = out2['data']['bucket']['bucket_id']
+ (err1, out1) = rgwadmin(ctx, source_client, ['metadata', 'get',
+ 'bucket.instance:{bucket_name}:{bucket_instance}'.format(
+ bucket_name=bucket_name2,bucket_instance=src_bucket_id)],
+ check_status=True)
+ (err2, out2) = rgwadmin(ctx, dest_client, ['metadata', 'get',
+ 'bucket.instance:{bucket_name}:{bucket_instance}'.format(
+ bucket_name=bucket_name2,bucket_instance=dest_bucket_id)],
+ check_status=True)
+ del out1['data']['bucket_info']['bucket']['pool']
+ del out1['data']['bucket_info']['bucket']['index_pool']
+ del out2['data']['bucket_info']['bucket']['pool']
+ del out2['data']['bucket_info']['bucket']['index_pool']
+ assert out1 == out2
+
+ same_region = 0
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ source_client = c_config['src']
+ dest_client = c_config['dest']
+
+ source_region = rgw_utils.region_for_client(ctx, source_client)
+ dest_region = rgw_utils.region_for_client(ctx, dest_client)
+
+ # 301 is only returned for requests to something in a different region
+ if source_region == dest_region:
+ log.debug('301 is only returned for requests to something in a different region')
+ same_region += 1
+ continue
+
+ # Attempt to create a new connection with user1 to the destination RGW
+ log.debug('Attempt to create a new connection with user1 to the destination RGW')
+ # and use that to attempt a delete (that should fail)
+ exception_encountered = False
+ try:
+ (dest_remote_host, dest_remote_port) = ctx.rgw.role_endpoints[dest_client]
+ connection_dest = boto.s3.connection.S3Connection(
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ is_secure=False,
+ port=dest_remote_port,
+ host=dest_remote_host,
+ calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+ )
+
+ # this should fail
+ connection_dest.delete_bucket(bucket_name2)
+ except boto.exception.S3ResponseError as e:
+ assert e.status == 301
+ exception_encountered = True
+
+ # confirm that the expected exception was seen
+ assert exception_encountered
+
+ # now delete the bucket on the source RGW and do another sync
+ log.debug('now delete the bucket on the source RGW and do another sync')
+ bucket.delete()
+ rgw_utils.radosgw_agent_sync_all(ctx)
+
+ if same_region == len(ctx.radosgw_agent.config):
+ bucket.delete()
+ rgw_utils.radosgw_agent_sync_all(ctx)
+
+ # make sure that the bucket no longer exists in either region
+ log.debug('make sure that the bucket no longer exists in either region')
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ source_client = c_config['src']
+ dest_client = c_config['dest']
+ (err1, out1) = rgwadmin(ctx, source_client, ['metadata', 'get',
+ 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)])
+ (err2, out2) = rgwadmin(ctx, dest_client, ['metadata', 'get',
+ 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)])
+ # Both of the previous calls should have errors due to requesting
+ # metadata for non-existent buckets
+ assert err1
+ assert err2
+
+ # create a bucket and then sync it
+ log.debug('create a bucket and then sync it')
+ bucket = connection.create_bucket(bucket_name2)
+ rgw_utils.radosgw_agent_sync_all(ctx)
+
+ # compare the metadata for the bucket between different regions, make sure it matches
+ log.debug('compare the metadata for the bucket between different regions, make sure it matches')
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ source_client = c_config['src']
+ dest_client = c_config['dest']
+ (err1, out1) = rgwadmin(ctx, source_client,
+ ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+ check_status=True)
+ (err2, out2) = rgwadmin(ctx, dest_client,
+ ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+ check_status=True)
+ assert out1 == out2
+
+ # Now delete the bucket and recreate it with a different user
+ log.debug('Now delete the bucket and recreate it with a different user')
+ # within the same window of time and then sync.
+ bucket.delete()
+ bucket = connection2.create_bucket(bucket_name2)
+ rgw_utils.radosgw_agent_sync_all(ctx)
+
+ # compare the metadata for the bucket between different regions, make sure it matches
+ log.debug('compare the metadata for the bucket between different regions, make sure it matches')
+ # user2 should own the bucket in both regions
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ source_client = c_config['src']
+ dest_client = c_config['dest']
+ (err1, out1) = rgwadmin(ctx, source_client,
+ ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+ check_status=True)
+ (err2, out2) = rgwadmin(ctx, dest_client,
+ ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+ check_status=True)
+ assert out1 == out2
+ assert out1['data']['owner'] == user2
+ assert out1['data']['owner'] != user1
+
+ # now we're going to use this bucket to test meta-data update propagation
+ log.debug('now we\'re going to use this bucket to test meta-data update propagation')
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ source_client = c_config['src']
+ dest_client = c_config['dest']
+
+ # get the metadata so we can tweak it
+ log.debug('get the metadata so we can tweak it')
+ (err, orig_data) = rgwadmin(ctx, source_client,
+ ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+ check_status=True)
+
+ # manually edit mtime for this bucket to be 300 seconds in the past
+ log.debug('manually edit mtime for this bucket to be 300 seconds in the past')
+ new_data = copy.deepcopy(orig_data)
+ new_data['mtime'] = orig_data['mtime'] - 300
+ assert new_data != orig_data
+ (err, out) = rgwadmin(ctx, source_client,
+ ['metadata', 'put', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+ stdin=StringIO(json.dumps(new_data)),
+ check_status=True)
+
+ # get the metadata and make sure that the 'put' worked
+ log.debug('get the metadata and make sure that the \'put\' worked')
+ (err, out) = rgwadmin(ctx, source_client,
+ ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+ check_status=True)
+ assert out == new_data
+
+ # sync to propagate the new metadata
+ log.debug('sync to propagate the new metadata')
+ rgw_utils.radosgw_agent_sync_all(ctx)
+
+ # get the metadata from the dest and compare it to what we just set
+ log.debug('get the metadata from the dest and compare it to what we just set')
+ # and what the source region has.
+ (err1, out1) = rgwadmin(ctx, source_client,
+ ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+ check_status=True)
+ (err2, out2) = rgwadmin(ctx, dest_client,
+ ['metadata', 'get', 'bucket:{bucket_name}'.format(bucket_name=bucket_name2)],
+ check_status=True)
+ # yeah for the transitive property
+ assert out1 == out2
+ assert out1 == new_data
+
+ # now we delete the bucket
+ log.debug('now we delete the bucket')
+ bucket.delete()
+
+ log.debug('sync to propagate the deleted bucket')
+ rgw_utils.radosgw_agent_sync_all(ctx)
+
+ # Delete user2 as later tests do not expect it to exist.
+ # Verify that it is gone on both regions
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ source_client = c_config['src']
+ dest_client = c_config['dest']
+ (err, out) = rgwadmin(ctx, source_client,
+ ['user', 'rm', '--uid', user2], check_status=True)
+ rgw_utils.radosgw_agent_sync_all(ctx)
+ # The two 'user info' calls should fail and not return any data
+ # since we just deleted this user.
+ (err, out) = rgwadmin(ctx, source_client, ['user', 'info', '--uid', user2])
+ assert out is None
+ (err, out) = rgwadmin(ctx, dest_client, ['user', 'info', '--uid', user2])
+ assert out is None
+
+ # Test data sync
+
+ # First create a bucket for data sync test purpose
+ bucket = connection.create_bucket(bucket_name + 'data')
+
+ # Create a tiny file and check if in sync
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ if c_config.get('metadata-only'):
+ continue
+
+ source_client = c_config['src']
+ dest_client = c_config['dest']
+ k = boto.s3.key.Key(bucket)
+ k.key = 'tiny_file'
+ k.set_contents_from_string("123456789")
+ time.sleep(rgw_utils.radosgw_data_log_window(ctx, source_client))
+ rgw_utils.radosgw_agent_sync_all(ctx, data=True)
+ (dest_host, dest_port) = ctx.rgw.role_endpoints[dest_client]
+ dest_connection = boto.s3.connection.S3Connection(
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ is_secure=False,
+ port=dest_port,
+ host=dest_host,
+ calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+ )
+ dest_k = dest_connection.get_bucket(bucket_name + 'data').get_key('tiny_file')
+ assert k.get_contents_as_string() == dest_k.get_contents_as_string()
+
+ # check that deleting it removes it from the dest zone
+ k.delete()
+ time.sleep(rgw_utils.radosgw_data_log_window(ctx, source_client))
+ rgw_utils.radosgw_agent_sync_all(ctx, data=True)
+
+ dest_bucket = dest_connection.get_bucket(bucket_name + 'data')
+ dest_k = dest_bucket.get_key('tiny_file')
+ assert dest_k is None, 'object not deleted from destination zone'
+
+ # finally we delete the bucket
+ bucket.delete()
+
+ bucket = connection.create_bucket(bucket_name + 'data2')
+ for agent_client, c_config in ctx.radosgw_agent.config.iteritems():
+ if c_config.get('metadata-only'):
+ continue
+
+ source_client = c_config['src']
+ dest_client = c_config['dest']
+ (dest_host, dest_port) = ctx.rgw.role_endpoints[dest_client]
+ dest_connection = boto.s3.connection.S3Connection(
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ is_secure=False,
+ port=dest_port,
+ host=dest_host,
+ calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+ )
+ for i in range(20):
+ k = boto.s3.key.Key(bucket)
+ k.key = 'tiny_file_' + str(i)
+ k.set_contents_from_string(str(i) * 100)
+
+ time.sleep(rgw_utils.radosgw_data_log_window(ctx, source_client))
+ rgw_utils.radosgw_agent_sync_all(ctx, data=True)
+
+ for i in range(20):
+ dest_k = dest_connection.get_bucket(bucket_name + 'data2').get_key('tiny_file_' + str(i))
+ assert (str(i) * 100) == dest_k.get_contents_as_string()
+ k = boto.s3.key.Key(bucket)
+ k.key = 'tiny_file_' + str(i)
+ k.delete()
+
+ # check that deleting removes the objects from the dest zone
+ time.sleep(rgw_utils.radosgw_data_log_window(ctx, source_client))
+ rgw_utils.radosgw_agent_sync_all(ctx, data=True)
+
+ for i in range(20):
+ dest_bucket = dest_connection.get_bucket(bucket_name + 'data2')
+ dest_k = dest_bucket.get_key('tiny_file_' + str(i))
+ assert dest_k is None, 'object %d not deleted from destination zone' % i
+ bucket.delete()
+
+ # end of 'if multi_region_run:'
+
+ # TESTCASE 'suspend-ok','user','suspend','active user','succeeds'
+ (err, out) = rgwadmin(ctx, client, ['user', 'suspend', '--uid', user1],
+ check_status=True)
+
+ # TESTCASE 'suspend-suspended','user','suspend','suspended user','succeeds w/advisory'
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
+ assert out['suspended']
+
+ # TESTCASE 're-enable','user','enable','suspended user','succeeds'
+ (err, out) = rgwadmin(ctx, client, ['user', 'enable', '--uid', user1], check_status=True)
+
+ # TESTCASE 'info-re-enabled','user','info','re-enabled user','no longer suspended'
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
+ assert not out['suspended']
+
+ # TESTCASE 'add-keys','key','create','w/valid info','succeeds'
+ (err, out) = rgwadmin(ctx, client, [
+ 'key', 'create', '--uid', user1,
+ '--access-key', access_key2, '--secret', secret_key2,
+ ], check_status=True)
+
+ # TESTCASE 'info-new-key','user','info','after key addition','returns all keys'
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1],
+ check_status=True)
+ assert len(out['keys']) == 2
+ assert out['keys'][0]['access_key'] == access_key2 or out['keys'][1]['access_key'] == access_key2
+ assert out['keys'][0]['secret_key'] == secret_key2 or out['keys'][1]['secret_key'] == secret_key2
+
+ # TESTCASE 'rm-key','key','rm','newly added key','succeeds, key is removed'
+ (err, out) = rgwadmin(ctx, client, [
+ 'key', 'rm', '--uid', user1,
+ '--access-key', access_key2,
+ ], check_status=True)
+ assert len(out['keys']) == 1
+ assert out['keys'][0]['access_key'] == access_key
+ assert out['keys'][0]['secret_key'] == secret_key
+
+ # TESTCASE 'add-swift-key','key','create','swift key','succeeds'
+ subuser_access = 'full'
+ subuser_perm = 'full-control'
+
+ (err, out) = rgwadmin(ctx, client, [
+ 'subuser', 'create', '--subuser', subuser1,
+ '--access', subuser_access
+ ], check_status=True)
+
+ # TESTCASE 'add-swift-key','key','create','swift key','succeeds'
+ (err, out) = rgwadmin(ctx, client, [
+ 'subuser', 'modify', '--subuser', subuser1,
+ '--secret', swift_secret1,
+ '--key-type', 'swift',
+ ], check_status=True)
+
+ # TESTCASE 'subuser-perm-mask', 'subuser', 'info', 'test subuser perm mask durability', 'succeeds'
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
+
+ assert out['subusers'][0]['permissions'] == subuser_perm
+
+ # TESTCASE 'info-swift-key','user','info','after key addition','returns all keys'
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
+ assert len(out['swift_keys']) == 1
+ assert out['swift_keys'][0]['user'] == subuser1
+ assert out['swift_keys'][0]['secret_key'] == swift_secret1
+
+ # TESTCASE 'add-swift-subuser','key','create','swift sub-user key','succeeds'
+ (err, out) = rgwadmin(ctx, client, [
+ 'subuser', 'create', '--subuser', subuser2,
+ '--secret', swift_secret2,
+ '--key-type', 'swift',
+ ], check_status=True)
+
+ # TESTCASE 'info-swift-subuser','user','info','after key addition','returns all sub-users/keys'
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True)
+ assert len(out['swift_keys']) == 2
+ assert out['swift_keys'][0]['user'] == subuser2 or out['swift_keys'][1]['user'] == subuser2
+ assert out['swift_keys'][0]['secret_key'] == swift_secret2 or out['swift_keys'][1]['secret_key'] == swift_secret2
+
+ # TESTCASE 'rm-swift-key1','key','rm','subuser','succeeds, one key is removed'
+ (err, out) = rgwadmin(ctx, client, [
+ 'key', 'rm', '--subuser', subuser1,
+ '--key-type', 'swift',
+ ], check_status=True)
+ assert len(out['swift_keys']) == 1
+
+ # TESTCASE 'rm-subuser','subuser','rm','subuser','success, subuser is removed'
+ (err, out) = rgwadmin(ctx, client, [
+ 'subuser', 'rm', '--subuser', subuser1,
+ ], check_status=True)
+ assert len(out['subusers']) == 1
+
+ # TESTCASE 'rm-subuser-with-keys','subuser','rm','subuser','succeeds, second subuser and key is removed'
+ (err, out) = rgwadmin(ctx, client, [
+ 'subuser', 'rm', '--subuser', subuser2,
+ '--key-type', 'swift', '--purge-keys',
+ ], check_status=True)
+ assert len(out['swift_keys']) == 0
+ assert len(out['subusers']) == 0
+
+ # TESTCASE 'bucket-stats','bucket','stats','no session/buckets','succeeds, empty list'
+ (err, out) = rgwadmin(ctx, client, ['bucket', 'stats', '--uid', user1],
+ check_status=True)
+ assert len(out) == 0
+
+ if multi_region_run:
+ rgw_utils.radosgw_agent_sync_all(ctx)
+
+ # TESTCASE 'bucket-stats2','bucket','stats','no buckets','succeeds, empty list'
+ (err, out) = rgwadmin(ctx, client, ['bucket', 'list', '--uid', user1], check_status=True)
+ assert len(out) == 0
+
+ # create a first bucket
+ bucket = connection.create_bucket(bucket_name)
+
+ # TESTCASE 'bucket-list','bucket','list','one bucket','succeeds, expected list'
+ (err, out) = rgwadmin(ctx, client, ['bucket', 'list', '--uid', user1], check_status=True)
+ assert len(out) == 1
+ assert out[0] == bucket_name
+
+ # TESTCASE 'bucket-list-all','bucket','list','all buckets','succeeds, expected list'
+ (err, out) = rgwadmin(ctx, client, ['bucket', 'list'], check_status=True)
+ assert len(out) >= 1
+ assert bucket_name in out
+
+ # TESTCASE 'max-bucket-limit','bucket','create','4 buckets','5th bucket fails due to max buckets == 4'
+ bucket2 = connection.create_bucket(bucket_name + '2')
+ bucket3 = connection.create_bucket(bucket_name + '3')
+ bucket4 = connection.create_bucket(bucket_name + '4')
+ # the 5th should fail.
+ failed = False
+ try:
+ connection.create_bucket(bucket_name + '5')
+ except Exception:
+ failed = True
+ assert failed
+
+ # delete the buckets
+ bucket2.delete()
+ bucket3.delete()
+ bucket4.delete()
+
+ # TESTCASE 'bucket-stats3','bucket','stats','new empty bucket','succeeds, empty list'
+ (err, out) = rgwadmin(ctx, client, [
+ 'bucket', 'stats', '--bucket', bucket_name], check_status=True)
+ assert out['owner'] == user1
+ bucket_id = out['id']
+
+ # TESTCASE 'bucket-stats4','bucket','stats','new empty bucket','succeeds, expected bucket ID'
+ (err, out) = rgwadmin(ctx, client, ['bucket', 'stats', '--uid', user1], check_status=True)
+ assert len(out) == 1
+ assert out[0]['id'] == bucket_id # does it return the same ID twice in a row?
+
+ # use some space
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('one')
+
+ # TESTCASE 'bucket-stats5','bucket','stats','after creating key','succeeds, lists one non-empty object'
+ (err, out) = rgwadmin(ctx, client, [
+ 'bucket', 'stats', '--bucket', bucket_name], check_status=True)
+ assert out['id'] == bucket_id
+ assert out['usage']['rgw.main']['num_objects'] == 1
+ assert out['usage']['rgw.main']['size_kb'] > 0
+
+ # reclaim it
+ key.delete()
+
+ # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'fails', 'access denied error'
+ (err, out) = rgwadmin(ctx, client,
+ ['bucket', 'unlink', '--uid', user1, '--bucket', bucket_name],
+ check_status=True)
+
+ # create a second user to link the bucket to
+ (err, out) = rgwadmin(ctx, client, [
+ 'user', 'create',
+ '--uid', user2,
+ '--display-name', display_name2,
+ '--access-key', access_key2,
+ '--secret', secret_key2,
+ '--max-buckets', '1',
+ ],
+ check_status=True)
+
+ # try creating an object with the first user before the bucket is relinked
+ denied = False
+ key = boto.s3.key.Key(bucket)
+
+ try:
+ key.set_contents_from_string('two')
+ except boto.exception.S3ResponseError:
+ denied = True
+
+ assert not denied
+
+ # delete the object
+ key.delete()
+
+ # link the bucket to another user
+ (err, out) = rgwadmin(ctx, client, ['bucket', 'link', '--uid', user2, '--bucket', bucket_name],
+ check_status=True)
+
+ # try to remove user, should fail (has a linked bucket)
+ (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user2])
+ assert err
+
+ # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'succeeds, bucket unlinked'
+ (err, out) = rgwadmin(ctx, client, ['bucket', 'unlink', '--uid', user2, '--bucket', bucket_name],
+ check_status=True)
+
+ # relink the bucket to the first user and delete the second user
+ (err, out) = rgwadmin(ctx, client,
+ ['bucket', 'link', '--uid', user1, '--bucket', bucket_name],
+ check_status=True)
+
+ (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user2],
+ check_status=True)
+
+ # TESTCASE 'object-rm', 'object', 'rm', 'remove object', 'succeeds, object is removed'
+
+ # upload an object
+ object_name = 'four'
+ key = boto.s3.key.Key(bucket, object_name)
+ key.set_contents_from_string(object_name)
+
+ # now delete it
+ (err, out) = rgwadmin(ctx, client,
+ ['object', 'rm', '--bucket', bucket_name, '--object', object_name],
+ check_status=True)
+
+ # TESTCASE 'bucket-stats6','bucket','stats','after deleting key','succeeds, lists no objects'
+ (err, out) = rgwadmin(ctx, client, [
+ 'bucket', 'stats', '--bucket', bucket_name],
+ check_status=True)
+ assert out['id'] == bucket_id
+ assert out['usage']['rgw.main']['num_objects'] == 0
+
+ # list log objects
+ # TESTCASE 'log-list','log','list','after activity','succeeds, lists log objects'
+ (err, out) = rgwadmin(ctx, client, ['log', 'list'], check_status=True)
+ assert len(out) > 0
+
+ for obj in out:
+ # TESTCASE 'log-show','log','show','after activity','returns expected info'
+ if obj[:4] == 'meta' or obj[:4] == 'data':
+ continue
+
+ (err, rgwlog) = rgwadmin(ctx, client, ['log', 'show', '--object', obj],
+ check_status=True)
+ assert len(rgwlog) > 0
+
+ # exempt bucket_name2 from checking as it was only used for multi-region tests
+ assert rgwlog['bucket'].find(bucket_name) == 0 or rgwlog['bucket'].find(bucket_name2) == 0
+ assert rgwlog['bucket'] != bucket_name or rgwlog['bucket_id'] == bucket_id
+ assert rgwlog['bucket_owner'] == user1 or rgwlog['bucket'] == bucket_name + '5' or rgwlog['bucket'] == bucket_name2
+ for entry in rgwlog['log_entries']:
+ log.debug('checking log entry: %s', entry)
+ assert entry['bucket'] == rgwlog['bucket']
+ possible_buckets = [bucket_name + '5', bucket_name2]
+ user = entry['user']
+ assert user == user1 or user.endswith('system-user') or \
+ rgwlog['bucket'] in possible_buckets
+
+ # TESTCASE 'log-rm','log','rm','delete log objects','succeeds'
+ (err, out) = rgwadmin(ctx, client, ['log', 'rm', '--object', obj],
+ check_status=True)
+
+ # TODO: show log by bucket+date
+
+ # need to wait for all usage data to get flushed, should take up to 30 seconds
+ timestamp = time.time()
+ while time.time() - timestamp <= (20 * 60): # wait up to 20 minutes
+ (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--categories', 'delete_obj']) # last operation we did is delete obj, wait for it to flush
+ if successful_ops(out) > 0:
+ break
+ time.sleep(1)
+
+ assert time.time() - timestamp <= (20 * 60)
+
+ # TESTCASE 'usage-show' 'usage' 'show' 'all usage' 'succeeds'
+ (err, out) = rgwadmin(ctx, client, ['usage', 'show'], check_status=True)
+ assert len(out['entries']) > 0
+ assert len(out['summary']) > 0
+ user_summary = out['summary'][0]
+ total = user_summary['total']
+ assert total['successful_ops'] > 0
+
+ # TESTCASE 'usage-show2' 'usage' 'show' 'user usage' 'succeeds'
+ (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1],
+ check_status=True)
+ assert len(out['entries']) > 0
+ assert len(out['summary']) > 0
+ user_summary = out['summary'][0]
+ for entry in user_summary['categories']:
+ assert entry['successful_ops'] > 0
+ assert user_summary['user'] == user1
+
+ # TESTCASE 'usage-show3' 'usage' 'show' 'user usage categories' 'succeeds'
+ test_categories = ['create_bucket', 'put_obj', 'delete_obj', 'delete_bucket']
+ for cat in test_categories:
+ (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1, '--categories', cat],
+ check_status=True)
+ assert len(out['summary']) > 0
+ user_summary = out['summary'][0]
+ assert user_summary['user'] == user1
+ assert len(user_summary['categories']) == 1
+ entry = user_summary['categories'][0]
+ assert entry['category'] == cat
+ assert entry['successful_ops'] > 0
+
+ # TESTCASE 'usage-trim' 'usage' 'trim' 'user usage' 'succeeds, usage removed'
+ (err, out) = rgwadmin(ctx, client, ['usage', 'trim', '--uid', user1],
+ check_status=True)
+ (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1],
+ check_status=True)
+ assert len(out['entries']) == 0
+ assert len(out['summary']) == 0
+
+ # TESTCASE 'user-suspend2','user','suspend','existing user','succeeds'
+ (err, out) = rgwadmin(ctx, client, ['user', 'suspend', '--uid', user1],
+ check_status=True)
+
+ # TESTCASE 'user-suspend3','user','suspend','suspended user','cannot write objects'
+ try:
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('five')
+ except boto.exception.S3ResponseError as e:
+ assert e.status == 403
+
+ # TESTCASE 'user-renable2','user','enable','suspended user','succeeds'
+ (err, out) = rgwadmin(ctx, client, ['user', 'enable', '--uid', user1],
+ check_status=True)
+
+ # TESTCASE 'user-renable3','user','enable','reenabled user','can write objects'
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('six')
+
+ # TESTCASE 'gc-list', 'gc', 'list', 'get list of objects ready for garbage collection'
+
+ # create an object large enough to be split into multiple parts
+ test_string = 'foo'*10000000
+
+ big_key = boto.s3.key.Key(bucket)
+ big_key.set_contents_from_string(test_string)
+
+ # now delete the head
+ big_key.delete()
+
+ # wait a bit to give the garbage collector time to cycle
+ time.sleep(15)
+
+ (err, out) = rgwadmin(ctx, client, ['gc', 'list'])
+
+ assert len(out) > 0
+
+ # TESTCASE 'gc-process', 'gc', 'process', 'manually collect garbage'
+ (err, out) = rgwadmin(ctx, client, ['gc', 'process'], check_status=True)
+
+ #confirm
+ (err, out) = rgwadmin(ctx, client, ['gc', 'list'])
+
+ assert len(out) == 0
+
+ # TESTCASE 'rm-user-buckets','user','rm','existing user','fails, still has buckets'
+ (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user1])
+ assert err
+
+ # delete should fail because ``key`` still exists
+ try:
+ bucket.delete()
+ except boto.exception.S3ResponseError as e:
+ assert e.status == 409
+
+ key.delete()
+ bucket.delete()
+
+ # TESTCASE 'policy', 'bucket', 'policy', 'get bucket policy', 'returns S3 policy'
+ bucket = connection.create_bucket(bucket_name)
+
+ # create an object
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('seven')
+
+ # should be private already but guarantee it
+ key.set_acl('private')
+
+ (err, out) = rgwadmin(ctx, client,
+ ['policy', '--bucket', bucket.name, '--object', key.key],
+ check_status=True)
+
+ acl = key.get_xml_acl()
+
+ assert acl == out.strip('\n')
+
+ # add another grantee by making the object public read
+ key.set_acl('public-read')
+
+ (err, out) = rgwadmin(ctx, client,
+ ['policy', '--bucket', bucket.name, '--object', key.key],
+ check_status=True)
+
+ acl = key.get_xml_acl()
+ assert acl == out.strip('\n')
+
+ # TESTCASE 'rm-bucket', 'bucket', 'rm', 'bucket with objects', 'succeeds'
+ bucket = connection.create_bucket(bucket_name)
+ key_name = ['eight', 'nine', 'ten', 'eleven']
+ for i in range(4):
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string(key_name[i])
+
+ (err, out) = rgwadmin(ctx, client,
+ ['bucket', 'rm', '--bucket', bucket_name, '--purge-objects'],
+ check_status=True)
+
+ # TESTCASE 'caps-add', 'caps', 'add', 'add user cap', 'succeeds'
+ caps = 'user=read'
+ (err, out) = rgwadmin(ctx, client, ['caps', 'add', '--uid', user1, '--caps', caps])
+
+ assert out['caps'][0]['perm'] == 'read'
+
+ # TESTCASE 'caps-rm', 'caps', 'rm', 'remove existing cap from user', 'succeeds'
+ (err, out) = rgwadmin(ctx, client, ['caps', 'rm', '--uid', user1, '--caps', caps])
+
+ assert not out['caps']
+
+ # TESTCASE 'rm-user','user','rm','existing user','fails, still has buckets'
+ bucket = connection.create_bucket(bucket_name)
+ key = boto.s3.key.Key(bucket)
+
+ (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user1])
+ assert err
+
+ # TESTCASE 'rm-user2', 'user', 'rm', 'user with data', 'succeeds'
+ bucket = connection.create_bucket(bucket_name)
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('twelve')
+
+ (err, out) = rgwadmin(ctx, client,
+ ['user', 'rm', '--uid', user1, '--purge-data' ],
+ check_status=True)
+
+ # TESTCASE 'rm-user3','user','info','deleted user','fails'
+ (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1])
+ assert err
+
+ # TESTCASE 'zone-info', 'zone', 'get', 'get zone info', 'succeeds, has default placement rule'
+ #
+
+ (err, out) = rgwadmin(ctx, client, ['zone', 'get'])
+ orig_placement_pools = len(out['placement_pools'])
+
+ # removed this test, it is not correct to assume that zone has default placement, it really
+ # depends on how we set it up before
+ #
+ # assert len(out) > 0
+ # assert len(out['placement_pools']) == 1
+
+ # default_rule = out['placement_pools'][0]
+ # assert default_rule['key'] == 'default-placement'
+
+ rule = {'key': 'new-placement', 'val': {'data_pool': '.rgw.buckets.2', 'index_pool': '.rgw.buckets.index.2'}}
+
+ out['placement_pools'].append(rule)
+
+ (err, out) = rgwadmin(ctx, client, ['zone', 'set'],
+ stdin=StringIO(json.dumps(out)),
+ check_status=True)
+
+ (err, out) = rgwadmin(ctx, client, ['zone', 'get'])
+ assert len(out) > 0
+ assert len(out['placement_pools']) == orig_placement_pools + 1
--- /dev/null
+"""
+Run a series of rgw admin commands through the REST interface.
+
+The test cases in this file have been annotated for inventory.
+To extract the inventory (in csv format), filter this file through the command:
+
+ grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //'
+
+"""
+from cStringIO import StringIO
+import logging
+import json
+
+import boto.exception
+import boto.s3.connection
+import boto.s3.acl
+
+import requests
+import time
+
+from boto.connection import AWSAuthConnection
+from teuthology import misc as teuthology
+
+log = logging.getLogger(__name__)
+
+def successful_ops(out):
+ """
+ Extract successful operations
+ :param out: list
+ """
+ summary = out['summary']
+ if len(summary) == 0:
+ return 0
+ entry = summary[0]
+ return entry['total']['successful_ops']
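+
+# Illustrative shape of the 'usage show' output consumed above (trimmed to the
+# fields this helper reads; other fields are omitted):
+#   {'entries': [...],
+#    'summary': [{'user': 'foo', 'total': {'successful_ops': 2, ...}, ...}]}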
+
+def rgwadmin(ctx, client, cmd):
+ """
+ Perform rgw admin command
+
+ :param client: client
+ :param cmd: command to execute.
+ :return: command exit status, json result.
+ """
+ log.info('radosgw-admin: %s' % cmd)
+ testdir = teuthology.get_testdir(ctx)
+ pre = [
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ '{tdir}/archive/coverage'.format(tdir=testdir),
+ 'radosgw-admin',
+ '--log-to-stderr',
+ '--format', 'json',
+ ]
+ pre.extend(cmd)
+ (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ proc = remote.run(
+ args=pre,
+ check_status=False,
+ stdout=StringIO(),
+ stderr=StringIO(),
+ )
+ r = proc.exitstatus
+ out = proc.stdout.getvalue()
+ j = None
+ if not r and out != '':
+ try:
+ j = json.loads(out)
+ log.info(' json result: %s' % j)
+ except ValueError:
+ j = out
+ log.info(' raw result: %s' % j)
+ return (r, j)
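+
+# Illustrative usage (the testdir path is a placeholder filled in by
+# teuthology.get_testdir at runtime):
+#   rgwadmin(ctx, 'client.0', ['user', 'info', '--uid', 'foo'])
+# runs, on the remote, roughly:
+#   adjust-ulimits ceph-coverage <testdir>/archive/coverage \
+#       radosgw-admin --log-to-stderr --format json user info --uid foo
+# and returns (exit status, parsed JSON or raw output).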
+
+
+def rgwadmin_rest(connection, cmd, params=None, headers=None, raw=False):
+ """
+ perform a rest command
+ """
+ log.info('radosgw-admin-rest: %s %s' % (cmd, params))
+ put_cmds = ['create', 'link', 'add']
+ post_cmds = ['unlink', 'modify']
+ delete_cmds = ['trim', 'rm', 'process']
+ get_cmds = ['check', 'info', 'show', 'list']
+
+ bucket_sub_resources = ['object', 'policy', 'index']
+ user_sub_resources = ['subuser', 'key', 'caps']
+ zone_sub_resources = ['pool', 'log', 'garbage']
+
+ def get_cmd_method_and_handler(cmd):
+ """
+ Get the rest command and handler from information in cmd and
+ from the imported requests object.
+ """
+ if cmd[1] in put_cmds:
+ return 'PUT', requests.put
+ elif cmd[1] in delete_cmds:
+ return 'DELETE', requests.delete
+ elif cmd[1] in post_cmds:
+ return 'POST', requests.post
+ elif cmd[1] in get_cmds:
+ return 'GET', requests.get
+
+ def get_resource(cmd):
+ """
+ Get the name of the resource from information in cmd.
+ """
+ if cmd[0] == 'bucket' or cmd[0] in bucket_sub_resources:
+ if cmd[0] == 'bucket':
+ return 'bucket', ''
+ else:
+ return 'bucket', cmd[0]
+ elif cmd[0] == 'user' or cmd[0] in user_sub_resources:
+ if cmd[0] == 'user':
+ return 'user', ''
+ else:
+ return 'user', cmd[0]
+ elif cmd[0] == 'usage':
+ return 'usage', ''
+ elif cmd[0] == 'zone' or cmd[0] in zone_sub_resources:
+ if cmd[0] == 'zone':
+ return 'zone', ''
+ else:
+ return 'zone', cmd[0]
+
+ def build_admin_request(conn, method, resource = '', headers=None, data='',
+ query_args=None, params=None):
+ """
+ Build an administrative request adapted from the build_request()
+ method of boto.connection
+ """
+
+ path = conn.calling_format.build_path_base('admin', resource)
+ auth_path = conn.calling_format.build_auth_path('admin', resource)
+ host = conn.calling_format.build_host(conn.server_name(), 'admin')
+ if query_args:
+ path += '?' + query_args
+ boto.log.debug('path=%s' % path)
+ auth_path += '?' + query_args
+ boto.log.debug('auth_path=%s' % auth_path)
+ return AWSAuthConnection.build_base_http_request(conn, method, path,
+ auth_path, params, headers, data, host)
+
+ method, handler = get_cmd_method_and_handler(cmd)
+ resource, query_args = get_resource(cmd)
+ request = build_admin_request(connection, method, resource,
+ query_args=query_args, headers=headers)
+
+ url = '{protocol}://{host}{path}'.format(protocol=request.protocol,
+ host=request.host, path=request.path)
+
+ request.authorize(connection=connection)
+ result = handler(url, params=params, headers=request.headers)
+
+ if raw:
+ log.info(' text result: %s' % result.text)
+ return result.status_code, result.text
+ else:
+ log.info(' json result: %s' % result.json())
+ return result.status_code, result.json()
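+
+# Illustrative mapping (host and port are placeholders; OrdinaryCallingFormat
+# is assumed, as set up by the callers below):
+#   rgwadmin_rest(admin_conn, ['user', 'info'], {'uid': 'foo'})
+# issues roughly
+#   GET http://<rgw-host>:<port>/admin/user?uid=foo
+# signed with the connection's credentials, and returns
+# (HTTP status code, decoded JSON body).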
+
+
+def task(ctx, config):
+ """
+ Test radosgw-admin functionality through the RESTful interface
+ """
+ assert config is None or isinstance(config, list) \
+ or isinstance(config, dict), \
+ "task s3tests only supports a list or dictionary for configuration"
+ all_clients = ['client.{id}'.format(id=id_)
+ for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
+ if config is None:
+ config = all_clients
+ if isinstance(config, list):
+ config = dict.fromkeys(config)
+ clients = config.keys()
+
+ # just use the first client...
+ client = clients[0]
+
+ ##
+ admin_user = 'ada'
+ admin_display_name = 'Ms. Admin User'
+ admin_access_key = 'MH1WC2XQ1S8UISFDZC8W'
+ admin_secret_key = 'dQyrTPA0s248YeN5bBv4ukvKU0kh54LWWywkrpoG'
+ admin_caps = 'users=read, write; usage=read, write; buckets=read, write; zone=read, write'
+
+ user1 = 'foo'
+ user2 = 'fud'
+ subuser1 = 'foo:foo1'
+ subuser2 = 'foo:foo2'
+ display_name1 = 'Foo'
+ display_name2 = 'Fud'
+ email = 'foo@foo.com'
+ access_key = '9te6NH5mcdcq0Tc5i8i1'
+ secret_key = 'Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu'
+ access_key2 = 'p5YnriCv1nAtykxBrupQ'
+ secret_key2 = 'Q8Tk6Q/27hfbFSYdSkPtUqhqx1GgzvpXa4WARozh'
+ swift_secret1 = 'gpS2G9RREMrnbqlp29PP2D36kgPR1tm72n5fPYfL'
+ swift_secret2 = 'ri2VJQcKSYATOY6uaDUX7pxgkW+W1YmC6OCxPHwy'
+
+ bucket_name = 'myfoo'
+
+ # legend (test cases can be easily grep-ed out)
+ # TESTCASE 'testname','object','method','operation','assertion'
+ # TESTCASE 'create-admin-user','user','create','administrative user','succeeds'
+ (err, out) = rgwadmin(ctx, client, [
+ 'user', 'create',
+ '--uid', admin_user,
+ '--display-name', admin_display_name,
+ '--access-key', admin_access_key,
+ '--secret', admin_secret_key,
+ '--max-buckets', '0',
+ '--caps', admin_caps
+ ])
+ log.error(out)
+ log.error(err)
+ assert not err
+
+ (remote,) = ctx.cluster.only(client).remotes.iterkeys()
+ remote_host = remote.name.split('@')[1]
+ admin_conn = boto.s3.connection.S3Connection(
+ aws_access_key_id=admin_access_key,
+ aws_secret_access_key=admin_secret_key,
+ is_secure=False,
+ port=7280,
+ host=remote_host,
+ calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+ )
+
+ # TESTCASE 'info-nosuch','user','info','non-existent user','fails'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {"uid": user1})
+ assert ret == 404
+
+ # TESTCASE 'create-ok','user','create','w/all valid info','succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['user', 'create'],
+ {'uid' : user1,
+ 'display-name' : display_name1,
+ 'email' : email,
+ 'access-key' : access_key,
+ 'secret-key' : secret_key,
+ 'max-buckets' : '4'
+ })
+
+ assert ret == 200
+
+ # TESTCASE 'info-existing','user','info','existing user','returns correct info'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+
+ assert out['user_id'] == user1
+ assert out['email'] == email
+ assert out['display_name'] == display_name1
+ assert len(out['keys']) == 1
+ assert out['keys'][0]['access_key'] == access_key
+ assert out['keys'][0]['secret_key'] == secret_key
+ assert not out['suspended']
+
+ # TESTCASE 'suspend-ok','user','suspend','active user','succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : True})
+ assert ret == 200
+
+ # TESTCASE 'suspend-suspended','user','suspend','suspended user','succeeds w/advisory'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+ assert ret == 200
+ assert out['suspended']
+
+ # TESTCASE 're-enable','user','enable','suspended user','succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : 'false'})
+ assert ret == 200
+
+ # TESTCASE 'info-re-enabled','user','info','re-enabled user','no longer suspended'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+ assert ret == 200
+ assert not out['suspended']
+
+ # TESTCASE 'add-keys','key','create','w/valid info','succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['key', 'create'],
+ {'uid' : user1,
+ 'access-key' : access_key2,
+ 'secret-key' : secret_key2
+ })
+
+
+ assert ret == 200
+
+ # TESTCASE 'info-new-key','user','info','after key addition','returns all keys'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+ assert ret == 200
+ assert len(out['keys']) == 2
+ assert out['keys'][0]['access_key'] == access_key2 or out['keys'][1]['access_key'] == access_key2
+ assert out['keys'][0]['secret_key'] == secret_key2 or out['keys'][1]['secret_key'] == secret_key2
+
+ # TESTCASE 'rm-key','key','rm','newly added key','succeeds, key is removed'
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['key', 'rm'],
+ {'uid' : user1,
+ 'access-key' : access_key2
+ })
+
+ assert ret == 200
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+
+ assert len(out['keys']) == 1
+ assert out['keys'][0]['access_key'] == access_key
+ assert out['keys'][0]['secret_key'] == secret_key
+
+ # TESTCASE 'add-swift-key','key','create','swift key','succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['subuser', 'create'],
+ {'subuser' : subuser1,
+ 'secret-key' : swift_secret1,
+ 'key-type' : 'swift'
+ })
+
+ assert ret == 200
+
+ # TESTCASE 'info-swift-key','user','info','after key addition','returns all keys'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+ assert ret == 200
+ assert len(out['swift_keys']) == 1
+ assert out['swift_keys'][0]['user'] == subuser1
+ assert out['swift_keys'][0]['secret_key'] == swift_secret1
+
+ # TESTCASE 'add-swift-subuser','key','create','swift sub-user key','succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['subuser', 'create'],
+ {'subuser' : subuser2,
+ 'secret-key' : swift_secret2,
+ 'key-type' : 'swift'
+ })
+
+ assert ret == 200
+
+ # TESTCASE 'info-swift-subuser','user','info','after key addition','returns all sub-users/keys'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+ assert ret == 200
+ assert len(out['swift_keys']) == 2
+ assert out['swift_keys'][0]['user'] == subuser2 or out['swift_keys'][1]['user'] == subuser2
+ assert out['swift_keys'][0]['secret_key'] == swift_secret2 or out['swift_keys'][1]['secret_key'] == swift_secret2
+
+ # TESTCASE 'rm-swift-key1','key','rm','subuser','succeeds, one key is removed'
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['key', 'rm'],
+ {'subuser' : subuser1,
+ 'key-type' :'swift'
+ })
+
+ assert ret == 200
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+ assert len(out['swift_keys']) == 1
+
+ # TESTCASE 'rm-subuser','subuser','rm','subuser','success, subuser is removed'
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['subuser', 'rm'],
+ {'subuser' : subuser1
+ })
+
+ assert ret == 200
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+ assert len(out['subusers']) == 1
+
+ # TESTCASE 'rm-subuser-with-keys','subuser','rm','subuser','succeeds, second subuser and key is removed'
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['subuser', 'rm'],
+ {'subuser' : subuser2,
+ 'key-type' : 'swift',
+ 'purge-keys' : True
+ })
+
+ assert ret == 200
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+ assert len(out['swift_keys']) == 0
+ assert len(out['subusers']) == 0
+
+ # TESTCASE 'bucket-stats','bucket','info','no session/buckets','succeeds, empty list'
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1})
+ assert ret == 200
+ assert len(out) == 0
+
+ # connect to rgw
+ connection = boto.s3.connection.S3Connection(
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ is_secure=False,
+ port=7280,
+ host=remote_host,
+ calling_format=boto.s3.connection.OrdinaryCallingFormat(),
+ )
+
+ # TESTCASE 'bucket-stats2','bucket','stats','no buckets','succeeds, empty list'
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1, 'stats' : True})
+ assert ret == 200
+ assert len(out) == 0
+
+ # create a first bucket
+ bucket = connection.create_bucket(bucket_name)
+
+ # TESTCASE 'bucket-list','bucket','list','one bucket','succeeds, expected list'
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1})
+ assert ret == 200
+ assert len(out) == 1
+ assert out[0] == bucket_name
+
+ # TESTCASE 'bucket-stats3','bucket','stats','new empty bucket','succeeds, empty list'
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True})
+
+ assert ret == 200
+ assert out['owner'] == user1
+ bucket_id = out['id']
+
+ # TESTCASE 'bucket-stats4','bucket','stats','new empty bucket','succeeds, expected bucket ID'
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1, 'stats' : True})
+ assert ret == 200
+ assert len(out) == 1
+ assert out[0]['id'] == bucket_id # does it return the same ID twice in a row?
+
+ # use some space
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('one')
+
+ # TESTCASE 'bucket-stats5','bucket','stats','after creating key','succeeds, lists one non-empty object'
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True})
+ assert ret == 200
+ assert out['id'] == bucket_id
+ assert out['usage']['rgw.main']['num_objects'] == 1
+ assert out['usage']['rgw.main']['size_kb'] > 0
+
+ # reclaim it
+ key.delete()
+
+ # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'fails', 'access denied error'
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'unlink'], {'uid' : user1, 'bucket' : bucket_name})
+
+ assert ret == 200
+
+ # create a second user to link the bucket to
+ (ret, out) = rgwadmin_rest(admin_conn,
+ ['user', 'create'],
+ {'uid' : user2,
+ 'display-name' : display_name2,
+ 'access-key' : access_key2,
+ 'secret-key' : secret_key2,
+ 'max-buckets' : '1',
+ })
+
+ assert ret == 200
+
+ # try creating an object with the first user before the bucket is relinked
+ denied = False
+ key = boto.s3.key.Key(bucket)
+
+ try:
+ key.set_contents_from_string('two')
+ except boto.exception.S3ResponseError:
+ denied = True
+
+ assert not denied
+
+ # delete the object
+ key.delete()
+
+ # link the bucket to another user
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'link'], {'uid' : user2, 'bucket' : bucket_name})
+
+ assert ret == 200
+
+ # try creating an object with the first user which should cause an error
+ key = boto.s3.key.Key(bucket)
+
+ try:
+ key.set_contents_from_string('three')
+ except boto.exception.S3ResponseError:
+ denied = True
+
+ assert denied
+
+ # relink the bucket to the first user and delete the second user
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'link'], {'uid' : user1, 'bucket' : bucket_name})
+ assert ret == 200
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user2})
+ assert ret == 200
+
+ # TESTCASE 'object-rm', 'object', 'rm', 'remove object', 'succeeds, object is removed'
+
+ # upload an object
+ object_name = 'four'
+ key = boto.s3.key.Key(bucket, object_name)
+ key.set_contents_from_string(object_name)
+
+ # now delete it
+ (ret, out) = rgwadmin_rest(admin_conn, ['object', 'rm'], {'bucket' : bucket_name, 'object' : object_name})
+ assert ret == 200
+
+ # TESTCASE 'bucket-stats6','bucket','stats','after deleting key','succeeds, lists no objects'
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True})
+ assert ret == 200
+ assert out['id'] == bucket_id
+ assert out['usage']['rgw.main']['num_objects'] == 0
+
+ # create a bucket for deletion stats
+ useless_bucket = connection.create_bucket('useless_bucket')
+ useless_key = useless_bucket.new_key('useless_key')
+ useless_key.set_contents_from_string('useless string')
+
+ # delete it
+ useless_key.delete()
+ useless_bucket.delete()
+
+ # wait for the statistics to flush
+ time.sleep(60)
+
+ # need to wait for all usage data to get flushed, should take up to 30 seconds
+ timestamp = time.time()
+ while time.time() - timestamp <= (20 * 60): # wait up to 20 minutes
+ (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'categories' : 'delete_obj'}) # last operation we did is delete obj, wait for it to flush
+
+ if successful_ops(out) > 0:
+ break
+ time.sleep(1)
+
+ assert time.time() - timestamp <= (20 * 60)
+
+ # TESTCASE 'usage-show' 'usage' 'show' 'all usage' 'succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'])
+ assert ret == 200
+ assert len(out['entries']) > 0
+ assert len(out['summary']) > 0
+ user_summary = out['summary'][0]
+ total = user_summary['total']
+ assert total['successful_ops'] > 0
+
+ # TESTCASE 'usage-show2' 'usage' 'show' 'user usage' 'succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1})
+ assert ret == 200
+ assert len(out['entries']) > 0
+ assert len(out['summary']) > 0
+ user_summary = out['summary'][0]
+ for entry in user_summary['categories']:
+ assert entry['successful_ops'] > 0
+ assert user_summary['user'] == user1
+
+ # TESTCASE 'usage-show3' 'usage' 'show' 'user usage categories' 'succeeds'
+ test_categories = ['create_bucket', 'put_obj', 'delete_obj', 'delete_bucket']
+ for cat in test_categories:
+ (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1, 'categories' : cat})
+ assert ret == 200
+ assert len(out['summary']) > 0
+ user_summary = out['summary'][0]
+ assert user_summary['user'] == user1
+ assert len(user_summary['categories']) == 1
+ entry = user_summary['categories'][0]
+ assert entry['category'] == cat
+ assert entry['successful_ops'] > 0
+
+ # TESTCASE 'usage-trim' 'usage' 'trim' 'user usage' 'succeeds, usage removed'
+ (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'trim'], {'uid' : user1})
+ assert ret == 200
+ (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1})
+ assert ret == 200
+ assert len(out['entries']) == 0
+ assert len(out['summary']) == 0
+
+ # TESTCASE 'user-suspend2','user','suspend','existing user','succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : True})
+ assert ret == 200
+
+ # TESTCASE 'user-suspend3','user','suspend','suspended user','cannot write objects'
+ try:
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('five')
+ except boto.exception.S3ResponseError as e:
+ assert e.status == 403
+
+ # TESTCASE 'user-renable2','user','enable','suspended user','succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : 'false'})
+ assert ret == 200
+
+ # TESTCASE 'user-renable3','user','enable','reenabled user','can write objects'
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('six')
+
+ # TESTCASE 'garbage-list', 'garbage', 'list', 'get list of objects ready for garbage collection'
+
+ # create an object large enough to be split into multiple parts
+ test_string = 'foo'*10000000
+
+ big_key = boto.s3.key.Key(bucket)
+ big_key.set_contents_from_string(test_string)
+
+ # now delete the head
+ big_key.delete()
+
+ # TESTCASE 'rm-user-buckets','user','rm','existing user','fails, still has buckets'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1})
+ assert ret == 409
+
+ # delete should fail because ``key`` still exists
+ try:
+ bucket.delete()
+ except boto.exception.S3ResponseError as e:
+ assert e.status == 409
+
+ key.delete()
+ bucket.delete()
+
+ # TESTCASE 'policy', 'bucket', 'policy', 'get bucket policy', 'returns S3 policy'
+ bucket = connection.create_bucket(bucket_name)
+
+ # create an object
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('seven')
+
+ # should be private already but guarantee it
+ key.set_acl('private')
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['policy', 'show'], {'bucket' : bucket.name, 'object' : key.key})
+ assert ret == 200
+
+ acl = key.get_xml_acl()
+ assert acl == out.strip('\n')
+
+ # add another grantee by making the object public read
+ key.set_acl('public-read')
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['policy', 'show'], {'bucket' : bucket.name, 'object' : key.key})
+ assert ret == 200
+
+ acl = key.get_xml_acl()
+ assert acl == out.strip('\n')
+
+ # TESTCASE 'rm-bucket', 'bucket', 'rm', 'bucket with objects', 'succeeds'
+ bucket = connection.create_bucket(bucket_name)
+ key_name = ['eight', 'nine', 'ten', 'eleven']
+ for i in range(4):
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string(key_name[i])
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'rm'], {'bucket' : bucket_name, 'purge-objects' : True})
+ assert ret == 200
+
+ # TESTCASE 'caps-add', 'caps', 'add', 'add user cap', 'succeeds'
+ caps = 'usage=read'
+ (ret, out) = rgwadmin_rest(admin_conn, ['caps', 'add'], {'uid' : user1, 'user-caps' : caps})
+ assert ret == 200
+ assert out[0]['perm'] == 'read'
+
+ # TESTCASE 'caps-rm', 'caps', 'rm', 'remove existing cap from user', 'succeeds'
+ (ret, out) = rgwadmin_rest(admin_conn, ['caps', 'rm'], {'uid' : user1, 'user-caps' : caps})
+ assert ret == 200
+ assert not out
+
+ # TESTCASE 'rm-user','user','rm','existing user','fails, still has buckets'
+ bucket = connection.create_bucket(bucket_name)
+ key = boto.s3.key.Key(bucket)
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1})
+ assert ret == 409
+
+ # TESTCASE 'rm-user2', 'user', 'rm', 'user with data', 'succeeds'
+ bucket = connection.create_bucket(bucket_name)
+ key = boto.s3.key.Key(bucket)
+ key.set_contents_from_string('twelve')
+
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1, 'purge-data' : True})
+ assert ret == 200
+
+ # TESTCASE 'rm-user3','user','info','deleted user','fails'
+ (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1})
+ assert ret == 404
+
--- /dev/null
+"""
+Run rados gateway agent in test mode
+"""
+import contextlib
+import logging
+import argparse
+
+from ..orchestra import run
+from teuthology import misc as teuthology
+import teuthology.task_util.rgw as rgw_utils
+
+log = logging.getLogger(__name__)
+
+def run_radosgw_agent(ctx, config):
+ """
+ Run a single radosgw-agent. See task() for config format.
+ """
+ return_list = list()
+ for (client, cconf) in config.items():
+ # don't process entries that are not clients
+ if not client.startswith('client.'):
+ log.debug('key {data} does not start with \'client.\', moving on'.format(
+ data=client))
+ continue
+
+ src_client = cconf['src']
+ dest_client = cconf['dest']
+
+ src_zone = rgw_utils.zone_for_client(ctx, src_client)
+ dest_zone = rgw_utils.zone_for_client(ctx, dest_client)
+
+ log.info("source is %s", src_zone)
+ log.info("dest is %s", dest_zone)
+
+ testdir = teuthology.get_testdir(ctx)
+ (remote,) = ctx.cluster.only(client).remotes.keys()
+ # figure out which branch to pull from
+ branch = cconf.get('force-branch', None)
+ if not branch:
+ branch = cconf.get('branch', 'master')
+ sha1 = cconf.get('sha1')
+ remote.run(
+ args=[
+ 'cd', testdir, run.Raw('&&'),
+ 'git', 'clone',
+ '-b', branch,
+# 'https://github.com/ceph/radosgw-agent.git',
+ 'git://ceph.com/git/radosgw-agent.git',
+ 'radosgw-agent.{client}'.format(client=client),
+ ]
+ )
+ if sha1 is not None:
+ remote.run(
+ args=[
+ 'cd', testdir, run.Raw('&&'),
+ 'cd', 'radosgw-agent.{client}'.format(client=client),
+ run.Raw('&&'),
+ 'git', 'reset', '--hard', sha1,
+ ]
+ )
+ remote.run(
+ args=[
+ 'cd', testdir, run.Raw('&&'),
+ 'cd', 'radosgw-agent.{client}'.format(client=client),
+ run.Raw('&&'),
+ './bootstrap',
+ ]
+ )
+
+ src_host, src_port = rgw_utils.get_zone_host_and_port(ctx, src_client,
+ src_zone)
+ dest_host, dest_port = rgw_utils.get_zone_host_and_port(ctx, dest_client,
+ dest_zone)
+ src_access, src_secret = rgw_utils.get_zone_system_keys(ctx, src_client,
+ src_zone)
+ dest_access, dest_secret = rgw_utils.get_zone_system_keys(ctx, dest_client,
+ dest_zone)
+ sync_scope = cconf.get('sync-scope', None)
+ port = cconf.get('port', 8000)
+ daemon_name = '{host}.{port}.syncdaemon'.format(host=remote.name, port=port)
+ in_args = [
+ 'daemon-helper',
+ 'kill',
+ '{tdir}/radosgw-agent.{client}/radosgw-agent'.format(tdir=testdir,
+ client=client),
+ '-v',
+ '--src-access-key', src_access,
+ '--src-secret-key', src_secret,
+ '--source', "http://{addr}:{port}".format(addr=src_host, port=src_port),
+ '--dest-access-key', dest_access,
+ '--dest-secret-key', dest_secret,
+ '--max-entries', str(cconf.get('max-entries', 1000)),
+ '--log-file', '{tdir}/archive/rgw_sync_agent.{client}.log'.format(
+ tdir=testdir,
+ client=client),
+ '--object-sync-timeout', '30',
+ ]
+
+ if cconf.get('metadata-only', False):
+ in_args.append('--metadata-only')
+
+ # the test server and full/incremental flags are mutually exclusive
+ if sync_scope is None:
+ in_args.append('--test-server-host')
+ in_args.append('0.0.0.0')
+ in_args.append('--test-server-port')
+ in_args.append(str(port))
+ log.debug('Starting a sync test server on {client}'.format(client=client))
+ # Stash the radosgw-agent server / port # for use by subsequent tasks
+ ctx.radosgw_agent.endpoint = (client, str(port))
+ else:
+ in_args.append('--sync-scope')
+ in_args.append(sync_scope)
+ log.debug('Starting a {scope} sync on {client}'.format(scope=sync_scope, client=client))
+
+ # positional arg for destination must come last
+ in_args.append("http://{addr}:{port}".format(addr=dest_host,
+ port=dest_port))
+
+ return_list.append((client, remote.run(
+ args=in_args,
+ wait=False,
+ stdin=run.PIPE,
+ logger=log.getChild(daemon_name),
+ )))
+ return return_list
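+
+# Illustrative command assembled above (keys, hosts, and the testdir path are
+# placeholders); for a 'sync-scope: full' entry on client.0 the agent is
+# started roughly as:
+#   daemon-helper kill <testdir>/radosgw-agent.client.0/radosgw-agent -v \
+#       --src-access-key <key> --src-secret-key <secret> \
+#       --source http://<src-host>:<src-port> \
+#       --dest-access-key <key> --dest-secret-key <secret> \
+#       --max-entries 1000 \
+#       --log-file <testdir>/archive/rgw_sync_agent.client.0.log \
+#       --object-sync-timeout 30 --sync-scope full http://<dest-host>:<dest-port>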
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Run radosgw-agents in test mode.
+
+ Configuration is clients to run the agents on, with settings for
+ source client, destination client, and port to listen on. Binds
+ to 0.0.0.0. Port defaults to 8000. This must be run on clients
+ that have the correct zone root pools and rgw zone set in
+ ceph.conf, or the task cannot read the region information from the
+ cluster.
+
+ By default, this task will start an HTTP server that will trigger full
+ or incremental syncs based on requests made to it.
+ Alternatively, a single full sync can be triggered by
+ specifying 'sync-scope: full' or a loop of incremental syncs can be triggered
+ by specifying 'sync-scope: incremental' (the loop will sleep
+ '--incremental-sync-delay' seconds between each sync, default is 30 seconds).
+
+ By default, both data and metadata are synced. To only sync
+ metadata, for example because you want to sync between regions,
+ set metadata-only: true.
+
+ An example::
+
+ tasks:
+ - ceph:
+ conf:
+ client.0:
+ rgw zone = foo
+ rgw zone root pool = .root.pool
+ client.1:
+ rgw zone = bar
+ rgw zone root pool = .root.pool2
+ - rgw: # region configuration omitted for brevity
+ - radosgw-agent:
+ client.0:
+ branch: wip-next-feature-branch
+ src: client.0
+ dest: client.1
+ sync-scope: full
+ metadata-only: true
+ # port: 8000 (default)
+ client.1:
+ src: client.1
+ dest: client.0
+ port: 8001
+ """
+ assert isinstance(config, dict), 'rgw_sync_agent requires a dictionary config'
+ log.debug("config is %s", config)
+
+ overrides = ctx.config.get('overrides', {})
+ # merge each client section, but only if it exists in config since there isn't
+ # a sensible default action for this task
+ for client in config.iterkeys():
+ if config[client]:
+ log.debug('config[{client}]: {data}'.format(client=client, data=config[client]))
+ teuthology.deep_merge(config[client], overrides.get('radosgw-agent', {}))
+
+ ctx.radosgw_agent = argparse.Namespace()
+ ctx.radosgw_agent.config = config
+
+ procs = run_radosgw_agent(ctx, config)
+
+ ctx.radosgw_agent.procs = procs
+
+ try:
+ yield
+ finally:
+ testdir = teuthology.get_testdir(ctx)
+ try:
+ for client, proc in procs:
+ log.info("shutting down sync agent on %s", client)
+ proc.stdin.close()
+ proc.exitstatus.get()
+ finally:
+ for client, proc in procs:
+ ctx.cluster.only(client).run(
+ args=[
+ 'rm', '-rf',
+ '{tdir}/radosgw-agent.{client}'.format(tdir=testdir,
+ client=client)
+ ]
+ )
+++ /dev/null
-"""
-Rest Api
-"""
-import logging
-import contextlib
-import time
-
-from teuthology import misc as teuthology
-from teuthology import contextutil
-from ..orchestra import run
-from teuthology.task.ceph import CephState
-
-log = logging.getLogger(__name__)
-
-
-@contextlib.contextmanager
-def run_rest_api_daemon(ctx, api_clients):
- """
- Wrapper starts the rest api daemons
- """
- if not hasattr(ctx, 'daemons'):
- ctx.daemons = CephState()
- remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
- for rems, roles in remotes.iteritems():
- for whole_id_ in roles:
- if whole_id_ in api_clients:
- id_ = whole_id_[len('clients'):]
- run_cmd = [
- 'sudo',
- 'daemon-helper',
- 'kill',
- 'ceph-rest-api',
- '-n',
- 'client.rest{id}'.format(id=id_), ]
- cl_rest_id = 'client.rest{id}'.format(id=id_)
- ctx.daemons.add_daemon(rems, 'restapi',
- cl_rest_id,
- args=run_cmd,
- logger=log.getChild(cl_rest_id),
- stdin=run.PIPE,
- wait=False,
- )
- for i in range(1, 12):
- log.info('testing for ceph-rest-api try {0}'.format(i))
- run_cmd = [
- 'wget',
- '-O',
- '/dev/null',
- '-q',
- 'http://localhost:5000/api/v0.1/status'
- ]
- proc = rems.run(
- args=run_cmd,
- check_status=False
- )
- if proc.exitstatus == 0:
- break
- time.sleep(5)
- if proc.exitstatus != 0:
- raise RuntimeError('Cannot contact ceph-rest-api')
- try:
- yield
-
- finally:
- """
- TO DO: destroy daemons started -- modify iter_daemons_of_role
- """
- teuthology.stop_daemons_of_type(ctx, 'restapi')
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Start up rest-api.
-
- To start on on all clients::
-
- tasks:
- - ceph:
- - rest-api:
-
- To only run on certain clients::
-
- tasks:
- - ceph:
- - rest-api: [client.0, client.3]
-
- or
-
- tasks:
- - ceph:
- - rest-api:
- client.0:
- client.3:
-
- The general flow of things here is:
- 1. Find clients on which rest-api is supposed to run (api_clients)
- 2. Generate keyring values
- 3. Start up ceph-rest-api daemons
- On cleanup:
- 4. Stop the daemons
- 5. Delete keyring value files.
- """
- api_clients = []
- remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
- log.info(remotes)
- if config == None:
- api_clients = ['client.{id}'.format(id=id_)
- for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
- else:
- api_clients = config
- log.info(api_clients)
- testdir = teuthology.get_testdir(ctx)
- coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
- for rems, roles in remotes.iteritems():
- for whole_id_ in roles:
- if whole_id_ in api_clients:
- id_ = whole_id_[len('client.'):]
- keyring = '/etc/ceph/ceph.client.rest{id}.keyring'.format(
- id=id_)
- rems.run(
- args=[
- 'sudo',
- 'adjust-ulimits',
- 'ceph-coverage',
- coverage_dir,
- 'ceph-authtool',
- '--create-keyring',
- '--gen-key',
- '--name=client.rest{id}'.format(id=id_),
- '--set-uid=0',
- '--cap', 'mon', 'allow *',
- '--cap', 'osd', 'allow *',
- '--cap', 'mds', 'allow',
- keyring,
- run.Raw('&&'),
- 'sudo',
- 'chmod',
- '0644',
- keyring,
- ],
- )
- rems.run(
- args=[
- 'sudo',
- 'sh',
- '-c',
- run.Raw("'"),
- "echo",
- '[client.rest{id}]'.format(id=id_),
- run.Raw('>>'),
- "/etc/ceph/ceph.conf",
- run.Raw("'")
- ]
- )
- rems.run(
- args=[
- 'sudo',
- 'sh',
- '-c',
- run.Raw("'"),
- 'echo',
- 'restapi',
- 'keyring',
- '=',
- '/etc/ceph/ceph.client.rest{id}.keyring'.format(id=id_),
- run.Raw('>>'),
- '/etc/ceph/ceph.conf',
- run.Raw("'"),
- ]
- )
- rems.run(
- args=[
- 'ceph',
- 'auth',
- 'import',
- '-i',
- '/etc/ceph/ceph.client.rest{id}.keyring'.format(id=id_),
- ]
- )
- with contextutil.nested(
- lambda: run_rest_api_daemon(ctx=ctx, api_clients=api_clients),):
- yield
-
--- /dev/null
+"""
+Rest Api
+"""
+import logging
+import contextlib
+import time
+
+from teuthology import misc as teuthology
+from teuthology import contextutil
+from ..orchestra import run
+from teuthology.task.ceph import CephState
+
+log = logging.getLogger(__name__)
+
+
+@contextlib.contextmanager
+def run_rest_api_daemon(ctx, api_clients):
+ """
+    Start the ceph-rest-api daemons on the configured clients.
+ """
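+    # Reuse ctx.daemons if an earlier task (typically ceph) already created it,
+    # so the rest-api daemons share the same CephState bookkeeping; otherwise
+    # start a fresh registry.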
+ if not hasattr(ctx, 'daemons'):
+ ctx.daemons = CephState()
+ remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
+ for rems, roles in remotes.iteritems():
+ for whole_id_ in roles:
+ if whole_id_ in api_clients:
+                id_ = whole_id_[len('client.'):]
+ run_cmd = [
+ 'sudo',
+ 'daemon-helper',
+ 'kill',
+ 'ceph-rest-api',
+ '-n',
+ 'client.rest{id}'.format(id=id_), ]
+ cl_rest_id = 'client.rest{id}'.format(id=id_)
+ ctx.daemons.add_daemon(rems, 'restapi',
+ cl_rest_id,
+ args=run_cmd,
+ logger=log.getChild(cl_rest_id),
+ stdin=run.PIPE,
+ wait=False,
+ )
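+                # Poll the local ceph-rest-api status endpoint until it
+                # responds, retrying up to 11 times at 5-second intervals
+                # (roughly a minute) before giving up.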
+ for i in range(1, 12):
+ log.info('testing for ceph-rest-api try {0}'.format(i))
+ run_cmd = [
+ 'wget',
+ '-O',
+ '/dev/null',
+ '-q',
+ 'http://localhost:5000/api/v0.1/status'
+ ]
+ proc = rems.run(
+ args=run_cmd,
+ check_status=False
+ )
+ if proc.exitstatus == 0:
+ break
+ time.sleep(5)
+ if proc.exitstatus != 0:
+ raise RuntimeError('Cannot contact ceph-rest-api')
+ try:
+ yield
+
+ finally:
+        # TODO: destroy the daemons that were started -- modify iter_daemons_of_role
+ teuthology.stop_daemons_of_type(ctx, 'restapi')
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+ Start up rest-api.
+
+    To start on all clients::
+
+ tasks:
+ - ceph:
+ - rest-api:
+
+ To only run on certain clients::
+
+ tasks:
+ - ceph:
+ - rest-api: [client.0, client.3]
+
+ or
+
+ tasks:
+ - ceph:
+ - rest-api:
+ client.0:
+ client.3:
+
+ The general flow of things here is:
+ 1. Find clients on which rest-api is supposed to run (api_clients)
+ 2. Generate keyring values
+ 3. Start up ceph-rest-api daemons
+ On cleanup:
+ 4. Stop the daemons
+ 5. Delete keyring value files.
+ """
+ api_clients = []
+ remotes = ctx.cluster.only(teuthology.is_type('client')).remotes
+ log.info(remotes)
+    if config is None:
+ api_clients = ['client.{id}'.format(id=id_)
+ for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
+ else:
+ api_clients = config
+ log.info(api_clients)
+ testdir = teuthology.get_testdir(ctx)
+ coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir)
+ for rems, roles in remotes.iteritems():
+ for whole_id_ in roles:
+ if whole_id_ in api_clients:
+ id_ = whole_id_[len('client.'):]
+ keyring = '/etc/ceph/ceph.client.rest{id}.keyring'.format(
+ id=id_)
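+                # Three steps per client: create a client.rest{id} keyring with
+                # mon/osd/mds caps, append its section and keyring path to
+                # /etc/ceph/ceph.conf, then import the key into the cluster's
+                # auth database.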
+ rems.run(
+ args=[
+ 'sudo',
+ 'adjust-ulimits',
+ 'ceph-coverage',
+ coverage_dir,
+ 'ceph-authtool',
+ '--create-keyring',
+ '--gen-key',
+ '--name=client.rest{id}'.format(id=id_),
+ '--set-uid=0',
+ '--cap', 'mon', 'allow *',
+ '--cap', 'osd', 'allow *',
+ '--cap', 'mds', 'allow',
+ keyring,
+ run.Raw('&&'),
+ 'sudo',
+ 'chmod',
+ '0644',
+ keyring,
+ ],
+ )
+ rems.run(
+ args=[
+ 'sudo',
+ 'sh',
+ '-c',
+ run.Raw("'"),
+ "echo",
+ '[client.rest{id}]'.format(id=id_),
+ run.Raw('>>'),
+ "/etc/ceph/ceph.conf",
+ run.Raw("'")
+ ]
+ )
+ rems.run(
+ args=[
+ 'sudo',
+ 'sh',
+ '-c',
+ run.Raw("'"),
+ 'echo',
+ 'restapi',
+ 'keyring',
+ '=',
+ '/etc/ceph/ceph.client.rest{id}.keyring'.format(id=id_),
+ run.Raw('>>'),
+ '/etc/ceph/ceph.conf',
+ run.Raw("'"),
+ ]
+ )
+ rems.run(
+ args=[
+ 'ceph',
+ 'auth',
+ 'import',
+ '-i',
+ '/etc/ceph/ceph.client.rest{id}.keyring'.format(id=id_),
+ ]
+ )
+ with contextutil.nested(
+ lambda: run_rest_api_daemon(ctx=ctx, api_clients=api_clients),):
+ yield
+
+++ /dev/null
-"""
-rgw s3tests logging wrappers
-"""
-from cStringIO import StringIO
-from configobj import ConfigObj
-import contextlib
-import logging
-import s3tests
-
-from teuthology import misc as teuthology
-from teuthology import contextutil
-
-log = logging.getLogger(__name__)
-
-
-@contextlib.contextmanager
-def download(ctx, config):
- """
- Run s3tests download function
- """
- return s3tests.download(ctx, config)
-
-def _config_user(s3tests_conf, section, user):
- """
- Run s3tests user config function
- """
- return s3tests._config_user(s3tests_conf, section, user)
-
-@contextlib.contextmanager
-def create_users(ctx, config):
- """
- Run s3tests user create function
- """
- return s3tests.create_users(ctx, config)
-
-@contextlib.contextmanager
-def configure(ctx, config):
- """
- Run s3tests user configure function
- """
- return s3tests.configure(ctx, config)
-
-@contextlib.contextmanager
-def run_tests(ctx, config):
- """
- Run remote netcat tests
- """
- assert isinstance(config, dict)
- testdir = teuthology.get_testdir(ctx)
- for client, client_config in config.iteritems():
- client_config['extra_args'] = [
- 's3tests.functional.test_s3:test_bucket_list_return_data',
- ]
-# args = [
-# 'S3TEST_CONF={tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client),
-# '{tdir}/s3-tests/virtualenv/bin/nosetests'.format(tdir=testdir),
-# '-w',
-# '{tdir}/s3-tests'.format(tdir=testdir),
-# '-v',
-# 's3tests.functional.test_s3:test_bucket_list_return_data',
-# ]
-# if client_config is not None and 'extra_args' in client_config:
-# args.extend(client_config['extra_args'])
-#
-# ctx.cluster.only(client).run(
-# args=args,
-# )
-
- s3tests.run_tests(ctx, config)
-
- netcat_out = StringIO()
-
- for client, client_config in config.iteritems():
- ctx.cluster.only(client).run(
- args = [
- 'netcat',
- '-w', '5',
- '-U', '{tdir}/rgw.opslog.sock'.format(tdir=testdir),
- ],
- stdout = netcat_out,
- )
-
- out = netcat_out.getvalue()
-
- assert len(out) > 100
-
- log.info('Received', out)
-
- yield
-
-
-@contextlib.contextmanager
-def task(ctx, config):
- """
- Run some s3-tests suite against rgw, verify opslog socket returns data
-
- Must restrict testing to a particular client::
-
- tasks:
- - ceph:
- - rgw: [client.0]
- - s3tests: [client.0]
-
- To pass extra arguments to nose (e.g. to run a certain test)::
-
- tasks:
- - ceph:
- - rgw: [client.0]
- - s3tests:
- client.0:
- extra_args: ['test_s3:test_object_acl_grand_public_read']
- client.1:
- extra_args: ['--exclude', 'test_100_continue']
- """
- assert config is None or isinstance(config, list) \
- or isinstance(config, dict), \
- "task s3tests only supports a list or dictionary for configuration"
- all_clients = ['client.{id}'.format(id=id_)
- for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
- if config is None:
- config = all_clients
- if isinstance(config, list):
- config = dict.fromkeys(config)
- clients = config.keys()
-
- overrides = ctx.config.get('overrides', {})
- # merge each client section, not the top level.
- for (client, cconf) in config.iteritems():
- teuthology.deep_merge(cconf, overrides.get('rgw-logsocket', {}))
-
- log.debug('config is %s', config)
-
- s3tests_conf = {}
- for client in clients:
- s3tests_conf[client] = ConfigObj(
- indent_type='',
- infile={
- 'DEFAULT':
- {
- 'port' : 7280,
- 'is_secure' : 'no',
- },
- 'fixtures' : {},
- 's3 main' : {},
- 's3 alt' : {},
- }
- )
-
- with contextutil.nested(
- lambda: download(ctx=ctx, config=config),
- lambda: create_users(ctx=ctx, config=dict(
- clients=clients,
- s3tests_conf=s3tests_conf,
- )),
- lambda: configure(ctx=ctx, config=dict(
- clients=config,
- s3tests_conf=s3tests_conf,
- )),
- lambda: run_tests(ctx=ctx, config=config),
- ):
- yield
--- /dev/null
+"""
+rgw s3tests logging wrappers
+"""
+from cStringIO import StringIO
+from configobj import ConfigObj
+import contextlib
+import logging
+import s3tests
+
+from teuthology import misc as teuthology
+from teuthology import contextutil
+
+log = logging.getLogger(__name__)
+
+
+@contextlib.contextmanager
+def download(ctx, config):
+    """
+    Run the s3tests download step.
+    """
+    # s3tests.download is itself a context manager; enter it and yield rather
+    # than returning it, since a @contextmanager-decorated function must yield.
+    with s3tests.download(ctx, config):
+        yield
+
+def _config_user(s3tests_conf, section, user):
+ """
+ Run s3tests user config function
+ """
+ return s3tests._config_user(s3tests_conf, section, user)
+
+@contextlib.contextmanager
+def create_users(ctx, config):
+    """
+    Run the s3tests user creation step.
+    """
+    with s3tests.create_users(ctx, config):
+        yield
+
+@contextlib.contextmanager
+def configure(ctx, config):
+    """
+    Run the s3tests configuration step.
+    """
+    with s3tests.configure(ctx, config):
+        yield
+
+@contextlib.contextmanager
+def run_tests(ctx, config):
+ """
+    Run the s3tests suite, then read the rgw opslog socket with netcat to
+    verify that operations are being logged.
+ """
+ assert isinstance(config, dict)
+ testdir = teuthology.get_testdir(ctx)
+ for client, client_config in config.iteritems():
+ client_config['extra_args'] = [
+ 's3tests.functional.test_s3:test_bucket_list_return_data',
+ ]
+# args = [
+# 'S3TEST_CONF={tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client),
+# '{tdir}/s3-tests/virtualenv/bin/nosetests'.format(tdir=testdir),
+# '-w',
+# '{tdir}/s3-tests'.format(tdir=testdir),
+# '-v',
+# 's3tests.functional.test_s3:test_bucket_list_return_data',
+# ]
+# if client_config is not None and 'extra_args' in client_config:
+# args.extend(client_config['extra_args'])
+#
+# ctx.cluster.only(client).run(
+# args=args,
+# )
+
+ s3tests.run_tests(ctx, config)
+
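+    # After the s3tests have generated traffic, read from the rgw opslog unix
+    # socket with netcat (5-second timeout) and require that a non-trivial
+    # amount of log data (> 100 bytes) was written to it.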
+ netcat_out = StringIO()
+
+ for client, client_config in config.iteritems():
+        ctx.cluster.only(client).run(
+            args=[
+                'netcat',
+                '-w', '5',
+                '-U', '{tdir}/rgw.opslog.sock'.format(tdir=testdir),
+            ],
+            stdout=netcat_out,
+        )
+
+ out = netcat_out.getvalue()
+
+ assert len(out) > 100
+
+    log.info('Received %s', out)
+
+ yield
+
+
+@contextlib.contextmanager
+def task(ctx, config):
+ """
+    Run part of the s3-tests suite against rgw and verify that the opslog
+    socket returns data.
+
+ Must restrict testing to a particular client::
+
+ tasks:
+ - ceph:
+ - rgw: [client.0]
+ - s3tests: [client.0]
+
+ To pass extra arguments to nose (e.g. to run a certain test)::
+
+ tasks:
+ - ceph:
+ - rgw: [client.0]
+ - s3tests:
+ client.0:
+ extra_args: ['test_s3:test_object_acl_grand_public_read']
+ client.1:
+ extra_args: ['--exclude', 'test_100_continue']
+ """
+ assert config is None or isinstance(config, list) \
+ or isinstance(config, dict), \
+ "task s3tests only supports a list or dictionary for configuration"
+ all_clients = ['client.{id}'.format(id=id_)
+ for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')]
+ if config is None:
+ config = all_clients
+ if isinstance(config, list):
+ config = dict.fromkeys(config)
+ clients = config.keys()
+
+ overrides = ctx.config.get('overrides', {})
+ # merge each client section, not the top level.
+ for (client, cconf) in config.iteritems():
+ teuthology.deep_merge(cconf, overrides.get('rgw-logsocket', {}))
+
+ log.debug('config is %s', config)
+
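+    # Seed each client's s3-tests config with this task's defaults: rgw is
+    # assumed to listen on port 7280 without SSL. create_users/configure fill
+    # in the user credentials before the tests run.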
+ s3tests_conf = {}
+ for client in clients:
+ s3tests_conf[client] = ConfigObj(
+ indent_type='',
+ infile={
+ 'DEFAULT':
+ {
+ 'port' : 7280,
+ 'is_secure' : 'no',
+ },
+ 'fixtures' : {},
+ 's3 main' : {},
+ 's3 alt' : {},
+ }
+ )
+
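+    # Orchestration mirrors the s3tests task: download the suite, create test
+    # users, write the per-client config, then run the tests (run_tests also
+    # performs the opslog socket check).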
+ with contextutil.nested(
+ lambda: download(ctx=ctx, config=config),
+ lambda: create_users(ctx=ctx, config=dict(
+ clients=clients,
+ s3tests_conf=s3tests_conf,
+ )),
+ lambda: configure(ctx=ctx, config=dict(
+ clients=config,
+ s3tests_conf=s3tests_conf,
+ )),
+ lambda: run_tests(ctx=ctx, config=config),
+ ):
+ yield