From c6b5c01a168adcd70cfeba74fd9abcf1e3146ab4 Mon Sep 17 00:00:00 2001
From: Zack Cerza
Date: Wed, 5 Feb 2014 20:48:05 -0600
Subject: [PATCH] Split out devstack-ceph configuration

This is starting to get long, so create configure_devstack_and_ceph()

Signed-off-by: Zack Cerza
---
 teuthology/task/devstack.py | 96 +++++++++++++++++++------------------
 1 file changed, 49 insertions(+), 47 deletions(-)

diff --git a/teuthology/task/devstack.py b/teuthology/task/devstack.py
index 5912ca6e7f..74e82b6b37 100644
--- a/teuthology/task/devstack.py
+++ b/teuthology/task/devstack.py
@@ -13,8 +13,6 @@ http://ceph.com/docs/master/rbd/rbd-openstack/
 log = logging.getLogger(__name__)
 
 DEVSTACK_GIT_REPO = 'https://github.com/openstack-dev/devstack.git'
-is_devstack_node = lambda role: role.startswith('devstack')
-is_osd_node = lambda role: role.startswith('osd')
 
 
 @contextlib.contextmanager
@@ -29,58 +27,16 @@ def task(ctx, config):
         config = {}
     if not isinstance(config, dict):
         raise TypeError("config must be a dict")
-    pool_size = config.get('pool_size', 128)
 
     # SETUP
+    is_devstack_node = lambda role: role.startswith('devstack')
+    is_osd_node = lambda role: role.startswith('osd')
     devstack_node = ctx.cluster.only(is_devstack_node).remotes.keys()[0]
     an_osd_node = ctx.cluster.only(is_osd_node).remotes.keys()[0]
     install_devstack(devstack_node)
     try:
         # OTHER STUFF
-
-        # Create pools on Ceph cluster
-        for pool_name in ['volumes', 'images', 'backups']:
-            args = ['ceph', 'osd', 'pool', 'create', pool_name, pool_size]
-            an_osd_node.run(args=args)
-
-        # Copy ceph.conf to OpenStack node
-        misc.copy_file(an_osd_node, '/etc/ceph/ceph.conf', devstack_node)
-        # This is where we would install python-ceph and ceph-common but it
-        # appears the ceph task will do that for us.
-        ceph_auth_cmds = [
-            ['ceph', 'auth', 'get-or-create', 'client.cinder', 'mon',
-                'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rx pool=images'],  # noqa
-            ['ceph', 'auth', 'get-or-create', 'client.glance', 'mon',
-                'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=images'],  # noqa
-            ['ceph', 'auth', 'get-or-create', 'client.cinder-backup', 'mon',
-                'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=backups'],  # noqa
-        ]
-        for cmd in ceph_auth_cmds:
-            an_osd_node.run(args=cmd)
-
-        # Copy ceph auth keys to devstack node
-        def copy_key(from_remote, key_name, to_remote, dest_path, owner):
-            key_stringio = StringIO()
-            from_remote.run(
-                args=['ceph', 'auth', 'get-or-create', key_name],
-                stdout=key_stringio)
-            misc.sudo_write_file(to_remote, dest_path,
-                                 key_stringio, owner=owner)
-        keys = [
-            dict(name='client.glance',
-                 path='/etc/ceph/ceph.client.glance.keyring',
-                 owner='glance:glance'),
-            dict(name='client.cinder',
-                 path='/etc/ceph/ceph.client.cinder.keyring',
-                 owner='cinder:cinder'),
-            dict(name='client.cinder-backup',
-                 path='/etc/ceph/ceph.client.cinder-backup.keyring',
-                 owner='cinder:cinder'),
-        ]
-        for key_dict in keys:
-            copy_key(an_osd_node, key_dict['name'], devstack_node,
-                     key_dict['path'], key_dict['owner'])
-
+        configure_devstack_and_ceph(ctx, devstack_node, an_osd_node)
         yield
     #except Exception as e:
         # FAIL
@@ -98,3 +54,49 @@ def install_devstack(devstack_node):
     log.info("Installing devstack...")
     args = ['cd', 'devstack', run.Raw('&&'), './stack.sh']
     devstack_node.run(args=args)
+
+
+def configure_devstack_and_ceph(config, devstack_node, ceph_node):
+    # Create pools on Ceph cluster
+    pool_size = config.get('pool_size', 128)
+    for pool_name in ['volumes', 'images', 'backups']:
+        args = ['ceph', 'osd', 'pool', 'create', pool_name, pool_size]
+        ceph_node.run(args=args)
+
+    # Copy ceph.conf to OpenStack node
+    misc.copy_file(ceph_node, '/etc/ceph/ceph.conf', devstack_node)
+    # This is where we would install python-ceph and ceph-common but it
+    # appears the ceph task will do that for us.
+    ceph_auth_cmds = [
+        ['ceph', 'auth', 'get-or-create', 'client.cinder', 'mon',
+            'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rx pool=images'],  # noqa
+        ['ceph', 'auth', 'get-or-create', 'client.glance', 'mon',
+            'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=images'],  # noqa
+        ['ceph', 'auth', 'get-or-create', 'client.cinder-backup', 'mon',
+            'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=backups'],  # noqa
+    ]
+    for cmd in ceph_auth_cmds:
+        ceph_node.run(args=cmd)
+
+    # Copy ceph auth keys to devstack node
+    def copy_key(from_remote, key_name, to_remote, dest_path, owner):
+        key_stringio = StringIO()
+        from_remote.run(
+            args=['ceph', 'auth', 'get-or-create', key_name],
+            stdout=key_stringio)
+        misc.sudo_write_file(to_remote, dest_path,
+                             key_stringio, owner=owner)
+    keys = [
+        dict(name='client.glance',
+             path='/etc/ceph/ceph.client.glance.keyring',
+             owner='glance:glance'),
+        dict(name='client.cinder',
+             path='/etc/ceph/ceph.client.cinder.keyring',
+             owner='cinder:cinder'),
+        dict(name='client.cinder-backup',
+             path='/etc/ceph/ceph.client.cinder-backup.keyring',
+             owner='cinder:cinder'),
+    ]
+    for key_dict in keys:
+        copy_key(ceph_node, key_dict['name'], devstack_node,
+                 key_dict['path'], key_dict['owner'])
-- 
2.39.5
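
For context on the key-distribution step in the patch: copy_key captures the output of
the "ceph auth get-or-create" command on an OSD node and writes it to a keyring file on
the devstack node, owned by the consuming OpenStack service. Below is a minimal
standalone sketch of the same idea, not the patch's code: it assumes a single host and
plain subprocess instead of teuthology remotes, and it needs root for the write and the
chown (the patch uses misc.sudo_write_file for the same reason). The key name, path,
and owner in the usage comment mirror one entry of the patch's keys list.

    import subprocess

    def copy_key(key_name, dest_path, owner):
        # Fetch (or create) the named key and capture the keyring text it prints.
        keyring = subprocess.check_output(
            ['ceph', 'auth', 'get-or-create', key_name])
        # Write the keyring where the consuming service expects it, then hand
        # ownership to that service's user and group (both steps require root).
        with open(dest_path, 'wb') as f:
            f.write(keyring)
        subprocess.check_call(['chown', owner, dest_path])

    # Example usage mirroring one entry of the keys list:
    # copy_key('client.glance', '/etc/ceph/ceph.client.glance.keyring',
    #          'glance:glance')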