siteconfig.rst
detailed_test_config.rst
openstack_backend.rst
+ libcloud_backend.rst
downburst_vms.rst
INSTALL.rst
LAB_SETUP.rst
--- /dev/null
+.. _libcloud-backend:
+
+LibCloud backend
+================
+This is an *experimental* provisioning backend that is eventually intended to support several libcloud drivers. At this time, only the OpenStack driver is supported.
+
+Prerequisites
+-------------
+* An account with an OpenStack provider that supports Nova and Cinder
+* A DNS server supporting `RFC 2136 <https://tools.ietf.org/html/rfc2136>`_. We use `bind <https://www.isc.org/downloads/bind/>`_ and `this ansible role <https://github.com/ceph/ceph-cm-ansible/blob/master/roles/nameserver/README.rst>`_ to help configure ours.
+* An `nsupdate-web <https://github.com/zmc/nsupdate-web>`_ instance configured to update DNS records. We use `an ansible role <https://github.com/ceph/ceph-cm-ansible/blob/master/roles/nsupdate_web/README.rst>`_ for this as well.
+* Configuration in `teuthology.yaml` for this backend itself (see :ref:`libcloud_config`) and `nsupdate-web`
+* You will also need to choose a maximum number of nodes to be running at once, and create records in your paddles database for each one - making sure to set `is_vm` to `True` for each.
+
+.. _libcloud_config:
+
+Configuration
+-------------
+An example configuration using OVH as an OpenStack provider::
+
+ libcloud:
+ providers:
+ ovh: # This string is the 'machine type' value you will use when locking these nodes
+ driver: openstack
+ driver_args: # driver args are passed directly to the libcloud driver
+ username: 'my_ovh_username'
+ password: 'my_ovh_password'
+ ex_force_auth_url: 'https://auth.cloud.ovh.net/v2.0/tokens'
+ ex_force_auth_version: '2.0_password'
+ ex_tenant_name: 'my_tenant_name'
+ ex_force_service_region: 'my_region'
+
+Why nsupdate-web?
+-----------------
+While we could have supported directly calling `nsupdate <https://en.wikipedia.org/wiki/Nsupdate>`_, we chose not to. There are a few reasons for this:
+
+* To avoid piling on yet another feature of teuthology that could be left up to a separate service
+* To avoid teuthology users having to request, obtain and safeguard the private key that nsupdate requires to function
+* Because we use one subdomain for all of Sepia's test nodes, we had to enable dynamic DNS for that whole zone (this is a limitation of bind). However, we do not want users to be able to push DNS updates for the entire zone. Instead, we gave nsupdate-web the ability to accept or reject requests based on whether the hostname matches a configurable regular expression. The private key itself is not shared with non-admin users.
+
+Bugs
+----
+At this time, only OVH has been tested as a provider. PRs are welcome to support more!
use_conserver: true
conserver_master: conserver.front.sepia.ceph.com
conserver_port: 3109
+
+ # Settings for [nsupdate-web](https://github.com/zmc/nsupdate-web)
+ # Used by the [libcloud](https://libcloud.apache.org/) backend
+ nsupdate_url: http://nsupdate.front.sepia.ceph.com/update
'libvirt-python',
'python-dateutil',
'manhole',
+ 'apache-libcloud',
+ # For apache-libcloud when using python < 2.7.9
+ 'backports.ssl_match_hostname',
],
'lab_domain': 'front.sepia.ceph.com',
'lock_server': 'http://paddles.front.sepia.ceph.com/',
'max_job_time': 259200, # 3 days
+ 'nsupdate_url': 'http://nsupdate.front.sepia.ceph.com/update',
'results_server': 'http://paddles.front.sepia.ceph.com/',
'results_ui_server': 'http://pulpito.ceph.com/',
'results_sending_email': 'teuthology',
--- /dev/null
+import logging
+
+from teuthology.config import config
+
+import openstack
+
+log = logging.getLogger(__name__)
+
+
+# Registry mapping a driver name (as used in teuthology.yaml) to the
+# Provider and Provisioner classes that implement it. Tests may inject
+# additional entries (e.g. a 'dummy' driver) at runtime.
+supported_drivers = dict(
+    openstack=dict(
+        provider=openstack.OpenStackProvider,
+        provisioner=openstack.OpenStackProvisioner,
+    ),
+)
+
+
+def get_types():
+ types = list()
+ if 'libcloud' in config and 'providers' in config.libcloud:
+ types = config.libcloud['providers'].keys()
+ return types
+
+
+def get_provider_conf(node_type):
+ all_providers = config.libcloud['providers']
+ provider_conf = all_providers[node_type]
+ return provider_conf
+
+
+def get_provider(node_type):
+ provider_conf = get_provider_conf(node_type)
+ driver = provider_conf['driver']
+ provider_cls = supported_drivers[driver]['provider']
+ return provider_cls(name=node_type, conf=provider_conf)
+
+
+def get_provisioner(node_type, name, os_type, os_version, conf=None):
+ provider = get_provider(node_type)
+ provider_conf = get_provider_conf(node_type)
+ driver = provider_conf['driver']
+ provisioner_cls = supported_drivers[driver]['provisioner']
+ return provisioner_cls(
+ provider=provider,
+ name=name,
+ os_type=os_type,
+ os_version=os_version,
+ conf=conf,
+ )
--- /dev/null
+import logging
+from copy import deepcopy
+
+from libcloud.compute.providers import get_driver
+from libcloud.compute.types import Provider as lc_Provider
+
+import teuthology.orchestra.remote
+import teuthology.provision.cloud
+from teuthology.misc import canonicalize_hostname, decanonicalize_hostname
+
+
+log = logging.getLogger(__name__)
+
+
+class Provider(object):
+    """
+    Wraps a libcloud compute driver together with the teuthology
+    configuration used to construct it.
+    """
+    # Names of driver_args keys that the libcloud driver constructor
+    # takes positionally, in order. Subclasses override as needed.
+    _driver_posargs = list()
+
+    def __init__(self, name, conf):
+        # 'name' is the machine-type string from the libcloud config
+        self.name = name
+        self.conf = conf
+        self.driver_name = self.conf['driver']
+
+    @property
+    def driver(self):
+        """
+        Build and return the libcloud driver instance.
+
+        driver_args from the config are passed straight through to the
+        driver; any keys listed in _driver_posargs are popped from the
+        dict and passed positionally, the rest as keyword arguments.
+        """
+        driver_type = get_driver(
+            getattr(lc_Provider, self.driver_name.upper())
+        )
+        # deepcopy so popping the positional args does not mutate self.conf
+        driver_args = deepcopy(self.conf['driver_args'])
+        driver = driver_type(
+            *[driver_args.pop(arg_name) for arg_name in self._driver_posargs],
+            **driver_args
+        )
+        return driver
+
+
+class Provisioner(object):
+    """
+    Base class for cloud provisioners.
+
+    Subclasses implement _create() and _destroy(); the public create()
+    and destroy() wrappers log any exception and return False rather
+    than letting it propagate.
+    """
+    def __init__(
+        self, provider, name, os_type=None, os_version=None,
+        conf=None, user='ubuntu',
+    ):
+        # Accept either a Provider instance or the name of a configured
+        # provider ('machine type'), which is looked up lazily here
+        if isinstance(provider, basestring):
+            provider = teuthology.provision.cloud.get_provider(provider)
+        self.provider = provider
+        # Short name for API calls; fully-qualified name for the OS hostname
+        self.name = decanonicalize_hostname(name)
+        self.hostname = canonicalize_hostname(name, user=None)
+        self.os_type = os_type
+        self.os_version = os_version
+        self.user = user
+
+    def create(self):
+        """Create the node; return False (never raise) on failure."""
+        try:
+            return self._create()
+        except Exception:
+            log.exception("Failed to create %s", self.name)
+            return False
+
+    def _create(self):
+        # Subclasses override with the actual creation logic
+        pass
+
+    def destroy(self):
+        """Destroy the node; return False (never raise) on failure."""
+        try:
+            return self._destroy()
+        except Exception:
+            log.exception("Failed to destroy %s", self.name)
+            return False
+
+    def _destroy(self):
+        # Subclasses override with the actual teardown logic
+        pass
+
+    @property
+    def remote(self):
+        # Lazily-built, cached Remote object for running commands on the node
+        if not hasattr(self, '_remote'):
+            self._remote = teuthology.orchestra.remote.Remote(
+                "%s@%s" % (self.user, self.name),
+            )
+        return self._remote
+
+    def __repr__(self):
+        template = "%s(provider='%s', name='%s', os_type='%s', " \
+            "os_version='%s')"
+        return template % (
+            self.__class__.__name__,
+            self.provider.name,
+            self.name,
+            self.os_type,
+            self.os_version,
+        )
--- /dev/null
+import logging
+import requests
+import socket
+import time
+import urllib
+import yaml
+
+from copy import deepcopy
+
+from paramiko import AuthenticationException
+from paramiko.ssh_exception import NoValidConnectionsError
+
+from teuthology.config import config
+from teuthology.contextutil import safe_while
+
+import base
+import util
+from teuthology.provision.cloud.base import Provider
+
+
+log = logging.getLogger(__name__)
+
+
+class OpenStackProvider(Provider):
+ _driver_posargs = ['username', 'password']
+
+ @property
+ def images(self):
+ if not hasattr(self, '_images'):
+ self._images = self.driver.list_images()
+ return self._images
+
+ @property
+ def sizes(self):
+ if not hasattr(self, '_sizes'):
+ self._sizes = self.driver.list_sizes()
+ return self._sizes
+
+ @property
+ def networks(self):
+ if not hasattr(self, '_networks'):
+ try:
+ self._networks = self.driver.ex_list_networks()
+ except AttributeError:
+ log.warn("Unable to list networks for %s", self.driver)
+ self._networks = list()
+ return self._networks
+
+ @property
+ def security_groups(self):
+ if not hasattr(self, '_security_groups'):
+ try:
+ self._security_groups = self.driver.ex_list_security_groups()
+ except AttributeError:
+ log.warn("Unable to list security groups for %s", self.driver)
+ self._security_groups = list()
+ return self._security_groups
+
+
+class OpenStackProvisioner(base.Provisioner):
+ _sentinel_path = '/.teuth_provisioned'
+
+ defaults = dict(
+ openstack=dict(
+ machine=dict(
+ disk=20,
+ ram=8000,
+ cpus=1,
+ ),
+ volumes=dict(
+ count=0,
+ size=0,
+ ),
+ )
+ )
+
+ def __init__(
+ self,
+ provider, name, os_type=None, os_version=None,
+ conf=None,
+ user='ubuntu',
+ ):
+ super(OpenStackProvisioner, self).__init__(
+ provider, name, os_type, os_version, conf=conf, user=user,
+ )
+ self._read_conf(conf)
+
+ def _read_conf(self, conf=None):
+ """
+ Looks through the following in order:
+
+ the 'conf' arg
+ conf[DRIVER_NAME]
+ teuthology.config.config.DRIVER_NAME
+ self.defaults[DRIVER_NAME]
+
+ It will use the highest value for each of the following: disk, RAM,
+ cpu, volume size and count
+
+ The resulting configuration becomes the new instance configuration
+ and is stored as self.conf
+
+ :param conf: The instance configuration
+
+ :return: None
+ """
+ driver_name = self.provider.driver.name.lower()
+ full_conf = conf or dict()
+ driver_conf = full_conf.get(driver_name, dict())
+ legacy_conf = getattr(config, driver_name) or dict()
+ defaults = self.defaults.get(driver_name, dict())
+ confs = list()
+ for obj in (full_conf, driver_conf, legacy_conf, defaults):
+ obj = deepcopy(obj)
+ if isinstance(obj, list):
+ confs.extend(obj)
+ else:
+ confs.append(obj)
+ self.conf = util.combine_dicts(confs, lambda x, y: x > y)
+
+ def _create(self):
+ log.debug("Creating node: %s", self)
+ log.debug("Selected size: %s", self.size)
+ log.debug("Selected image: %s", self.image)
+ create_args = dict(
+ name=self.name,
+ size=self.size,
+ image=self.image,
+ ex_userdata=self.userdata,
+ )
+ networks = self.provider.networks
+ if networks:
+ create_args['networks'] = networks
+ security_groups = self.security_groups
+ if security_groups:
+ create_args['ex_security_groups'] = security_groups
+ self._node = self.provider.driver.create_node(
+ **create_args
+ )
+ log.debug("Created node: %s", self.node)
+ results = self.provider.driver.wait_until_running(
+ nodes=[self.node],
+ )
+ self._node, self.ips = results[0]
+ log.debug("Node started: %s", self.node)
+ self._create_volumes()
+ self._update_dns()
+ # Give cloud-init a few seconds to bring up the network, start sshd,
+ # and install the public key
+ time.sleep(20)
+ self._wait_for_ready()
+ return self.node
+
+ def _create_volumes(self):
+ vol_count = self.conf['volumes']['count']
+ vol_size = self.conf['volumes']['size']
+ name_templ = "%s_%0{0}d".format(len(str(vol_count - 1)))
+ vol_names = [name_templ % (self.name, i)
+ for i in range(vol_count)]
+ try:
+ for name in vol_names:
+ volume = self.provider.driver.create_volume(
+ vol_size,
+ name,
+ )
+ log.info("Created volume %s", volume)
+ self.provider.driver.attach_volume(
+ self.node,
+ volume,
+ device=None,
+ )
+ except Exception:
+ log.exception("Failed to create or attach volume!")
+ self._destroy_volumes()
+ return False
+ return True
+
+ def _destroy_volumes(self):
+ all_volumes = self.provider.driver.list_volumes()
+ our_volumes = [vol for vol in all_volumes
+ if vol.name.startswith("%s_" % self.name)]
+ for vol in our_volumes:
+ try:
+ self.provider.driver.detach_volume(vol)
+ except Exception:
+ log.exception("Could not detach volume %s", vol)
+ try:
+ self.provider.driver.destroy_volume(vol)
+ except Exception:
+ log.exception("Could not destroy volume %s", vol)
+
+ def _update_dns(self):
+ query = urllib.urlencode(dict(
+ name=self.name,
+ ip=self.ips[0],
+ ))
+ nsupdate_url = "%s?%s" % (
+ config.nsupdate_url,
+ query,
+ )
+ resp = requests.get(nsupdate_url)
+ resp.raise_for_status()
+
+ def _wait_for_ready(self):
+ with safe_while(sleep=6, tries=20) as proceed:
+ while proceed():
+ try:
+ self.remote.connect()
+ break
+ except (
+ socket.error,
+ NoValidConnectionsError,
+ AuthenticationException,
+ ):
+ pass
+ cmd = "while [ ! -e '%s' ]; do sleep 5; done" % self._sentinel_path
+ self.remote.run(args=cmd, timeout=600)
+ log.info("Node is ready: %s", self.node)
+
+ @property
+ def image(self):
+ os_specs = [
+ '{os_type} {os_version}',
+ '{os_type}-{os_version}',
+ ]
+ for spec in os_specs:
+ matches = [image for image in self.provider.images
+ if spec.format(
+ os_type=self.os_type,
+ os_version=self.os_version,
+ ) in image.name.lower()]
+ if matches:
+ break
+ if not matches:
+ raise RuntimeError(
+ "Could not find an image for %s %s",
+ self.os_type,
+ self.os_version,
+ )
+ return matches[0]
+
+ @property
+ def size(self):
+ ram = self.conf['machine']['ram']
+ disk = self.conf['machine']['disk']
+ cpu = self.conf['machine']['cpus']
+
+ def good_size(size):
+ if (size.ram < ram or size.disk < disk or size.vcpus < cpu):
+ return False
+ return True
+
+ all_sizes = self.provider.sizes
+ good_sizes = filter(good_size, all_sizes)
+ smallest_match = sorted(
+ good_sizes,
+ key=lambda s: (s.ram, s.disk, s.vcpus)
+ )[0]
+ return smallest_match
+
+ @property
+ def security_groups(self):
+ group_names = self.provider.conf.get('security_groups')
+ if group_names is None:
+ return
+ result = list()
+ groups = self.provider.security_groups
+ for name in group_names:
+ matches = [group for group in groups if group.name == name]
+ if not matches:
+ msg = "No security groups found with name '%s'"
+ elif len(matches) > 1:
+ msg = "More than one security group found with name '%s'"
+ elif len(matches) == 1:
+ result.append(matches[0])
+ continue
+ raise RuntimeError(msg % name)
+ return result
+
+ @property
+ def userdata(self):
+ base_config = dict(
+ user=self.user,
+ manage_etc_hosts=True,
+ hostname=self.hostname,
+ packages=[
+ 'git',
+ 'wget',
+ 'python',
+ ],
+ runcmd=[
+ # Remove the user's password so that console logins are
+ # possible
+ ['passwd', '-d', self.user],
+ ['touch', self._sentinel_path]
+ ],
+ )
+ ssh_pubkey = util.get_user_ssh_pubkey()
+ if ssh_pubkey:
+ authorized_keys = base_config.get('ssh_authorized_keys', list())
+ authorized_keys.append(ssh_pubkey)
+ base_config['ssh_authorized_keys'] = authorized_keys
+ user_str = "#cloud-config\n" + yaml.safe_dump(base_config)
+ return user_str
+
+ @property
+ def node(self):
+ if not hasattr(self, '_node'):
+ nodes = self.provider.driver.list_nodes()
+ for node in nodes:
+ matches = [node for node in nodes if node.name == self.name]
+ msg = "Unknown error locating %s"
+ if not matches:
+ msg = "No nodes found with name '%s'" % self.name
+ log.warn(msg)
+ return
+ elif len(matches) > 1:
+ msg = "More than one node found with name '%s'"
+ elif len(matches) == 1:
+ self._node = matches[0]
+ break
+ raise RuntimeError(msg % self.name)
+ return self._node
+
+ def _destroy(self):
+ if not self.node:
+ return True
+ log.info("Destroying node: %s", self.node)
+ self._destroy_volumes()
+ return self.node.destroy()
--- /dev/null
+from libcloud.compute.providers import get_driver
+from mock import patch
+
+from teuthology.config import config
+from teuthology.provision import cloud
+
+from test_cloud_init import dummy_config, dummy_drivers
+
+
+class TestBase(object):
+    # Install the dummy libcloud config and driver registry entry for
+    # each test, and remove the driver afterwards so other tests are
+    # unaffected.
+    def setup(self):
+        config.load()
+        config.libcloud = dummy_config
+        cloud.supported_drivers['dummy'] = dummy_drivers
+
+    def teardown(self):
+        del cloud.supported_drivers['dummy']
+
+
+class TestProvider(TestBase):
+    # Exercise Provider construction and driver instantiation against
+    # libcloud's built-in dummy driver.
+    def test_init(self):
+        obj = cloud.get_provider('my_provider')
+        assert obj.name == 'my_provider'
+        assert obj.driver_name == 'dummy'
+        assert obj.conf == dummy_config['providers']['my_provider']
+
+    def test_driver(self):
+        obj = cloud.get_provider('my_provider')
+        assert isinstance(obj.driver, get_driver('dummy'))
+
+
+class TestProvisioner(TestBase):
+ klass = cloud.base.Provisioner
+
+ def get_obj(
+ self, name='node_name', os_type='ubuntu', os_version='ubuntu'):
+ return cloud.get_provisioner(
+ 'my_provider',
+ 'node_name',
+ 'ubuntu',
+ '16.04',
+ )
+
+ def test_init_provider_string(self):
+ obj = self.klass('my_provider', 'ubuntu', '16.04')
+ assert obj.provider.name == 'my_provider'
+
+ def test_create(self):
+ obj = self.get_obj()
+ with patch.object(
+ self.klass,
+ '_create',
+ ) as m_create:
+ for val in [True, False]:
+ m_create.return_value = val
+ res = obj.create()
+ assert res is val
+ m_create.assert_called_once_with()
+ m_create.reset_mock()
+ m_create.side_effect = RuntimeError
+ res = obj.create()
+ assert res is False
+ assert obj.create() is None
+
+ def test_destroy(self):
+ obj = self.get_obj()
+ with patch.object(
+ self.klass,
+ '_destroy',
+ ) as m_destroy:
+ for val in [True, False]:
+ m_destroy.return_value = val
+ res = obj.destroy()
+ assert res is val
+ m_destroy.assert_called_once_with()
+ m_destroy.reset_mock()
+ m_destroy.side_effect = RuntimeError
+ res = obj.destroy()
+ assert res is False
+ assert obj.destroy() is None
+
+ def test_remote(self):
+ obj = self.get_obj()
+ assert obj.remote.shortname == 'node_name'
+
+ def test_repr(self):
+ obj = self.get_obj()
+ assert repr(obj) == \
+ "Provisioner(provider='my_provider', name='node_name', os_type='ubuntu', os_version='16.04')" # noqa
+
--- /dev/null
+from teuthology.config import config
+from teuthology.provision import cloud
+
+# Minimal libcloud config exercising the 'dummy' driver shipped with
+# libcloud; 'creds' is the dummy driver's single positional argument.
+dummy_config = dict(
+    providers=dict(
+        my_provider=dict(
+            driver='dummy',
+            driver_args=dict(
+                creds=0,
+            ),
+            conf_1='1',
+            conf_2='2',
+        )
+    )
+)
+
+
+class DummyProvider(cloud.base.Provider):
+    # For libcloud's dummy driver
+    _driver_posargs = ['creds']
+
+# Registry entry injected into cloud.supported_drivers by test setup
+dummy_drivers = dict(
+    provider=DummyProvider,
+    provisioner=cloud.base.Provisioner,
+)
+
+
+class TestInit(object):
+    # Tests for the module-level helpers in teuthology.provision.cloud
+    def setup(self):
+        config.load()
+        config.libcloud = dummy_config
+        cloud.supported_drivers['dummy'] = dummy_drivers
+
+    def teardown(self):
+        # Remove the injected dummy driver so other tests are unaffected
+        del cloud.supported_drivers['dummy']
+
+    def test_get_types(self):
+        assert cloud.get_types() == ['my_provider']
+
+    def test_get_provider_conf(self):
+        expected = dummy_config['providers']['my_provider']
+        assert cloud.get_provider_conf('my_provider') == expected
+
+    def test_get_provider(self):
+        obj = cloud.get_provider('my_provider')
+        assert obj.name == 'my_provider'
+        assert obj.driver_name == 'dummy'
+
+    def test_get_provisioner(self):
+        obj = cloud.get_provisioner(
+            'my_provider',
+            'node_name',
+            'ubuntu',
+            '16.04',
+            dict(foo='bar'),
+        )
+        assert obj.provider.name == 'my_provider'
+        assert obj.name == 'node_name'
+        assert obj.os_type == 'ubuntu'
+        assert obj.os_version == '16.04'
--- /dev/null
+from mock import patch, MagicMock
+from pytest import mark
+
+from teuthology.provision.cloud import util
+
+
+@mark.parametrize(
+    'path, exists',
+    [
+        ('/fake/path', True),
+        ('/fake/path', False),
+    ]
+)
+def test_get_user_ssh_pubkey(path, exists):
+    # The key file is only opened when os.path.exists() says it's there
+    with patch('os.path.exists') as m_exists:
+        m_exists.return_value = exists
+        # NOTE: patches the py2 'file' builtin as used by the util module
+        with patch('teuthology.provision.cloud.util.file') as m_file:
+            m_file.return_value = MagicMock(spec=file)
+            util.get_user_ssh_pubkey(path)
+            if exists:
+                assert m_file.called_once_with(path, 'rb')
+
+
+@mark.parametrize(
+    'input_, func, expected',
+    [
+        # Overlapping keys: the comparison function keeps the larger value
+        [
+            [
+                dict(sub0=dict(key0=0, key1=0)),
+                dict(sub0=dict(key1=1, key2=2)),
+            ],
+            lambda x, y: x > y,
+            dict(sub0=dict(key0=0, key1=1, key2=2))
+        ],
+        # An empty dict on either side must be a no-op
+        [
+            [
+                dict(),
+                dict(sub0=dict(key1=1, key2=2)),
+            ],
+            lambda x, y: x > y,
+            dict(sub0=dict(key1=1, key2=2))
+        ],
+        [
+            [
+                dict(sub0=dict(key1=1, key2=2)),
+                dict(),
+            ],
+            lambda x, y: x > y,
+            dict(sub0=dict(key1=1, key2=2))
+        ],
+        # Several nested dicts: max is taken per leaf key across all inputs
+        [
+            [
+                dict(sub0=dict(key0=0, key1=0, key2=0)),
+                dict(sub0=dict(key0=1, key2=3), sub1=dict(key0=0)),
+                dict(sub0=dict(key0=3, key1=2, key2=1)),
+                dict(sub0=dict(key1=3),
+                     sub1=dict(key0=3, key1=0)),
+            ],
+            lambda x, y: x > y,
+            dict(sub0=dict(key0=3, key1=3, key2=3),
+                 sub1=dict(key0=3, key1=0))
+        ],
+    ]
+)
+def test_combine_dicts(input_, func, expected):
+    assert util.combine_dicts(input_, func) == expected
--- /dev/null
+import socket
+import urlparse
+import yaml
+
+from copy import deepcopy
+from libcloud.compute.providers import get_driver
+from mock import patch, Mock, DEFAULT
+from pytest import raises, mark
+
+from teuthology.config import config
+from teuthology.exceptions import MaxWhileTries
+from teuthology.provision import cloud
+
+test_config = dict(
+ providers=dict(
+ my_provider=dict(
+ driver='openstack',
+ driver_args=dict(
+ username='user',
+ password='password',
+ ex_force_auth_url='http://127.0.0.1:9999/v2.0/tokens',
+ ),
+ )
+ )
+)
+
+
+def get_fake_obj(mock_args=None, attributes=None):
+ if mock_args is None:
+ mock_args = dict()
+ if attributes is None:
+ attributes = dict()
+ obj = Mock(**mock_args)
+ for name, value in attributes.items():
+ setattr(obj, name, value)
+ return obj
+
+
+class TestOpenStackBase(object):
+    # Shared fixture: installs the OpenStack test config and patches
+    # every libcloud driver method (plus time.sleep and requests.get)
+    # that the provisioner touches, exposing the mocks via self.mocks.
+    def setup(self):
+        config.load()
+        config.libcloud = deepcopy(test_config)
+        self.start_patchers()
+
+    def start_patchers(self):
+        self.patchers = dict()
+        self.patchers['m_list_images'] = patch(
+            'libcloud.compute.drivers.openstack'
+            '.OpenStackNodeDriver.list_images'
+        )
+        self.patchers['m_list_sizes'] = patch(
+            'libcloud.compute.drivers.openstack'
+            '.OpenStackNodeDriver.list_sizes'
+        )
+        self.patchers['m_ex_list_networks'] = patch(
+            'libcloud.compute.drivers.openstack'
+            '.OpenStack_1_1_NodeDriver.ex_list_networks'
+        )
+        self.patchers['m_ex_list_security_groups'] = patch(
+            'libcloud.compute.drivers.openstack'
+            '.OpenStack_1_1_NodeDriver.ex_list_security_groups'
+        )
+        self.patchers['m_get_user_ssh_pubkey'] = patch(
+            'teuthology.provision.cloud.util.get_user_ssh_pubkey'
+        )
+        self.patchers['m_list_nodes'] = patch(
+            'libcloud.compute.drivers.openstack'
+            '.OpenStackNodeDriver.list_nodes'
+        )
+        self.patchers['m_create_node'] = patch(
+            'libcloud.compute.drivers.openstack'
+            '.OpenStack_1_1_NodeDriver.create_node'
+        )
+        self.patchers['m_wait_until_running'] = patch(
+            'libcloud.compute.drivers.openstack'
+            '.OpenStackNodeDriver.wait_until_running'
+        )
+        self.patchers['m_create_volume'] = patch(
+            'libcloud.compute.drivers.openstack'
+            '.OpenStackNodeDriver.create_volume'
+        )
+        self.patchers['m_attach_volume'] = patch(
+            'libcloud.compute.drivers.openstack'
+            '.OpenStackNodeDriver.attach_volume'
+        )
+        self.patchers['m_detach_volume'] = patch(
+            'libcloud.compute.drivers.openstack'
+            '.OpenStackNodeDriver.detach_volume'
+        )
+        self.patchers['m_list_volumes'] = patch(
+            'libcloud.compute.drivers.openstack'
+            '.OpenStackNodeDriver.list_volumes'
+        )
+        self.patchers['m_destroy_volume'] = patch(
+            'libcloud.compute.drivers.openstack'
+            '.OpenStackNodeDriver.destroy_volume'
+        )
+        # Avoid the 20s cloud-init wait in _create()
+        self.patchers['m_sleep'] = patch(
+            'time.sleep'
+        )
+        # Avoid real HTTP calls from _update_dns()
+        self.patchers['m_get'] = patch(
+            'requests.get'
+        )
+        self.mocks = dict()
+        for name, patcher in self.patchers.items():
+            self.mocks[name] = patcher.start()
+
+    def teardown(self):
+        for patcher in self.patchers.values():
+            patcher.stop()
+
+
+class TestOpenStackProvider(TestOpenStackBase):
+    # Verifies OpenStackProvider's lazy, cached list properties and its
+    # graceful fallback when the driver lacks an extension method.
+    klass = cloud.openstack.OpenStackProvider
+
+    def test_init(self):
+        obj = cloud.get_provider('my_provider')
+        assert obj.name == 'my_provider'
+        assert obj.driver_name == 'openstack'
+        assert obj.conf == test_config['providers']['my_provider']
+
+    def test_driver(self):
+        obj = cloud.get_provider('my_provider')
+        assert isinstance(obj.driver, get_driver('openstack'))
+
+    def test_images(self):
+        obj = cloud.get_provider('my_provider')
+        self.mocks['m_list_images'].return_value = ['image0', 'image1']
+        # _images must only exist after the first access (lazy caching)
+        assert not hasattr(obj, '_images')
+        assert obj.images == ['image0', 'image1']
+        assert hasattr(obj, '_images')
+
+    def test_sizes(self):
+        obj = cloud.get_provider('my_provider')
+        self.mocks['m_list_sizes'].return_value = ['size0', 'size1']
+        assert not hasattr(obj, '_sizes')
+        assert obj.sizes == ['size0', 'size1']
+        assert hasattr(obj, '_sizes')
+
+    def test_networks(self):
+        obj = cloud.get_provider('my_provider')
+        self.mocks['m_ex_list_networks'].return_value = ['net0', 'net1']
+        assert not hasattr(obj, '_networks')
+        assert obj.networks == ['net0', 'net1']
+        assert hasattr(obj, '_networks')
+        # AttributeError from the driver must degrade to an empty list
+        self.mocks['m_ex_list_networks'].side_effect = AttributeError
+        obj = cloud.get_provider('my_provider')
+        assert not hasattr(obj, '_networks')
+        assert obj.networks == list()
+        assert hasattr(obj, '_networks')
+
+    def test_security_groups(self):
+        obj = cloud.get_provider('my_provider')
+        self.mocks['m_ex_list_security_groups'].return_value = ['sg0', 'sg1']
+        assert not hasattr(obj, '_security_groups')
+        assert obj.security_groups == ['sg0', 'sg1']
+        assert hasattr(obj, '_security_groups')
+        # AttributeError from the driver must degrade to an empty list
+        self.mocks['m_ex_list_security_groups'].side_effect = AttributeError
+        obj = cloud.get_provider('my_provider')
+        assert not hasattr(obj, '_security_groups')
+        assert obj.security_groups == list()
+        assert hasattr(obj, '_security_groups')
+
+
+class TestOpenStackProvisioner(TestOpenStackBase):
+ klass = cloud.openstack.OpenStackProvisioner
+
+ def get_obj(
+ self, name='node_name', os_type='ubuntu',
+ os_version='16.04', conf=None):
+ return cloud.get_provisioner(
+ node_type='my_provider',
+ name=name,
+ os_type=os_type,
+ os_version=os_version,
+ conf=conf,
+ )
+
+ def test_init(self):
+ with patch.object(
+ self.klass,
+ '_read_conf',
+ ) as m_read_conf:
+ self.get_obj()
+ assert len(m_read_conf.call_args_list) == 1
+
+ @mark.parametrize(
+ 'input_conf',
+ [
+ dict(machine=dict(
+ disk=42,
+ ram=9001,
+ cpus=3,
+ )),
+ dict(volumes=dict(
+ count=3,
+ size=100,
+ )),
+ dict(),
+ dict(
+ machine=dict(
+ disk=1,
+ ram=2,
+ cpus=3,
+ ),
+ volumes=dict(
+ count=4,
+ size=5,
+ )
+ ),
+ dict(
+ machine=dict(
+ disk=100,
+ ),
+ ),
+ ]
+ )
+ def test_read_conf(self, input_conf):
+ obj = self.get_obj(conf=input_conf)
+ for topic in ['machine', 'volumes']:
+ combined = cloud.util.combine_dicts(
+ [input_conf, config.openstack],
+ lambda x, y: x > y,
+ )
+ assert obj.conf[topic] == combined[topic]
+
+ @mark.parametrize(
+ 'input_conf, expected_machine, expected_vols',
+ [
+ [
+ dict(openstack=[
+ dict(machine=dict(disk=64, ram=10000, cpus=3)),
+ dict(volumes=dict(count=1, size=1)),
+ ]),
+ dict(disk=64, ram=10000, cpus=3),
+ dict(count=1, size=1),
+ ],
+ [
+ dict(openstack=[
+ dict(machine=dict(cpus=3)),
+ dict(machine=dict(disk=1, ram=9000)),
+ dict(machine=dict(disk=50, ram=2, cpus=1)),
+ dict(machine=dict()),
+ dict(volumes=dict()),
+ dict(volumes=dict(count=0, size=0)),
+ dict(volumes=dict(count=1, size=0)),
+ dict(volumes=dict(size=1)),
+ ]),
+ dict(disk=50, ram=9000, cpus=3),
+ dict(count=1, size=1),
+ ],
+ [
+ dict(openstack=[
+ dict(volumes=dict(count=3, size=30)),
+ dict(volumes=dict(size=50)),
+ ]),
+ None,
+ dict(count=3, size=50),
+ ],
+ [
+ dict(openstack=[
+ dict(machine=dict(disk=100)),
+ dict(volumes=dict(count=3, size=30)),
+ ]),
+ dict(disk=100, ram=8000, cpus=1),
+ dict(count=3, size=30),
+ ],
+ ]
+ )
+ def test_read_conf_legacy(
+ self, input_conf, expected_machine, expected_vols):
+ obj = self.get_obj(conf=input_conf)
+ if expected_machine is not None:
+ assert obj.conf['machine'] == expected_machine
+ else:
+ assert obj.conf['machine'] == config.openstack['machine']
+ if expected_vols is not None:
+ assert obj.conf['volumes'] == expected_vols
+
+ @mark.parametrize(
+ "os_type, os_version, should_find",
+ [
+ ('centos', '7', True),
+ ('BeOS', '42', False),
+ ]
+ )
+ def test_image(self, os_type, os_version, should_find):
+ image_attrs = [
+ dict(name='ubuntu-14.04'),
+ dict(name='ubuntu-16.04'),
+ dict(name='centos-7.0'),
+ ]
+ fake_images = list()
+ for item in image_attrs:
+ fake_images.append(
+ get_fake_obj(attributes=item)
+ )
+ obj = self.get_obj(os_type=os_type, os_version=os_version)
+ self.mocks['m_list_images'].return_value = fake_images
+ if should_find:
+ assert obj.os_version in obj.image.name
+ assert obj.image in fake_images
+ else:
+ with raises(RuntimeError):
+ obj.image
+
+ @mark.parametrize(
+ "input_attrs, func_or_exc",
+ [
+ (dict(ram=2**16),
+ lambda s: s.ram == 2**16),
+ (dict(disk=9999),
+ lambda s: s.disk == 9999),
+ (dict(cpus=99),
+ lambda s: s.vcpus == 99),
+ (dict(ram=2**16, disk=9999, cpus=99),
+ IndexError),
+ ]
+ )
+ def test_size(self, input_attrs, func_or_exc):
+ size_attrs = [
+ dict(ram=8000, disk=9999, vcpus=99),
+ dict(ram=2**16, disk=20, vcpus=99),
+ dict(ram=2**16, disk=9999, vcpus=1),
+ ]
+ fake_sizes = list()
+ for item in size_attrs:
+ fake_sizes.append(
+ get_fake_obj(attributes=item)
+ )
+ base_spec = dict(machine=dict(
+ ram=1,
+ disk=1,
+ cpus=1,
+ ))
+ spec = deepcopy(base_spec)
+ spec['machine'].update(input_attrs)
+ obj = self.get_obj(conf=spec)
+ self.mocks['m_list_sizes'].return_value = fake_sizes
+ if isinstance(func_or_exc, type):
+ with raises(func_or_exc):
+ obj.size
+ else:
+ assert obj.size in fake_sizes
+ assert func_or_exc(obj.size) is True
+
+ @mark.parametrize(
+ "wanted_groups",
+ [
+ ['group1'],
+ ['group0', 'group2'],
+ [],
+ ]
+ )
+ def test_security_groups(self, wanted_groups):
+ group_names = ['group0', 'group1', 'group2']
+ fake_groups = list()
+ for name in group_names:
+ fake_groups.append(
+ get_fake_obj(attributes=dict(name=name))
+ )
+ self.mocks['m_ex_list_security_groups'].return_value = fake_groups
+ obj = self.get_obj()
+ assert obj.security_groups is None
+ obj = self.get_obj()
+ obj.provider.conf['security_groups'] = wanted_groups
+ assert [g.name for g in obj.security_groups] == wanted_groups
+
+ def test_security_groups_exc(self):
+ fake_groups = [
+ get_fake_obj(attributes=dict(name='sg')) for i in range(2)
+ ]
+ obj = self.get_obj()
+ obj.provider.conf['security_groups'] = ['sg']
+ with raises(RuntimeError):
+ obj.security_groups
+ self.mocks['m_ex_list_security_groups'].return_value = fake_groups
+ obj = self.get_obj()
+ obj.provider.conf['security_groups'] = ['sg']
+ with raises(RuntimeError):
+ obj.security_groups
+
+ @mark.parametrize(
+ "ssh_key",
+ [
+ 'my_ssh_key',
+ None,
+ ]
+ )
+ def test_userdata(self, ssh_key):
+ self.mocks['m_get_user_ssh_pubkey'].return_value = ssh_key
+ obj = self.get_obj()
+ userdata = yaml.safe_load(obj.userdata)
+ assert userdata['user'] == obj.user
+ assert userdata['hostname'] == obj.hostname
+ if ssh_key:
+ assert userdata['ssh_authorized_keys'] == [ssh_key]
+ else:
+ assert 'ssh_authorized_keys' not in userdata
+
+ @mark.parametrize(
+ 'wanted_name, should_find, exception',
+ [
+ ('node0', True, None),
+ ('node1', True, None),
+ ('node2', False, RuntimeError),
+ ('node3', False, None),
+ ]
+ )
+ def test_node(self, wanted_name, should_find, exception):
+ node_names = ['node0', 'node1', 'node2', 'node2']
+ fake_nodes = list()
+ for name in node_names:
+ fake_nodes.append(
+ get_fake_obj(attributes=dict(name=name))
+ )
+ self.mocks['m_list_nodes'].return_value = fake_nodes
+ obj = self.get_obj(name=wanted_name)
+ if should_find:
+ assert obj.node.name == wanted_name
+ elif exception:
+ with raises(exception) as excinfo:
+ obj.node
+ assert excinfo.value.message
+ else:
+ assert obj.node is None
+
+ @mark.parametrize(
+ 'networks, security_groups',
+ [
+ ([], []),
+ (['net0'], []),
+ ([], ['sg0']),
+ (['net0'], ['sg0']),
+ ]
+ )
+ def test_create(self, networks, security_groups):
+ node_name = 'node0'
+ fake_sizes = [
+ get_fake_obj(
+ attributes=dict(ram=2**16, disk=9999, vcpus=99)),
+ ]
+ fake_security_groups = [
+ get_fake_obj(attributes=dict(name=name))
+ for name in security_groups
+ ]
+ self.mocks['m_ex_list_networks'].return_value = networks
+ self.mocks['m_ex_list_security_groups'].return_value = \
+ fake_security_groups
+ self.mocks['m_list_sizes'].return_value = fake_sizes
+ fake_images = [
+ get_fake_obj(attributes=dict(name='ubuntu-16.04')),
+ ]
+ self.mocks['m_list_images'].return_value = fake_images
+ self.mocks['m_get_user_ssh_pubkey'].return_value = 'ssh_key'
+ fake_node = get_fake_obj(attributes=dict(name=node_name))
+ fake_ips = ['555.123.4.0']
+ self.mocks['m_create_node'].return_value = fake_node
+ self.mocks['m_wait_until_running'].return_value = \
+ [(fake_node, fake_ips)]
+ obj = self.get_obj(name=node_name)
+ obj._networks = networks
+ obj.provider.conf['security_groups'] = security_groups
+ p_wait_for_ready = patch(
+ 'teuthology.provision.cloud.openstack.OpenStackProvisioner'
+ '._wait_for_ready'
+ )
+ with p_wait_for_ready:
+ res = obj.create()
+ assert res is obj.node
+
+ def test_update_dns(self):
+ config.nsupdate_url = 'nsupdate_url'
+ obj = self.get_obj()
+ obj.name = 'x'
+ obj.ips = ['y']
+ obj._update_dns()
+ call_args = self.mocks['m_get'].call_args_list
+ assert len(call_args) == 1
+ url_base, query_string = call_args[0][0][0].split('?')
+ assert url_base == 'nsupdate_url'
+ parsed_query = urlparse.parse_qs(query_string)
+ assert parsed_query == dict(name=['x'], ip=['y'])
+
+ @mark.parametrize(
+ 'node',
+ [None, Mock()]
+ )
+ def test_destroy(self, node):
+ obj = self.get_obj()
+ obj._node = node
+ result = obj.destroy()
+ if not node:
+ assert result is True
+ else:
+ assert node.destroy.called_once_with()
+
    # Shared parametrize spec for the volume tests:
    # (count, size, should_succeed). The same (count, size) pairs appear with
    # both outcomes because should_succeed controls whether the test injects
    # driver failures, not the inputs themselves.
    _volume_matrix = (
        'count, size, should_succeed',
        [
            (1, 10, True),
            (0, 10, True),
            (10, 1, True),
            (1, 10, False),
            (10, 1, False),
        ]
    )
+
+ @mark.parametrize(*_volume_matrix)
+ def test_create_volumes(self, count, size, should_succeed):
+ obj_conf = dict(volumes=dict(count=count, size=size))
+ obj = self.get_obj(conf=obj_conf)
+ node = get_fake_obj()
+ if not should_succeed:
+ obj.provider.driver.create_volume.side_effect = Exception
+ obj._node = node
+ with patch.object(obj, '_destroy_volumes'):
+ result = obj._create_volumes()
+ assert result is should_succeed
+ if should_succeed:
+ create_calls = obj.provider.driver.create_volume.call_args_list
+ attach_calls = obj.provider.driver.attach_volume.call_args_list
+ assert len(create_calls) == count
+ assert len(attach_calls) == count
+ for i in range(count):
+ vol_size, vol_name = create_calls[i][0]
+ assert vol_size == size
+ assert vol_name == '%s_%s' % (obj.name, i)
+ assert attach_calls[i][0][0] is obj._node
+ assert attach_calls[i][1]['device'] is None
+ else:
+ assert obj._destroy_volumes.called_once_with()
+
+ @mark.parametrize(*_volume_matrix)
+ def test_destroy_volumes(self, count, size, should_succeed):
+ obj_conf = dict(volumes=dict(count=count, size=size))
+ obj = self.get_obj(conf=obj_conf)
+ fake_volumes = list()
+ for i in range(count):
+ vol_name = '%s_%s' % (obj.name, i)
+ fake_volumes.append(
+ get_fake_obj(attributes=dict(name=vol_name))
+ )
+ obj.provider.driver.list_volumes.return_value = fake_volumes
+ obj._destroy_volumes()
+ detach_calls = obj.provider.driver.detach_volume.call_args_list
+ destroy_calls = obj.provider.driver.destroy_volume.call_args_list
+ assert len(detach_calls) == count
+ assert len(destroy_calls) == count
+ assert len(obj.provider.driver.detach_volume.call_args_list) == count
+ assert len(obj.provider.driver.destroy_volume.call_args_list) == count
+ obj.provider.driver.detach_volume.reset_mock()
+ obj.provider.driver.destroy_volume.reset_mock()
+ obj.provider.driver.detach_volume.side_effect = Exception
+ obj.provider.driver.destroy_volume.side_effect = Exception
+ obj._destroy_volumes()
+ assert len(obj.provider.driver.detach_volume.call_args_list) == count
+ assert len(obj.provider.driver.destroy_volume.call_args_list) == count
+
+ def test_destroy_volumes_exc(self):
+ obj = self.get_obj()
+ obj.provider.driver.detach_volume.side_effect = Exception
+
+ def test_wait_for_ready(self):
+ obj = self.get_obj()
+ obj._node = get_fake_obj(attributes=dict(name='node_name'))
+ with patch.multiple(
+ 'teuthology.orchestra.remote.Remote',
+ connect=DEFAULT,
+ run=DEFAULT,
+ ) as mocks:
+ obj._wait_for_ready()
+ mocks['connect'].side_effect = socket.error
+ with raises(MaxWhileTries):
+ obj._wait_for_ready()
--- /dev/null
+import os
+
+
def get_user_ssh_pubkey(path='~/.ssh/id_rsa.pub'):
    """
    Read the current user's SSH public key, if present.

    :param path: Path to the public key file; '~' is expanded.
    :returns:    The key contents (bytes, since the file is opened in binary
                 mode) with surrounding whitespace stripped, or None if the
                 file does not exist.
    """
    full_path = os.path.expanduser(path)
    if not os.path.exists(full_path):
        return None
    # file() was a Python 2-only builtin; open() works everywhere
    with open(full_path, 'rb') as key_file:
        return key_file.read().strip()
+
+
def combine_dicts(list_of_dicts, func):
    """
    Merge a sequence of dicts into a single new dict, delegating the
    per-key merge decisions to selective_update().

    :param list_of_dicts: A list of dicts to combine using selective_update()
    :param func:          A comparison function that will be passed to
                          selective_update() along with values from each
                          input dict
    :returns:             The new, merged, dict
    """
    merged = dict()
    for each_dict in list_of_dicts:
        selective_update(merged, each_dict, func)
    return merged
+
+
def selective_update(a, b, func):
    """
    Given two dicts and a comparison function, recursively inspect key-value
    pairs in the second dict and merge them into the first dict if func()
    returns a "Truthy" value.

    Example:

    >>> a = dict(x=0, y=1, z=3)
    >>> b = dict(x=1, y=2, z=0)
    >>> selective_update(a, b, lambda foo, bar: foo > bar)
    >>> print(a)
    {'x': 1, 'y': 2, 'z': 3}

    :param a: A dict. This is modified in-place!
    :param b: Another dict.
    :param func: A binary comparison function that will be called similarly
                 to: func(b[key], a[key]) for each key in b.
    """
    for key, value in b.items():
        if key not in a:
            a[key] = value
            continue
        if isinstance(value, dict):
            # Nested dicts are merged recursively. Do not also pass the two
            # dicts to func() afterwards: that would clobber the selective
            # merge just performed, and ordering comparators raise TypeError
            # on dicts under Python 3.
            selective_update(a[key], value, func)
            continue
        if func(value, a[key]):
            a[key] = value