A Ceph Manager orchestrator module that uses an external REST API service to execute Ansible playbooks.
get_inventory implementation
Signed-off-by: Juan Miguel Olmo Martínez <jolmomar@redhat.com>
Document how to use the module through the Orchestrator CLI
Signed-off-by: Juan Miguel Olmo Martínez <jolmomar@redhat.com>
--- /dev/null
+
+.. _ansible-module:
+
+====================
+Ansible Orchestrator
+====================
+
+This is a :ref:`Ceph orchestrator <orchestrator-modules>` module that uses the `Ansible Runner Service <https://github.com/pcuzner/ansible-runner-service>`_ (a RESTful API server) to execute Ansible playbooks in order to carry out the supported operations.
+
+Currently, the supported operations are:
+
+- Get an inventory of the Ceph cluster nodes and all the storage devices present in each node
+- ...
+- ...
+
+
+Usage
+=====
+
+Enable the module:
+
+::
+
+ # ceph mgr module enable ansible
+
+Disable the module:
+
+::
+
+ # ceph mgr module disable ansible
+
+
+To use the module with the :ref:`CLI <orchestrator-cli-module>`, enable both modules and select ``ansible`` as the orchestrator backend:
+
+::
+
+ ceph mgr module enable orchestrator_cli
+ ceph mgr module enable ansible
+ ceph orchestrator set backend ansible
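+
+Once the backend is set, orchestrator commands are served by this module; for
+example, the inventory query described in the Operations section below::
+
+    ceph orchestrator device ls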
+
+
+Configuration
+=============
+
+Configuration must be set after the module is enabled for the first time.
+
+This can be done on any monitor node, via the configuration key facility, at a
+cluster-wide level (so the settings apply to all manager instances), as
+follows::
+
+
+    # ceph config set mgr mgr/ansible/server_url <ip_address/server_name>:<port>
+    # ceph config set mgr mgr/ansible/username <username>
+    # ceph config set mgr mgr/ansible/password <password>
+    # ceph config set mgr mgr/ansible/verify_server <verify_server_value>
+
+Where:
+
+ * <ip_address/server_name>: The IP address or host name of the server where the Ansible Runner Service is available.
+ * <port>: The port number where the Ansible Runner Service is listening.
+ * <username>: The user name of an authorized user in the Ansible Runner Service.
+ * <password>: The password of the authorized user.
+ * <verify_server_value>: Either a boolean, in which case it controls whether the server's TLS certificate is verified, or a string, in which case it must be a path to a CA bundle to use in the verification. Defaults to ``True``.
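+
+For example, for a hypothetical Ansible Runner Service listening on
+``ars-server.example.com:5001`` (all values below are illustrative)::
+
+    # ceph config set mgr mgr/ansible/server_url ars-server.example.com:5001
+    # ceph config set mgr mgr/ansible/username admin
+    # ceph config set mgr mgr/ansible/password the_admin_password
+    # ceph config set mgr mgr/ansible/verify_server /etc/pki/tls/certs/ars-ca.crt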
+
+
+Debugging
+=========
+
+Any issue with this orchestrator module can be debugged using the Ceph manager logs:
+
+Set the right log level in order to debug properly. Remember that the Python
+log levels debug, info, warn and err are mapped to the Ceph severities 20, 4,
+1 and 0 respectively.
+
+Run the following commands on the "active" manager node (the ``ceph -s``
+command on any monitor shows which manager is active):
+
+* Check current debug level::
+
+    [mgr0 ~]# ceph daemon mgr.mgr0 config show | grep debug_mgr
+    "debug_mgr": "1/5",
+    "debug_mgrc": "1/5",
+
+* Change the log level to "debug"::
+
+ [mgr0 ~]# ceph daemon mgr.mgr0 config set debug_mgr 20/5
+ {
+ "success": ""
+ }
+
+* Restore "info" log level::
+
+ [mgr0 ~]# ceph daemon mgr.mgr0 config set debug_mgr 1/5
+ {
+ "success": ""
+ }
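+
+* Follow the module messages in the log of the active manager (assuming the
+  default log location; the path may differ in your deployment)::
+
+    [mgr0 ~]# tail -f /var/log/ceph/ceph-mgr.mgr0.log | grep -i ansible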
+
+
+Operations
+==========
+
+**Inventory:**
+
+Get the list of storage devices installed in all the cluster nodes. The output format is::
+
+ [host:
+ device_name (type_of_device , size_in_bytes)]
+
+Example::
+
+ [root@mon0 ~]# ceph orchestrator device ls
+ 192.168.121.160:
+ vda (hdd, 44023414784b)
+ sda (hdd, 53687091200b)
+ sdb (hdd, 53687091200b)
+ sdc (hdd, 53687091200b)
+ 192.168.121.36:
+ vda (hdd, 44023414784b)
+ 192.168.121.201:
+ vda (hdd, 44023414784b)
+ 192.168.121.70:
+ vda (hdd, 44023414784b)
+ sda (hdd, 53687091200b)
+ sdb (hdd, 53687091200b)
+ sdc (hdd, 53687091200b)
Rook plugin <rook>
DeepSea plugin <deepsea>
Insights plugin <insights>
+ Ansible plugin <ansible>
add_subdirectory(dashboard)
add_subdirectory(insights)
+add_subdirectory(ansible)
--- /dev/null
+set(MGR_ANSIBLE_VIRTUALENV ${CEPH_BUILD_VIRTUALENV}/mgr-ansible-virtualenv)
+
+add_custom_target(mgr-ansible-test-venv
+ COMMAND ${CMAKE_SOURCE_DIR}/src/tools/setup-virtualenv.sh --python=${MGR_PYTHON_EXECUTABLE} ${MGR_ANSIBLE_VIRTUALENV}
+ WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/src/pybind/mgr/ansible
+ COMMENT "ansible tests virtualenv is being created")
+add_dependencies(tests mgr-ansible-test-venv)
--- /dev/null
+from __future__ import absolute_import
+import os
+
+if 'UNITTEST' not in os.environ:
+ from .module import Module
+else:
+ import sys
+ import mock
+ sys.modules['ceph_module'] = mock.Mock()
--- /dev/null
+"""
+Tool module to interact with the Ansible Runner Service
+"""
+import requests
+import json
+import re
+
+# Ansible Runner events finished
+
+
+# Ansible Runner service API endpoints
+API_URL = "api"
+LOGIN_URL = "api/v1/login"
+PLAYBOOK_EXEC_URL = "api/v1/playbooks"
+PLAYBOOK_EVENTS = "api/v1/jobs/%s/events"
+EVENT_DATA_URL = "api/v1/jobs/%s/events/%s"
+
+class ExecutionStatusCode(object):
+    """Execution status of playbooks (the 'msg' field in the playbook status
+    request)
+    """
+
+    SUCCESS = 0       # Playbook has been executed successfully: msg = successful
+    ERROR = 1         # Playbook has finished with error:        msg = failed
+    ON_GOING = 2      # Playbook is being executed:              msg = running
+    NOT_LAUNCHED = 3  # Not initialized
+
+
+class PlayBookExecution(object):
+ """Object to provide all the results of a Playbook execution
+ """
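+    # Typical usage sketch (illustrative only; the real caller is
+    # Module.get_inventory in module.py):
+    #
+    #   pb = PlayBookExecution(ars_client, "host-disks.yml", logger,
+    #                          result_pattern="RESULTS")
+    #   pb.launch()
+    #   if pb.get_status() == ExecutionStatusCode.SUCCESS:
+    #       events = pb.get_result(event_filter="runner_on_ok")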
+
+ def __init__(self, rest_client, playbook, logger, result_pattern="", the_params={}):
+
+ self.rest_client = rest_client
+
+ # Identifier of the playbook execution
+ self.play_uuid = "-"
+
+ # Pattern used to extract the result from the events
+ self.result_task_pattern = result_pattern
+
+ # Playbook name
+ self.playbook = playbook
+
+ # Params used in the playbook
+ self.params = the_params
+
+ # Logger
+ self.log = logger
+
+ def launch(self):
+ """ Launch the playbook execution
+ """
+
+ endpoint = "%s/%s" % (PLAYBOOK_EXEC_URL, self.playbook)
+
+ response = self.rest_client.http_post(endpoint, self.params)
+
+        if response:
+            self.play_uuid = json.loads(response.text)["data"]["play_uuid"]
+            self.log.info("Playbook execution launched successfully")
+        else:
+            # An error launching the execution implies an empty play_uuid
+            self.play_uuid = ""
+            self.log.error("Playbook launch error. Check %s request result",
+                           endpoint)
+
+ def get_status(self):
+ """ Return the status of the execution
+
+        In the msg field of the response we can find:
+ "msg": "successful"
+ "msg": "running"
+ "msg": "failed"
+ """
+
+ status_value = ExecutionStatusCode.NOT_LAUNCHED
+
+ if self.play_uuid == '-': # Initialized
+ status_value = ExecutionStatusCode.NOT_LAUNCHED
+ elif self.play_uuid == '': # Error launching playbook
+ status_value = ExecutionStatusCode.ERROR
+ else:
+ endpoint = "%s/%s" % (PLAYBOOK_EXEC_URL, self.play_uuid)
+ response = self.rest_client.http_get(endpoint)
+
+ if response:
+ the_status = json.loads(response.text)["msg"]
+ if the_status == 'successful':
+ status_value = ExecutionStatusCode.SUCCESS
+ elif the_status == 'failed':
+ status_value = ExecutionStatusCode.ERROR
+ else:
+ status_value = ExecutionStatusCode.ON_GOING
+ else:
+ status_value = ExecutionStatusCode.ERROR
+
+ self.log.info("Requested playbook execution status is: %s", status_value)
+ return status_value
+
+    def get_result(self, event_filter=""):
+        """Get the data of the events filtered by a task pattern and
+        an event filter
+
+        @returns: the events that match the provided patterns
+        """
+
+        if not self.result_task_pattern or not self.play_uuid:
+            return {}
+
+
+ response = self.rest_client.http_get(PLAYBOOK_EVENTS % self.play_uuid)
+
+ if not response:
+ result_events = {}
+ else:
+ events = json.loads(response.text)["data"]["events"]
+ result_events = {event:data for event,data in events.items()
+ if "task" in data and
+ re.match(self.result_task_pattern, data["task"])}
+ if event_filter:
+ result_events = {event:data for event,data in result_events.items()
+ if re.match(event_filter, data['event'])}
+
+ self.log.info("Requested playbook result is: %s", json.dumps(result_events))
+ return result_events
+
+class Client(object):
+    """A utility object that makes it easy to connect to the Ansible Runner
+    Service and execute playbooks
+    """
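+    # Minimal usage sketch (server address and credentials are illustrative):
+    #
+    #   client = Client("ars-server:5001", "admin", "secret",
+    #                   verify_server=True, logger=log)
+    #   if client.is_operative():
+    #       response = client.http_get(API_URL)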
+
+ def __init__(self, server_url, user, password, verify_server, logger):
+        """Provide an HTTPS client to ease interaction with the Ansible
+        Runner Service
+
+        @param server_url: The base URL <server>:<port> of the Ansible Runner Service
+        @param user: User name of the authorized user
+        @param password: Password of the authorized user
+        @param verify_server: Either a boolean, in which case it controls whether we verify
+            the server's TLS certificate, or a string, in which case it must be a path
+            to a CA bundle to use. Defaults to ``True``.
+        @param logger: Logger instance
+        """
+ self.server_url = server_url
+ self.user = user
+ self.password = password
+ self.log = logger
+ self.auth = (self.user, self.password)
+        if not verify_server:
+            self.verify_server = True
+        elif isinstance(verify_server, bool):
+            # True was passed explicitly (False is caught by the branch above)
+            self.verify_server = verify_server
+        elif verify_server.lower().strip() == 'false':
+            self.verify_server = False
+        else:
+            self.verify_server = verify_server
+
+ # Once authenticated this token will be used in all the requests
+ self.token = ""
+
+ self.server_url = "https://{0}".format(self.server_url)
+
+        # Log in to the server and get a token
+ self.login()
+
+ def login(self):
+ """ Login with user credentials to obtain a valid token
+ """
+
+ response = None
+
+ the_url = "%s/%s" % (self.server_url, LOGIN_URL)
+ response = requests.get(the_url,
+ auth = self.auth,
+ verify = self.verify_server)
+
+ if response.status_code != requests.codes.ok:
+ self.log.error("login error <<%s>> (%s):%s",
+ the_url, response.status_code, response.text)
+ else:
+            self.log.info("login successful <<%s>> (%s):%s",
+ the_url, response.status_code, response.text)
+
+ if response:
+ self.token = json.loads(response.text)["data"]["token"]
+ self.log.info("Connection with Ansible Runner Service is operative")
+
+
+ def is_operative(self):
+        """Indicates whether the connection with the Ansible Runner Service is OK
+        """
+
+        # No token means we have not logged in to the service yet
+        if not self.token:
+ return False
+
+ # Check the service
+ response = self.http_get(API_URL)
+
+ if response:
+ return response.status_code == requests.codes.ok
+ else:
+ return False
+
+ def http_get(self, endpoint):
+ """Execute an http get request
+
+ @param endpoint: Ansible Runner service RESTful API endpoint
+
+ @returns: A requests object
+ """
+
+ response = None
+
+ try:
+ the_url = "%s/%s" % (self.server_url, endpoint)
+ r = requests.get(the_url,
+ verify = self.verify_server,
+ headers = {"Authorization": self.token})
+
+ if r.status_code != requests.codes.ok:
+ self.log.error("http GET %s <--> (%s - %s)\n%s",
+ the_url, r.status_code, r.reason, r.text)
+ else:
+ self.log.info("http GET %s <--> (%s - %s)",
+ the_url, r.status_code, r.text)
+
+ response = r
+
+ except Exception:
+ self.log.exception("Ansible runner service(GET %s)", the_url)
+
+ return response
+
+ def http_post(self, endpoint, payload):
+ """Execute an http post request
+
+ @param endpoint: Ansible Runner service RESTful API endpoint
+ @param payload: Dictionary with the data used in the post request
+
+ @returns: A requests object
+ """
+
+ response = None
+
+ try:
+ the_url = "%s/%s" % (self.server_url, endpoint)
+ r = requests.post(the_url,
+ verify = self.verify_server,
+ headers = {"Authorization": self.token,
+ "Content-type": "application/json"},
+ data = payload)
+
+ if r.status_code != requests.codes.ok:
+ self.log.error("http POST %s [%s] <--> (%s - %s)\n%s",
+ the_url, payload, r.status_code, r.reason, r.text)
+ else:
+ self.log.info("http POST %s <--> (%s - %s)",
+ the_url, r.status_code, r.text)
+ response = r
+
+ except Exception:
+ self.log.exception("Ansible runner service(POST %s)", the_url)
+
+ return response
+
+ def http_put(self, endpoint, payload):
+ """Execute an http put request
+
+ @param endpoint: Ansible Runner service RESTful API endpoint
+ @param payload: Dictionary with the data used in the put request
+
+ @returns: A requests object
+ """
+ # TODO
+ raise NotImplementedError("TODO")
--- /dev/null
+"""
+ceph-mgr Ansible orchestrator module
+
+The external orchestrator is the Ansible Runner Service (a RESTful HTTPS service)
+"""
+
+# Python stuff
+from threading import Event
+import errno
+import json
+
+# Ceph stuff
+from mgr_module import MgrModule
+import orchestrator
+
+# Orchestrator stuff
+# A Client is used to communicate with the Ansible Runner service
+from ansible_runner_svc import Client, PlayBookExecution, ExecutionStatusCode,\
+ EVENT_DATA_URL
+
+# Constants section
+
+# Time to clean the completions list
+WAIT_PERIOD = 10
+
+
+# List of playbooks names used
+
+# Name of the playbook used in the "get_inventory" method.
+# This playbook is expected to provide a list of storage devices in the host
+# where the playbook is executed.
+GET_STORAGE_DEVICES_CATALOG_PLAYBOOK = "host-disks.yml"
+
+
+
+class AnsibleReadOperation(orchestrator.ReadCompletion):
+ """ A read operation means to obtain information from the cluster.
+ """
+
+ def __init__(self, client, playbook, logger, result_pattern, params):
+ super(AnsibleReadOperation, self).__init__()
+
+
+ # Private attributes
+ self.playbook = playbook
+ self._is_complete = False
+ self._is_errored = False
+ self._result = []
+
+ # Error description in operation
+ self.error = ""
+
+ # Ansible Runner Service client
+ self.ar_client = client
+
+ # Logger
+ self.log = logger
+
+        # An additional filter of result events based on the event type
+        self.event_filter = ""
+
+        # Function assigned dynamically to process the result
+        self.process_output = None
+
+ # Playbook execution object
+ self.pb_execution = PlayBookExecution(client,
+ playbook,
+ logger,
+ result_pattern,
+ params)
+
+ @property
+ def is_complete(self):
+ return self._is_complete
+
+ @property
+ def is_errored(self):
+ return self._is_errored
+
+ @property
+ def result(self):
+ return self._result
+
+ @property
+ def status(self):
+ """Return the status code of the operation
+ updating conceptually 'linked' attributes
+ """
+ current_status = self.pb_execution.get_status()
+
+ self._is_complete = (current_status == ExecutionStatusCode.SUCCESS) or \
+ (current_status == ExecutionStatusCode.ERROR)
+
+ self._is_errored = (current_status == ExecutionStatusCode.ERROR)
+
+ return current_status
+
+ def execute_playbook(self):
+ """Execute the playbook with the provided params.
+ """
+
+ self.pb_execution.launch()
+
+    def update_result(self):
+        """Update the result of the read operation
+
+        The result of the playbook execution can be customized by means of the
+        function provided in the 'process_output' attribute. The (possibly
+        processed) result is stored in the 'self._result' attribute.
+        """
+
+ processed_result = []
+
+ if self._is_complete:
+ raw_result = self.pb_execution.get_result(self.event_filter)
+
+ if self.process_output:
+ processed_result = self.process_output(
+ raw_result,
+ self.ar_client,
+ self.pb_execution.play_uuid)
+ else:
+ processed_result = raw_result
+
+ self._result = processed_result
+
+
+class AnsibleChangeOperation(orchestrator.WriteCompletion):
+    """Operations that change the cluster state
+
+    Modifications/Changes (writes) are a two-phase operation: first, the
+    playbook that is going to change elements in the Ceph cluster is executed.
+    Once the playbook finishes its execution (independently of the result),
+    the modification/change operation is considered finished.
+    """
+    def __init__(self):
+        super(AnsibleChangeOperation, self).__init__()
+
+        self.error = False
+
+    @property
+ def status(self):
+ """Return the status code of the operation
+ """
+ #TODO
+ return 0
+
+ @property
+ def is_persistent(self):
+ """
+ Has the operation updated the orchestrator's configuration
+ persistently? Typically this would indicate that an update
+ had been written to a manifest, but that the update
+ had not necessarily been pushed out to the cluster.
+
+        In the case of Ansible this is always False, because an initiated
+        playbook execution will always need to be relaunched if it fails.
+ """
+
+ return False
+
+ @property
+ def is_effective(self):
+ """Has the operation taken effect on the cluster?
+ For example, if we were adding a service, has it come up and appeared
+ in Ceph's cluster maps?
+
+        In the case of Ansible, this will be True if the playbook has been
+        executed successfully.
+
+        @return Boolean: True if the playbook has been executed successfully
+ """
+
+ return self.status == ExecutionStatusCode.SUCCESS
+
+ @property
+ def is_errored(self):
+ return self.error
+
+ @property
+ def is_complete(self):
+ return self.is_errored or (self.is_persistent and self.is_effective)
+
+
+class Module(MgrModule, orchestrator.Orchestrator):
+    """An orchestrator module that uses an external Ansible Runner Service
+    to perform operations
+    """
+
+ OPTIONS = [
+ {'name': 'server_url'},
+ {'name': 'username'},
+ {'name': 'password'},
+ {'name': 'verify_server'} # Check server identity (Boolean/path to CA bundle)
+
+ ]
+
+ def __init__(self, *args, **kwargs):
+ """
+ """
+ super(Module, self).__init__(*args, **kwargs)
+
+ self.run = False
+
+ self.all_completions = []
+
+ self.ar_client = None
+
+ def available(self):
+ """ Check if Ansible Runner service is working
+ """
+ # TODO
+ return (True, "Everything ready")
+
+ def wait(self, completions):
+ """Given a list of Completion instances, progress any which are
+ incomplete.
+
+ @param completions: list of Completion instances
+        @returns: List of completions that are still pending
+ """
+
+ # Check progress and update status in each operation
+ for operation in completions:
+ self.log.info("playbook <%s> status:%s", operation.playbook, operation.status)
+ if operation.is_complete:
+ operation.update_result()
+
+        completions = [op for op in completions if not op.is_complete]
+
+ self.log.info("Operations pending: %s", len(completions))
+
+ return completions
+
+ def serve(self):
+ """ Mandatory for standby modules
+ """
+ self.log.info("Starting Ansible Orchestrator module ...")
+
+ # Verify config options (Just that settings are available)
+ self.verify_config()
+
+ # Ansible runner service client
+ try:
+ self.ar_client = Client(server_url = self.get_config('server_url', ''),
+ user = self.get_config('username', ''),
+ password = self.get_config('password', ''),
+ verify_server = self.get_config('verify_server', True),
+ logger = self.log)
+ except Exception:
+            self.log.exception("Ansible Runner Service not available. "
+                               "Check external server status/TLS identity or "
+                               "connection options. If configuration options "
+                               "changed, try to disable and re-enable the module.")
+ self.shutdown()
+ return
+
+ self.run = True
+
+ def shutdown(self):
+ self.log.info('Stopping Ansible orchestrator module')
+ self.run = False
+
+    def get_inventory(self, node_filter=None):
+        """Get the storage devices inventory of the cluster nodes
+
+        @param node_filter: Node filter instance
+        @return: An AnsibleReadOperation instance (Completion object)
+        """
+
+        # Create a new read completion object to execute the playbook
+ ansible_operation = AnsibleReadOperation(client = self.ar_client,
+ playbook = GET_STORAGE_DEVICES_CATALOG_PLAYBOOK,
+ logger = self.log,
+ result_pattern = "RESULTS",
+ params = "{}")
+
+ # Assign the process_output function
+ ansible_operation.process_output = process_inventory_json
+ ansible_operation.event_filter = "runner_on_ok"
+
+ # Execute the playbook to obtain data
+ ansible_operation.execute_playbook()
+
+ self.all_completions.append(ansible_operation)
+
+ return ansible_operation
+
+ def create_osds(self, osd_spec):
+ """
+ Create one or more OSDs within a single Drive Group.
+
+ The principal argument here is the drive_group member
+ of OsdSpec: other fields are advisory/extensible for any
+ finer-grained OSD feature enablement (choice of backing store,
+ compression/encryption, etc).
+
+ :param osd_spec: OsdCreationSpec
+ """
+
+ def verify_config(self):
+
+ if not self.get_config('server_url', ''):
+ self.log.error(
+                "No Ansible Runner Service base URL <server_name>:<port>. "
+ "Try 'ceph config set mgr mgr/%s/server_url <server name/ip>:<port>'",
+ self.module_name)
+
+ if not self.get_config('username', ''):
+ self.log.error(
+ "No Ansible Runner Service user. "
+ "Try 'ceph config set mgr mgr/%s/username <string value>'",
+ self.module_name)
+
+ if not self.get_config('password', ''):
+ self.log.error(
+ "No Ansible Runner Service User password. "
+ "Try 'ceph config set mgr mgr/%s/password <string value>'",
+ self.module_name)
+
+ if not self.get_config('verify_server', ''):
+ self.log.error(
+                "TLS server identity verification is enabled by default. "
+                "Use 'ceph config set mgr mgr/{0}/verify_server False' to disable it. "
+                "Use 'ceph config set mgr mgr/{0}/verify_server <path>' to "
+                "point to the CA bundle used for verification".format(self.module_name))
+
+
+# Auxiliary functions
+#==============================================================================
+
+def process_inventory_json(inventory_events, ar_client, playbook_uuid):
+ """ Adapt the output of the playbook used in 'get_inventory'
+    to the output expected by the Orchestrator (list of InventoryNode)
+
+ @param inventory_events: events dict with the results
+
+ Example:
+ inventory_events =
+ {'37-100564f1-9fed-48c2-bd62-4ae8636dfcdb': {'host': '192.168.121.254',
+ 'task': 'RESULTS',
+ 'event': 'runner_on_ok'},
+         '36-2016b900-e38f-7dcd-a2e7-00000000000e': {'host': '192.168.121.252',
+ 'task': 'RESULTS',
+ 'event': 'runner_on_ok'}}
+ @param ar_client: Ansible Runner Service client
+    @param playbook_uuid: Playbook identifier
+
+ @return : list of InventoryNode
+ """
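+    # Illustrative result shape (hypothetical values): one InventoryNode per
+    # host (e.g. '192.168.121.254'), each holding InventoryDevice objects whose
+    # id, type and size come from the per-host 'disks_catalog' in the event data.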
+
+    # Obtain the needed data for each result event
+ inventory_nodes = []
+
+ # Loop over the result events and request the event data
+ for event_key, data in inventory_events.items():
+ event_response = ar_client.http_get(EVENT_DATA_URL % (playbook_uuid,
+ event_key))
+
+ # Process the data for each event
+ if event_response:
+ event_data = json.loads(event_response.text)["data"]["event_data"]
+
+ free_disks = event_data["res"]["disks_catalog"]
+ for item, data in free_disks.items():
+ if item not in [host.name for host in inventory_nodes]:
+
+ devs = []
+ for dev_key, dev_data in data.items():
+ if dev_key not in [device.id for device in devs]:
+ dev = orchestrator.InventoryDevice()
+ dev.id = dev_key
+                            dev.type = 'hdd' if dev_data["rotational"] else 'ssd/nvme'
+ dev.size = dev_data["sectorsize"] * dev_data["sectors"]
+ devs.append(dev)
+
+ inventory_nodes.append(
+ orchestrator.InventoryNode(item, devs))
+
+
+ return inventory_nodes
--- /dev/null
+#!/usr/bin/env bash
+
+# run from ./ or from ../
+: ${MGR_ANSIBLE_VIRTUALENV:=/tmp/mgr-ansible-virtualenv}
+: ${WITH_PYTHON2:=ON}
+: ${WITH_PYTHON3:=ON}
+: ${CEPH_BUILD_DIR:=$PWD/.tox}
+test -d ansible && cd ansible
+
+if [ -e tox.ini ]; then
+ TOX_PATH=$(readlink -f tox.ini)
+else
+ TOX_PATH=$(readlink -f $(dirname $0)/tox.ini)
+fi
+
+# tox.ini will take care of this.
+unset PYTHONPATH
+export CEPH_BUILD_DIR=$CEPH_BUILD_DIR
+
+source ${MGR_ANSIBLE_VIRTUALENV}/bin/activate
+
+if [ "$WITH_PYTHON2" = "ON" ]; then
+    ENV_LIST+="py27,"
+fi
+if [ "$WITH_PYTHON3" = "ON" ]; then
+ ENV_LIST+="py3"
+fi
+
+tox -c ${TOX_PATH} -e ${ENV_LIST}
--- /dev/null
+{
+ "status": "OK",
+ "msg": "",
+ "data": {
+ "events": {
+ "2-6edf768f-2923-44e1-b884-f0227b811cfc": {
+ "event": "playbook_on_start"
+ },
+ "3-2016b900-e38f-7dcd-a2e7-000000000008": {
+ "event": "playbook_on_play_start"
+ },
+ "4-2016b900-e38f-7dcd-a2e7-000000000012": {
+ "event": "playbook_on_task_start",
+ "task": "Gathering Facts"
+ },
+ "5-19ae1e5e-aa2d-479e-845a-ef0253cc1f99": {
+ "event": "runner_on_ok",
+ "host": "192.168.121.245",
+ "task": "Gathering Facts"
+ },
+ "6-aad3acc4-06a3-4c97-82ff-31e9e484b1f5": {
+ "event": "runner_on_ok",
+ "host": "192.168.121.61",
+ "task": "Gathering Facts"
+ },
+ "7-55298017-3e7d-4734-b316-bbe13ce1da5e": {
+ "event": "runner_on_ok",
+ "host": "192.168.121.254",
+ "task": "Gathering Facts"
+ },
+ "8-2016b900-e38f-7dcd-a2e7-00000000000a": {
+ "event": "playbook_on_task_start",
+ "task": "setup"
+ },
+ "9-2085ccb6-e337-4b9f-bc38-1d8bbf9b973f": {
+ "event": "runner_on_ok",
+ "host": "192.168.121.254",
+ "task": "setup"
+ },
+ "10-e14cdbbc-4883-436c-a41c-a8194ec69075": {
+ "event": "runner_on_ok",
+ "host": "192.168.121.245",
+ "task": "setup"
+ },
+ "11-6d815a26-df53-4240-b8b6-2484e88e4f48": {
+ "event": "runner_on_ok",
+ "host": "192.168.121.61",
+ "task": "setup"
+ },
+ "12-2016b900-e38f-7dcd-a2e7-00000000000b": {
+ "event": "playbook_on_task_start",
+ "task": "Get a list of block devices (excludes loop and child devices)"
+ },
+ "13-799b0119-ccab-4eca-b30b-a37b0bafa02c": {
+ "event": "runner_on_ok",
+ "host": "192.168.121.245",
+ "task": "Get a list of block devices (excludes loop and child devices)"
+ },
+ "14-6beb6958-4bfd-4a9c-bd2c-d20d00248605": {
+ "event": "runner_on_ok",
+ "host": "192.168.121.61",
+ "task": "Get a list of block devices (excludes loop and child devices)"
+ },
+ "15-3ca99cc8-98ea-4967-8f2d-115426d00b6a": {
+ "event": "runner_on_ok",
+ "host": "192.168.121.254",
+ "task": "Get a list of block devices (excludes loop and child devices)"
+ },
+ "16-2016b900-e38f-7dcd-a2e7-00000000000c": {
+ "event": "playbook_on_task_start",
+ "task": "check if disk {{ item }} is free"
+ },
+ "17-8c88141a-08d1-411f-a855-9f7702a49c4e": {
+ "event": "runner_item_on_failed",
+ "host": "192.168.121.245",
+ "task": "check if disk vda is free"
+ },
+ "18-4457db98-6f18-4f63-bfaa-584db5eea05b": {
+ "event": "runner_on_failed",
+ "host": "192.168.121.245",
+ "task": "check if disk {{ item }} is free"
+ },
+ "19-ac3c72cd-1fbb-495a-be69-53fa6029f356": {
+ "event": "runner_item_on_failed",
+ "host": "192.168.121.61",
+ "task": "check if disk vda is free"
+ },
+ "20-d161cb70-ba2e-4571-b029-c6428a566fef": {
+ "event": "runner_on_failed",
+ "host": "192.168.121.61",
+ "task": "check if disk {{ item }} is free"
+ },
+ "21-65f1ce5c-2d86-4cc3-8e10-cff6bf6cbd82": {
+ "event": "runner_item_on_failed",
+ "host": "192.168.121.254",
+ "task": "check if disk sda is free"
+ },
+ "22-7f86dcd4-4ef7-4f5a-9db3-c3780b67cc4b": {
+ "event": "runner_item_on_failed",
+ "host": "192.168.121.254",
+ "task": "check if disk sdb is free"
+ },
+ "23-837bf4f6-a912-46a8-b94b-55aa66a935c4": {
+ "event": "runner_item_on_ok",
+ "host": "192.168.121.254",
+ "task": "check if disk sdc is free"
+ },
+ "24-adf6238d-723f-4783-9226-8475419d466e": {
+ "event": "runner_item_on_failed",
+ "host": "192.168.121.254",
+ "task": "check if disk vda is free"
+ },
+ "25-554661d8-bc34-4885-a589-4960d6b8a487": {
+ "event": "runner_on_failed",
+ "host": "192.168.121.254",
+ "task": "check if disk {{ item }} is free"
+ },
+ "26-2016b900-e38f-7dcd-a2e7-00000000000d": {
+ "event": "playbook_on_task_start",
+ "task": "Update hosts freedisk list"
+ },
+ "27-52df484c-30a0-4e3b-9057-02ca345c5790": {
+ "event": "runner_item_on_skipped",
+ "host": "192.168.121.254",
+ "task": "Update hosts freedisk list"
+ },
+ "28-083616ad-3c1f-4fb8-a06c-5d64e670e362": {
+ "event": "runner_item_on_skipped",
+ "host": "192.168.121.254",
+ "task": "Update hosts freedisk list"
+ },
+ "29-bffc68d3-5448-491f-8780-07858285f5cd": {
+ "event": "runner_item_on_skipped",
+ "host": "192.168.121.245",
+ "task": "Update hosts freedisk list"
+ },
+ "30-cca2dfd9-16e9-4fcb-8bf7-c4da7dab5668": {
+ "event": "runner_on_skipped",
+ "host": "192.168.121.245",
+ "task": "Update hosts freedisk list"
+ },
+ "31-158a98ac-7e8d-4ebb-8c53-4467351a2d3a": {
+ "event": "runner_item_on_ok",
+ "host": "192.168.121.254",
+ "task": "Update hosts freedisk list"
+ },
+ "32-06a7e809-8d82-41df-b01d-45d94e519cb7": {
+ "event": "runner_item_on_skipped",
+ "host": "192.168.121.254",
+ "task": "Update hosts freedisk list"
+ },
+ "33-d5cdbb58-728a-4be5-abf1-4a051146e727": {
+ "event": "runner_item_on_skipped",
+ "host": "192.168.121.61",
+ "task": "Update hosts freedisk list"
+ },
+ "34-9b3c570b-22d8-4539-8c94-d0c1cbed8633": {
+ "event": "runner_on_ok",
+ "host": "192.168.121.254",
+ "task": "Update hosts freedisk list"
+ },
+ "35-93336830-03cd-43ff-be87-a7e063ca7547": {
+ "event": "runner_on_skipped",
+ "host": "192.168.121.61",
+ "task": "Update hosts freedisk list"
+ },
+ "36-2016b900-e38f-7dcd-a2e7-00000000000e": {
+ "event": "playbook_on_task_start",
+ "task": "RESULTS"
+ },
+ "37-100564f1-9fed-48c2-bd62-4ae8636dfcdb": {
+ "event": "runner_on_ok",
+ "host": "192.168.121.254",
+ "task": "RESULTS"
+ },
+ "38-20a64160-30a1-481f-a3ee-36e491bc7869": {
+ "event": "playbook_on_stats"
+ }
+ },
+ "total_events": 37
+ }
+}
+
--- /dev/null
+import logging
+import unittest
+import mock
+import json
+
+import requests_mock
+
+from requests.exceptions import ConnectionError
+
+from ..ansible_runner_svc import Client, PlayBookExecution, ExecutionStatusCode, \
+ LOGIN_URL, API_URL, PLAYBOOK_EXEC_URL, \
+ PLAYBOOK_EVENTS
+
+
+SERVER_URL = "ars:5001"
+USER = "admin"
+PASSWORD = "admin"
+CERTIFICATE = ""
+
+# Playbook attributes
+PB_NAME = "test_playbook"
+PB_UUID = "1733c3ac"
+
+# Playbook execution data file
+PB_EVENTS_FILE = "./tests/pb_execution_events.data"
+
+# create console handler and set level to info
+logger = logging.getLogger()
+handler = logging.StreamHandler()
+handler.setLevel(logging.INFO)
+formatter = logging.Formatter("%(levelname)s - %(message)s")
+handler.setFormatter(formatter)
+logger.addHandler(handler)
+
+
+def mock_login(mock_server):
+
+ the_login_url = "https://%s/%s" % (SERVER_URL,LOGIN_URL)
+
+ mock_server.register_uri("GET",
+ the_login_url,
+ json={"status": "OK",
+ "msg": "Token returned",
+ "data": {"token": "dummy_token"}},
+ status_code=200)
+
+ the_api_url = "https://%s/%s" % (SERVER_URL,API_URL)
+ mock_server.register_uri("GET",
+ the_api_url,
+ text="<!DOCTYPE html>api</html>",
+ status_code=200)
+
+def mock_get_pb(mock_server, playbook_name, return_code):
+
+ mock_login(mock_server)
+
+ ars_client = Client(SERVER_URL, USER, PASSWORD,
+ CERTIFICATE, logger)
+
+ the_pb_url = "https://%s/%s/%s" % (SERVER_URL, PLAYBOOK_EXEC_URL, playbook_name)
+
+ if return_code == 404:
+ mock_server.register_uri("POST",
+ the_pb_url,
+ json={ "status": "NOTFOUND",
+ "msg": "playbook file not found",
+ "data": {}},
+ status_code=return_code)
+ elif return_code == 202:
+ mock_server.register_uri("POST",
+ the_pb_url,
+ json={ "status": "STARTED",
+ "msg": "starting",
+ "data": { "play_uuid": "1733c3ac" }},
+ status_code=return_code)
+
+ return PlayBookExecution(ars_client, playbook_name, logger,
+ result_pattern = "RESULTS")
+
+class ARSclientTest(unittest.TestCase):
+
+ def test_server_not_reachable(self):
+
+ with self.assertRaises(ConnectionError):
+ ars_client = Client(SERVER_URL, USER, PASSWORD,
+ CERTIFICATE, logger)
+
+ def test_server_wrong_USER(self):
+
+ with requests_mock.Mocker() as mock_server:
+ the_login_url = "https://%s/%s" % (SERVER_URL,LOGIN_URL)
+ mock_server.get(the_login_url,
+ json={"status": "NOAUTH",
+ "msg": "Access denied invalid login: unknown USER",
+ "data": {}},
+ status_code=401)
+
+
+ ars_client = Client(SERVER_URL, USER, PASSWORD,
+ CERTIFICATE, logger)
+
+ self.assertFalse(ars_client.is_operative(),
+ "Operative attribute expected to be False")
+
+ def test_server_connection_ok(self):
+
+ with requests_mock.Mocker() as mock_server:
+
+ mock_login(mock_server)
+
+ ars_client = Client(SERVER_URL, USER, PASSWORD,
+ CERTIFICATE, logger)
+
+ self.assertTrue(ars_client.is_operative(),
+ "Operative attribute expected to be True")
+
+class PlayBookExecutionTests(unittest.TestCase):
+
+
+ def test_playbook_execution_ok(self):
+ """Check playbook id is set when the playbook is launched
+ """
+ with requests_mock.Mocker() as mock_server:
+
+ test_pb = mock_get_pb(mock_server, PB_NAME, 202)
+
+ test_pb.launch()
+
+ self.assertEqual(test_pb.play_uuid, PB_UUID,
+ "Found Unexpected playbook uuid")
+
+
+
+ def test_playbook_execution_error(self):
+ """Check playbook id is not set when the playbook is not present
+ """
+
+ with requests_mock.Mocker() as mock_server:
+
+ test_pb = mock_get_pb(mock_server, "unknown_playbook", 404)
+
+ test_pb.launch()
+
+ self.assertEqual(test_pb.play_uuid, "",
+ "Playbook uuid not empty")
+
+ def test_playbook_not_launched(self):
+ """Check right status code when Playbook execution has not been launched
+ """
+
+ with requests_mock.Mocker() as mock_server:
+
+ test_pb = mock_get_pb(mock_server, PB_NAME, 202)
+
+ # Check playbook not launched
+ self.assertEqual(test_pb.get_status(),
+ ExecutionStatusCode.NOT_LAUNCHED,
+ "Wrong status code for playbook not launched")
+
+ def test_playbook_launched(self):
+ """Check right status code when Playbook execution has been launched
+ """
+
+ with requests_mock.Mocker() as mock_server:
+
+ test_pb = mock_get_pb(mock_server, PB_NAME, 202)
+
+ test_pb.launch()
+
+ the_status_url = "https://%s/%s/%s" % (SERVER_URL,
+ PLAYBOOK_EXEC_URL,
+ PB_UUID)
+ mock_server.register_uri("GET",
+ the_status_url,
+ json={"status": "OK",
+ "msg": "running",
+ "data": {"task": "Step 2",
+ "last_task_num": 6}
+ },
+ status_code=200)
+
+ self.assertEqual(test_pb.get_status(),
+ ExecutionStatusCode.ON_GOING,
+ "Wrong status code for a running playbook")
+
+ self.assertEqual(test_pb.play_uuid, PB_UUID,
+ "Unexpected playbook uuid")
+
+ def test_playbook_finish_ok(self):
+        """Check right status code when Playbook execution is successful
+ """
+ with requests_mock.Mocker() as mock_server:
+
+ test_pb = mock_get_pb(mock_server, PB_NAME, 202)
+
+ test_pb.launch()
+
+ the_status_url = "https://%s/%s/%s" % (SERVER_URL,
+ PLAYBOOK_EXEC_URL,
+ PB_UUID)
+ mock_server.register_uri("GET",
+ the_status_url,
+ json={"status": "OK",
+ "msg": "successful",
+ "data": {}
+ },
+ status_code=200)
+
+ self.assertEqual(test_pb.get_status(),
+ ExecutionStatusCode.SUCCESS,
+                             "Wrong status code for a playbook executed successfully")
+
+ def test_playbook_finish_error(self):
+ """Check right status code when Playbook execution has failed
+ """
+ with requests_mock.Mocker() as mock_server:
+
+ test_pb = mock_get_pb(mock_server, PB_NAME, 202)
+
+ test_pb.launch()
+
+ the_status_url = "https://%s/%s/%s" % (SERVER_URL,
+ PLAYBOOK_EXEC_URL,
+ PB_UUID)
+ mock_server.register_uri("GET",
+ the_status_url,
+ json={"status": "OK",
+ "msg": "failed",
+ "data": {}
+ },
+ status_code=200)
+
+ self.assertEqual(test_pb.get_status(),
+ ExecutionStatusCode.ERROR,
+ "Wrong status code for a playbook with error")
+
+ def test_playbook_get_result(self):
+ """ Find the right result event in a set of different events
+ """
+ with requests_mock.Mocker() as mock_server:
+
+ test_pb = mock_get_pb(mock_server, PB_NAME, 202)
+
+ test_pb.launch()
+
+ the_events_url = "https://%s/%s" % (SERVER_URL,
+ PLAYBOOK_EVENTS % PB_UUID)
+
+ # Get the events stored in a file
+ pb_events = {}
+ with open(PB_EVENTS_FILE) as events_file:
+ pb_events = json.loads(events_file.read())
+
+ mock_server.register_uri("GET",
+ the_events_url,
+ json=pb_events,
+ status_code=200)
+
+ result = test_pb.get_result("runner_on_ok")
+
+ self.assertEqual(len(result.keys()), 1,
+ "Unique result event not found")
+
+ self.assertIn("37-100564f1-9fed-48c2-bd62-4ae8636dfcdb",
+ result.keys(),
+ "Predefined result event not found")
--- /dev/null
+[tox]
+envlist = py27,py3
+skipsdist = true
+toxworkdir = {env:CEPH_BUILD_DIR}/ansible
+minversion = 2.8.1
+
+[testenv]
+deps =
+ pytest
+ mock
+ requests-mock
+setenv=
+ UNITTEST = true
+ py27: PYTHONPATH = {toxinidir}/../../../../build/lib/cython_modules/lib.2
+ py3: PYTHONPATH = {toxinidir}/../../../../build/lib/cython_modules/lib.3
+
+commands=
+ {envbindir}/py.test tests/
done = False
while done is False:
- done = self._oremote("wait", completions)
+ done = self._oremote("wait", completions) == []
if not done:
any_nonpersistent = False
list(APPEND tox_tests run-tox-mgr-insights)
set(MGR_INSIGHTS_VIRTUALENV ${CEPH_BUILD_VIRTUALENV}/mgr-insights-virtualenv)
list(APPEND env_vars_for_tox_tests MGR_INSIGHTS_VIRTUALENV=${MGR_INSIGHTS_VIRTUALENV})
+
+ add_test(NAME run-tox-mgr-ansible COMMAND bash ${CMAKE_SOURCE_DIR}/src/pybind/mgr/ansible/run-tox.sh)
+ list(APPEND tox_tests run-tox-mgr-ansible)
+ set(MGR_ANSIBLE_VIRTUALENV ${CEPH_BUILD_VIRTUALENV}/mgr-ansible-virtualenv)
+ list(APPEND env_vars_for_tox_tests MGR_ANSIBLE_VIRTUALENV=${MGR_ANSIBLE_VIRTUALENV})
endif()
set_property(