--- /dev/null
+# Copyright 2015, Rackspace US, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import ConfigParser
+import io
+import json
+import os
+import yaml
+
+from ansible import errors
+from ansible.runner.return_data import ReturnData
+from ansible import utils
+from ansible.utils import template
+
+
+CONFIG_TYPES = {
+ 'ini': 'return_config_overrides_ini',
+ 'json': 'return_config_overrides_json',
+ 'yaml': 'return_config_overrides_yaml'
+}
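+
+# Example playbook task (a minimal sketch: the task keyword matches this
+# plugin's file name, assumed here to be ``config_template``, and the
+# template, destination and override variable names are illustrative):
+#
+#   - name: Drop nova.conf with deployer overrides merged in
+#     config_template:
+#       src: nova.conf.j2
+#       dest: /etc/nova/nova.conf
+#       config_overrides: "{{ nova_conf_overrides | default({}) }}"
+#       config_type: ini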
+
+
+class ActionModule(object):
+ TRANSFERS_FILES = True
+
+ def __init__(self, runner):
+ self.runner = runner
+
+ def grab_options(self, complex_args, module_args):
+ """Grab passed options from Ansible complex and module args.
+
+ :param complex_args: ``dict``
+ :param module_args: ``dict``
+ :returns: ``dict``
+ """
+ options = dict()
+ if complex_args:
+ options.update(complex_args)
+
+ options.update(utils.parse_kv(module_args))
+ return options
+
+ @staticmethod
+ def return_config_overrides_ini(config_overrides, resultant):
+        """Return the rendered INI content with config_overrides applied.
+
+ :param config_overrides: ``dict``
+ :param resultant: ``str`` || ``unicode``
+ :returns: ``str``
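+
+        Example override (illustrative option names only)::
+
+            config_overrides = {
+                'debug': True,                        # non-dict -> [DEFAULT]
+                'DEFAULT': {'verbose': True},         # merged into [DEFAULT]
+                'glance_store': {'default_store': 'rbd'},
+            }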
+ """
+ config = ConfigParser.RawConfigParser(allow_no_value=True)
+ config_object = io.BytesIO(resultant.encode('utf-8'))
+ config.readfp(config_object)
+ for section, items in config_overrides.items():
+            # If the value is not a dictionary it is treated as a single
+            # option that belongs in the DEFAULT section.
+ if not isinstance(items, dict):
+ config.set('DEFAULT', section, str(items))
+ else:
+                # Attempt to add the section, ignoring the error that is
+                # raised when the section already exists.
+ try:
+ config.add_section(section)
+ except (ConfigParser.DuplicateSectionError, ValueError):
+ pass
+ for key, value in items.items():
+ config.set(section, key, str(value))
+        else:
+            # The for/else branch always runs here (there is no break); it
+            # simply closes the source buffer once all overrides are set.
+            config_object.close()
+
+ resultant_bytesio = io.BytesIO()
+ try:
+ config.write(resultant_bytesio)
+ return resultant_bytesio.getvalue()
+ finally:
+ resultant_bytesio.close()
+
+ def return_config_overrides_json(self, config_overrides, resultant):
+        """Return the config as a JSON string.
+
+        Note that the ordering of the original file is not preserved: the
+        merged JSON is written back with its keys sorted.
+
+ :param config_overrides: ``dict``
+ :param resultant: ``str`` || ``unicode``
+ :returns: ``str``
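+
+        Example (illustrative values): merging ``{'b': 2}`` into the string
+        ``'{"a": 1}'`` returns ``{"a": 1, "b": 2}`` rendered with a four
+        space indent and the keys sorted.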
+ """
+ original_resultant = json.loads(resultant)
+ merged_resultant = self._merge_dict(
+ base_items=original_resultant,
+ new_items=config_overrides
+ )
+ return json.dumps(
+ merged_resultant,
+ indent=4,
+ sort_keys=True
+ )
+
+ def return_config_overrides_yaml(self, config_overrides, resultant):
+        """Return the config as a YAML string.
+
+ :param config_overrides: ``dict``
+ :param resultant: ``str`` || ``unicode``
+ :returns: ``str``
+ """
+ original_resultant = yaml.safe_load(resultant)
+ merged_resultant = self._merge_dict(
+ base_items=original_resultant,
+ new_items=config_overrides
+ )
+ return yaml.safe_dump(
+ merged_resultant,
+ default_flow_style=False,
+ width=1000,
+ )
+
+ def _merge_dict(self, base_items, new_items):
+ """Recursively merge new_items into base_items.
+
+ :param base_items: ``dict``
+ :param new_items: ``dict``
+ :returns: ``dict``
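+
+        Example (illustrative values)::
+
+            base = {'a': {'x': 1}, 'b': [1], 'c': 'old'}
+            new = {'a': {'y': 2}, 'b': [2], 'c': 'new'}
+            # merged: {'a': {'x': 1, 'y': 2}, 'b': [1, 2], 'c': 'new'}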
+ """
+ for key, value in new_items.iteritems():
+ if isinstance(value, dict):
+ base_items[key] = self._merge_dict(
+ base_items.get(key, {}),
+ value
+ )
+ elif isinstance(value, list):
+ if key in base_items and isinstance(base_items[key], list):
+ base_items[key].extend(value)
+ else:
+ base_items[key] = value
+ else:
+ base_items[key] = new_items[key]
+ return base_items
+
+ def run(self, conn, tmp, module_name, module_args, inject,
+ complex_args=None, **kwargs):
+        """Render the source template, merge any overrides and deploy it."""
+ if not self.runner.is_playbook:
+ raise errors.AnsibleError(
+ 'FAILED: `config_templates` are only available in playbooks'
+ )
+
+ options = self.grab_options(complex_args, module_args)
+ try:
+ source = options['src']
+ dest = options['dest']
+
+ config_overrides = options.get('config_overrides', dict())
+ config_type = options['config_type']
+            assert config_type.lower() in CONFIG_TYPES, (
+                'config_type must be one of: %s' % ', '.join(CONFIG_TYPES)
+            )
+        except (KeyError, AssertionError) as exp:
+            result = dict(failed=True, msg=str(exp))
+ return ReturnData(conn=conn, comm_ok=False, result=result)
+
+ source_template = template.template(
+ self.runner.basedir,
+ source,
+ inject
+ )
+
+ if '_original_file' in inject:
+ source_file = utils.path_dwim_relative(
+ inject['_original_file'],
+ 'templates',
+ source_template,
+ self.runner.basedir
+ )
+ else:
+ source_file = utils.path_dwim(self.runner.basedir, source_template)
+
+ # Open the template file and return the data as a string. This is
+ # being done here so that the file can be a vault encrypted file.
+ resultant = template.template_from_file(
+ self.runner.basedir,
+ source_file,
+ inject,
+ vault_password=self.runner.vault_pass
+ )
+
+ if config_overrides:
+            type_merger = getattr(self, CONFIG_TYPES.get(config_type.lower()))
+ resultant = type_merger(
+ config_overrides=config_overrides,
+ resultant=resultant
+ )
+
+ # Retemplate the resultant object as it may have new data within it
+ # as provided by an override variable.
+        resultant = template.template_from_string(
+ basedir=self.runner.basedir,
+ data=resultant,
+ vars=inject,
+ fail_on_undefined=True
+ )
+
+ # Access to protected method is unavoidable in Ansible 1.x.
+ new_module_args = dict(
+ src=self.runner._transfer_str(conn, tmp, 'source', resultant),
+ dest=dest,
+ original_basename=os.path.basename(source),
+ follow=True,
+ )
+
+ module_args_tmp = utils.merge_module_args(
+ module_args,
+ new_module_args
+ )
+
+        # Remove arguments that the copy module does not accept.
+        if complex_args:
+            complex_args.pop('config_overrides', None)
+            complex_args.pop('config_type', None)
+
+ # Return the copy module status. Access to protected method is
+ # unavoidable in Ansible 1.x.
+ return self.runner._execute_module(
+ conn,
+ tmp,
+ 'copy',
+ module_args_tmp,
+ inject=inject,
+ complex_args=complex_args
+ )
+
[global]
{% if cephx %}
- auth cluster required = cephx
- auth service required = cephx
- auth client required = cephx
- cephx require signatures = {{ cephx_require_signatures }} # Kernel RBD does NOT support signatures!
- cephx cluster require signatures = {{ cephx_cluster_require_signatures }}
- cephx service require signatures = {{ cephx_service_require_signatures }}
+auth cluster required = cephx
+auth service required = cephx
+auth client required = cephx
+cephx require signatures = {{ cephx_require_signatures }} # Kernel RBD does NOT support signatures!
+cephx cluster require signatures = {{ cephx_cluster_require_signatures }}
+cephx service require signatures = {{ cephx_service_require_signatures }}
{% else %}
- auth cluster required = none
- auth service required = none
- auth client required = none
- auth supported = none
-{% endif %}
- fsid = {{ fsid }}
- max open files = {{ max_open_files }}
- osd pool default pg num = {{ pool_default_pg_num }}
- osd pool default pgp num = {{ pool_default_pgp_num }}
- osd pool default size = {{ pool_default_size }}
- osd pool default min size = {{ pool_default_min_size }}
- osd pool default crush rule = {{ pool_default_crush_rule }}
+auth cluster required = none
+auth service required = none
+auth client required = none
+auth supported = none
+{% endif %}
+fsid = {{ fsid }}
+max open files = {{ max_open_files }}
+osd pool default pg num = {{ pool_default_pg_num }}
+osd pool default pgp num = {{ pool_default_pgp_num }}
+osd pool default size = {{ pool_default_size }}
+osd pool default min size = {{ pool_default_min_size }}
+osd pool default crush rule = {{ pool_default_crush_rule }}
{% if common_single_host_mode is defined %}
- osd crush chooseleaf type = 0
+osd crush chooseleaf type = 0
{% endif %}
{% if disable_in_memory_logs %}
- # Disable in-memory logs
- debug_lockdep = 0/0
- debug_context = 0/0
- debug_crush = 0/0
- debug_buffer = 0/0
- debug_timer = 0/0
- debug_filer = 0/0
- debug_objecter = 0/0
- debug_rados = 0/0
- debug_rbd = 0/0
- debug_journaler = 0/0
- debug_objectcatcher = 0/0
- debug_client = 0/0
- debug_osd = 0/0
- debug_optracker = 0/0
- debug_objclass = 0/0
- debug_filestore = 0/0
- debug_journal = 0/0
- debug_ms = 0/0
- debug_monc = 0/0
- debug_tp = 0/0
- debug_auth = 0/0
- debug_finisher = 0/0
- debug_heartbeatmap = 0/0
- debug_perfcounter = 0/0
- debug_asok = 0/0
- debug_throttle = 0/0
- debug_mon = 0/0
- debug_paxos = 0/0
- debug_rgw = 0/0
+# Disable in-memory logs
+debug_lockdep = 0/0
+debug_context = 0/0
+debug_crush = 0/0
+debug_buffer = 0/0
+debug_timer = 0/0
+debug_filer = 0/0
+debug_objecter = 0/0
+debug_rados = 0/0
+debug_rbd = 0/0
+debug_journaler = 0/0
+debug_objectcacher = 0/0
+debug_client = 0/0
+debug_osd = 0/0
+debug_optracker = 0/0
+debug_objclass = 0/0
+debug_filestore = 0/0
+debug_journal = 0/0
+debug_ms = 0/0
+debug_monc = 0/0
+debug_tp = 0/0
+debug_auth = 0/0
+debug_finisher = 0/0
+debug_heartbeatmap = 0/0
+debug_perfcounter = 0/0
+debug_asok = 0/0
+debug_throttle = 0/0
+debug_mon = 0/0
+debug_paxos = 0/0
+debug_rgw = 0/0
{% endif %}
{% if enable_debug_global %}
- debug ms = {{ debug_global_level }}
+debug ms = {{ debug_global_level }}
{% endif %}
[client]
- rbd cache = {{ rbd_cache }}
- rbd cache writethrough until flush = true
- rbd concurrent management ops = {{ rbd_concurrent_management_ops }}
- admin socket = {{ rbd_client_admin_socket_path }}/$cluster-$type.$id.$pid.$cctid.asok # must be writable by QEMU and allowed by SELinux or AppArmor
- log file = {{ rbd_client_log_file }} # must be writable by QEMU and allowed by SELinux or AppArmor
- rbd default map options = {{ rbd_default_map_options }}
- rbd default features = {{ rbd_default_features }} # sum features digits
- rbd default format = {{ rbd_default_format }}
+rbd cache = {{ rbd_cache }}
+rbd cache writethrough until flush = true
+rbd concurrent management ops = {{ rbd_concurrent_management_ops }}
+admin socket = {{ rbd_client_admin_socket_path }}/$cluster-$type.$id.$pid.$cctid.asok # must be writable by QEMU and allowed by SELinux or AppArmor
+log file = {{ rbd_client_log_file }} # must be writable by QEMU and allowed by SELinux or AppArmor
+rbd default map options = {{ rbd_default_map_options }}
+rbd default features = {{ rbd_default_features }} # sum features digits
+rbd default format = {{ rbd_default_format }}
[mon]
- mon osd down out interval = {{ mon_osd_down_out_interval }}
- mon osd min down reporters = {{ mon_osd_min_down_reporters }}
- mon clock drift allowed = {{ mon_clock_drift_allowed }}
- mon clock drift warn backoff = {{ mon_clock_drift_warn_backoff }}
- mon osd full ratio = {{ mon_osd_full_ratio }}
- mon osd nearfull ratio = {{ mon_osd_nearfull_ratio }}
- mon osd report timeout = {{ mon_osd_report_timeout }}
- mon pg warn max per osd = {{ mon_pg_warn_max_per_osd }}
- mon osd allow primary affinity = {{ mon_osd_allow_primary_affinity }}
- mon pg warn max object skew = {{ mon_pg_warn_max_object_skew }}
+mon osd down out interval = {{ mon_osd_down_out_interval }}
+mon osd min down reporters = {{ mon_osd_min_down_reporters }}
+mon clock drift allowed = {{ mon_clock_drift_allowed }}
+mon clock drift warn backoff = {{ mon_clock_drift_warn_backoff }}
+mon osd full ratio = {{ mon_osd_full_ratio }}
+mon osd nearfull ratio = {{ mon_osd_nearfull_ratio }}
+mon osd report timeout = {{ mon_osd_report_timeout }}
+mon pg warn max per osd = {{ mon_pg_warn_max_per_osd }}
+mon osd allow primary affinity = {{ mon_osd_allow_primary_affinity }}
+mon pg warn max object skew = {{ mon_pg_warn_max_object_skew }}
{% if enable_debug_mon %}
- debug mon = {{ debug_mon_level }}
- debug paxos = {{ debug_mon_level }}
- debug auth = {{ debug_mon_level }}
+debug mon = {{ debug_mon_level }}
+debug paxos = {{ debug_mon_level }}
+debug auth = {{ debug_mon_level }}
{% endif %}
{% for host in groups[mon_group_name] %}
- {% if hostvars[host]['ansible_hostname'] is defined %}
- [mon.{{ hostvars[host]['ansible_hostname'] }}]
- host = {{ hostvars[host]['ansible_hostname'] }}
- mon addr = {{ hostvars[host]['ansible_' + (hostvars[host]['monitor_interface'] if hostvars[host]['monitor_interface'] is defined else monitor_interface) ]['ipv4']['address'] }}
- {% endif %}
+{% if hostvars[host]['ansible_hostname'] is defined %}
+[mon.{{ hostvars[host]['ansible_hostname'] }}]
+host = {{ hostvars[host]['ansible_hostname'] }}
+mon addr = {{ hostvars[host]['ansible_' + (hostvars[host]['monitor_interface'] if hostvars[host]['monitor_interface'] is defined else monitor_interface) ]['ipv4']['address'] }}
+{% endif %}
{% endfor %}
[osd]
- osd mkfs type = {{ osd_mkfs_type }}
- osd mkfs options xfs = {{ osd_mkfs_options_xfs }}
- osd mount options xfs = {{ osd_mount_options_xfs }}
- osd journal size = {{ journal_size }}
+osd mkfs type = {{ osd_mkfs_type }}
+osd mkfs options xfs = {{ osd_mkfs_options_xfs }}
+osd mount options xfs = {{ osd_mount_options_xfs }}
+osd journal size = {{ journal_size }}
{% if cluster_network is defined %}
- cluster_network = {{ cluster_network }}
+cluster_network = {{ cluster_network }}
{% endif %}
{% if public_network is defined %}
- public_network = {{ public_network }}
-{% endif %}
- osd mon heartbeat interval = {{ osd_mon_heartbeat_interval }}
- # Performance tuning
- filestore merge threshold = {{ filestore_merge_threshold }}
- filestore split multiple = {{ filestore_split_multiple }}
- osd op threads = {{ osd_op_threads }}
- filestore op threads = {{ filestore_op_threads }}
- filestore max sync interval = {{ filestore_max_sync_interval }}
- osd max scrubs = {{ osd_max_scrubs }}
+public_network = {{ public_network }}
+{% endif %}
+osd mon heartbeat interval = {{ osd_mon_heartbeat_interval }}
+# Performance tuning
+filestore merge threshold = {{ filestore_merge_threshold }}
+filestore split multiple = {{ filestore_split_multiple }}
+osd op threads = {{ osd_op_threads }}
+filestore op threads = {{ filestore_op_threads }}
+filestore max sync interval = {{ filestore_max_sync_interval }}
+osd max scrubs = {{ osd_max_scrubs }}
{% if ceph_stable_release not in ['argonaut','bobtail','cuttlefish','dumpling','emperor','firefly','giant'] %}
- osd scrub begin hour = {{ osd_scrub_begin_hour }}
- osd scrub end hour = {{ osd_scrub_end_hour }}
-{% endif %}
- # Recovery tuning
- osd recovery max active = {{ osd_recovery_max_active }}
- osd max backfills = {{ osd_max_backfills }}
- osd recovery op priority = {{ osd_recovery_op_priority }}
- osd recovery max chunk = {{ osd_recovery_max_chunk }}
- osd recovery threads = {{ osd_recovery_threads }}
- osd objectstore = {{ osd_objectstore }}
- osd crush update on start = {{ osd_crush_update_on_start }}
+osd scrub begin hour = {{ osd_scrub_begin_hour }}
+osd scrub end hour = {{ osd_scrub_end_hour }}
+{% endif %}
+# Recovery tuning
+osd recovery max active = {{ osd_recovery_max_active }}
+osd max backfills = {{ osd_max_backfills }}
+osd recovery op priority = {{ osd_recovery_op_priority }}
+osd recovery max chunk = {{ osd_recovery_max_chunk }}
+osd recovery threads = {{ osd_recovery_threads }}
+osd objectstore = {{ osd_objectstore }}
+osd crush update on start = {{ osd_crush_update_on_start }}
{% if enable_debug_osd %}
- debug osd = {{ debug_osd_level }}
- debug filestore = {{ debug_osd_level }}
- debug journal = {{ debug_osd_level }}
- debug monc = {{ debug_osd_level }}
-{% endif %}
- # Deep scrub impact
- osd scrub sleep = {{ osd_scrub_sleep }}
- osd disk thread ioprio class = {{ osd_disk_thread_ioprio_class }}
- osd disk thread ioprio priority = {{ osd_disk_thread_ioprio_priority }}
- osd scrub chunk max = {{ osd_scrub_chunk_max }}
- osd deep scrub stride = {{ osd_deep_scrub_stride }}
+debug osd = {{ debug_osd_level }}
+debug filestore = {{ debug_osd_level }}
+debug journal = {{ debug_osd_level }}
+debug monc = {{ debug_osd_level }}
+{% endif %}
+# Deep scrub impact
+osd scrub sleep = {{ osd_scrub_sleep }}
+osd disk thread ioprio class = {{ osd_disk_thread_ioprio_class }}
+osd disk thread ioprio priority = {{ osd_disk_thread_ioprio_priority }}
+osd scrub chunk max = {{ osd_scrub_chunk_max }}
+osd deep scrub stride = {{ osd_deep_scrub_stride }}
{% if groups[mds_group_name] is defined %}
-[mds]
-{% if enable_debug_mds %}
- debug mds = {{ debug_mds_level }}
- debug mds balancer = {{ debug_mds_level }}
- debug mds log = {{ debug_mds_level }}
- debug mds migrator = {{ debug_mds_level }}
-{% endif %}
{% for host in groups[mds_group_name] %}
- {% if hostvars[host]['ansible_hostname'] is defined %}
- [mds.{{ hostvars[host]['ansible_hostname'] }}]
- host = {{ hostvars[host]['ansible_hostname'] }}
- {% endif %}
+{% if hostvars[host]['ansible_hostname'] is defined %}
+[mds.{{ hostvars[host]['ansible_hostname'] }}]
+host = {{ hostvars[host]['ansible_hostname'] }}
+{% endif %}
{% endfor %}
+{% if enable_debug_mds %}
+debug mds = {{ debug_mds_level }}
+debug mds balancer = {{ debug_mds_level }}
+debug mds log = {{ debug_mds_level }}
+debug mds migrator = {{ debug_mds_level }}
+{% endif %}
{% endif %}
{% if groups[rgw_group_name] is defined %}
{% for host in groups[rgw_group_name] %}
{% if hostvars[host]['ansible_hostname'] is defined %}
[client.rgw.{{ hostvars[host]['ansible_hostname'] }}]
- {% if radosgw_dns_name is defined %}
- rgw dns name = {{ radosgw_dns_name }}
- {% endif %}
- host = {{ hostvars[host]['ansible_hostname'] }}
- keyring = /var/lib/ceph/radosgw/ceph-rgw.{{ hostvars[host]['ansible_hostname'] }}/keyring
- rgw socket path = /tmp/radosgw-{{ hostvars[host]['ansible_hostname'] }}.sock
- log file = /var/log/ceph/radosgw-{{ hostvars[host]['ansible_hostname'] }}.log
- rgw data = /var/lib/ceph/radosgw/ceph-rgw.{{ hostvars[host]['ansible_hostname'] }}
- {% if radosgw_frontend == 'civetweb' %}
- rgw frontends = civetweb port={{ radosgw_civetweb_port }}
- {% endif %}
- {% if radosgw_keystone %}
- rgw keystone url = {{ radosgw_keystone_url }}
- rgw keystone admin token = {{ radosgw_keystone_admin_token }}
- rgw keystone accepted roles = {{ radosgw_keystone_accepted_roles }}
- rgw keystone token cache size = {{ radosgw_keystone_token_cache_size }}
- rgw keystone revocation interval = {{ radosgw_keystone_revocation_internal }}
- rgw s3 auth use keystone = {{ radosgw_s3_auth_use_keystone }}
- nss db path = {{ radosgw_nss_db_path }}
- {% endif %}
+{% if radosgw_dns_name is defined %}
+rgw dns name = {{ radosgw_dns_name }}
+{% endif %}
+host = {{ hostvars[host]['ansible_hostname'] }}
+keyring = /var/lib/ceph/radosgw/ceph-rgw.{{ hostvars[host]['ansible_hostname'] }}/keyring
+rgw socket path = /tmp/radosgw-{{ hostvars[host]['ansible_hostname'] }}.sock
+log file = /var/log/ceph/radosgw-{{ hostvars[host]['ansible_hostname'] }}.log
+rgw data = /var/lib/ceph/radosgw/ceph-rgw.{{ hostvars[host]['ansible_hostname'] }}
+{% if radosgw_frontend == 'civetweb' %}
+rgw frontends = civetweb port={{ radosgw_civetweb_port }}
+{% endif %}
+{% if radosgw_keystone %}
+rgw keystone url = {{ radosgw_keystone_url }}
+rgw keystone admin token = {{ radosgw_keystone_admin_token }}
+rgw keystone accepted roles = {{ radosgw_keystone_accepted_roles }}
+rgw keystone token cache size = {{ radosgw_keystone_token_cache_size }}
+rgw keystone revocation interval = {{ radosgw_keystone_revocation_internal }}
+rgw s3 auth use keystone = {{ radosgw_s3_auth_use_keystone }}
+nss db path = {{ radosgw_nss_db_path }}
+{% endif %}
{% endif %}
{% endfor %}
{% endif %}
{% if groups[restapi_group_name] is defined %}
[client.restapi]
- public addr = {{ hostvars[inventory_hostname]['ansible_' + restapi_interface]['ipv4']['address'] }}:{{ restapi_port }}
- restapi base url = {{ restapi_base_url }}
- restapi log level = {{ restapi_log_level }}
- keyring = /var/lib/ceph/restapi/ceph-restapi/keyring
- log file = /var/log/ceph/ceph-restapi.log
+public addr = {{ hostvars[inventory_hostname]['ansible_' + restapi_interface]['ipv4']['address'] }}:{{ restapi_port }}
+restapi base url = {{ restapi_base_url }}
+restapi log level = {{ restapi_log_level }}
+keyring = /var/lib/ceph/restapi/ceph-restapi/keyring
+log file = /var/log/ceph/ceph-restapi.log
{% endif %}