BASEDIR=$(dirname "$0")
LOCAL_BRANCH=$(cd $BASEDIR && git rev-parse --abbrev-ref HEAD)
BRANCHES="master ansible-1.9"
-ROLES="ceph-common ceph-mon ceph-osd ceph-mds ceph-rgw ceph-restapi ceph-agent ceph-fetch-keys ceph-rbd-mirror ceph-client ceph-docker-common ceph-mgr ceph-defaults"
+ROLES="ceph-common ceph-mon ceph-osd ceph-mds ceph-rgw ceph-restapi ceph-agent ceph-fetch-keys ceph-rbd-mirror ceph-client ceph-docker-common ceph-mgr ceph-defaults ceph-config"
# FUNCTIONS
+++ /dev/null
----
-- name: create ceph conf directory and assemble directory
- file:
- path: "{{ item }}"
- state: directory
- owner: "ceph"
- group: "ceph"
- mode: "0755"
- with_items:
- - /etc/ceph/
- - /etc/ceph/ceph.d/
-
-- name: template ceph_conf_overrides
- local_action: copy content="{{ ceph_conf_overrides }}" dest="{{ fetch_directory }}/ceph_conf_overrides_temp"
- become: false
- run_once: true
-
-- name: get rendered ceph_conf_overrides
- local_action: set_fact ceph_conf_overrides_rendered="{{ lookup('template', '{{ fetch_directory }}/ceph_conf_overrides_temp') | from_yaml }}"
- become: false
- run_once: true
-
-- name: "generate ceph configuration file: {{ cluster }}.conf"
- action: config_template
- args:
- src: ceph.conf.j2
- dest: /etc/ceph/ceph.d/{{ cluster }}.conf
- owner: "ceph"
- group: "ceph"
- mode: "0644"
- config_overrides: "{{ ceph_conf_overrides_rendered }}"
- config_type: ini
-
-- name: assemble {{ cluster }}.conf and fragments
- assemble:
- src: /etc/ceph/ceph.d/
- dest: /etc/ceph/{{ cluster }}.conf
- regexp: "^(({{cluster}})|(osd)).conf$"
- owner: "ceph"
- group: "ceph"
- mode: "0644"
- notify:
- - restart ceph mons
- - restart ceph osds
- - restart ceph mdss
- - restart ceph rgws
- - restart ceph nfss
- mon_group_name in group_names
- include: create_ceph_initial_dirs.yml
-- include: generate_ceph_conf.yml
- include: create_rbd_client_dir.yml
- include: configure_cluster_name.yml
+++ /dev/null
-#jinja2: trim_blocks: "true", lstrip_blocks: "true"
-# {{ ansible_managed }}
-
-[global]
-{% if not cephx %}
-auth cluster required = none
-auth service required = none
-auth client required = none
-auth supported = none
-{% endif %}
-{% if ip_version == 'ipv6' %}
-ms bind ipv6 = true
-{% endif %}
-{% if not containerized_deployment_with_kv and not containerized_deployment %}
-fsid = {{ fsid }}
-{% endif %}
-max open files = {{ max_open_files }}
-{% if common_single_host_mode is defined and common_single_host_mode %}
-osd crush chooseleaf type = 0
-{% endif %}
-{# NOTE (leseb): the blank lines in-between are needed otherwise we won't get any line break #}
-{% if groups[mon_group_name] is defined %}
-mon initial members = {% for host in groups[mon_group_name] %}
- {% if hostvars[host]['ansible_fqdn'] is defined and mon_use_fqdn -%}
- {{ hostvars[host]['ansible_fqdn'] }}
- {%- elif hostvars[host]['ansible_hostname'] is defined -%}
- {{ hostvars[host]['ansible_hostname'] }}
- {%- endif %}
- {%- if not loop.last %},{% endif %}
- {% endfor %}
-{% endif %}
-
-{% if not containerized_deployment and not containerized_deployment_with_kv -%}
-mon host = {% for host in groups[mon_group_name] -%}
- {% if monitor_address_block | length > 0 %}
- {% if ip_version == 'ipv4' -%}
- {{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(monitor_address_block) | first }}
- {%- elif ip_version == 'ipv6' -%}
- [{{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(monitor_address_block) | first }}]
- {%- endif %}
- {% elif hostvars[host]['monitor_address'] is defined and hostvars[host]['monitor_address'] != '0.0.0.0' -%}
- {% if ip_version == 'ipv4' -%}
- {{ hostvars[host]['monitor_address'] }}
- {%- elif ip_version == 'ipv6' -%}
- [{{ hostvars[host]['monitor_address'] }}]
- {%- endif %}
- {%- else -%}
- {% if ip_version == 'ipv4' -%}
- {{ hostvars[host]['ansible_' + hostvars[host]['monitor_interface']][ip_version]['address'] }}
- {%- elif ip_version == 'ipv6' -%}
- [{{ hostvars[host]['ansible_' + hostvars[host]['monitor_interface']][ip_version][0]['address'] }}]
- {%- endif %}
- {%- endif %}
- {% if not loop.last -%},{%- endif %}
- {%- endfor %}
-{%- endif %}
-
-{% if containerized_deployment %}
-fsid = {{ fsid }}
-mon host = {% for host in groups[mon_group_name] -%}
- {% if monitor_address_block | length > 0 %}
- {% if ip_version == 'ipv4' -%}
- {{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(monitor_address_block) | first }}
- {%- elif ip_version == 'ipv6' -%}
- [{{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(monitor_address_block) | first }}]
- {%- endif %}
- {% elif hostvars[host]['monitor_address'] is defined and hostvars[host]['monitor_address'] != '0.0.0.0' -%}
- {% if ip_version == 'ipv4' -%}
- {{ hostvars[host]['monitor_address'] }}
- {%- elif ip_version == 'ipv6' -%}
- [{{ hostvars[host]['monitor_address'] }}]
- {%- endif %}
- {%- else -%}
- {% set interface = ["ansible_",monitor_interface]|join %}
- {% if ip_version == 'ipv4' -%}
- {{ hostvars[host][interface][ip_version]['address'] }}
- {%- elif ip_version == 'ipv6' -%}
- [{{ hostvars[host][interface][ip_version][0]['address'] }}]
- {%- endif %}
- {%- endif %}
- {% if not loop.last -%},{%- endif %}
- {%- endfor %}
-{% endif %}
-
-{% if public_network is defined %}
-public network = {{ public_network }}
-{% endif %}
-{% if cluster_network is defined %}
-cluster network = {{ cluster_network }}
-{% endif %}
-{% if rgw_override_bucket_index_max_shards is defined %}
-rgw override bucket index max shards = {{ rgw_override_bucket_index_max_shards }}
-{% endif %}
-{% if rgw_bucket_default_quota_max_objects is defined %}
-rgw bucket default quota max objects = {{ rgw_bucket_default_quota_max_objects }}
-{% endif %}
-
-[client.libvirt]
-admin socket = {{ rbd_client_admin_socket_path }}/$cluster-$type.$id.$pid.$cctid.asok # must be writable by QEMU and allowed by SELinux or AppArmor
-log file = {{ rbd_client_log_file }} # must be writable by QEMU and allowed by SELinux or AppArmor
-
-[osd]
-osd mkfs type = {{ osd_mkfs_type }}
-osd mkfs options xfs = {{ osd_mkfs_options_xfs }}
-osd mount options xfs = {{ osd_mount_options_xfs }}
-osd journal size = {{ journal_size }}
-{% if filestore_xattr_use_omap != None %}
-filestore xattr use omap = {{ filestore_xattr_use_omap }}
-{% elif osd_mkfs_type == "ext4" %}
-filestore xattr use omap = true
-{# else, default is false #}
-{% endif %}
-
-{% if groups[mds_group_name] is defined %}
-{% if mds_group_name in group_names %}
-{% for host in groups[mds_group_name] %}
-{% if hostvars[host]['ansible_fqdn'] is defined and mds_use_fqdn %}
-[mds.{{ hostvars[host]['ansible_fqdn'] }}]
-host = {{ hostvars[host]['ansible_fqdn'] }}
-{% elif hostvars[host]['ansible_hostname'] is defined %}
-[mds.{{ hostvars[host]['ansible_hostname'] }}]
-host = {{ hostvars[host]['ansible_hostname'] }}
-{% endif %}
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if groups[rgw_group_name] is defined %}
-{% if rgw_group_name in group_names %}
-{% for host in groups[rgw_group_name] %}
-{% if hostvars[host]['ansible_hostname'] is defined %}
-[client.rgw.{{ hostvars[host]['ansible_hostname'] }}]
-host = {{ hostvars[host]['ansible_hostname'] }}
-keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ hostvars[host]['ansible_hostname'] }}/keyring
-rgw socket path = /tmp/radosgw-{{ hostvars[host]['ansible_hostname'] }}.sock
-log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[host]['ansible_hostname'] }}.log
-rgw data = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ hostvars[host]['ansible_hostname'] }}
-{% if radosgw_address_block | length > 0 %}
- {% if ip_version == 'ipv4' -%}
- rgw frontends = civetweb port={{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(radosgw_address_block) | first }}:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
- {%- elif ip_version == 'ipv6' -%}
- rgw frontends = civetweb port=[{{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(radosgw_address_block) | first }}]:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
- {%- endif %}
-{% elif hostvars[host]['radosgw_address'] is defined and hostvars[host]['radosgw_address'] != '0.0.0.0' -%}
- {% if ip_version == 'ipv4' -%}
- rgw frontends = civetweb port={{ hostvars[host]['radosgw_address'] }}:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
- {%- elif ip_version == 'ipv6' -%}
- rgw frontends = civetweb port=[{{ hostvars[host]['radosgw_address'] }}]:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
- {% endif %}
-{%- else -%}
- {% set interface = ["ansible_",radosgw_interface]|join %}
- {% if ip_version == 'ipv6' -%}
- rgw frontends = civetweb port=[{{ hostvars[host][interface][ip_version][0]['address'] }}]:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
- {%- elif ip_version == 'ipv4' -%}
- rgw frontends = civetweb port={{ hostvars[host][interface][ip_version]['address'] }}:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
- {% endif %}
-{%- endif %}
-{% endif %}
-{% endfor %}
-{% endif %}
-{% endif %}
-
-{% if groups[restapi_group_name] is defined %}
-{% if restapi_group_name in group_names %}
-[client.restapi]
-{% if restapi_interface != "interface" %}
-{% include 'client_restapi_interface.j2' %}
-{% else %}
-{% include 'client_restapi_address.j2' %}
-{% endif %}
-keyring = /var/lib/ceph/restapi/ceph-restapi/keyring
-log file = /var/log/ceph/ceph-restapi.log
-{% endif %}
-{% endif %}
--- /dev/null
+---
+# ceph-common
+- block:
+ - name: create ceph conf directory and assemble directory
+ file:
+ path: "{{ item }}"
+ state: directory
+ owner: "ceph"
+ group: "ceph"
+ mode: "0755"
+ with_items:
+ - /etc/ceph/
+ - /etc/ceph/ceph.d/
+
+ - name: template ceph_conf_overrides
+ local_action: copy content="{{ ceph_conf_overrides }}" dest=/tmp/ceph_conf_overrides_temp
+ run_once: true
+
+ - name: get rendered ceph_conf_overrides
+ local_action: set_fact ceph_conf_overrides_rendered="{{ lookup('template', '/tmp/ceph_conf_overrides_temp') | from_yaml }}"
+ run_once: true
+
+ - name: remove tmp template file for ceph_conf_overrides
+ local_action: file path=/tmp/ceph_conf_overrides_temp state=absent
+ run_once: true
+
+ - name: "generate ceph configuration file: {{ cluster }}.conf"
+ action: config_template
+ args:
+ src: ceph.conf.j2
+ dest: /etc/ceph/ceph.d/{{ cluster }}.conf
+ owner: "ceph"
+ group: "ceph"
+ mode: "0644"
+ config_overrides: "{{ ceph_conf_overrides_rendered }}"
+ config_type: ini
+
+ - name: assemble {{ cluster }}.conf and fragments
+ assemble:
+ src: /etc/ceph/ceph.d/
+ dest: /etc/ceph/{{ cluster }}.conf
+ regexp: "^(({{cluster}})|(osd)).conf$"
+ owner: "ceph"
+ group: "ceph"
+ mode: "0644"
+ notify:
+ - restart ceph mons
+ - restart ceph osds
+ - restart ceph mdss
+ - restart ceph rgws
+ - restart ceph nfss
+ when: not containerized_deployment
+
+# ceph-docker-common
+# only create fetch directory when:
+# we are not populating kv_store with default ceph.conf AND host is a mon
+# OR
+# we are not populating kv_store with default ceph.conf AND there is at least 1 nfs in nfs group AND host is the first nfs
+- block:
+ - name: create a local fetch directory if it does not exist
+ local_action: file path={{ fetch_directory }} state=directory
+ changed_when: false
+ become: false
+ run_once: true
+ when:
+ - (cephx or generate_fsid)
+ - (not mon_containerized_default_ceph_conf_with_kv and
+ (inventory_hostname in groups.get(mon_group_name, []))) or
+ (not mon_containerized_default_ceph_conf_with_kv and
+ ((groups.get(nfs_group_name, []) | length > 0)
+ and (inventory_hostname == groups.get(nfs_group_name, [])[0])))
+
+ - name: generate cluster uuid
+ local_action: shell python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
+ creates="{{ fetch_directory }}/ceph_cluster_uuid.conf"
+ register: cluster_uuid
+ become: false
+ when: generate_fsid
+
+ - name: read cluster uuid if it already exists
+ local_action: command cat {{ fetch_directory }}/ceph_cluster_uuid.conf
+ removes="{{ fetch_directory }}/ceph_cluster_uuid.conf"
+ changed_when: false
+ register: cluster_uuid
+ always_run: true
+ become: false
+ when: generate_fsid
+
+ - name: ensure /etc/ceph exists
+ file:
+ path: /etc/ceph
+ state: directory
+ owner: 'ceph'
+ group: 'ceph'
+ mode: 0755
+ when: groups.get(mon_group_name, []) | length == 0
+
+ - name: "generate {{ cluster }}.conf configuration file"
+ action: config_template
+ args:
+ src: "ceph.conf.j2"
+ dest: "{{ ceph_conf_key_directory }}/{{ cluster }}.conf"
+ owner: "root"
+ group: "root"
+ mode: "0644"
+ config_overrides: "{{ ceph_conf_overrides }}"
+ config_type: ini
+ notify:
+ - restart ceph mons
+ - restart ceph osds
+ - restart ceph mdss
+ - restart ceph rgws
+
+ - name: set fsid fact when generate_fsid = true
+ set_fact:
+ fsid: "{{ cluster_uuid.stdout }}"
+ when: generate_fsid
+ when: containerized_deployment
--- /dev/null
+#jinja2: trim_blocks: "true", lstrip_blocks: "true"
+# {{ ansible_managed }}
+
+[global]
+{% if not cephx %}
+auth cluster required = none
+auth service required = none
+auth client required = none
+auth supported = none
+{% endif %}
+{% if ip_version == 'ipv6' %}
+ms bind ipv6 = true
+{% endif %}
+{% if not containerized_deployment_with_kv and not containerized_deployment %}
+fsid = {{ fsid }}
+{% endif %}
+max open files = {{ max_open_files }}
+{% if common_single_host_mode is defined and common_single_host_mode %}
+osd crush chooseleaf type = 0
+{% endif %}
+{# NOTE (leseb): the blank lines in-between are needed otherwise we won't get any line break #}
+
+{% set nb_mon = groups.get(mon_group_name, []) | length | int %}
+{% set nb_client = groups.get(client_group_name, []) | length | int %}
+{% set nb_osd = groups.get(osd_group_name, []) | length | int %}
+
+
+{% if nb_mon > 0 and inventory_hostname in groups.get(mon_group_name, []) %}
+mon initial members = {% for host in groups[mon_group_name] %}
+ {% if hostvars[host]['ansible_fqdn'] is defined and mon_use_fqdn -%}
+ {{ hostvars[host]['ansible_fqdn'] }}
+ {%- elif hostvars[host]['ansible_hostname'] is defined -%}
+ {{ hostvars[host]['ansible_hostname'] }}
+ {%- endif %}
+ {%- if not loop.last %},{% endif %}
+ {% endfor %}
+{% endif %}
+
+{% if not containerized_deployment and not containerized_deployment_with_kv -%}
+mon host = {% if nb_mon > 0 %}
+ {% for host in groups[mon_group_name] -%}
+ {% if monitor_address_block | length > 0 %}
+ {% if ip_version == 'ipv4' -%}
+ {{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(monitor_address_block) | first }}
+ {%- elif ip_version == 'ipv6' -%}
+ [{{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(monitor_address_block) | first }}]
+ {%- endif %}
+ {% elif hostvars[host]['monitor_address'] is defined and hostvars[host]['monitor_address'] != '0.0.0.0' -%}
+ {% if ip_version == 'ipv4' -%}
+ {{ hostvars[host]['monitor_address'] }}
+ {%- elif ip_version == 'ipv6' -%}
+ [{{ hostvars[host]['monitor_address'] }}]
+ {%- endif %}
+ {%- else -%}
+ {% if ip_version == 'ipv4' -%}
+ {{ hostvars[host]['ansible_' + hostvars[host]['monitor_interface']][ip_version]['address'] }}
+ {%- elif ip_version == 'ipv6' -%}
+ [{{ hostvars[host]['ansible_' + hostvars[host]['monitor_interface']][ip_version][0]['address'] }}]
+ {%- endif %}
+ {%- endif %}
+ {% if not loop.last -%},{%- endif %}
+ {%- endfor %}
+ {% elif nb_mon == 0 and inventory_hostname in groups.get(client_group_name, []) %}
+ {{ external_cluster_mon_ips }}
+ {% endif %}
+{%- endif %}
+
+{% if containerized_deployment %}
+fsid = {{ fsid }}
+mon host = {% if nb_mon > 0 %}
+{% for host in groups[mon_group_name] -%}
+ {% if monitor_address_block | length > 0 %}
+ {% if ip_version == 'ipv4' -%}
+ {{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(monitor_address_block) | first }}
+ {%- elif ip_version == 'ipv6' -%}
+ [{{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(monitor_address_block) | first }}]
+ {%- endif %}
+ {% elif hostvars[host]['monitor_address'] is defined and hostvars[host]['monitor_address'] != '0.0.0.0' -%}
+ {% if ip_version == 'ipv4' -%}
+ {{ hostvars[host]['monitor_address'] }}
+ {%- elif ip_version == 'ipv6' -%}
+ [{{ hostvars[host]['monitor_address'] }}]
+ {%- endif %}
+ {%- else -%}
+ {% set interface = ["ansible_",monitor_interface]|join %}
+ {% if ip_version == 'ipv4' -%}
+ {{ hostvars[host][interface][ip_version]['address'] }}
+ {%- elif ip_version == 'ipv6' -%}
+ [{{ hostvars[host][interface][ip_version][0]['address'] }}]
+ {%- endif %}
+ {%- endif %}
+ {% if not loop.last -%},{%- endif %}
+ {%- endfor %}
+ {% elif nb_mon == 0 and inventory_hostname in groups.get(client_group_name, []) %}
+ {{ external_cluster_mon_ips }}
+ {% endif %}
+{% endif %}
+
+{% if public_network is defined %}
+public network = {{ public_network }}
+{% endif %}
+{% if cluster_network is defined %}
+cluster network = {{ cluster_network }}
+{% endif %}
+{% if rgw_override_bucket_index_max_shards is defined %}
+rgw override bucket index max shards = {{ rgw_override_bucket_index_max_shards }}
+{% endif %}
+{% if rgw_bucket_default_quota_max_objects is defined %}
+rgw bucket default quota max objects = {{ rgw_bucket_default_quota_max_objects }}
+{% endif %}
+
+{% if inventory_hostname in groups.get(client_group_name, []) %}
+[client.libvirt]
+admin socket = {{ rbd_client_admin_socket_path }}/$cluster-$type.$id.$pid.$cctid.asok # must be writable by QEMU and allowed by SELinux or AppArmor
+log file = {{ rbd_client_log_file }} # must be writable by QEMU and allowed by SELinux or AppArmor
+{% endif %}
+
+{% if inventory_hostname in groups.get(osd_group_name, []) %}
+[osd]
+osd mkfs type = {{ osd_mkfs_type }}
+osd mkfs options xfs = {{ osd_mkfs_options_xfs }}
+osd mount options xfs = {{ osd_mount_options_xfs }}
+osd journal size = {{ journal_size }}
+{% if filestore_xattr_use_omap != None %}
+filestore xattr use omap = {{ filestore_xattr_use_omap }}
+{% elif osd_mkfs_type == "ext4" %}
+filestore xattr use omap = true
+{# else, default is false #}
+{% endif %}
+{% endif %}
+
+{% if groups[mds_group_name] is defined %}
+{% if mds_group_name in group_names %}
+{% for host in groups[mds_group_name] %}
+{% if hostvars[host]['ansible_fqdn'] is defined and mds_use_fqdn %}
+[mds.{{ hostvars[host]['ansible_fqdn'] }}]
+host = {{ hostvars[host]['ansible_fqdn'] }}
+{% elif hostvars[host]['ansible_hostname'] is defined %}
+[mds.{{ hostvars[host]['ansible_hostname'] }}]
+host = {{ hostvars[host]['ansible_hostname'] }}
+{% endif %}
+{% endfor %}
+{% endif %}
+{% endif %}
+
+{% if groups[rgw_group_name] is defined %}
+{% if rgw_group_name in group_names %}
+{% for host in groups[rgw_group_name] %}
+{% if hostvars[host]['ansible_hostname'] is defined %}
+[client.rgw.{{ hostvars[host]['ansible_hostname'] }}]
+host = {{ hostvars[host]['ansible_hostname'] }}
+keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ hostvars[host]['ansible_hostname'] }}/keyring
+rgw socket path = /tmp/radosgw-{{ hostvars[host]['ansible_hostname'] }}.sock
+log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[host]['ansible_hostname'] }}.log
+rgw data = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ hostvars[host]['ansible_hostname'] }}
+{% if radosgw_address_block | length > 0 %}
+ {% if ip_version == 'ipv4' -%}
+ rgw frontends = civetweb port={{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(radosgw_address_block) | first }}:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
+ {%- elif ip_version == 'ipv6' -%}
+ rgw frontends = civetweb port=[{{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(radosgw_address_block) | first }}]:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
+ {%- endif %}
+{% elif hostvars[host]['radosgw_address'] is defined and hostvars[host]['radosgw_address'] != '0.0.0.0' -%}
+ {% if ip_version == 'ipv4' -%}
+ rgw frontends = civetweb port={{ hostvars[host]['radosgw_address'] }}:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
+ {%- elif ip_version == 'ipv6' -%}
+ rgw frontends = civetweb port=[{{ hostvars[host]['radosgw_address'] }}]:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
+ {% endif %}
+{%- else -%}
+ {% set interface = ["ansible_",radosgw_interface]|join %}
+ {% if ip_version == 'ipv6' -%}
+ rgw frontends = civetweb port=[{{ hostvars[host][interface][ip_version][0]['address'] }}]:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
+ {%- elif ip_version == 'ipv4' -%}
+ rgw frontends = civetweb port={{ hostvars[host][interface][ip_version]['address'] }}:{{ radosgw_civetweb_port }} {{ radosgw_civetweb_options }}
+ {% endif %}
+{%- endif %}
+{% endif %}
+{% endfor %}
+{% endif %}
+{% endif %}
+
+{% if groups[restapi_group_name] is defined %}
+{% if restapi_group_name in group_names %}
+[client.restapi]
+{% if restapi_interface != "interface" %}
+{% include 'client_restapi_interface.j2' %}
+{% else %}
+{% include 'client_restapi_address.j2' %}
+{% endif %}
+keyring = /var/lib/ceph/restapi/ceph-restapi/keyring
+log file = /var/log/ceph/ceph-restapi.log
+{% endif %}
+{% endif %}
+
with_items: "{{ devices }}"
when:
- containerized_deployment
- - inventory_hostname in groups.get(osd_group_name)
+ - inventory_hostname in groups.get(osd_group_name, [])
always_run: yes
register: ceph_current_fsid
delegate_to: "{{ groups[mon_group_name][0] }}"
- when: not rolling_update
+ when:
+ - not rolling_update
+ - groups.get(mon_group_name, []) | length > 0
# We want this check to be run only on the first node
- name: check if {{ fetch_directory }} directory exists
- set_fact:
ceph_current_fsid:
rc: 1
- when: rolling_update
+ when:
+ - rolling_update or groups.get(mon_group_name, []) | length == 0
- name: create a local fetch directory if it does not exist
local_action: file path={{ fetch_directory }} state=directory
---
-# only create fetch directory when:
-# we are not populating kv_store with default ceph.conf AND host is a mon
-# OR
-# we are not population kv_store with default ceph.conf AND there at least 1 nfs in nfs group AND host is the first nfs
-- name: create a local fetch directory if it does not exist
- local_action: file path={{ fetch_directory }} state=directory
- changed_when: false
- become: false
- run_once: true
- when:
- - (cephx or generate_fsid)
- - (not mon_containerized_default_ceph_conf_with_kv and
- (inventory_hostname in groups.get(mon_group_name, []))) or
- (not mon_containerized_default_ceph_conf_with_kv and
- ((groups.get(nfs_group_name, []) | length > 0)
- and (inventory_hostname == groups.get(nfs_group_name, [])[0])))
-
-- name: generate cluster uuid
- local_action: shell python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
- creates="{{ fetch_directory }}/ceph_cluster_uuid.conf"
- register: cluster_uuid
- become: false
- when: generate_fsid
-
-- name: read cluster uuid if it already exists
- local_action: command cat {{ fetch_directory }}/ceph_cluster_uuid.conf
- removes="{{ fetch_directory }}/ceph_cluster_uuid.conf"
- changed_when: false
- register: cluster_uuid
- always_run: true
- become: false
- when: generate_fsid
-
-- name: "generate {{ cluster }}.conf configuration file"
- action: config_template
- args:
- src: "{{ playbook_dir }}/roles/ceph-common/templates/ceph.conf.j2"
- dest: "{{ ceph_conf_key_directory }}/{{ cluster }}.conf"
- owner: "root"
- group: "root"
- mode: "0644"
- config_overrides: "{{ ceph_conf_overrides }}"
- config_type: ini
- notify:
- - restart ceph mons
- - restart ceph osds
- - restart ceph mdss
- - restart ceph rgws
-
-- name: set fsid fact when generate_fsid = true
- set_fact:
- fsid: "{{ cluster_uuid.stdout }}"
- when: generate_fsid
roles:
- ceph-defaults
- ceph-docker-common
+ - ceph-config
- ceph-mon
serial: 1 # MUST be '1' WHEN DEPLOYING MONITORS ON DOCKER CONTAINERS
roles:
- ceph-defaults
- ceph-docker-common
+ - ceph-config
- ceph-osd
- hosts: mdss
roles:
- ceph-defaults
- ceph-docker-common
+ - ceph-config
- ceph-mds
- hosts: rgws
roles:
- ceph-defaults
- ceph-docker-common
+ - ceph-config
- ceph-rgw
- hosts: nfss
roles:
- ceph-defaults
- ceph-docker-common
+ - ceph-config
- ceph-nfs
- hosts: rbd_mirrors
roles:
- ceph-defaults
- ceph-docker-common
+ - ceph-config
- ceph-rbd-mirror
- hosts: restapis
roles:
- ceph-defaults
- ceph-docker-common
+ - ceph-config
- ceph-restapi
- hosts: mgrs
roles:
- { role: ceph-defaults, when: "ceph_release_num.{{ ceph_stable_release }} > ceph_release_num.jewel" }
- { role: ceph-docker-common, when: "ceph_release_num.{{ ceph_stable_release }} > ceph_release_num.jewel" }
+ - { role: ceph-config, when: "ceph_release_num.{{ ceph_stable_release }} > ceph_release_num.jewel" }
- { role: ceph-mgr, when: "ceph_release_num.{{ ceph_stable_release }} > ceph_release_num.jewel" }
- hosts: clients
roles:
- ceph-defaults
- ceph-common
+ - ceph-config
- ceph-client
roles:
- ceph-defaults
- ceph-common
+ - ceph-config
- ceph-mon
- hosts: agents
roles:
- ceph-defaults
- ceph-common
+ - ceph-config
- ceph-agent
- hosts: osds
roles:
- ceph-defaults
- ceph-common
+ - ceph-config
- ceph-osd
- hosts: mdss
roles:
- ceph-defaults
- ceph-common
+ - ceph-config
- ceph-mds
- hosts: rgws
roles:
- ceph-defaults
- ceph-common
+ - ceph-config
- ceph-rgw
- hosts: nfss
roles:
- ceph-defaults
- ceph-common
+ - ceph-config
- ceph-nfs
- hosts: restapis
roles:
- ceph-defaults
- ceph-common
+ - ceph-config
- ceph-restapi
- hosts: rbdmirrors
roles:
- ceph-defaults
- ceph-common
+ - ceph-config
- ceph-rbd-mirror
- hosts: clients
roles:
- ceph-defaults
- ceph-common
+ - ceph-config
- ceph-client
- hosts: mgrs
roles:
- { role: ceph-defaults, when: "ceph_release_num.{{ ceph_stable_release }} > ceph_release_num.jewel" }
- { role: ceph-common, when: "ceph_release_num.{{ ceph_stable_release }} > ceph_release_num.jewel" }
+ - { role: ceph-config, when: "ceph_release_num.{{ ceph_stable_release }} > ceph_release_num.jewel" }
- { role: ceph-mgr, when: "ceph_release_num.{{ ceph_stable_release }} > ceph_release_num.jewel" }