From 539197a2fcc9617303f3b7d06342634abb33968a Mon Sep 17 00:00:00 2001 From: Guillaume Abrioux Date: Fri, 4 Aug 2017 16:57:46 +0200 Subject: [PATCH] Introduce new role ceph-config. This will give us more flexibility and the possibility to deploy a client node for an external ceph-cluster. related BZ: https://bugzilla.redhat.com/show_bug.cgi?id=1469426 Fixes: #1670 Signed-off-by: Guillaume Abrioux --- contrib/push-roles-to-ansible-galaxy.sh | 2 +- .../ceph-common/tasks/generate_ceph_conf.yml | 47 ------- roles/ceph-common/tasks/main.yml | 1 - roles/ceph-config/tasks/main.yml | 118 ++++++++++++++++++ .../templates/ceph.conf.j2 | 25 +++- roles/ceph-defaults/tasks/check_socket.yml | 2 +- roles/ceph-defaults/tasks/facts.yml | 7 +- .../tasks/create_configs.yml | 53 -------- site-docker.yml.sample | 9 ++ site.yml.sample | 10 ++ 10 files changed, 166 insertions(+), 108 deletions(-) delete mode 100644 roles/ceph-common/tasks/generate_ceph_conf.yml create mode 100644 roles/ceph-config/tasks/main.yml rename roles/{ceph-common => ceph-config}/templates/ceph.conf.j2 (89%) diff --git a/contrib/push-roles-to-ansible-galaxy.sh b/contrib/push-roles-to-ansible-galaxy.sh index 8a21eb035..4a955d47f 100755 --- a/contrib/push-roles-to-ansible-galaxy.sh +++ b/contrib/push-roles-to-ansible-galaxy.sh @@ -5,7 +5,7 @@ set -xe BASEDIR=$(dirname "$0") LOCAL_BRANCH=$(cd $BASEDIR && git rev-parse --abbrev-ref HEAD) BRANCHES="master ansible-1.9" -ROLES="ceph-common ceph-mon ceph-osd ceph-mds ceph-rgw ceph-restapi ceph-agent ceph-fetch-keys ceph-rbd-mirror ceph-client ceph-docker-common ceph-mgr ceph-defaults" +ROLES="ceph-common ceph-mon ceph-osd ceph-mds ceph-rgw ceph-restapi ceph-agent ceph-fetch-keys ceph-rbd-mirror ceph-client ceph-docker-common ceph-mgr ceph-defaults ceph-config" # FUNCTIONS diff --git a/roles/ceph-common/tasks/generate_ceph_conf.yml b/roles/ceph-common/tasks/generate_ceph_conf.yml deleted file mode 100644 index d0803f794..000000000 --- a/roles/ceph-common/tasks/generate_ceph_conf.yml +++ /dev/null @@ -1,47 +0,0 @@ ---- -- name: create ceph conf directory and assemble directory - file: - path: "{{ item }}" - state: directory - owner: "ceph" - group: "ceph" - mode: "0755" - with_items: - - /etc/ceph/ - - /etc/ceph/ceph.d/ - -- name: template ceph_conf_overrides - local_action: copy content="{{ ceph_conf_overrides }}" dest="{{ fetch_directory }}/ceph_conf_overrides_temp" - become: false - run_once: true - -- name: get rendered ceph_conf_overrides - local_action: set_fact ceph_conf_overrides_rendered="{{ lookup('template', '{{ fetch_directory }}/ceph_conf_overrides_temp') | from_yaml }}" - become: false - run_once: true - -- name: "generate ceph configuration file: {{ cluster }}.conf" - action: config_template - args: - src: ceph.conf.j2 - dest: /etc/ceph/ceph.d/{{ cluster }}.conf - owner: "ceph" - group: "ceph" - mode: "0644" - config_overrides: "{{ ceph_conf_overrides_rendered }}" - config_type: ini - -- name: assemble {{ cluster }}.conf and fragments - assemble: - src: /etc/ceph/ceph.d/ - dest: /etc/ceph/{{ cluster }}.conf - regexp: "^(({{cluster}})|(osd)).conf$" - owner: "ceph" - group: "ceph" - mode: "0644" - notify: - - restart ceph mons - - restart ceph osds - - restart ceph mdss - - restart ceph rgws - - restart ceph nfss diff --git a/roles/ceph-common/tasks/main.yml b/roles/ceph-common/tasks/main.yml index c19ee0c77..d72a89e1b 100644 --- a/roles/ceph-common/tasks/main.yml +++ b/roles/ceph-common/tasks/main.yml @@ -122,6 +122,5 @@ - mon_group_name in group_names - include: 
create_ceph_initial_dirs.yml -- include: generate_ceph_conf.yml - include: create_rbd_client_dir.yml - include: configure_cluster_name.yml diff --git a/roles/ceph-config/tasks/main.yml b/roles/ceph-config/tasks/main.yml new file mode 100644 index 000000000..e88743f3b --- /dev/null +++ b/roles/ceph-config/tasks/main.yml @@ -0,0 +1,118 @@ +--- +# ceph-common +- block: + - name: create ceph conf directory and assemble directory + file: + path: "{{ item }}" + state: directory + owner: "ceph" + group: "ceph" + mode: "0755" + with_items: + - /etc/ceph/ + - /etc/ceph/ceph.d/ + + - name: template ceph_conf_overrides + local_action: copy content="{{ ceph_conf_overrides }}" dest=/tmp/ceph_conf_overrides_temp + run_once: true + + - name: get rendered ceph_conf_overrides + local_action: set_fact ceph_conf_overrides_rendered="{{ lookup('template', '/tmp/ceph_conf_overrides_temp') | from_yaml }}" + run_once: true + + - name: remove tmp template file for ceph_conf_overrides + local_action: file path=/tmp/ceph_conf_overrides_temp state=absent + run_once: true + + - name: "generate ceph configuration file: {{ cluster }}.conf" + action: config_template + args: + src: ceph.conf.j2 + dest: /etc/ceph/ceph.d/{{ cluster }}.conf + owner: "ceph" + group: "ceph" + mode: "0644" + config_overrides: "{{ ceph_conf_overrides_rendered }}" + config_type: ini + + - name: assemble {{ cluster }}.conf and fragments + assemble: + src: /etc/ceph/ceph.d/ + dest: /etc/ceph/{{ cluster }}.conf + regexp: "^(({{cluster}})|(osd)).conf$" + owner: "ceph" + group: "ceph" + mode: "0644" + notify: + - restart ceph mons + - restart ceph osds + - restart ceph mdss + - restart ceph rgws + - restart ceph nfss + when: not containerized_deployment + +# ceph-docker-common +# only create fetch directory when: +# we are not populating kv_store with default ceph.conf AND host is a mon +# OR +# we are not population kv_store with default ceph.conf AND there at least 1 nfs in nfs group AND host is the first nfs +- block: + - name: create a local fetch directory if it does not exist + local_action: file path={{ fetch_directory }} state=directory + changed_when: false + become: false + run_once: true + when: + - (cephx or generate_fsid) + - (not mon_containerized_default_ceph_conf_with_kv and + (inventory_hostname in groups.get(mon_group_name, []))) or + (not mon_containerized_default_ceph_conf_with_kv and + ((groups.get(nfs_group_name, []) | length > 0) + and (inventory_hostname == groups.get(nfs_group_name, [])[0]))) + + - name: generate cluster uuid + local_action: shell python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf + creates="{{ fetch_directory }}/ceph_cluster_uuid.conf" + register: cluster_uuid + become: false + when: generate_fsid + + - name: read cluster uuid if it already exists + local_action: command cat {{ fetch_directory }}/ceph_cluster_uuid.conf + removes="{{ fetch_directory }}/ceph_cluster_uuid.conf" + changed_when: false + register: cluster_uuid + always_run: true + become: false + when: generate_fsid + + - name: ensure /etc/ceph exists + file: + path: /etc/ceph + state: directory + owner: 'ceph' + group: 'ceph' + mode: 0755 + when: groups.get(mon_group_name, []) | length == 0 + + - name: "generate {{ cluster }}.conf configuration file" + action: config_template + args: + src: "ceph.conf.j2" + dest: "{{ ceph_conf_key_directory }}/{{ cluster }}.conf" + owner: "root" + group: "root" + mode: "0644" + config_overrides: "{{ ceph_conf_overrides }}" + config_type: ini + notify: + - restart 
ceph mons + - restart ceph osds + - restart ceph mdss + - restart ceph rgws + + - name: set fsid fact when generate_fsid = true + set_fact: + fsid: "{{ cluster_uuid.stdout }}" + when: generate_fsid + when: containerized_deployment diff --git a/roles/ceph-common/templates/ceph.conf.j2 b/roles/ceph-config/templates/ceph.conf.j2 similarity index 89% rename from roles/ceph-common/templates/ceph.conf.j2 rename to roles/ceph-config/templates/ceph.conf.j2 index 16357a1da..1282580cd 100644 --- a/roles/ceph-common/templates/ceph.conf.j2 +++ b/roles/ceph-config/templates/ceph.conf.j2 @@ -19,7 +19,13 @@ max open files = {{ max_open_files }} osd crush chooseleaf type = 0 {% endif %} {# NOTE (leseb): the blank lines in-between are needed otherwise we won't get any line break #} -{% if groups[mon_group_name] is defined %} + +{% set nb_mon = groups.get(mon_group_name, []) | length | int %} +{% set nb_client = groups.get(client_group_name, []) | length | int %} +{% set nb_osd = groups.get(osd_group_name, []) | length | int %} + + +{% if nb_mon > 0 and inventory_hostname in groups.get(mon_group_name, []) %} mon initial members = {% for host in groups[mon_group_name] %} {% if hostvars[host]['ansible_fqdn'] is defined and mon_use_fqdn -%} {{ hostvars[host]['ansible_fqdn'] }} @@ -31,7 +37,8 @@ mon initial members = {% for host in groups[mon_group_name] %} {% endif %} {% if not containerized_deployment and not containerized_deployment_with_kv -%} -mon host = {% for host in groups[mon_group_name] -%} +mon host = {% if nb_mon > 0 %} + {% for host in groups[mon_group_name] -%} {% if monitor_address_block | length > 0 %} {% if ip_version == 'ipv4' -%} {{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(monitor_address_block) | first }} @@ -53,11 +60,15 @@ mon host = {% for host in groups[mon_group_name] -%} {%- endif %} {% if not loop.last -%},{%- endif %} {%- endfor %} + {% elif nb_mon == 0 and inventory_hostname in groups.get(client_group_name, []) %} + {{ external_cluster_mon_ips }} + {% endif %} {%- endif %} {% if containerized_deployment %} fsid = {{ fsid }} -mon host = {% for host in groups[mon_group_name] -%} +mon host = {% if nb_mon > 0 %} +{% for host in groups[mon_group_name] -%} {% if monitor_address_block | length > 0 %} {% if ip_version == 'ipv4' -%} {{ hostvars[host]['ansible_all_' + ip_version + '_addresses'] | ipaddr(monitor_address_block) | first }} @@ -80,6 +91,9 @@ mon host = {% for host in groups[mon_group_name] -%} {%- endif %} {% if not loop.last -%},{%- endif %} {%- endfor %} + {% elif nb_mon == 0 and inventory_hostname in groups.get(client_group_name, []) %} + {{ external_cluster_mon_ips }} + {% endif %} {% endif %} {% if public_network is defined %} @@ -95,10 +109,13 @@ rgw override bucket index max shards = {{ rgw_override_bucket_index_max_shards } rgw bucket default quota max objects = {{ rgw_bucket_default_quota_max_objects }} {% endif %} +{% if inventory_hostname in groups.get(client_group_name, []) %} [client.libvirt] admin socket = {{ rbd_client_admin_socket_path }}/$cluster-$type.$id.$pid.$cctid.asok # must be writable by QEMU and allowed by SELinux or AppArmor log file = {{ rbd_client_log_file }} # must be writable by QEMU and allowed by SELinux or AppArmor +{% endif %} +{% if inventory_hostname in groups.get(osd_group_name, []) %} [osd] osd mkfs type = {{ osd_mkfs_type }} osd mkfs options xfs = {{ osd_mkfs_options_xfs }} @@ -110,6 +127,7 @@ filestore xattr use omap = {{ filestore_xattr_use_omap }} filestore xattr use omap = true {# else, default is false #} {% 
endif %} +{% endif %} {% if groups[mds_group_name] is defined %} {% if mds_group_name in group_names %} @@ -172,3 +190,4 @@ keyring = /var/lib/ceph/restapi/ceph-restapi/keyring log file = /var/log/ceph/ceph-restapi.log {% endif %} {% endif %} + diff --git a/roles/ceph-defaults/tasks/check_socket.yml b/roles/ceph-defaults/tasks/check_socket.yml index 11f04f6d3..93f0db278 100644 --- a/roles/ceph-defaults/tasks/check_socket.yml +++ b/roles/ceph-defaults/tasks/check_socket.yml @@ -18,4 +18,4 @@ with_items: "{{ devices }}" when: - containerized_deployment - - inventory_hostname in groups.get(osd_group_name) + - inventory_hostname in groups.get(osd_group_name, []) diff --git a/roles/ceph-defaults/tasks/facts.yml b/roles/ceph-defaults/tasks/facts.yml index d650259ae..78e961bdf 100644 --- a/roles/ceph-defaults/tasks/facts.yml +++ b/roles/ceph-defaults/tasks/facts.yml @@ -17,7 +17,9 @@ always_run: yes register: ceph_current_fsid delegate_to: "{{ groups[mon_group_name][0] }}" - when: not rolling_update + when: + - not rolling_update + - groups.get(mon_group_name, []) | length > 0 # We want this check to be run only on the first node - name: check if {{ fetch_directory }} directory exists @@ -31,7 +33,8 @@ - set_fact: ceph_current_fsid: rc: 1 - when: rolling_update + when: + - rolling_update or groups.get(mon_group_name, []) | length == 0 - name: create a local fetch directory if it does not exist local_action: file path={{ fetch_directory }} state=directory diff --git a/roles/ceph-docker-common/tasks/create_configs.yml b/roles/ceph-docker-common/tasks/create_configs.yml index d85644222..ed97d539c 100644 --- a/roles/ceph-docker-common/tasks/create_configs.yml +++ b/roles/ceph-docker-common/tasks/create_configs.yml @@ -1,54 +1 @@ --- -# only create fetch directory when: -# we are not populating kv_store with default ceph.conf AND host is a mon -# OR -# we are not population kv_store with default ceph.conf AND there at least 1 nfs in nfs group AND host is the first nfs -- name: create a local fetch directory if it does not exist - local_action: file path={{ fetch_directory }} state=directory - changed_when: false - become: false - run_once: true - when: - - (cephx or generate_fsid) - - (not mon_containerized_default_ceph_conf_with_kv and - (inventory_hostname in groups.get(mon_group_name, []))) or - (not mon_containerized_default_ceph_conf_with_kv and - ((groups.get(nfs_group_name, []) | length > 0) - and (inventory_hostname == groups.get(nfs_group_name, [])[0]))) - -- name: generate cluster uuid - local_action: shell python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf - creates="{{ fetch_directory }}/ceph_cluster_uuid.conf" - register: cluster_uuid - become: false - when: generate_fsid - -- name: read cluster uuid if it already exists - local_action: command cat {{ fetch_directory }}/ceph_cluster_uuid.conf - removes="{{ fetch_directory }}/ceph_cluster_uuid.conf" - changed_when: false - register: cluster_uuid - always_run: true - become: false - when: generate_fsid - -- name: "generate {{ cluster }}.conf configuration file" - action: config_template - args: - src: "{{ playbook_dir }}/roles/ceph-common/templates/ceph.conf.j2" - dest: "{{ ceph_conf_key_directory }}/{{ cluster }}.conf" - owner: "root" - group: "root" - mode: "0644" - config_overrides: "{{ ceph_conf_overrides }}" - config_type: ini - notify: - - restart ceph mons - - restart ceph osds - - restart ceph mdss - - restart ceph rgws - -- name: set fsid fact when generate_fsid = true - set_fact: - 
fsid: "{{ cluster_uuid.stdout }}" - when: generate_fsid diff --git a/site-docker.yml.sample b/site-docker.yml.sample index 5740c399f..e039cbffb 100644 --- a/site-docker.yml.sample +++ b/site-docker.yml.sample @@ -21,6 +21,7 @@ roles: - ceph-defaults - ceph-docker-common + - ceph-config - ceph-mon serial: 1 # MUST be '1' WHEN DEPLOYING MONITORS ON DOCKER CONTAINERS @@ -29,6 +30,7 @@ roles: - ceph-defaults - ceph-docker-common + - ceph-config - ceph-osd - hosts: mdss @@ -36,6 +38,7 @@ roles: - ceph-defaults - ceph-docker-common + - ceph-config - ceph-mds - hosts: rgws @@ -43,6 +46,7 @@ roles: - ceph-defaults - ceph-docker-common + - ceph-config - ceph-rgw - hosts: nfss @@ -50,6 +54,7 @@ roles: - ceph-defaults - ceph-docker-common + - ceph-config - ceph-nfs - hosts: rbd_mirrors @@ -57,6 +62,7 @@ roles: - ceph-defaults - ceph-docker-common + - ceph-config - ceph-rbd-mirror - hosts: restapis @@ -64,6 +70,7 @@ roles: - ceph-defaults - ceph-docker-common + - ceph-config - ceph-restapi - hosts: mgrs @@ -71,6 +78,7 @@ roles: - { role: ceph-defaults, when: "ceph_release_num.{{ ceph_stable_release }} > ceph_release_num.jewel" } - { role: ceph-docker-common, when: "ceph_release_num.{{ ceph_stable_release }} > ceph_release_num.jewel" } + - { role: ceph-config, when: "ceph_release_num.{{ ceph_stable_release }} > ceph_release_num.jewel" } - { role: ceph-mgr, when: "ceph_release_num.{{ ceph_stable_release }} > ceph_release_num.jewel" } - hosts: clients @@ -78,4 +86,5 @@ roles: - ceph-defaults - ceph-common + - ceph-config - ceph-client diff --git a/site.yml.sample b/site.yml.sample index 34b923427..de833be90 100644 --- a/site.yml.sample +++ b/site.yml.sample @@ -44,6 +44,7 @@ roles: - ceph-defaults - ceph-common + - ceph-config - ceph-mon - hosts: agents @@ -52,6 +53,7 @@ roles: - ceph-defaults - ceph-common + - ceph-config - ceph-agent - hosts: osds @@ -60,6 +62,7 @@ roles: - ceph-defaults - ceph-common + - ceph-config - ceph-osd - hosts: mdss @@ -68,6 +71,7 @@ roles: - ceph-defaults - ceph-common + - ceph-config - ceph-mds - hosts: rgws @@ -76,6 +80,7 @@ roles: - ceph-defaults - ceph-common + - ceph-config - ceph-rgw - hosts: nfss @@ -84,6 +89,7 @@ roles: - ceph-defaults - ceph-common + - ceph-config - ceph-nfs - hosts: restapis @@ -92,6 +98,7 @@ roles: - ceph-defaults - ceph-common + - ceph-config - ceph-restapi - hosts: rbdmirrors @@ -100,6 +107,7 @@ roles: - ceph-defaults - ceph-common + - ceph-config - ceph-rbd-mirror - hosts: clients @@ -108,6 +116,7 @@ roles: - ceph-defaults - ceph-common + - ceph-config - ceph-client - hosts: mgrs @@ -116,4 +125,5 @@ roles: - { role: ceph-defaults, when: "ceph_release_num.{{ ceph_stable_release }} > ceph_release_num.jewel" } - { role: ceph-common, when: "ceph_release_num.{{ ceph_stable_release }} > ceph_release_num.jewel" } + - { role: ceph-config, when: "ceph_release_num.{{ ceph_stable_release }} > ceph_release_num.jewel" } - { role: ceph-mgr, when: "ceph_release_num.{{ ceph_stable_release }} > ceph_release_num.jewel" } -- 2.39.5
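
Note (not part of the patch): the "client node for an external ceph-cluster" case from the commit message maps onto the new template logic as follows — when the inventory contains no monitors (nb_mon == 0) and the host is in the clients group, "mon host" is rendered from external_cluster_mon_ips instead of being built from the mons group. Below is a minimal, illustrative group_vars sketch for that scenario. The variable names (external_cluster_mon_ips, ceph_conf_overrides, fsid, generate_fsid) are taken from this diff, but the concrete values, the [clients]-only inventory, and the group_vars/clients.yml placement are assumptions for illustration, not something this patch prescribes.

# group_vars/clients.yml -- illustrative sketch only, all values are placeholders
---
# Comma-separated monitor addresses of the already-running external cluster;
# consumed by ceph.conf.j2 when no hosts are present in the mons group.
external_cluster_mon_ips: "192.168.1.10,192.168.1.11,192.168.1.12"

# Any extra settings are still merged into the rendered {{ cluster }}.conf
# by the config_template action, exactly as for an internal cluster.
ceph_conf_overrides:
  global:
    rbd default features: 3

Depending on the deployment, the external cluster's fsid may also need to be supplied (with generate_fsid set to false), since there is no local monitor from which to derive it. With such a configuration, the clients play in site.yml.sample (ceph-defaults, ceph-common, ceph-config, ceph-client) can render /etc/ceph/<cluster>.conf on client nodes without any monitor hosts in the inventory.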