From: Guillaume Abrioux
Date: Tue, 22 May 2018 14:41:40 +0000 (+0200)
Subject: osds: move openstack pools creation in ceph-osd
X-Git-Tag: v3.2.0beta1~82
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=564a662baf10b9085a6da8c9152400914e310d15;p=ceph-ansible.git

osds: move openstack pools creation in ceph-osd

When deploying a large number of OSD nodes, creating the OpenStack pools from
the ceph-mon role can be an issue: the pools are created before all OSDs are
active, so the protection check [1] won't pass.

The idea here is to move the OpenStack pool creation to the end of the
`ceph-osd` role.

[1] https://github.com/ceph/ceph/blob/e59258943bcfe3e52d40a59ff30df55e1e6a3865/src/mon/OSDMonitor.cc#L5673

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1578086
Signed-off-by: Guillaume Abrioux
---
diff --git a/group_vars/all.yml.sample b/group_vars/all.yml.sample
index 483cd7be4..b142bf4a0 100644
--- a/group_vars/all.yml.sample
+++ b/group_vars/all.yml.sample
@@ -532,3 +532,69 @@ dummy:
 #docker_pull_retry: 3
 #docker_pull_timeout: "300s"
+
+
+#############
+# OPENSTACK #
+#############
+#openstack_config: false
+#openstack_glance_pool:
+# name: "images"
+# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# rule_name: "replicated_rule"
+# type: 1
+# erasure_profile: ""
+# expected_num_objects: ""
+#openstack_cinder_pool:
+# name: "volumes"
+# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# rule_name: "replicated_rule"
+# type: 1
+# erasure_profile: ""
+# expected_num_objects: ""
+#openstack_nova_pool:
+# name: "vms"
+# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# rule_name: "replicated_rule"
+# type: 1
+# erasure_profile: ""
+# expected_num_objects: ""
+#openstack_cinder_backup_pool:
+# name: "backups"
+# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# rule_name: "replicated_rule"
+# type: 1
+# erasure_profile: ""
+# expected_num_objects: ""
+#openstack_gnocchi_pool:
+# name: "metrics"
+# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# rule_name: "replicated_rule"
+# type: 1
+# erasure_profile: ""
+# expected_num_objects: ""
+
+#openstack_pools:
+# - "{{ openstack_glance_pool }}"
+# - "{{ openstack_cinder_pool }}"
+# - "{{ openstack_nova_pool }}"
+# - "{{ openstack_cinder_backup_pool }}"
+# - "{{ openstack_gnocchi_pool }}"
+
+
+# The value for 'key' can be a pre-generated key,
+# e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
+# By default, keys will be auto-generated.
+# +#openstack_keys: +# - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool=volumes, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" } +# - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" } +# - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" } +# - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", } +# - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" } + diff --git a/group_vars/mons.yml.sample b/group_vars/mons.yml.sample index 50a004364..d65547739 100644 --- a/group_vars/mons.yml.sample +++ b/group_vars/mons.yml.sample @@ -69,72 +69,6 @@ dummy: # Note that 'host' is mandatory and that you need to submit at least two bucket type (including the host) #create_crush_tree: false - -############# -# OPENSTACK # -############# -#openstack_config: false -#openstack_glance_pool: -# name: "images" -# pg_num: "{{ osd_pool_default_pg_num }}" -# pgp_num: "{{ osd_pool_default_pg_num }}" -# rule_name: "replicated_rule" -# type: 1 -# erasure_profile: "" -# expected_num_objects: "" -#openstack_cinder_pool: -# name: "volumes" -# pg_num: "{{ osd_pool_default_pg_num }}" -# pgp_num: "{{ osd_pool_default_pg_num }}" -# rule_name: "replicated_rule" -# type: 1 -# erasure_profile: "" -# expected_num_objects: "" -#openstack_nova_pool: -# name: "vms" -# pg_num: "{{ osd_pool_default_pg_num }}" -# pgp_num: "{{ osd_pool_default_pg_num }}" -# rule_name: "replicated_rule" -# type: 1 -# erasure_profile: "" -# expected_num_objects: "" -#openstack_cinder_backup_pool: -# name: "backups" -# pg_num: "{{ osd_pool_default_pg_num }}" -# pgp_num: "{{ osd_pool_default_pg_num }}" -# rule_name: "replicated_rule" -# type: 1 -# erasure_profile: "" -# expected_num_objects: "" -#openstack_gnocchi_pool: -# name: "metrics" -# pg_num: "{{ osd_pool_default_pg_num }}" -# pgp_num: "{{ osd_pool_default_pg_num }}" -# rule_name: "replicated_rule" -# type: 1 -# erasure_profile: "" -# expected_num_objects: "" - -#openstack_pools: -# - "{{ openstack_glance_pool }}" -# - "{{ openstack_cinder_pool }}" -# - "{{ openstack_nova_pool }}" -# - "{{ openstack_cinder_backup_pool }}" -# - "{{ openstack_gnocchi_pool }}" - - -# The value for 'key' can be a pre-generated key, -# e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ==" -# By default, keys will be auto-generated. 
-# -#openstack_keys: -# - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool=volumes, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" } -# - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" } -# - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" } -# - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", } -# - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" } - - ########## # DOCKER # ########## diff --git a/group_vars/rhcs.yml.sample b/group_vars/rhcs.yml.sample index 73a4f5bde..b142bf4a0 100644 --- a/group_vars/rhcs.yml.sample +++ b/group_vars/rhcs.yml.sample @@ -29,7 +29,7 @@ dummy: # mimic: 13 # Directory to fetch cluster fsid, keys etc... -fetch_directory: ~/ceph-ansible-keys +#fetch_directory: fetch/ # The 'cluster' variable determines the name of the cluster. # Changing the default value to something else means that you will @@ -135,14 +135,14 @@ fetch_directory: ~/ceph-ansible-keys # - 'distro' means that no separate repo file will be added # you will get whatever version of Ceph is included in your Linux distro. # 'local' means that the ceph binaries will be copied over from the local machine -ceph_origin: repository +#ceph_origin: "{{ 'repository' if ceph_rhcs or ceph_stable or ceph_dev or ceph_stable_uca or ceph_custom else 'dummy' }}" # backward compatibility with stable-2.2, will disappear in stable 3.1 #valid_ceph_origins: # - repository # - distro # - local -ceph_repository: rhcs +#ceph_repository: "{{ 'community' if ceph_stable else 'rhcs' if ceph_rhcs else 'dev' if ceph_dev else 'uca' if ceph_stable_uca else 'custom' if ceph_custom else 'dummy' }}" # backward compatibility with stable-2.2, will disappear in stable 3.1 #valid_ceph_repository: # - community # - rhcs @@ -532,3 +532,69 @@ ceph_repository: rhcs #docker_pull_retry: 3 #docker_pull_timeout: "300s" + + +############# +# OPENSTACK # +############# +#openstack_config: false +#openstack_glance_pool: +# name: "images" +# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" +# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" +# rule_name: "replicated_rule" +# type: 1 +# erasure_profile: "" +# expected_num_objects: "" +#openstack_cinder_pool: +# name: "volumes" +# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" +# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" +# rule_name: "replicated_rule" +# type: 1 +# erasure_profile: "" +# expected_num_objects: "" +#openstack_nova_pool: +# name: "vms" +# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" +# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" +# rule_name: "replicated_rule" +# type: 1 +# erasure_profile: "" +# expected_num_objects: "" +#openstack_cinder_backup_pool: +# name: "backups" +# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" +# pgp_num: "{{ 
hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" +# rule_name: "replicated_rule" +# type: 1 +# erasure_profile: "" +# expected_num_objects: "" +#openstack_gnocchi_pool: +# name: "metrics" +# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" +# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" +# rule_name: "replicated_rule" +# type: 1 +# erasure_profile: "" +# expected_num_objects: "" + +#openstack_pools: +# - "{{ openstack_glance_pool }}" +# - "{{ openstack_cinder_pool }}" +# - "{{ openstack_nova_pool }}" +# - "{{ openstack_cinder_backup_pool }}" +# - "{{ openstack_gnocchi_pool }}" + + +# The value for 'key' can be a pre-generated key, +# e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ==" +# By default, keys will be auto-generated. +# +#openstack_keys: +# - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool=volumes, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" } +# - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" } +# - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" } +# - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", } +# - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" } + diff --git a/roles/ceph-defaults/defaults/main.yml b/roles/ceph-defaults/defaults/main.yml index 8d7c65dc9..095f57808 100644 --- a/roles/ceph-defaults/defaults/main.yml +++ b/roles/ceph-defaults/defaults/main.yml @@ -523,3 +523,68 @@ rolling_update: false ##################### docker_pull_retry: 3 docker_pull_timeout: "300s" + + +############# +# OPENSTACK # +############# +openstack_config: false +openstack_glance_pool: + name: "images" + pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" + pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" + rule_name: "replicated_rule" + type: 1 + erasure_profile: "" + expected_num_objects: "" +openstack_cinder_pool: + name: "volumes" + pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" + pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" + rule_name: "replicated_rule" + type: 1 + erasure_profile: "" + expected_num_objects: "" +openstack_nova_pool: + name: "vms" + pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" + pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" + rule_name: "replicated_rule" + type: 1 + erasure_profile: "" + expected_num_objects: "" +openstack_cinder_backup_pool: + name: "backups" + pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" + pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" + rule_name: "replicated_rule" + type: 1 + erasure_profile: "" + expected_num_objects: "" +openstack_gnocchi_pool: + name: "metrics" + pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" + pgp_num: "{{ 
hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" + rule_name: "replicated_rule" + type: 1 + erasure_profile: "" + expected_num_objects: "" + +openstack_pools: + - "{{ openstack_glance_pool }}" + - "{{ openstack_cinder_pool }}" + - "{{ openstack_nova_pool }}" + - "{{ openstack_cinder_backup_pool }}" + - "{{ openstack_gnocchi_pool }}" + + +# The value for 'key' can be a pre-generated key, +# e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ==" +# By default, keys will be auto-generated. +# +openstack_keys: + - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool=volumes, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" } + - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" } + - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" } + - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", } + - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" } diff --git a/roles/ceph-mon/defaults/main.yml b/roles/ceph-mon/defaults/main.yml index 2de8bb1cf..09183253f 100644 --- a/roles/ceph-mon/defaults/main.yml +++ b/roles/ceph-mon/defaults/main.yml @@ -61,72 +61,6 @@ crush_rules: # Note that 'host' is mandatory and that you need to submit at least two bucket type (including the host) create_crush_tree: false - -############# -# OPENSTACK # -############# -openstack_config: false -openstack_glance_pool: - name: "images" - pg_num: "{{ osd_pool_default_pg_num }}" - pgp_num: "{{ osd_pool_default_pg_num }}" - rule_name: "replicated_rule" - type: 1 - erasure_profile: "" - expected_num_objects: "" -openstack_cinder_pool: - name: "volumes" - pg_num: "{{ osd_pool_default_pg_num }}" - pgp_num: "{{ osd_pool_default_pg_num }}" - rule_name: "replicated_rule" - type: 1 - erasure_profile: "" - expected_num_objects: "" -openstack_nova_pool: - name: "vms" - pg_num: "{{ osd_pool_default_pg_num }}" - pgp_num: "{{ osd_pool_default_pg_num }}" - rule_name: "replicated_rule" - type: 1 - erasure_profile: "" - expected_num_objects: "" -openstack_cinder_backup_pool: - name: "backups" - pg_num: "{{ osd_pool_default_pg_num }}" - pgp_num: "{{ osd_pool_default_pg_num }}" - rule_name: "replicated_rule" - type: 1 - erasure_profile: "" - expected_num_objects: "" -openstack_gnocchi_pool: - name: "metrics" - pg_num: "{{ osd_pool_default_pg_num }}" - pgp_num: "{{ osd_pool_default_pg_num }}" - rule_name: "replicated_rule" - type: 1 - erasure_profile: "" - expected_num_objects: "" - -openstack_pools: - - "{{ openstack_glance_pool }}" - - "{{ openstack_cinder_pool }}" - - "{{ openstack_nova_pool }}" - - "{{ openstack_cinder_backup_pool }}" - - "{{ openstack_gnocchi_pool }}" - - -# The value for 'key' can be a pre-generated key, -# e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ==" -# By default, keys will be auto-generated. 
-# -openstack_keys: - - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool=volumes, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" } - - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" } - - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" } - - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", } - - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" } - - ########## # DOCKER # ########## diff --git a/roles/ceph-mon/tasks/main.yml b/roles/ceph-mon/tasks/main.yml index 94cba40b6..ad6b825b4 100644 --- a/roles/ceph-mon/tasks/main.yml +++ b/roles/ceph-mon/tasks/main.yml @@ -50,13 +50,6 @@ when: - openstack_keys_tmp is defined -# Create the pools listed in openstack_pools -- name: include openstack_config.yml - include: openstack_config.yml - when: - - openstack_config - - inventory_hostname == groups[mon_group_name] | last - - name: include create_mds_filesystems.yml include: create_mds_filesystems.yml when: diff --git a/roles/ceph-mon/tasks/openstack_config.yml b/roles/ceph-mon/tasks/openstack_config.yml deleted file mode 100644 index f0660a894..000000000 --- a/roles/ceph-mon/tasks/openstack_config.yml +++ /dev/null @@ -1,70 +0,0 @@ ---- -- name: list existing pool(s) - command: > - {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} - osd pool get {{ item.name }} size - with_items: "{{ openstack_pools | unique }}" - register: created_pools - failed_when: false - -- name: create openstack pool(s) - command: > - {{ docker_exec_cmd }} ceph --cluster {{ cluster }} - osd pool create {{ item.0.name }} - {{ item.0.pg_num }} - {{ item.0.pgp_num | default(item.0.pg_num) }} - {{ 'replicated_rule' if item.0.rule_name | default('replicated_rule') == '' else item.0.rule_name | default('replicated_rule') }} - {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }} - {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile != '' %} - {{ item.0.erasure_profile }} - {%- endif %} - {{ item.0.expected_num_objects | default('') }} - with_together: - - "{{ openstack_pools | unique }}" - - "{{ created_pools.results }}" - changed_when: false - when: - - item.1.get('rc', 0) != 0 - -- name: assign application to pool(s) - command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}" - with_items: "{{ openstack_pools | unique }}" - changed_when: false - when: - - ceph_release_num[ceph_release] >= ceph_release_num['luminous'] - - item.application is defined - -- name: create openstack cephx key(s) - ceph_key: - state: present - name: "{{ item.name }}" - caps: "{{ item.caps }}" - secret: "{{ item.key | default('') }}" - containerized: "{{ docker_exec_cmd | default(False) }}" - cluster: "{{ cluster }}" - mode: "{{ item.mode|default(omit) }}" - with_items: "{{ openstack_keys }}" - when: cephx - -- name: fetch openstack 
cephx key(s) - fetch: - src: "/etc/ceph/{{ cluster }}.{{ item.name }}.keyring" - dest: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.{{ item.name }}.keyring" - flat: yes - with_items: "{{ openstack_keys }}" - -- name: copy to other mons the openstack cephx key(s) - copy: - src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.{{ item.1.name }}.keyring" - dest: "/etc/ceph/{{ cluster }}.{{ item.1.name }}.keyring" - owner: "{{ ceph_uid }}" - group: "{{ ceph_uid }}" - mode: "{{ item.1.mode|default(omit) }}" - with_nested: - - "{{ groups[mon_group_name] }}" - - "{{ openstack_keys }}" - delegate_to: "{{ item.0 }}" - when: - - cephx - - openstack_config - - item.0 != groups[mon_group_name] | last \ No newline at end of file diff --git a/roles/ceph-osd/tasks/main.yml b/roles/ceph-osd/tasks/main.yml index 16220548a..4adade3b4 100644 --- a/roles/ceph-osd/tasks/main.yml +++ b/roles/ceph-osd/tasks/main.yml @@ -86,3 +86,10 @@ - containerized_deployment # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) static: False + +# Create the pools listed in openstack_pools +- name: include openstack_config.yml + include: openstack_config.yml + when: + - openstack_config + - inventory_hostname == groups[osd_group_name] | last \ No newline at end of file diff --git a/roles/ceph-osd/tasks/openstack_config.yml b/roles/ceph-osd/tasks/openstack_config.yml new file mode 100644 index 000000000..238abd8f2 --- /dev/null +++ b/roles/ceph-osd/tasks/openstack_config.yml @@ -0,0 +1,75 @@ +--- +- name: list existing pool(s) + command: > + {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} + osd pool get {{ item.name }} size + with_items: "{{ openstack_pools | unique }}" + register: created_pools + delegate_to: "{{ groups[mon_group_name][0] }}" + failed_when: false + +- name: create openstack pool(s) + command: > + {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} + osd pool create {{ item.0.name }} + {{ item.0.pg_num }} + {{ item.0.pgp_num | default(item.0.pg_num) }} + {{ 'replicated_rule' if item.0.rule_name | default('replicated_rule') == '' else item.0.rule_name | default('replicated_rule') }} + {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }} + {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile != '' %} + {{ item.0.erasure_profile }} + {%- endif %} + {{ item.0.expected_num_objects | default('') }} + with_together: + - "{{ openstack_pools | unique }}" + - "{{ created_pools.results }}" + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + when: + - item.1.get('rc', 0) != 0 + +- name: assign application to pool(s) + command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}" + with_items: "{{ openstack_pools | unique }}" + changed_when: false + delegate_to: "{{ groups[mon_group_name][0] }}" + when: + - ceph_release_num[ceph_release] >= ceph_release_num['luminous'] + - item.application is defined + +- name: create openstack cephx key(s) + ceph_key: + state: present + name: "{{ item.name }}" + caps: "{{ item.caps }}" + secret: "{{ item.key | default('') }}" + containerized: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }}" + cluster: "{{ cluster }}" + 
mode: "{{ item.mode|default(omit) }}" + with_items: "{{ openstack_keys }}" + delegate_to: "{{ groups[mon_group_name][0] }}" + when: cephx + +- name: fetch openstack cephx key(s) + fetch: + src: "/etc/ceph/{{ cluster }}.{{ item.name }}.keyring" + dest: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.{{ item.name }}.keyring" + flat: yes + delegate_to: "{{ groups[mon_group_name][0] }}" + with_items: "{{ openstack_keys }}" + +- name: copy to other mons the openstack cephx key(s) + copy: + src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.{{ item.1.name }}.keyring" + dest: "/etc/ceph/{{ cluster }}.{{ item.1.name }}.keyring" + owner: "{{ ceph_uid }}" + group: "{{ ceph_uid }}" + mode: "{{ item.1.mode|default(omit) }}" + with_nested: + - "{{ groups[mon_group_name] }}" + - "{{ openstack_keys }}" + delegate_to: "{{ item.0 }}" + when: + - cephx + - openstack_config + - item.0 != groups[mon_group_name] | last \ No newline at end of file diff --git a/tests/functional/centos/7/cluster/group_vars/all b/tests/functional/centos/7/cluster/group_vars/all index 2ac4acf39..e33f448af 100644 --- a/tests/functional/centos/7/cluster/group_vars/all +++ b/tests/functional/centos/7/cluster/group_vars/all @@ -20,3 +20,23 @@ devices: dedicated_devices: - '/dev/sdc' - '/dev/sdc' +openstack_config: True +openstack_glance_pool: + name: "images" + pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" + pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" + rule_name: "HDD" + type: 1 + erasure_profile: "" + expected_num_objects: "" +openstack_cinder_pool: + name: "volumes" + pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" + pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" + rule_name: "HDD" + type: 1 + erasure_profile: "" + expected_num_objects: "" +openstack_pools: + - "{{ openstack_glance_pool }}" + - "{{ openstack_cinder_pool }}" \ No newline at end of file diff --git a/tests/functional/centos/7/cluster/group_vars/mons b/tests/functional/centos/7/cluster/group_vars/mons index 0919b52cc..fb7a9268b 100644 --- a/tests/functional/centos/7/cluster/group_vars/mons +++ b/tests/functional/centos/7/cluster/group_vars/mons @@ -11,25 +11,4 @@ crush_rule_hdd: type: host default: true crush_rules: - - "{{ crush_rule_hdd }}" - -openstack_config: True -openstack_glance_pool: - name: "images" - pg_num: "{{ osd_pool_default_pg_num }}" - pgp_num: "{{ osd_pool_default_pg_num }}" - rule_name: "HDD" - type: 1 - erasure_profile: "" - expected_num_objects: "" -openstack_cinder_pool: - name: "volumes" - pg_num: "{{ osd_pool_default_pg_num }}" - pgp_num: "{{ osd_pool_default_pg_num }}" - rule_name: "HDD" - type: 1 - erasure_profile: "" - expected_num_objects: "" -openstack_pools: - - "{{ openstack_glance_pool }}" - - "{{ openstack_cinder_pool }}" + - "{{ crush_rule_hdd }}" \ No newline at end of file diff --git a/tests/functional/centos/7/docker/group_vars/all b/tests/functional/centos/7/docker/group_vars/all index 795a76e02..fb2804976 100644 --- a/tests/functional/centos/7/docker/group_vars/all +++ b/tests/functional/centos/7/docker/group_vars/all @@ -21,3 +21,23 @@ ceph_conf_overrides: devices: - '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001' - /dev/sdb +openstack_config: True +openstack_glance_pool: + name: "images" + pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" + pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" + rule_name: "HDD" + type: 1 + 
erasure_profile: "" + expected_num_objects: "" +openstack_cinder_pool: + name: "volumes" + pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" + pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" + rule_name: "HDD" + type: 1 + erasure_profile: "" + expected_num_objects: "" +openstack_pools: + - "{{ openstack_glance_pool }}" + - "{{ openstack_cinder_pool }}" diff --git a/tests/functional/centos/7/docker/group_vars/mons b/tests/functional/centos/7/docker/group_vars/mons index 0919b52cc..d4d1d4334 100644 --- a/tests/functional/centos/7/docker/group_vars/mons +++ b/tests/functional/centos/7/docker/group_vars/mons @@ -12,24 +12,3 @@ crush_rule_hdd: default: true crush_rules: - "{{ crush_rule_hdd }}" - -openstack_config: True -openstack_glance_pool: - name: "images" - pg_num: "{{ osd_pool_default_pg_num }}" - pgp_num: "{{ osd_pool_default_pg_num }}" - rule_name: "HDD" - type: 1 - erasure_profile: "" - expected_num_objects: "" -openstack_cinder_pool: - name: "volumes" - pg_num: "{{ osd_pool_default_pg_num }}" - pgp_num: "{{ osd_pool_default_pg_num }}" - rule_name: "HDD" - type: 1 - erasure_profile: "" - expected_num_objects: "" -openstack_pools: - - "{{ openstack_glance_pool }}" - - "{{ openstack_cinder_pool }}"