#docker_pull_retry: 3
#docker_pull_timeout: "300s"
+
+
+#############
+# OPENSTACK #
+#############
+#openstack_config: false
+#openstack_glance_pool:
+# name: "images"
+# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# rule_name: "replicated_rule"
+# type: 1
+# erasure_profile: ""
+# expected_num_objects: ""
+#openstack_cinder_pool:
+# name: "volumes"
+# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# rule_name: "replicated_rule"
+# type: 1
+# erasure_profile: ""
+# expected_num_objects: ""
+#openstack_nova_pool:
+# name: "vms"
+# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# rule_name: "replicated_rule"
+# type: 1
+# erasure_profile: ""
+# expected_num_objects: ""
+#openstack_cinder_backup_pool:
+# name: "backups"
+# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# rule_name: "replicated_rule"
+# type: 1
+# erasure_profile: ""
+# expected_num_objects: ""
+#openstack_gnocchi_pool:
+# name: "metrics"
+# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# rule_name: "replicated_rule"
+# type: 1
+# erasure_profile: ""
+# expected_num_objects: ""
+
+#openstack_pools:
+# - "{{ openstack_glance_pool }}"
+# - "{{ openstack_cinder_pool }}"
+# - "{{ openstack_nova_pool }}"
+# - "{{ openstack_cinder_backup_pool }}"
+# - "{{ openstack_gnocchi_pool }}"
+
+
+# The value for 'key' can be a pre-generated key,
+# e.g. key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
+# By default, keys will be auto-generated.
+#
+#openstack_keys:
+# - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool=volumes, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
+# - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
+# - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
+# - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
+# - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
+
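
In the re-added block above, pg_num and pgp_num no longer read osd_pool_default_pg_num directly; they resolve it through the hostvars of the first monitor, so the value is still defined when the pools are created from a host outside the mon group (the include of openstack_config.yml is moved to the OSD role further down, with the actual ceph commands delegated to the first mon). A minimal sketch of the lookup, assuming facts were gathered on that first monitor:

    # Illustrative only: the hostvars indirection used in the pool definitions above.
    - name: show the pg_num default taken from the first monitor
      debug:
        msg: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
      # Equivalent to a bare {{ osd_pool_default_pg_num }} on the mon itself,
      # but it also resolves on any other host in the play.
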
# Note that 'host' is mandatory and that you need to submit at least two bucket type (including the host)
#create_crush_tree: false
-
-#############
-# OPENSTACK #
-#############
-#openstack_config: false
-#openstack_glance_pool:
-# name: "images"
-# pg_num: "{{ osd_pool_default_pg_num }}"
-# pgp_num: "{{ osd_pool_default_pg_num }}"
-# rule_name: "replicated_rule"
-# type: 1
-# erasure_profile: ""
-# expected_num_objects: ""
-#openstack_cinder_pool:
-# name: "volumes"
-# pg_num: "{{ osd_pool_default_pg_num }}"
-# pgp_num: "{{ osd_pool_default_pg_num }}"
-# rule_name: "replicated_rule"
-# type: 1
-# erasure_profile: ""
-# expected_num_objects: ""
-#openstack_nova_pool:
-# name: "vms"
-# pg_num: "{{ osd_pool_default_pg_num }}"
-# pgp_num: "{{ osd_pool_default_pg_num }}"
-# rule_name: "replicated_rule"
-# type: 1
-# erasure_profile: ""
-# expected_num_objects: ""
-#openstack_cinder_backup_pool:
-# name: "backups"
-# pg_num: "{{ osd_pool_default_pg_num }}"
-# pgp_num: "{{ osd_pool_default_pg_num }}"
-# rule_name: "replicated_rule"
-# type: 1
-# erasure_profile: ""
-# expected_num_objects: ""
-#openstack_gnocchi_pool:
-# name: "metrics"
-# pg_num: "{{ osd_pool_default_pg_num }}"
-# pgp_num: "{{ osd_pool_default_pg_num }}"
-# rule_name: "replicated_rule"
-# type: 1
-# erasure_profile: ""
-# expected_num_objects: ""
-
-#openstack_pools:
-# - "{{ openstack_glance_pool }}"
-# - "{{ openstack_cinder_pool }}"
-# - "{{ openstack_nova_pool }}"
-# - "{{ openstack_cinder_backup_pool }}"
-# - "{{ openstack_gnocchi_pool }}"
-
-
-# The value for 'key' can be a pre-generated key,
-# e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
-# By default, keys will be auto-generated.
-#
-#openstack_keys:
-# - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool=volumes, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
-# - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
-# - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
-# - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
-# - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
-
-
##########
# DOCKER #
##########
# mimic: 13
# Directory to fetch cluster fsid, keys etc...
-fetch_directory: ~/ceph-ansible-keys
+#fetch_directory: fetch/
# The 'cluster' variable determines the name of the cluster.
# Changing the default value to something else means that you will
# - 'distro' means that no separate repo file will be added
# you will get whatever version of Ceph is included in your Linux distro.
# 'local' means that the ceph binaries will be copied over from the local machine
-ceph_origin: repository
+#ceph_origin: "{{ 'repository' if ceph_rhcs or ceph_stable or ceph_dev or ceph_stable_uca or ceph_custom else 'dummy' }}" # backward compatibility with stable-2.2, will disappear in stable 3.1
#valid_ceph_origins:
# - repository
# - distro
# - local
-ceph_repository: rhcs
+#ceph_repository: "{{ 'community' if ceph_stable else 'rhcs' if ceph_rhcs else 'dev' if ceph_dev else 'uca' if ceph_stable_uca else 'custom' if ceph_custom else 'dummy' }}" # backward compatibility with stable-2.2, will disappear in stable 3.1
#valid_ceph_repository:
# - community
# - rhcs
#docker_pull_retry: 3
#docker_pull_timeout: "300s"
+
+
+#############
+# OPENSTACK #
+#############
+#openstack_config: false
+#openstack_glance_pool:
+# name: "images"
+# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# rule_name: "replicated_rule"
+# type: 1
+# erasure_profile: ""
+# expected_num_objects: ""
+#openstack_cinder_pool:
+# name: "volumes"
+# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# rule_name: "replicated_rule"
+# type: 1
+# erasure_profile: ""
+# expected_num_objects: ""
+#openstack_nova_pool:
+# name: "vms"
+# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# rule_name: "replicated_rule"
+# type: 1
+# erasure_profile: ""
+# expected_num_objects: ""
+#openstack_cinder_backup_pool:
+# name: "backups"
+# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# rule_name: "replicated_rule"
+# type: 1
+# erasure_profile: ""
+# expected_num_objects: ""
+#openstack_gnocchi_pool:
+# name: "metrics"
+# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# rule_name: "replicated_rule"
+# type: 1
+# erasure_profile: ""
+# expected_num_objects: ""
+
+#openstack_pools:
+# - "{{ openstack_glance_pool }}"
+# - "{{ openstack_cinder_pool }}"
+# - "{{ openstack_nova_pool }}"
+# - "{{ openstack_cinder_backup_pool }}"
+# - "{{ openstack_gnocchi_pool }}"
+
+
+# The value for 'key' can be a pre-generated key,
+# e.g. key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
+# By default, keys will be auto-generated.
+#
+#openstack_keys:
+# - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool=volumes, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
+# - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
+# - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
+# - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
+# - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
+
#####################
docker_pull_retry: 3
docker_pull_timeout: "300s"
+
+
+#############
+# OPENSTACK #
+#############
+openstack_config: false
+openstack_glance_pool:
+ name: "images"
+ pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ rule_name: "replicated_rule"
+ type: 1
+ erasure_profile: ""
+ expected_num_objects: ""
+openstack_cinder_pool:
+ name: "volumes"
+ pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ rule_name: "replicated_rule"
+ type: 1
+ erasure_profile: ""
+ expected_num_objects: ""
+openstack_nova_pool:
+ name: "vms"
+ pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ rule_name: "replicated_rule"
+ type: 1
+ erasure_profile: ""
+ expected_num_objects: ""
+openstack_cinder_backup_pool:
+ name: "backups"
+ pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ rule_name: "replicated_rule"
+ type: 1
+ erasure_profile: ""
+ expected_num_objects: ""
+openstack_gnocchi_pool:
+ name: "metrics"
+ pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ rule_name: "replicated_rule"
+ type: 1
+ erasure_profile: ""
+ expected_num_objects: ""
+
+openstack_pools:
+ - "{{ openstack_glance_pool }}"
+ - "{{ openstack_cinder_pool }}"
+ - "{{ openstack_nova_pool }}"
+ - "{{ openstack_cinder_backup_pool }}"
+ - "{{ openstack_gnocchi_pool }}"
+
+
+# The value for 'key' can be a pre-generated key,
+# e.g. key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
+# By default, keys will be auto-generated.
+#
+openstack_keys:
+ - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool=volumes, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
+ - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
+ - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
+ - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
+ - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
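
As the comment above notes, any entry in openstack_keys can carry a pre-generated secret in a 'key' field; the key-creation task below passes it through as secret: "{{ item.key | default('') }}", and when the field is absent the key is auto-generated. A sketch of such an entry, reusing the example value from the comment (not a real secret):

    # Illustrative only: client.glance pinned to a pre-generated key.
    - { name: client.glance, key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ==", caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}" }, mode: "0600" }
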
# Note that 'host' is mandatory and that you need to submit at least two bucket type (including the host)
create_crush_tree: false
-
-#############
-# OPENSTACK #
-#############
-openstack_config: false
-openstack_glance_pool:
- name: "images"
- pg_num: "{{ osd_pool_default_pg_num }}"
- pgp_num: "{{ osd_pool_default_pg_num }}"
- rule_name: "replicated_rule"
- type: 1
- erasure_profile: ""
- expected_num_objects: ""
-openstack_cinder_pool:
- name: "volumes"
- pg_num: "{{ osd_pool_default_pg_num }}"
- pgp_num: "{{ osd_pool_default_pg_num }}"
- rule_name: "replicated_rule"
- type: 1
- erasure_profile: ""
- expected_num_objects: ""
-openstack_nova_pool:
- name: "vms"
- pg_num: "{{ osd_pool_default_pg_num }}"
- pgp_num: "{{ osd_pool_default_pg_num }}"
- rule_name: "replicated_rule"
- type: 1
- erasure_profile: ""
- expected_num_objects: ""
-openstack_cinder_backup_pool:
- name: "backups"
- pg_num: "{{ osd_pool_default_pg_num }}"
- pgp_num: "{{ osd_pool_default_pg_num }}"
- rule_name: "replicated_rule"
- type: 1
- erasure_profile: ""
- expected_num_objects: ""
-openstack_gnocchi_pool:
- name: "metrics"
- pg_num: "{{ osd_pool_default_pg_num }}"
- pgp_num: "{{ osd_pool_default_pg_num }}"
- rule_name: "replicated_rule"
- type: 1
- erasure_profile: ""
- expected_num_objects: ""
-
-openstack_pools:
- - "{{ openstack_glance_pool }}"
- - "{{ openstack_cinder_pool }}"
- - "{{ openstack_nova_pool }}"
- - "{{ openstack_cinder_backup_pool }}"
- - "{{ openstack_gnocchi_pool }}"
-
-
-# The value for 'key' can be a pre-generated key,
-# e.g key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
-# By default, keys will be auto-generated.
-#
-openstack_keys:
- - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool=volumes, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
- - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
- - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
- - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
- - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
-
-
##########
# DOCKER #
##########
when:
- openstack_keys_tmp is defined
-# Create the pools listed in openstack_pools
-- name: include openstack_config.yml
- include: openstack_config.yml
- when:
- - openstack_config
- - inventory_hostname == groups[mon_group_name] | last
-
- name: include create_mds_filesystems.yml
include: create_mds_filesystems.yml
when:
+++ /dev/null
----
-- name: list existing pool(s)
- command: >
- {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
- osd pool get {{ item.name }} size
- with_items: "{{ openstack_pools | unique }}"
- register: created_pools
- failed_when: false
-
-- name: create openstack pool(s)
- command: >
- {{ docker_exec_cmd }} ceph --cluster {{ cluster }}
- osd pool create {{ item.0.name }}
- {{ item.0.pg_num }}
- {{ item.0.pgp_num | default(item.0.pg_num) }}
- {{ 'replicated_rule' if item.0.rule_name | default('replicated_rule') == '' else item.0.rule_name | default('replicated_rule') }}
- {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }}
- {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile != '' %}
- {{ item.0.erasure_profile }}
- {%- endif %}
- {{ item.0.expected_num_objects | default('') }}
- with_together:
- - "{{ openstack_pools | unique }}"
- - "{{ created_pools.results }}"
- changed_when: false
- when:
- - item.1.get('rc', 0) != 0
-
-- name: assign application to pool(s)
- command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
- with_items: "{{ openstack_pools | unique }}"
- changed_when: false
- when:
- - ceph_release_num[ceph_release] >= ceph_release_num['luminous']
- - item.application is defined
-
-- name: create openstack cephx key(s)
- ceph_key:
- state: present
- name: "{{ item.name }}"
- caps: "{{ item.caps }}"
- secret: "{{ item.key | default('') }}"
- containerized: "{{ docker_exec_cmd | default(False) }}"
- cluster: "{{ cluster }}"
- mode: "{{ item.mode|default(omit) }}"
- with_items: "{{ openstack_keys }}"
- when: cephx
-
-- name: fetch openstack cephx key(s)
- fetch:
- src: "/etc/ceph/{{ cluster }}.{{ item.name }}.keyring"
- dest: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.{{ item.name }}.keyring"
- flat: yes
- with_items: "{{ openstack_keys }}"
-
-- name: copy to other mons the openstack cephx key(s)
- copy:
- src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.{{ item.1.name }}.keyring"
- dest: "/etc/ceph/{{ cluster }}.{{ item.1.name }}.keyring"
- owner: "{{ ceph_uid }}"
- group: "{{ ceph_uid }}"
- mode: "{{ item.1.mode|default(omit) }}"
- with_nested:
- - "{{ groups[mon_group_name] }}"
- - "{{ openstack_keys }}"
- delegate_to: "{{ item.0 }}"
- when:
- - cephx
- - openstack_config
- - item.0 != groups[mon_group_name] | last
\ No newline at end of file
- containerized_deployment
# Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent)
static: False
+
+# Create the pools listed in openstack_pools
+- name: include openstack_config.yml
+ include: openstack_config.yml
+ when:
+ - openstack_config
+ - inventory_hostname == groups[osd_group_name] | last
\ No newline at end of file
--- /dev/null
+---
+- name: list existing pool(s)
+ command: >
+ {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
+ osd pool get {{ item.name }} size
+ with_items: "{{ openstack_pools | unique }}"
+ register: created_pools
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ failed_when: false
+
+- name: create openstack pool(s)
+ command: >
+ {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
+ osd pool create {{ item.0.name }}
+ {{ item.0.pg_num }}
+ {{ item.0.pgp_num | default(item.0.pg_num) }}
+ {{ 'replicated_rule' if item.0.rule_name | default('replicated_rule') == '' else item.0.rule_name | default('replicated_rule') }}
+ {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }}
+ {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile != '' %}
+ {{ item.0.erasure_profile }}
+ {%- endif %}
+ {{ item.0.expected_num_objects | default('') }}
+ with_together:
+ - "{{ openstack_pools | unique }}"
+ - "{{ created_pools.results }}"
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ when:
+ - item.1.get('rc', 0) != 0
+
+- name: assign application to pool(s)
+ command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
+ with_items: "{{ openstack_pools | unique }}"
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ when:
+ - ceph_release_num[ceph_release] >= ceph_release_num['luminous']
+ - item.application is defined
+
+- name: create openstack cephx key(s)
+ ceph_key:
+ state: present
+ name: "{{ item.name }}"
+ caps: "{{ item.caps }}"
+ secret: "{{ item.key | default('') }}"
+ containerized: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }}"
+ cluster: "{{ cluster }}"
+ mode: "{{ item.mode|default(omit) }}"
+ with_items: "{{ openstack_keys }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ when: cephx
+
+- name: fetch openstack cephx key(s)
+ fetch:
+ src: "/etc/ceph/{{ cluster }}.{{ item.name }}.keyring"
+ dest: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.{{ item.name }}.keyring"
+ flat: yes
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ with_items: "{{ openstack_keys }}"
+
+- name: copy to other mons the openstack cephx key(s)
+ copy:
+ src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.{{ item.1.name }}.keyring"
+ dest: "/etc/ceph/{{ cluster }}.{{ item.1.name }}.keyring"
+ owner: "{{ ceph_uid }}"
+ group: "{{ ceph_uid }}"
+ mode: "{{ item.1.mode|default(omit) }}"
+ with_nested:
+ - "{{ groups[mon_group_name] }}"
+ - "{{ openstack_keys }}"
+ delegate_to: "{{ item.0 }}"
+ when:
+ - cephx
+ - openstack_config
+ - item.0 != groups[mon_group_name] | last
\ No newline at end of file
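
For orientation, the 'create openstack pool(s)' command template in the new file renders to a single ceph CLI call, run against (or delegated to) the first monitor; a numeric type of 1 is passed through as-is, while the strings 'replicated' and 'erasure' are mapped to 1 and 3. With the default glance pool and an osd_pool_default_pg_num of 64 (an illustrative value, not taken from a real run), the non-containerized rendering would be roughly:

    ceph --cluster ceph osd pool create images 64 64 replicated_rule 1

In a containerized deployment the same command is simply prefixed with the first monitor's docker_exec_cmd wrapper.
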
dedicated_devices:
- '/dev/sdc'
- '/dev/sdc'
+openstack_config: True
+openstack_glance_pool:
+ name: "images"
+ pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ rule_name: "HDD"
+ type: 1
+ erasure_profile: ""
+ expected_num_objects: ""
+openstack_cinder_pool:
+ name: "volumes"
+ pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ rule_name: "HDD"
+ type: 1
+ erasure_profile: ""
+ expected_num_objects: ""
+openstack_pools:
+ - "{{ openstack_glance_pool }}"
+ - "{{ openstack_cinder_pool }}"
\ No newline at end of file
type: host
default: true
crush_rules:
- - "{{ crush_rule_hdd }}"
-
-openstack_config: True
-openstack_glance_pool:
- name: "images"
- pg_num: "{{ osd_pool_default_pg_num }}"
- pgp_num: "{{ osd_pool_default_pg_num }}"
- rule_name: "HDD"
- type: 1
- erasure_profile: ""
- expected_num_objects: ""
-openstack_cinder_pool:
- name: "volumes"
- pg_num: "{{ osd_pool_default_pg_num }}"
- pgp_num: "{{ osd_pool_default_pg_num }}"
- rule_name: "HDD"
- type: 1
- erasure_profile: ""
- expected_num_objects: ""
-openstack_pools:
- - "{{ openstack_glance_pool }}"
- - "{{ openstack_cinder_pool }}"
+ - "{{ crush_rule_hdd }}"
\ No newline at end of file
devices:
- '/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001'
- /dev/sdb
+openstack_config: True
+openstack_glance_pool:
+ name: "images"
+ pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ rule_name: "HDD"
+ type: 1
+ erasure_profile: ""
+ expected_num_objects: ""
+openstack_cinder_pool:
+ name: "volumes"
+ pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ rule_name: "HDD"
+ type: 1
+ erasure_profile: ""
+ expected_num_objects: ""
+openstack_pools:
+ - "{{ openstack_glance_pool }}"
+ - "{{ openstack_cinder_pool }}"
default: true
crush_rules:
- "{{ crush_rule_hdd }}"
-
-openstack_config: True
-openstack_glance_pool:
- name: "images"
- pg_num: "{{ osd_pool_default_pg_num }}"
- pgp_num: "{{ osd_pool_default_pg_num }}"
- rule_name: "HDD"
- type: 1
- erasure_profile: ""
- expected_num_objects: ""
-openstack_cinder_pool:
- name: "volumes"
- pg_num: "{{ osd_pool_default_pg_num }}"
- pgp_num: "{{ osd_pool_default_pg_num }}"
- rule_name: "HDD"
- type: 1
- erasure_profile: ""
- expected_num_objects: ""
-openstack_pools:
- - "{{ openstack_glance_pool }}"
- - "{{ openstack_cinder_pool }}"