#docker_pull_timeout: "300s"
-#############
-# OPENSTACK #
-#############
-#openstack_config: false
-# When pg_autoscale_mode is set to True, you must add the target_size_ratio key
-# with an appropriate value; the `pg_num` and `pgp_num` keys will then be
-# ignored, even if specified.
-# e.g.:
-# openstack_glance_pool:
-# name: "images"
-# rule_name: "my_replicated_rule"
-# application: "rbd"
-# pg_autoscale_mode: false
-# pg_num: 16
-# pgp_num: 16
-# target_size_ratio: 0.2
-#openstack_glance_pool:
-# name: "images"
-# application: "rbd"
-#openstack_cinder_pool:
-# name: "volumes"
-# application: "rbd"
-#openstack_nova_pool:
-# name: "vms"
-# application: "rbd"
-#openstack_cinder_backup_pool:
-# name: "backups"
-# application: "rbd"
-#openstack_gnocchi_pool:
-# name: "metrics"
-# application: "rbd"
-#openstack_cephfs_data_pool:
-# name: "manila_data"
-# application: "cephfs"
-#openstack_cephfs_metadata_pool:
-# name: "manila_metadata"
-# application: "cephfs"
-#openstack_pools:
-# - "{{ openstack_glance_pool }}"
-# - "{{ openstack_cinder_pool }}"
-# - "{{ openstack_nova_pool }}"
-# - "{{ openstack_cinder_backup_pool }}"
-# - "{{ openstack_gnocchi_pool }}"
-# - "{{ openstack_cephfs_data_pool }}"
-# - "{{ openstack_cephfs_metadata_pool }}"
-
-
-# The value for 'key' can be a pre-generated key,
-# e.g. key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
-# By default, keys will be auto-generated.
-#
-#openstack_keys:
-# - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
-# - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
-# - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
-# - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
-# - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
-
-
#############
# DASHBOARD #
#############
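
With the `openstack_config` block gone from the sample, deployments that relied on these defaults must create the pools themselves. A minimal sketch of recreating one of them by hand, assuming the monitor inventory group is named `mons` and an admin keyring is present there; the pool name, application tag, and ratio mirror the removed defaults:

```yaml
# Not part of this change: recreate the old default "images" pool manually.
# Assumes an admin keyring on the first monitor host.
- name: Recreate the images pool
  hosts: mons[0]
  gather_facts: false
  tasks:
    - name: Create the pool, tag it for rbd, and give the autoscaler a target
      ansible.builtin.command: "{{ item }}"
      loop:
        - ceph osd pool create images
        - ceph osd pool application enable images rbd
        - ceph osd pool set images target_size_ratio 0.2
      changed_when: true
```
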
docker_pull_timeout: "300s"
-#############
-# OPENSTACK #
-#############
-openstack_config: false
-# When pg_autoscale_mode is set to True, you must add the target_size_ratio key
-# with an appropriate value; the `pg_num` and `pgp_num` keys will then be
-# ignored, even if specified.
-# e.g.:
-# openstack_glance_pool:
-# name: "images"
-# rule_name: "my_replicated_rule"
-# application: "rbd"
-# pg_autoscale_mode: false
-# pg_num: 16
-# pgp_num: 16
-# target_size_ratio: 0.2
-openstack_glance_pool:
- name: "images"
- application: "rbd"
-openstack_cinder_pool:
- name: "volumes"
- application: "rbd"
-openstack_nova_pool:
- name: "vms"
- application: "rbd"
-openstack_cinder_backup_pool:
- name: "backups"
- application: "rbd"
-openstack_gnocchi_pool:
- name: "metrics"
- application: "rbd"
-openstack_cephfs_data_pool:
- name: "manila_data"
- application: "cephfs"
-openstack_cephfs_metadata_pool:
- name: "manila_metadata"
- application: "cephfs"
-openstack_pools:
- - "{{ openstack_glance_pool }}"
- - "{{ openstack_cinder_pool }}"
- - "{{ openstack_nova_pool }}"
- - "{{ openstack_cinder_backup_pool }}"
- - "{{ openstack_gnocchi_pool }}"
- - "{{ openstack_cephfs_data_pool }}"
- - "{{ openstack_cephfs_metadata_pool }}"
-
-
-# The value for 'key' can be a pre-generated key,
-# e.g. key: "AQDC2UxZH4yeLhAAgTaZb+4wDUlYOsr1OfZSpQ=="
-# By default, keys will be auto-generated.
-#
-openstack_keys:
- - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
- - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
- - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
- - { name: client.gnocchi, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_gnocchi_pool.name }}"}, mode: "0600", }
- - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
-
-
#############
# DASHBOARD #
#############
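
The removed `openstack_keys` entries can be recreated with `ceph auth get-or-create`, which generates the secret just as the role's auto-generation did (a pre-generated key would instead be written to a keyring file and loaded with `ceph auth import`). A sketch for the `client.glance` entry, with caps copied from the removed default:

```yaml
# Not part of this change: recreate the client.glance key manually.
# Caps mirror the removed openstack_keys entry for client.glance.
- name: Recreate the glance cephx key
  hosts: mons[0]
  gather_facts: false
  tasks:
    - name: Create the key and write its keyring
      ansible.builtin.command: >
        ceph auth get-or-create client.glance
        mon 'profile rbd'
        osd 'profile rbd pool=volumes, profile rbd pool=images'
        -o /etc/ceph/ceph.client.glance.keyring
      changed_when: true
```
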
ansible.builtin.include_tasks: crush_rules.yml
when: hostvars[groups[mon_group_name][0]]['crush_rule_config'] | default(crush_rule_config) | bool
tags: wait_all_osds_up
-
-# Create the pools listed in openstack_pools
-- name: Include openstack_config.yml
- ansible.builtin.include_tasks: openstack_config.yml
- when:
- - not add_osd | bool
- - not rolling_update | default(False) | bool
- - openstack_config | bool
- - inventory_hostname == groups[osd_group_name] | last
- tags: wait_all_osds_up
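
The dropped include above did not run on every OSD host: the `| last` comparison elects the single host that sorts last in the OSD group (`osd_group_name`, `osds` by default), after all OSDs are up. The same run-once idiom on a throwaway task, for reference:

```yaml
# Hypothetical example of the run-once idiom used by the removed include:
# only the host that sorts last in the OSD group executes the task.
- name: Run on exactly one OSD host
  ansible.builtin.debug:
    msg: "elected host: {{ inventory_hostname }}"
  when: inventory_hostname == groups[osd_group_name] | last
```
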
+++ /dev/null
----
-- name: Pool related tasks
- block:
- - name: Create openstack pool(s)
- ceph_pool:
- name: "{{ item.name }}"
- cluster: "{{ cluster }}"
- pg_num: "{{ item.pg_num | default(omit) }}"
- pgp_num: "{{ item.pgp_num | default(omit) }}"
- size: "{{ item.size | default(omit) }}"
- min_size: "{{ item.min_size | default(omit) }}"
- pool_type: "{{ item.type | default('replicated') }}"
- rule_name: "{{ item.rule_name | default(omit) }}"
- erasure_profile: "{{ item.erasure_profile | default(omit) }}"
- pg_autoscale_mode: "{{ item.pg_autoscale_mode | default(omit) }}"
- target_size_ratio: "{{ item.target_size_ratio | default(omit) }}"
- application: "{{ item.application | default(omit) }}"
- with_items: "{{ openstack_pools }}"
- delegate_to: "{{ groups[mon_group_name][0] }}"
- environment:
- CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
- CEPH_CONTAINER_BINARY: "{{ container_binary }}"
-
-- name: Create openstack cephx key(s)
- when:
- - cephx | bool
- - openstack_config | bool
- block:
- - name: Generate keys
- ceph_key:
- name: "{{ item.name }}"
- caps: "{{ item.caps }}"
- secret: "{{ item.key | default('') }}"
- cluster: "{{ cluster }}"
- mode: "{{ item.mode | default(ceph_keyring_permissions) }}"
- environment:
- CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
- CEPH_CONTAINER_BINARY: "{{ container_binary }}"
- with_items: "{{ openstack_keys }}"
- delegate_to: "{{ groups[mon_group_name][0] }}"
- no_log: "{{ no_log_on_ceph_key_tasks }}"
-
- - name: Get keys from monitors
- ceph_key:
- name: "{{ item.name }}"
- cluster: "{{ cluster }}"
- output_format: plain
- state: info
- environment:
- CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
- CEPH_CONTAINER_BINARY: "{{ container_binary }}"
- register: _osp_keys
- with_items: "{{ openstack_keys }}"
- delegate_to: "{{ groups.get(mon_group_name)[0] }}"
- no_log: "{{ no_log_on_ceph_key_tasks }}"
-
- - name: Copy ceph key(s) if needed
- ansible.builtin.copy:
- dest: "/etc/ceph/{{ cluster }}.{{ item.0.item.name }}.keyring"
- content: "{{ item.0.stdout + '\n' }}"
- owner: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
- group: "{{ ceph_uid if containerized_deployment | bool else 'ceph' }}"
- mode: "{{ item.0.item.mode | default(ceph_keyring_permissions) }}"
- with_nested:
- - "{{ _osp_keys.results }}"
- - "{{ groups[mon_group_name] }}"
- delegate_to: "{{ item.1 }}"
- no_log: "{{ no_log_on_ceph_key_tasks }}"
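
The deleted file's final two tasks read each key back in plain format from the first monitor and wrote it to every monitor via the nested loop. Operators who still need that distribution can approximate it as below; a sketch for a single key, assuming `client.glance` and a `mons` inventory group:

```yaml
# Not part of this change: approximate the deleted fetch-and-copy step
# for one key. Assumes an admin keyring on the first monitor.
- name: Distribute a cephx keyring to all monitors
  hosts: mons
  gather_facts: false
  tasks:
    - name: Export the keyring from the first monitor
      ansible.builtin.command: ceph auth get client.glance
      register: _key_export
      delegate_to: "{{ groups['mons'][0] }}"
      run_once: true
      changed_when: false

    - name: Write the keyring on every monitor
      ansible.builtin.copy:
        dest: "/etc/ceph/ceph.client.glance.keyring"
        content: "{{ _key_export.stdout + '\n' }}"
        owner: ceph
        group: ceph
        mode: "0600"
      no_log: true
```
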
ansible.builtin.fail:
msg: "You must set a target_size_ratio value on following pool: {{ item.name }}."
with_items:
- - "{{ openstack_pools | default([]) }}"
- "{{ cephfs_pools | default([]) }}"
- "{{ pools | default([]) }}"
when:
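
With `openstack_pools` dropped from the loop, the check now covers only `cephfs_pools` and `pools`. The rule it enforces is unchanged: a pool that enables the autoscaler must carry a `target_size_ratio` (the fraction of cluster capacity the autoscaler should budget for the pool); `pg_num` and `pgp_num` are ignored in that mode. An entry that passes, with illustrative values:

```yaml
# Illustrative pool definition that satisfies the check: autoscaling is
# enabled and a target_size_ratio is present.
pools:
  - name: "rbd_bench"
    application: "rbd"
    pg_autoscale_mode: true
    target_size_ratio: 0.1
```
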
cluster_network: "192.168.20.0/24"
rgw_override_bucket_index_max_shards: 16
rgw_bucket_default_quota_max_objects: 1638400
-openstack_config: True
dashboard_enabled: false
ceph_conf_overrides:
global:
ceph_repository: community
radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
ceph_mon_docker_subnet: "{{ public_network }}"
-openstack_config: True
dashboard_enabled: False
public_network: "192.168.17.0/24"
cluster_network: "192.168.18.0/24"
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
mon_max_pg_per_osd: 300
-openstack_config: True
-openstack_glance_pool:
- name: "images"
- size: 1
- target_size_ratio: 0.2
-openstack_cinder_pool:
- name: "volumes"
- rule_name: "HDD"
- size: 1
-openstack_pools:
- - "{{ openstack_glance_pool }}"
- - "{{ openstack_cinder_pool }}"
docker_pull_timeout: 600s
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
mon_max_pg_per_osd: 300
-openstack_config: True
-openstack_glance_pool:
- name: "images"
- size: 1
- application: rbd
- target_size_ratio: 0.2
-openstack_cinder_pool:
- name: "volumes"
- rule_name: "HDD"
- size: 1
- application: rbd
-openstack_pools:
- - "{{ openstack_glance_pool }}"
- - "{{ openstack_cinder_pool }}"
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
mds_max_mds: 2
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
mon_max_pg_per_osd: 300
-openstack_config: True
-openstack_glance_pool:
- name: "images"
- size: 1
- target_size_ratio: 0.2
-openstack_cinder_pool:
- name: "volumes"
- rule_name: "HDD"
- size: 1
-openstack_pools:
- - "{{ openstack_glance_pool }}"
- - "{{ openstack_cinder_pool }}"
docker_pull_timeout: 600s
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
mon_max_pg_per_osd: 300
-openstack_config: True
-openstack_glance_pool:
- name: "images"
- size: 1
- application: rbd
- target_size_ratio: 0.2
-openstack_cinder_pool:
- name: "volumes"
- rule_name: "HDD"
- size: 1
- application: rbd
-openstack_pools:
- - "{{ openstack_glance_pool }}"
- - "{{ openstack_cinder_pool }}"
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
mds_max_mds: 2
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
-openstack_config: False
-openstack_glance_pool:
- name: "images"
- rule_name: "HDD"
- size: 1
-openstack_cinder_pool:
- name: "volumes"
- rule_name: "HDD"
- size: 1
-openstack_pools:
- - "{{ openstack_glance_pool }}"
- - "{{ openstack_cinder_pool }}"
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
dashboard_admin_password: $sX!cD$rYU6qR^B!
node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
-grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4"
\ No newline at end of file
+grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4"
cluster_network: "192.168.32.0/24"
rgw_override_bucket_index_max_shards: 16
rgw_bucket_default_quota_max_objects: 1638400
-openstack_config: True
dashboard_enabled: false
ceph_conf_overrides:
global:
ceph_repository: community
radosgw_interface: "{{ 'eth1' if ansible_facts['distribution'] == 'CentOS' else 'ens6' }}"
ceph_mon_docker_subnet: "{{ public_network }}"
-openstack_config: True
dashboard_enabled: False
public_network: "192.168.31.0/24"
cluster_network: "192.168.32.0/24"
dashboard_enabled: False
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
-openstack_config: True
-openstack_glance_pool:
- name: "images"
- type: 3
- size: 1
- application: rbd
- target_size_ratio: 0.2
-openstack_cinder_pool:
- name: "volumes"
- size: 1
- application: rbd
-openstack_pools:
- - "{{ openstack_glance_pool }}"
- - "{{ openstack_cinder_pool }}"
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
-ceph_docker_image_tag: latest-reef
\ No newline at end of file
+ceph_docker_image_tag: latest-reef
dashboard_enabled: False
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
-openstack_config: True
-openstack_glance_pool:
- name: "images"
- type: 3
- size: 1
- application: rbd
- target_size_ratio: 0.2
-openstack_cinder_pool:
- name: "volumes"
- size: 1
- application: rbd
-openstack_pools:
- - "{{ openstack_glance_pool }}"
- - "{{ openstack_cinder_pool }}"
\ No newline at end of file
+
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
-openstack_config: True
-openstack_glance_pool:
- name: "images"
- rule_name: "HDD"
- size: 1
-openstack_cinder_pool:
- name: "volumes"
- rule_name: "HDD"
- size: 1
-openstack_pools:
- - "{{ openstack_glance_pool }}"
- - "{{ openstack_cinder_pool }}"
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
dashboard_admin_password: $sX!cD$rYU6qR^B!
node_exporter_container_image: "quay.io/prometheus/node-exporter:v0.17.0"
prometheus_container_image: "quay.io/prometheus/prometheus:v2.7.2"
alertmanager_container_image: "quay.io/prometheus/alertmanager:v0.16.2"
-grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4"
\ No newline at end of file
+grafana_container_image: "quay.io/ceph/ceph-grafana:6.7.4"
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
-openstack_config: False
dashboard_enabled: False
copy_admin_key: True
ceph_docker_registry: quay.io
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
-openstack_config: False
dashboard_enabled: False
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
-openstack_config: False
dashboard_enabled: False
ceph_docker_registry: quay.io
ceph_docker_image: ceph/daemon-base
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
-openstack_config: False
dashboard_enabled: False
copy_admin_key: True
ceph_docker_registry: quay.io
ceph_conf_overrides:
global:
osd_pool_default_size: 3
-openstack_config: False
dashboard_enabled: False
copy_admin_key: True
\ No newline at end of file
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
-openstack_config: False
dashboard_enabled: False
copy_admin_key: True
ceph_docker_registry: quay.io
mon_allow_pool_size_one: true
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
-openstack_config: False
dashboard_enabled: False
copy_admin_key: True
ceph_docker_registry: quay.io
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
mon_max_pg_per_osd: 300
-openstack_config: false
docker_pull_timeout: 600s
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
mon_warn_on_pool_no_redundancy: false
osd_pool_default_size: 1
mon_max_pg_per_osd: 300
-openstack_config: false
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
mds_max_mds: 2