##########
# CEPHFS #
##########
+# When pg_autoscale_mode is set to True, you must also set the target_size_ratio key to an appropriate value;
+# the `pg_num` and `pgp_num` keys will then be ignored, even if specified.
+# e.g.:
+# cephfs_data_pool:
+# name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
+# rule_name: "replicated_rule"
+# type: 1
+# erasure_profile: ""
+# expected_num_objects: ""
+# application: "cephfs"
+# size: "{{ osd_pool_default_size }}"
+# min_size: "{{ osd_pool_default_min_size }}"
+# pg_autoscale_mode: False
+# target_size_ratio: 0.2
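+# (target_size_ratio is a hint to the pg autoscaler about the fraction of the
+# cluster's capacity the pool is expected to consume.)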
#cephfs: cephfs # name of the ceph filesystem
#cephfs_data_pool:
# name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
# application: "cephfs"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
+# pg_autoscale_mode: False
#cephfs_metadata_pool:
# name: "{{ cephfs_metadata if cephfs_metadata is defined else 'cephfs_metadata' }}"
# pg_num: "{{ osd_pool_default_pg_num }}"
# application: "cephfs"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
+# pg_autoscale_mode: False
#cephfs_pools:
# - "{{ cephfs_data_pool }}"
# - "{{ cephfs_metadata_pool }}"
# OPENSTACK #
#############
#openstack_config: false
+# When pg_autoscale_mode is set to True, you must also set the target_size_ratio key to an appropriate value;
+# the `pg_num` and `pgp_num` keys will then be ignored, even if specified.
+# e.g.:
+# openstack_glance_pool:
+# name: "images"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
+# rule_name: "replicated_rule"
+# type: 1
+# erasure_profile: ""
+# expected_num_objects: ""
+# application: "rbd"
+# size: "{{ osd_pool_default_size }}"
+# min_size: "{{ osd_pool_default_min_size }}"
+# pg_autoscale_mode: False
+# target_size_ratio: 0.2
#openstack_glance_pool:
# name: "images"
# pg_num: "{{ osd_pool_default_pg_num }}"
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
+# pg_autoscale_mode: False
#openstack_cinder_pool:
# name: "volumes"
# pg_num: "{{ osd_pool_default_pg_num }}"
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
+# pg_autoscale_mode: False
#openstack_nova_pool:
# name: "vms"
# pg_num: "{{ osd_pool_default_pg_num }}"
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
+# pg_autoscale_mode: False
#openstack_cinder_backup_pool:
# name: "backups"
# pg_num: "{{ osd_pool_default_pg_num }}"
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
+# pg_autoscale_mode: False
#openstack_gnocchi_pool:
# name: "metrics"
# pg_num: "{{ osd_pool_default_pg_num }}"
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
+# pg_autoscale_mode: False
#openstack_cephfs_data_pool:
# name: "manila_data"
# pg_num: "{{ osd_pool_default_pg_num }}"
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
+# pg_autoscale_mode: False
#openstack_cephfs_metadata_pool:
# name: "manila_metadata"
# pg_num: "{{ osd_pool_default_pg_num }}"
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
-
+# pg_autoscale_mode: False
#openstack_pools:
# - "{{ openstack_glance_pool }}"
# - "{{ openstack_cinder_pool }}"
#copy_admin_key: false
#user_config: false
+# When pg_autoscale_mode is set to True, you must also set the target_size_ratio key to an appropriate value;
+# the `pg_num` and `pgp_num` keys will then be ignored, even if specified.
+# e.g.:
+# test:
+# name: "test"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
+# rule_name: "replicated_rule"
+# application: "rbd"
+# type: 1
+# erasure_profile: ""
+# expected_num_objects: ""
+# size: "{{ osd_pool_default_size }}"
+# min_size: "{{ osd_pool_default_min_size }}"
+# pg_autoscale_mode: False
+# target_size_ratio: 0.2
#test:
# name: "test"
# pg_num: "{{ osd_pool_default_pg_num }}"
# expected_num_objects: ""
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
+# pg_autoscale_mode: False
#test2:
# name: "test2"
# pg_num: "{{ osd_pool_default_pg_num }}"
# expected_num_objects: ""
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
+# pg_autoscale_mode: False
#pools:
# - "{{ test }}"
# - "{{ test2 }}"
##########
# CEPHFS #
##########
+# When pg_autoscale_mode is set to True, you must also set the target_size_ratio key to an appropriate value;
+# the `pg_num` and `pgp_num` keys will then be ignored, even if specified.
+# e.g.:
+# cephfs_data_pool:
+# name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
+# rule_name: "replicated_rule"
+# type: 1
+# erasure_profile: ""
+# expected_num_objects: ""
+# application: "cephfs"
+# size: "{{ osd_pool_default_size }}"
+# min_size: "{{ osd_pool_default_min_size }}"
+# pg_autoscale_mode: False
+# target_size_ratio: 0.2
#cephfs: cephfs # name of the ceph filesystem
#cephfs_data_pool:
# name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
# application: "cephfs"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
+# pg_autoscale_mode: False
#cephfs_metadata_pool:
# name: "{{ cephfs_metadata if cephfs_metadata is defined else 'cephfs_metadata' }}"
# pg_num: "{{ osd_pool_default_pg_num }}"
# application: "cephfs"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
+# pg_autoscale_mode: False
#cephfs_pools:
# - "{{ cephfs_data_pool }}"
# - "{{ cephfs_metadata_pool }}"
# OPENSTACK #
#############
#openstack_config: false
+# When pg_autoscale_mode is set to True, you must also set the target_size_ratio key to an appropriate value;
+# the `pg_num` and `pgp_num` keys will then be ignored, even if specified.
+# e.g.:
+# openstack_glance_pool:
+# name: "images"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
+# rule_name: "replicated_rule"
+# type: 1
+# erasure_profile: ""
+# expected_num_objects: ""
+# application: "rbd"
+# size: "{{ osd_pool_default_size }}"
+# min_size: "{{ osd_pool_default_min_size }}"
+# pg_autoscale_mode: False
+# target_size_ratio: 0.2
#openstack_glance_pool:
# name: "images"
# pg_num: "{{ osd_pool_default_pg_num }}"
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
+# pg_autoscale_mode: False
#openstack_cinder_pool:
# name: "volumes"
# pg_num: "{{ osd_pool_default_pg_num }}"
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
+# pg_autoscale_mode: False
#openstack_nova_pool:
# name: "vms"
# pg_num: "{{ osd_pool_default_pg_num }}"
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
+# pg_autoscale_mode: False
#openstack_cinder_backup_pool:
# name: "backups"
# pg_num: "{{ osd_pool_default_pg_num }}"
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
+# pg_autoscale_mode: False
#openstack_gnocchi_pool:
# name: "metrics"
# pg_num: "{{ osd_pool_default_pg_num }}"
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
+# pg_autoscale_mode: False
#openstack_cephfs_data_pool:
# name: "manila_data"
# pg_num: "{{ osd_pool_default_pg_num }}"
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
+# pg_autoscale_mode: False
#openstack_cephfs_metadata_pool:
# name: "manila_metadata"
# pg_num: "{{ osd_pool_default_pg_num }}"
# application: "rbd"
# size: "{{ osd_pool_default_size }}"
# min_size: "{{ osd_pool_default_min_size }}"
-
+# pg_autoscale_mode: False
#openstack_pools:
# - "{{ openstack_glance_pool }}"
# - "{{ openstack_cinder_pool }}"
copy_admin_key: false
user_config: false
+# When pg_autoscale_mode is set to True, you must also set the target_size_ratio key to an appropriate value;
+# the `pg_num` and `pgp_num` keys will then be ignored, even if specified.
+# e.g.:
+# test:
+# name: "test"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
+# rule_name: "replicated_rule"
+# application: "rbd"
+# type: 1
+# erasure_profile: ""
+# expected_num_objects: ""
+# size: "{{ osd_pool_default_size }}"
+# min_size: "{{ osd_pool_default_min_size }}"
+# pg_autoscale_mode: False
+# target_size_ratio: 0.2
test:
name: "test"
pg_num: "{{ osd_pool_default_pg_num }}"
expected_num_objects: ""
size: "{{ osd_pool_default_size }}"
min_size: "{{ osd_pool_default_min_size }}"
+ pg_autoscale_mode: False
test2:
name: "test2"
pg_num: "{{ osd_pool_default_pg_num }}"
expected_num_objects: ""
size: "{{ osd_pool_default_size }}"
min_size: "{{ osd_pool_default_min_size }}"
+ pg_autoscale_mode: False
pools:
- "{{ test }}"
- "{{ test2 }}"
command: >
{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
osd pool create {{ item.0.name }}
- {{ item.0.pg_num | default(osd_pool_default_pg_num) }}
- {{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) }}
+ {{ item.0.pg_num | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else 16 }}
+ {{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else '' }}
{%- if item.0.type | default(1) | int == 1 or item.0.type | default('replicated') == 'replicated' %}
replicated
{{ item.0.rule_name | default(osd_pool_default_crush_rule) }}
+ {{ item.0.expected_num_objects | default(0) }}
{%- else %}
erasure
{{ item.0.erasure_profile }}
- {{ item.0.rule_name | default('erasure-code') }}
{%- endif %}
- {{ item.0.expected_num_objects | default(0) }}
with_together:
- "{{ pools }}"
- "{{ created_pools.results }}"
- pools | length > 0
- item.1.rc != 0
+ - name: set the target ratio on pool(s)
+ command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} target_size_ratio {{ item.target_size_ratio }}"
+ with_items: "{{ pools | unique }}"
+ delegate_to: "{{ delegated_node }}"
+ when: item.pg_autoscale_mode | default(False) | bool
+
+ - name: set pg_autoscale_mode value on pool(s)
+ command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} pg_autoscale_mode {{ item.pg_autoscale_mode | default(False) | ternary('on', 'warn') }}"
+ delegate_to: "{{ delegated_node }}"
+ with_items: "{{ pools | unique }}"
+
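+ # Illustrative sketch, not executed: for a pool such as `test` above with
+ # pg_autoscale_mode: True, the create command renders roughly as
+ #   ceph --cluster ceph osd pool create test 16 replicated replicated_rule 0
+ # (fixed pg_num of 16, empty pgp_num; cluster and rule names assumed here),
+ # and the task above then enables the autoscaler with `pg_autoscale_mode on`.
+ # Pools that leave pg_autoscale_mode unset or False are set to `warn` instead.
+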
- name: customize pool size
command: >
{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
##########
# CEPHFS #
##########
+# When pg_autoscale_mode is set to True, you must also set the target_size_ratio key to an appropriate value;
+# the `pg_num` and `pgp_num` keys will then be ignored, even if specified.
+# e.g.:
+# cephfs_data_pool:
+# name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
+# rule_name: "replicated_rule"
+# type: 1
+# erasure_profile: ""
+# expected_num_objects: ""
+# application: "cephfs"
+# size: "{{ osd_pool_default_size }}"
+# min_size: "{{ osd_pool_default_min_size }}"
+# pg_autoscale_mode: False
+# target_size_ratio: 0.2
cephfs: cephfs # name of the ceph filesystem
cephfs_data_pool:
name: "{{ cephfs_data if cephfs_data is defined else 'cephfs_data' }}"
application: "cephfs"
size: "{{ osd_pool_default_size }}"
min_size: "{{ osd_pool_default_min_size }}"
+ pg_autoscale_mode: False
cephfs_metadata_pool:
name: "{{ cephfs_metadata if cephfs_metadata is defined else 'cephfs_metadata' }}"
pg_num: "{{ osd_pool_default_pg_num }}"
application: "cephfs"
size: "{{ osd_pool_default_size }}"
min_size: "{{ osd_pool_default_min_size }}"
+ pg_autoscale_mode: False
cephfs_pools:
- "{{ cephfs_data_pool }}"
- "{{ cephfs_metadata_pool }}"
# OPENSTACK #
#############
openstack_config: false
+# When pg_autoscale_mode is set to True, you must also set the target_size_ratio key to an appropriate value;
+# the `pg_num` and `pgp_num` keys will then be ignored, even if specified.
+# e.g.:
+# openstack_glance_pool:
+# name: "images"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
+# rule_name: "replicated_rule"
+# type: 1
+# erasure_profile: ""
+# expected_num_objects: ""
+# application: "rbd"
+# size: "{{ osd_pool_default_size }}"
+# min_size: "{{ osd_pool_default_min_size }}"
+# pg_autoscale_mode: False
+# target_size_ratio: 0.2
openstack_glance_pool:
name: "images"
pg_num: "{{ osd_pool_default_pg_num }}"
application: "rbd"
size: "{{ osd_pool_default_size }}"
min_size: "{{ osd_pool_default_min_size }}"
+ pg_autoscale_mode: False
openstack_cinder_pool:
name: "volumes"
pg_num: "{{ osd_pool_default_pg_num }}"
application: "rbd"
size: "{{ osd_pool_default_size }}"
min_size: "{{ osd_pool_default_min_size }}"
+ pg_autoscale_mode: False
openstack_nova_pool:
name: "vms"
pg_num: "{{ osd_pool_default_pg_num }}"
application: "rbd"
size: "{{ osd_pool_default_size }}"
min_size: "{{ osd_pool_default_min_size }}"
+ pg_autoscale_mode: False
openstack_cinder_backup_pool:
name: "backups"
pg_num: "{{ osd_pool_default_pg_num }}"
application: "rbd"
size: "{{ osd_pool_default_size }}"
min_size: "{{ osd_pool_default_min_size }}"
+ pg_autoscale_mode: False
openstack_gnocchi_pool:
name: "metrics"
pg_num: "{{ osd_pool_default_pg_num }}"
application: "rbd"
size: "{{ osd_pool_default_size }}"
min_size: "{{ osd_pool_default_min_size }}"
+ pg_autoscale_mode: False
openstack_cephfs_data_pool:
name: "manila_data"
pg_num: "{{ osd_pool_default_pg_num }}"
application: "rbd"
size: "{{ osd_pool_default_size }}"
min_size: "{{ osd_pool_default_min_size }}"
+ pg_autoscale_mode: False
openstack_cephfs_metadata_pool:
name: "manila_metadata"
pg_num: "{{ osd_pool_default_pg_num }}"
application: "rbd"
size: "{{ osd_pool_default_size }}"
min_size: "{{ osd_pool_default_min_size }}"
-
+ pg_autoscale_mode: False
openstack_pools:
- "{{ openstack_glance_pool }}"
- "{{ openstack_cinder_pool }}"
command: >
{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }}
osd pool create {{ item.name }}
- {{ item.pg_num | default(osd_pool_default_pg_num) }}
- {{ item.pgp_num | default(item.pg_num) | default(osd_pool_default_pg_num) }}
+ {{ item.pg_num | default(osd_pool_default_pg_num) if not item.pg_autoscale_mode | default(False) | bool else 16 }}
+ {{ item.pgp_num | default(item.pg_num) | default(osd_pool_default_pg_num) if not item.pg_autoscale_mode | default(False) | bool else '' }}
{%- if item.type | default(1) | int == 1 or item.type | default('replicated') == 'replicated' %}
replicated
{{ item.rule_name | default(osd_pool_default_crush_rule) }}
+ {{ item.expected_num_objects | default(0) }}
{%- else %}
erasure
{{ item.erasure_profile }}
- {{ item.rule_name | default('erasure-code') }}
{%- endif %}
- {{ item.expected_num_objects | default(0) }}
changed_when: false
with_items:
- "{{ cephfs_pools }}"
+ - name: set the target ratio on pool(s)
+ command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} target_size_ratio {{ item.target_size_ratio }}"
+ with_items: "{{ cephfs_pools | unique }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ when: item.pg_autoscale_mode | default(False) | bool
+
+ - name: set pg_autoscale_mode value on pool(s)
+ command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} pg_autoscale_mode {{ item.pg_autoscale_mode | default(False) | ternary('on', 'warn') }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ with_items: "{{ cephfs_pools | unique }}"
+
- name: customize pool size
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} size {{ item.size | default(osd_pool_default_size) }}"
with_items: "{{ cephfs_pools | unique }}"
command: >
{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
osd pool create {{ item.0.name }}
- {{ item.0.pg_num | default(osd_pool_default_pg_num) }}
- {{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) }}
+ {{ item.0.pg_num | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else 16 }}
+ {{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else '' }}
{%- if item.0.type | default(1) | int == 1 or item.0.type | default('replicated') == 'replicated' %}
replicated
{{ item.0.rule_name | default(osd_pool_default_crush_rule) }}
+ {{ item.0.expected_num_objects | default(0) }}
{%- else %}
erasure
{{ item.0.erasure_profile }}
- {{ item.0.rule_name | default('erasure-code') }}
{%- endif %}
- {{ item.0.expected_num_objects | default(0) }}
with_together:
- "{{ openstack_pools | unique }}"
- "{{ created_pools.results }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
when: item.1.get('rc', 0) != 0
+ - name: set the target ratio on pool(s)
+ command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} target_size_ratio {{ item.target_size_ratio }}"
+ with_items: "{{ openstack_pools | unique }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ when: item.pg_autoscale_mode | default(False) | bool
+
+ - name: set pg_autoscale_mode value on pool(s)
+ command: "{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} pg_autoscale_mode {{ item.pg_autoscale_mode | default(False) | ternary('on', 'warn') }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ with_items: "{{ openstack_pools | unique }}"
+
- name: customize pool size
command: >
{{ hostvars[groups[mon_group_name][0]]['container_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
--- /dev/null
+---
+- name: fail if target_size_ratio is not set when pg_autoscale_mode is True
+ fail:
+ msg: "You must set a target_size_ratio value on following pool: {{ item.name }}."
+ with_items:
+ - "{{ openstack_pools | default([]) }}"
+ - "{{ cephfs_pools | default([]) }}"
+ - "{{ pools | default([]) }}"
+ when:
+ - item.pg_autoscale_mode | default(False) | bool
+ - item.target_size_ratio is undefined
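+
+# Example (hypothetical pool definition, for illustration only): a pool declared
+# like the following would trip the failure above, because the autoscaler is
+# requested but no target_size_ratio is given:
+#
+# test:
+#   name: "test"
+#   application: "rbd"
+#   size: "{{ osd_pool_default_size }}"
+#   min_size: "{{ osd_pool_default_min_size }}"
+#   pg_autoscale_mode: True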