# erasure_profile: ""
# expected_num_objects: ""
# application: "rbd"
+# size: ""
#openstack_cinder_pool:
# name: "volumes"
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
# erasure_profile: ""
# expected_num_objects: ""
# application: "rbd"
+# size: ""
#openstack_nova_pool:
# name: "vms"
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
# erasure_profile: ""
# expected_num_objects: ""
# application: "rbd"
+# size: ""
#openstack_cinder_backup_pool:
# name: "backups"
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
# erasure_profile: ""
# expected_num_objects: ""
# application: "rbd"
+# size: ""
#openstack_gnocchi_pool:
# name: "metrics"
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
# erasure_profile: ""
# expected_num_objects: ""
# application: "rbd"
+# size: ""
#openstack_pools:
# - "{{ openstack_glance_pool }}"
# erasure_profile: ""
# expected_num_objects: ""
# application: "rbd"
+# size: ""
#openstack_cinder_pool:
# name: "volumes"
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
# erasure_profile: ""
# expected_num_objects: ""
# application: "rbd"
+# size: ""
#openstack_nova_pool:
# name: "vms"
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
# erasure_profile: ""
# expected_num_objects: ""
# application: "rbd"
+# size: ""
#openstack_cinder_backup_pool:
# name: "backups"
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
# erasure_profile: ""
# expected_num_objects: ""
# application: "rbd"
+# size: ""
#openstack_gnocchi_pool:
# name: "metrics"
# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
# erasure_profile: ""
# expected_num_objects: ""
# application: "rbd"
+# size: ""
#openstack_pools:
# - "{{ openstack_glance_pool }}"
type: 1
erasure_profile: ""
expected_num_objects: ""
+ size: ""
test2:
name: "test2"
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
type: 1
erasure_profile: ""
expected_num_objects: ""
+ size: ""
pools:
- "{{ test }}"
- "{{ test2 }}"
- keys | length > 0
- inventory_hostname == groups.get('_filtered_clients') | first
-- name: list existing pool(s)
- command: >
- {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
- osd pool get {{ item.name }} size
- with_items: "{{ pools }}"
- register: created_pools
- failed_when: false
- delegate_to: "{{ delegated_node }}"
+- name: pool related tasks
when:
- condition_copy_admin_key
- inventory_hostname == groups.get('_filtered_clients', []) | first
+ block:
+ - name: list existing pool(s)
+ command: >
+ {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+ osd pool get {{ item.name }} size
+ with_items: "{{ pools }}"
+ register: created_pools
+ failed_when: false
+ delegate_to: "{{ delegated_node }}"
-- name: create ceph pool(s)
- command: >
- {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
- osd pool create {{ item.0.name }}
- {{ item.0.pg_num }}
- {{ item.0.pgp_num }}
- {{ 'replicated_rule' if item.0.rule_name | default('replicated_rule') == '' else item.0.rule_name | default('replicated_rule') }}
- {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }}
- {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile != '' %}
- {{ item.0.erasure_profile }}
- {%- endif %}
- {{ item.0.expected_num_objects | default('') }}
- with_together:
- - "{{ pools }}"
- - "{{ created_pools.results }}"
- changed_when: false
- delegate_to: "{{ delegated_node }}"
- when:
- - pools | length > 0
- - condition_copy_admin_key
- - inventory_hostname in groups.get('_filtered_clients') | first
- - item.1.rc != 0
+ - name: create ceph pool(s)
+ command: >
+ {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+ osd pool create {{ item.0.name }}
+ {{ item.0.pg_num }}
+ {{ item.0.pgp_num }}
+ {{ 'replicated_rule' if item.0.rule_name | default('replicated_rule') == '' else item.0.rule_name | default('replicated_rule') }}
+ {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }}
+ {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile != '' %}
+ {{ item.0.erasure_profile }}
+ {%- endif %}
+ {{ item.0.expected_num_objects | default('') }}
+ with_together:
+ - "{{ pools }}"
+ - "{{ created_pools.results }}"
+ changed_when: false
+ delegate_to: "{{ delegated_node }}"
+ when:
+ - pools | length > 0
+ - item.1.rc != 0
+
+    # Apply the per-pool replica count after creation; only runs for pools
+    # that declare a non-empty `size` key.
+    - name: customize pool size
+      command: >
+        {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
+        osd pool set {{ item.name }} size {{ item.size | default('') }}
+      with_items: "{{ pools | unique }}"
+      # fix: sibling tasks in this block (list/create) delegate to
+      # `delegated_node`; `delegate_node` is undefined and would fail at runtime
+      delegate_to: "{{ delegated_node }}"
+      changed_when: false
+      when:
+        - pools | length > 0
+        - item.size | default("") != ""
- name: get client cephx keys
copy:
cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
cephfs_pools:
- - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" }
- - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}" }
+ - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
+ - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
## OSD options
#
erasure_profile: ""
expected_num_objects: ""
application: "rbd"
+ size: ""
openstack_cinder_pool:
name: "volumes"
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
erasure_profile: ""
expected_num_objects: ""
application: "rbd"
+ size: ""
openstack_nova_pool:
name: "vms"
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
erasure_profile: ""
expected_num_objects: ""
application: "rbd"
+ size: ""
openstack_cinder_backup_pool:
name: "backups"
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
erasure_profile: ""
expected_num_objects: ""
application: "rbd"
+ size: ""
openstack_gnocchi_pool:
name: "metrics"
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
erasure_profile: ""
expected_num_objects: ""
application: "rbd"
+ size: ""
+openstack_cephfs_data_pool:
+ name: "manila_data"
+ pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ rule_name: "replicated_rule"
+ type: 1
+ erasure_profile: ""
+ expected_num_objects: ""
+ application: "rbd"
+ size: ""
+openstack_cephfs_metadata_pool:
+ name: "manila_metadata"
+ pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ rule_name: "replicated_rule"
+ type: 1
+ erasure_profile: ""
+ expected_num_objects: ""
+ application: "rbd"
+ size: ""
openstack_pools:
- "{{ openstack_glance_pool }}"
- "{{ openstack_nova_pool }}"
- "{{ openstack_cinder_backup_pool }}"
- "{{ openstack_gnocchi_pool }}"
+ - "{{ openstack_cephfs_data_pool }}"
+ - "{{ openstack_cephfs_metadata_pool }}"
# The value for 'key' can be a pre-generated key,
# Whether or not to generate secure certificate to iSCSI gateway nodes
generate_crt: False
+rbd_pool_size: ""
##################
# RBD-TARGET-API #
register: rbd_pool_exists
delegate_to: "{{ groups[mon_group_name][0] }}"
-- name: get default value for osd_pool_default_pg_num
- command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} config get osd_pool_default_pg_num"
- changed_when: false
- register: osd_pool_default_pg_num
- delegate_to: "{{ groups[mon_group_name][0] }}"
+- name: rbd pool related tasks
when: "'rbd' not in (rbd_pool_exists.stdout | from_json)"
+ block:
+ - name: get default value for osd_pool_default_pg_num
+ command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} config get osd_pool_default_pg_num"
+ changed_when: false
+ register: osd_pool_default_pg_num
+ delegate_to: "{{ groups[mon_group_name][0] }}"
-- name: create a rbd pool if it doesn't exist
- command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool create rbd {{ (osd_pool_default_pg_num.stdout | from_json).osd_pool_default_pg_num }}"
- changed_when: false
- delegate_to: "{{ groups[mon_group_name][0] }}"
- when: "'rbd' not in (rbd_pool_exists.stdout | from_json)"
+ - name: create a rbd pool if it doesn't exist
+ command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool create rbd {{ (osd_pool_default_pg_num.stdout | from_json).osd_pool_default_pg_num }}"
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+
+ - name: customize pool size
+ command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool set rbd size {{ rbd_pool_size | default('') }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ changed_when: false
+ when:
+ - rbd_pool_size | default ("") != ""
---
-- name: create filesystem pools
- command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pgs }}"
- changed_when: false
- delegate_to: "{{ groups[mon_group_name][0] }}"
- with_items:
- - "{{ cephfs_pools }}"
+- name: filesystem pools related tasks
+ block:
+ - name: create filesystem pools
+ command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pgs }}"
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ with_items:
+ - "{{ cephfs_pools }}"
+
+ - name: customize pool size
+ command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool set {{ item.name }} size {{ item.size | default('') }}"
+ with_items: "{{ cephfs_pools | unique }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ changed_when: false
+ when: item.size | default ("") != ""
- name: check if ceph filesystem already exists
command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} fs get {{ cephfs }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
until: wait_for_all_osds_up.rc == 0
-- name: list existing pool(s)
- command: >
- {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
- osd pool get {{ item.name }} size
- with_items: "{{ openstack_pools | unique }}"
- register: created_pools
- delegate_to: "{{ groups[mon_group_name][0] }}"
- failed_when: false
+- name: pool related tasks
+ block:
+ - name: list existing pool(s)
+ command: >
+ {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
+ osd pool get {{ item.name }} size
+ with_items: "{{ openstack_pools | unique }}"
+ register: created_pools
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ failed_when: false
-- name: create openstack pool(s)
- command: >
- {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
- osd pool create {{ item.0.name }}
- {{ item.0.pg_num }}
- {{ item.0.pgp_num | default(item.0.pg_num) }}
- {{ 'replicated_rule' if item.0.rule_name | default('replicated_rule') == '' else item.0.rule_name | default('replicated_rule') }}
- {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }}
- {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile != '' %}
- {{ item.0.erasure_profile }}
- {%- endif %}
- {{ item.0.expected_num_objects | default('') }}
- with_together:
- - "{{ openstack_pools | unique }}"
- - "{{ created_pools.results }}"
- changed_when: false
- delegate_to: "{{ groups[mon_group_name][0] }}"
- when:
- - item.1.get('rc', 0) != 0
+ - name: create openstack pool(s)
+ command: >
+ {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
+ osd pool create {{ item.0.name }}
+ {{ item.0.pg_num }}
+ {{ item.0.pgp_num | default(item.0.pg_num) }}
+ {{ 'replicated_rule' if item.0.rule_name | default('replicated_rule') == '' else item.0.rule_name | default('replicated_rule') }}
+ {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }}
+ {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile != '' %}
+ {{ item.0.erasure_profile }}
+ {%- endif %}
+ {{ item.0.expected_num_objects | default('') }}
+ with_together:
+ - "{{ openstack_pools | unique }}"
+ - "{{ created_pools.results }}"
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ when:
+ - item.1.get('rc', 0) != 0
-- name: assign application to pool(s)
- command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
- with_items: "{{ openstack_pools | unique }}"
- changed_when: false
- delegate_to: "{{ groups[mon_group_name][0] }}"
- when:
- - item.application is defined
+ - name: customize pool size
+ command: >
+ {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
+ osd pool set {{ item.name }} size {{ item.size | default('') }}
+ with_items: "{{ openstack_pools | unique }}"
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ changed_when: false
+ when: item.size | default ("") != ""
+
+ - name: assign application to pool(s)
+ command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool application enable {{ item.name }} {{ item.application }}"
+ with_items: "{{ openstack_pools | unique }}"
+ changed_when: false
+ delegate_to: "{{ groups[mon_group_name][0] }}"
+ when:
+ - item.application is defined
- name: create openstack cephx key(s)
ceph_key:
when:
- cephx
- openstack_config
- - item.0 != groups[mon_group_name]
\ No newline at end of file
+ - item.0 != groups[mon_group_name]
+
#rgw_create_pools:
# defaults.rgw.buckets.data:
# pg_num: 16
+# size: ""
# defaults.rgw.buckets.index:
# pg_num: 32
+# size: ""
# foo:
# pg_num: 4
+# size: ""
##########
include_tasks: docker/main.yml
when: containerized_deployment
-- name: create rgw pools if rgw_create_pools is defined
- command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
- changed_when: false
- with_dict: "{{ rgw_create_pools }}"
- delegate_to: "{{ groups[mon_group_name][0] }}"
- run_once: true
+# Create the rgw pools declared in `rgw_create_pools` and optionally set
+# their replica size. `with_dict` exposes each entry as `item.key` (pool
+# name) and `item.value` (its attribute dict).
+- name: rgw pool related tasks
  when:
    - rgw_create_pools is defined
+  block:
+    - name: create rgw pools if rgw_create_pools is defined
+      command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
+      changed_when: false
+      with_dict: "{{ rgw_create_pools }}"
+      delegate_to: "{{ groups[mon_group_name][0] }}"
+      run_once: true
+
+    - name: customize pool size
+      # fix: with_dict items have no `size` attribute — the size lives under
+      # `item.value.size` (cf. `item.value.pg_num` above); `item.size` made
+      # this task and its guard silently no-op
+      command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool set {{ item.key }} size {{ item.value.size | default('') }}"
+      with_dict: "{{ rgw_create_pools }}"
+      delegate_to: "{{ groups[mon_group_name][0] }}"
+      changed_when: false
+      run_once: true
+      when: item.value.size | default("") != ""
{
"ceph_conf_overrides": {
"global": {
- "osd_pool_default_pg_num": 12,
- "osd_pool_default_size": 1
+ "osd_pool_default_pg_num": 12
}
},
+ "cephfs_pools": [
+ {
+ "name": "cephfs_metadata",
+ "pgs": 8,
+ "size": 2
+ },
+ {
+ "name": "cephfs_data",
+ "pgs": 8,
+ "size": 2
+ }
+ ],
"ceph_mon_docker_memory_limit": "2g"
}
type: 1
erasure_profile: ""
expected_num_objects: ""
+ size: ""
openstack_cinder_pool:
name: "volumes"
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
type: 1
erasure_profile: ""
expected_num_objects: ""
+ size: ""
openstack_pools:
- "{{ openstack_glance_pool }}"
- - "{{ openstack_cinder_pool }}"
\ No newline at end of file
+ - "{{ openstack_cinder_pool }}"
type: 1
erasure_profile: ""
expected_num_objects: ""
+ size: ""
test2:
name: "test2"
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
type: 1
erasure_profile: ""
expected_num_objects: ""
+ size: ""
pools:
- "{{ test }}"
- "{{ test2 }}"
type: 1
erasure_profile: ""
expected_num_objects: ""
+ size: ""
openstack_cinder_pool:
name: "volumes"
pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
type: 1
erasure_profile: ""
expected_num_objects: ""
+ size: ""
openstack_pools:
- "{{ openstack_glance_pool }}"
- - "{{ openstack_cinder_pool }}"
\ No newline at end of file
+ - "{{ openstack_cinder_pool }}"