The value of doing this is fairly low compared to the complexity it adds.
So we remove these tasks; if the rbd pool on Jewel doesn't have the right PG
value, you can always increase it afterwards.
Signed-off-by: Sébastien Han <seb@redhat.com>
- openstack_config
- inventory_hostname == groups[mon_group_name] | last
-# CEPH creates the rbd pool during the ceph cluster initialization in
-# releases prior to luminous. If the rbd_pool.yml playbook is called too
-# early, the rbd pool does not exist yet.
-- name: include rbd_pool.yml
- include: rbd_pool.yml
- when: ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
-
- name: include create_mds_filesystems.yml
include: create_mds_filesystems.yml
when:
+++ /dev/null
----
-- name: test if rbd exists
- shell: |
- "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} osd pool ls | grep -sq rbd"
- changed_when: false
- failed_when: false
- run_once: true
- check_mode: true
- register: rbd_pool_exist
-
-- name: include rbd_pool_df.yml
- include: rbd_pool_df.yml
- when: rbd_pool_exist.rc == 0
-
-- name: include rbd_pool_pgs.yml
- include: rbd_pool_pgs.yml
- when:
- - rbd_pool_exist.rc == 0
- - ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', False) != False
-
-- name: include rbd_pool_size.yml
- include: rbd_pool_size.yml
- when:
- - rbd_pool_exist.rc == 0
- - ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', False) != False
+++ /dev/null
----
-- name: verify that rbd pool exist
- fail:
- msg: "rbd pool does not exist in rbd_pool_df"
- when: rbd_pool_exist.rc == 0
-
-- name: check rbd pool usage
- shell: |
- "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} df | awk '/rbd/ {print $3}'"
- changed_when: false
- failed_when: false
- check_mode: true
- run_once: true
- register: rbd_pool_df
+++ /dev/null
----
-- name: verify that rbd pool exist
- fail:
- msg: "rbd pool does not exist in rbd_pool_pgs"
- when: rbd_pool_exist.rc == 0
-
-- name: check pg num for rbd pool
- shell: |
- "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool get rbd pg_num | awk '{print $2}'"
- changed_when: false
- failed_when: false
- check_mode: true
- run_once: true
- register: rbd_pool_pgs
-
-- name: destroy and recreate rbd pool if osd_pool_default_pg_num is not honoured
- shell: |
- "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool delete rbd rbd --yes-i-really-really-mean-it"
- "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create rbd {{ ceph_conf_overrides.global.osd_pool_default_pg_num }}"
- changed_when: false
- failed_when: false
- run_once: true
- when:
- - rbd_pool_df.stdout == "0"
- - rbd_pool_pgs.stdout != "{{ ceph_conf_overrides.global.osd_pool_default_pg_num }}"
+++ /dev/null
----
-- name: verify that rbd pool exist
- fail:
- msg: "rbd pool does not exist in rbd_pool_size"
- when: rbd_pool_exist.rc == 0
-
-- name: check size for rbd pool
- shell: |
- "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool get rbd size | awk '{print $2}'"
- changed_when: false
- failed_when: false
- check_mode: true
- run_once: true
- register: rbd_pool_size
-
-- name: change rbd pool size if osd_pool_default_size is not honoured
- command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool set rbd size {{ ceph_conf_overrides.global.osd_pool_default_size }}"
- changed_when: false
- failed_when: false
- run_once: true
- when:
- - rbd_pool_df.stdout == "0"
- - rbd_pool_size.stdout != "{{ ceph_conf_overrides.global.osd_pool_default_size }}"