The rbd pool is the default pool that gets created during ceph cluster
initialization. If we perform the rbd-related operations too early, the
rbd pool does not exist yet. Move the call that performs the rbd
operations to a later stage, after the other pools have been created.
The rbd_pool.yml playbook contains all the operations related to the rbd pool.
Replace the deprecated always_run directive with its check_mode equivalent.
Most of the ceph-related tasks only need to run once; the run_once directive
executes the task only on the first host.
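As a minimal sketch (the task is only illustrative, not part of the diff):
check_mode: false is the documented replacement for the deprecated
always_run: true, and run_once limits the query to a single host.

  - name: example read-only ceph query
    shell: ceph --cluster {{ cluster }} osd pool ls
    changed_when: false
    failed_when: false
    check_mode: false   # equivalent of always_run: true, also runs under --check
    run_once: true      # execute the query on the first host only
    register: pool_list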
The ceph sub-command to delete a pool is delete (not rm).
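For reference, a sketch of the removal wrapped in an ansible task; it
mirrors the recreate step in rbd_pool_pgs.yml below.

  - name: delete the rbd pool
    # the pool name has to be given twice, plus the safety flag
    command: ceph --connect-timeout 5 --cluster {{ cluster }} osd pool delete rbd rbd --yes-i-really-really-mean-it
    run_once: true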
The changes submitted here were tested with this ceph version:
ceph version 0.94.9-9.el7cp (b83334e01379f267fb2f9ce729d74a0a8fa1e92c)
This upload includes these changes:
- Use the fail module (instead of assert).
- From the luminous release onward, the rbd pool is no longer created by
  default. Delete the code that creates the rbd pool for the luminous release.
- Conform the .yml files to the suggested syntax.
The commands are executed on the mcp nodes, and I think the shell ansible
module is the right one to use here, since most of the queries pipe their
output through grep or awk. The command module also executes commands on
remote nodes, so I can switch to it if that is preferred.
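To make the difference concrete, a minimal side-by-side sketch (the task
names are only illustrative):

  - name: query that needs the shell module
    # the pipe is interpreted by a shell, so the shell module is required
    shell: ceph --cluster {{ cluster }} osd pool ls | grep -sq rbd
    changed_when: false
    failed_when: false

  - name: plain invocation that the command module can run
    # no pipes or redirection, so the command module is sufficient
    command: ceph --cluster {{ cluster }} osd pool ls
    changed_when: false
    failed_when: false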
when:
- crush_rule_config
-- name: test if rbd exists
- shell: |
- ceph --cluster {{ cluster }} osd pool ls | grep -sq rbd
- changed_when: false
- failed_when: false
- register: rbd_pool_exist
-
-- name: include rbd_pool.yml
- include: rbd_pool.yml
- when: rbd_pool_exist.rc == 0
-
-- name: include rbd_pool_pgs.yml
- include: rbd_pool_pgs.yml
- when:
- - rbd_pool_exist.rc == 0
- - global_in_ceph_conf_overrides
- - ceph_conf_overrides.global.osd_pool_default_pg_num is defined
-
-- name: include rbd_pool_size.yml
- include: rbd_pool_size.yml
- when:
- - rbd_pool_exist.rc == 0
- - global_in_ceph_conf_overrides
- - ceph_conf_overrides.global.osd_pool_default_size is defined
-
-- name: create rbd pool on luminous
- shell: ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create rbd {{ ceph_conf_overrides.global.osd_pool_default_pg_num }}
- changed_when: false
- failed_when: false
- when:
- - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
- - global_in_ceph_conf_overrides
- - ceph_conf_overrides.global.osd_pool_default_pg_num is defined
- - rbd_pool_exist.rc != 0
-
+# Create the pools listed in openstack_pools
- name: include openstack_config.yml
include: openstack_config.yml
when:
- openstack_config
- inventory_hostname == groups[mon_group_name] | last
+# Ceph creates the rbd pool during the ceph cluster initialization in
+# releases prior to luminous. If the rbd_pool.yml playbook is called too
+# early, the rbd pool does not exist yet.
+- name: include rbd_pool.yml
+ include: rbd_pool.yml
+ when: ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
+
- name: find ceph keys
shell: ls -1 /etc/ceph/*.keyring
changed_when: false
---
-- name: check rbd pool usage
+- name: test if rbd exists
shell: |
- ceph --connect-timeout 5 --cluster {{ cluster }} df | awk '/rbd/ {print $3}'
+ ceph --cluster {{ cluster }} osd pool ls | grep -sq rbd
changed_when: false
failed_when: false
- always_run: true
- register: rbd_pool_df
+ run_once: true
+ check_mode: false
+ register: rbd_pool_exist
-- name: check pg num for rbd pool
- shell: |
- ceph --connect-timeout 5 --cluster {{ cluster }} osd pool get rbd pg_num | awk '{print $2}'
- changed_when: false
- failed_when: false
- always_run: true
- register: rbd_pool_pgs
+- name: include rbd_pool_df.yml
+ include: rbd_pool_df.yml
+ when: rbd_pool_exist.rc == 0
+
+- name: include rbd_pool_pgs.yml
+ include: rbd_pool_pgs.yml
+ when:
+ - rbd_pool_exist.rc == 0
+ - global_in_ceph_conf_overrides
+ - ceph_conf_overrides.global.osd_pool_default_pg_num is defined
+
+- name: include rbd_pool_size.yml
+ include: rbd_pool_size.yml
+ when:
+ - rbd_pool_exist.rc == 0
+ - global_in_ceph_conf_overrides
+ - ceph_conf_overrides.global.osd_pool_default_size is defined
+# In the luminous release, ceph does not create the rbd pool by default.
--- /dev/null
+---
+- name: verify that the rbd pool exists
+ fail:
+ msg: "rbd pool does not exist in rbd_pool_df"
+ when: rbd_pool_exist.rc != 0
+
+- name: check rbd pool usage
+ shell: |
+ ceph --connect-timeout 5 --cluster {{ cluster }} df | awk '/rbd/ {print $3}'
+ changed_when: false
+ failed_when: false
+ check_mode: false
+ run_once: true
+ register: rbd_pool_df
---
+- name: verify that the rbd pool exists
+ fail:
+ msg: "rbd pool does not exist in rbd_pool_pgs"
+ when: rbd_pool_exist.rc != 0
+
+- name: check pg num for rbd pool
+ shell: |
+ ceph --connect-timeout 5 --cluster {{ cluster }} osd pool get rbd pg_num | awk '{print $2}'
+ changed_when: false
+ failed_when: false
+ check_mode: false
+ run_once: true
+ register: rbd_pool_pgs
+
- name: destroy and recreate rbd pool if osd_pool_default_pg_num is not honoured
shell: |
- ceph --connect-timeout 5 --cluster {{ cluster }} osd pool rm rbd rbd --yes-i-really-really-mean-it
+ ceph --connect-timeout 5 --cluster {{ cluster }} osd pool delete rbd rbd --yes-i-really-really-mean-it
ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create rbd {{ ceph_conf_overrides.global.osd_pool_default_pg_num }}
changed_when: false
failed_when: false
+ run_once: true
when:
- rbd_pool_df.stdout == "0"
- rbd_pool_pgs.stdout != "{{ ceph_conf_overrides.global.osd_pool_default_pg_num }}"
---
+- name: verify that the rbd pool exists
+ fail:
+ msg: "rbd pool does not exist in rbd_pool_size"
+ when: rbd_pool_exist.rc != 0
+
- name: check size for rbd pool
shell: |
ceph --connect-timeout 5 --cluster {{ cluster }} osd pool get rbd size | awk '{print $2}'
changed_when: false
failed_when: false
- always_run: true
+ check_mode: false
+ run_once: true
register: rbd_pool_size
- name: change rbd pool size if osd_pool_default_size is not honoured
command: ceph --connect-timeout 5 --cluster {{ cluster }} osd pool set rbd size {{ ceph_conf_overrides.global.osd_pool_default_size }}
changed_when: false
failed_when: false
+ run_once: true
when:
- rbd_pool_df.stdout == "0"
- rbd_pool_size.stdout != "{{ ceph_conf_overrides.global.osd_pool_default_size }}"