]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
Only perform actions on the rbd pool after it has been created
authorAl Lau <alau2@cisco.com>
Fri, 29 Sep 2017 17:19:05 +0000 (10:19 -0700)
committerGuillaume Abrioux <gabrioux@redhat.com>
Wed, 4 Oct 2017 13:40:10 +0000 (15:40 +0200)
The rbd pool is the default pool that gets created during ceph cluster
initialization.  If we act on the rbd related operations too early, the
rbd pool does not exist yet.  Move the call to perform rbd operations
to a later stage after other pools have been created.

The rbd_pool.yml playbook has all the operations related to the rbd pool.

Replace the always_run (deprecated) directive with check_mode.

Most of the ceph related tasks only need to run once.  The run_once directive
executes the task on the first host.

The ceph sub-command to delete a pool is delete (not rm).

The changes submitted here were tested with this ceph version.
ceph version 0.94.9-9.el7cp (b83334e01379f267fb2f9ce729d74a0a8fa1e92c)

This upload includes these changes:
  - Use the fail module (instead of assert).
  - From luminous release, the rbd pool is no longer created by default.
    Delete the code to create the rbd pool for luminous release
  - Conform the .yml files to use the suggested syntax.

The commands are executed on the mcp nodes and I think shell ansible module
is the right one to use.  The command module is used to execute commands on
remote nodes.  I can make the change to use command module if that is
preferred.

roles/ceph-mon/tasks/ceph_keys.yml
roles/ceph-mon/tasks/rbd_pool.yml
roles/ceph-mon/tasks/rbd_pool_df.yml [new file with mode: 0644]
roles/ceph-mon/tasks/rbd_pool_luminous.yml [new file with mode: 0644]
roles/ceph-mon/tasks/rbd_pool_pgs.yml
roles/ceph-mon/tasks/rbd_pool_size.yml

index 263bf9f2aa40f7ee9c1d5d2cd7220b2c0df16bf1..49eedaa49220bbc7edb34d55c5f49db2c8b87928 100644 (file)
   when:
     - crush_rule_config
 
-- name: test if rbd exists
-  shell: |
-    ceph --cluster {{ cluster }} osd pool ls | grep -sq rbd
-  changed_when: false
-  failed_when: false
-  register: rbd_pool_exist
-
-- name: include rbd_pool.yml
-  include: rbd_pool.yml
-  when: rbd_pool_exist.rc == 0
-
-- name: include rbd_pool_pgs.yml
-  include: rbd_pool_pgs.yml
-  when:
-    - rbd_pool_exist.rc == 0
-    - global_in_ceph_conf_overrides
-    - ceph_conf_overrides.global.osd_pool_default_pg_num is defined
-
-- name: include rbd_pool_size.yml
-  include: rbd_pool_size.yml
-  when:
-    - rbd_pool_exist.rc == 0
-    - global_in_ceph_conf_overrides
-    - ceph_conf_overrides.global.osd_pool_default_size is defined
-
-- name: create rbd pool on luminous
-  shell: ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create rbd {{ ceph_conf_overrides.global.osd_pool_default_pg_num }}
-  changed_when: false
-  failed_when: false
-  when:
-    - ceph_release_num.{{ ceph_release }} >= ceph_release_num.luminous
-    - global_in_ceph_conf_overrides
-    - ceph_conf_overrides.global.osd_pool_default_pg_num is defined
-    - rbd_pool_exist.rc != 0
-
+# Create the pools listed in openstack_pools
 - name: include openstack_config.yml
   include: openstack_config.yml
   when:
     - openstack_config
     - inventory_hostname == groups[mon_group_name] | last
 
+# CEPH creates the rbd pool during the ceph cluster initialization in
+# releases prior to luminous.  If the rbd_pool.yml playbook is called too
+# early, the rbd pool does not exist yet.
+- name: include rbd_pool.yml
+  include: rbd_pool.yml
+  when: ceph_release_num.{{ ceph_release }} < ceph_release_num.luminous
+
 - name: find ceph keys
   shell: ls -1 /etc/ceph/*.keyring
   changed_when: false
index 8b9a6ab481735dd303dbf36b11ab8dc829bac1ff..49849ef74d17bd8aa605d9d53fbfc90c1084a231 100644 (file)
@@ -1,17 +1,29 @@
 ---
-- name: check rbd pool usage
+- name: test if rbd exists
   shell: |
-    ceph --connect-timeout 5 --cluster {{ cluster }} df | awk '/rbd/ {print $3}'
+    ceph --cluster {{ cluster }} osd pool ls | grep -sq rbd
   changed_when: false
   failed_when: false
-  always_run: true
-  register: rbd_pool_df
+  run_once: true
+  check_mode: true
+  register: rbd_pool_exist
 
-- name: check pg num for rbd pool
-  shell: |
-    ceph --connect-timeout 5 --cluster {{ cluster }} osd pool get rbd pg_num | awk '{print $2}'
-  changed_when: false
-  failed_when: false
-  always_run: true
-  register: rbd_pool_pgs
+- name: include rbd_pool_df.yml
+  include: rbd_pool_df.yml
+  when: rbd_pool_exist.rc == 0
+
+- name: include rbd_pool_pgs.yml
+  include: rbd_pool_pgs.yml
+  when:
+    - rbd_pool_exist.rc == 0
+    - global_in_ceph_conf_overrides
+    - ceph_conf_overrides.global.osd_pool_default_pg_num is defined
+
+- name: include rbd_pool_size.yml
+  include: rbd_pool_size.yml
+  when:
+    - rbd_pool_exist.rc == 0
+    - global_in_ceph_conf_overrides
+    - ceph_conf_overrides.global.osd_pool_default_size is defined
 
+# In luminous release, ceph does not create the rbd pool by default.
diff --git a/roles/ceph-mon/tasks/rbd_pool_df.yml b/roles/ceph-mon/tasks/rbd_pool_df.yml
new file mode 100644 (file)
index 0000000..6285756
--- /dev/null
@@ -0,0 +1,14 @@
+---
+- name: verify that rbd pool exist
+  fail:
+    msg: "rbd pool does not exist in rbd_pool_df"
+  when: rbd_pool_exist.rc != 0
+
+- name: check rbd pool usage
+  shell: |
+    ceph --connect-timeout 5 --cluster {{ cluster }} df | awk '/rbd/ {print $3}'
+  changed_when: false
+  failed_when: false
+  check_mode: true
+  run_once: true
+  register: rbd_pool_df
diff --git a/roles/ceph-mon/tasks/rbd_pool_luminous.yml b/roles/ceph-mon/tasks/rbd_pool_luminous.yml
new file mode 100644 (file)
index 0000000..e69de29
index a58969c9b1e13e50383459d128e8484ae34d7521..914b67fd605ca5cde6774b45def316fa0054fc3a 100644 (file)
@@ -1,10 +1,25 @@
 ---
+- name: verify that rbd pool exist
+  fail:
+    msg: "rbd pool does not exist in rbd_pool_pgs"
+  when: rbd_pool_exist.rc != 0
+
+- name: check pg num for rbd pool
+  shell: |
+    ceph --connect-timeout 5 --cluster {{ cluster }} osd pool get rbd pg_num | awk '{print $2}'
+  changed_when: false
+  failed_when: false
+  check_mode: true
+  run_once: true
+  register: rbd_pool_pgs
+
 - name: destroy and recreate rbd pool if osd_pool_default_pg_num is not honoured
   shell: |
-    ceph --connect-timeout 5 --cluster {{ cluster }} osd pool rm rbd rbd --yes-i-really-really-mean-it
+    ceph --connect-timeout 5 --cluster {{ cluster }} osd pool delete rbd rbd --yes-i-really-really-mean-it
     ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create rbd {{ ceph_conf_overrides.global.osd_pool_default_pg_num }}
   changed_when: false
   failed_when: false
+  run_once: true
   when:
     - rbd_pool_df.stdout == "0"
     - rbd_pool_pgs.stdout != "{{ ceph_conf_overrides.global.osd_pool_default_pg_num }}"
index 76cacc7f65ad2f11b37c9652162013e249606553..19029f195258cf508f3471acb2ad4e11ba6887cc 100644 (file)
@@ -1,16 +1,23 @@
 ---
+- name: verify that rbd pool exist
+  fail:
+    msg: "rbd pool does not exist in rbd_pool_size"
+  when: rbd_pool_exist.rc != 0
+
 - name: check size for rbd pool
   shell: |
     ceph --connect-timeout 5 --cluster {{ cluster }} osd pool get rbd size | awk '{print $2}'
   changed_when: false
   failed_when: false
-  always_run: true
+  check_mode: true
+  run_once: true
   register: rbd_pool_size
 
 - name: change rbd pool size if osd_pool_default_size is not honoured
   command: ceph --connect-timeout 5 --cluster {{ cluster }} osd pool set rbd size {{ ceph_conf_overrides.global.osd_pool_default_size }}
   changed_when: false
   failed_when: false
+  run_once: true
   when:
     - rbd_pool_df.stdout == "0"
     - rbd_pool_size.stdout != "{{ ceph_conf_overrides.global.osd_pool_default_size }}"