git.apps.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
mon: move `osd_pool_default_pg_num` to `ceph-defaults`
author    Guillaume Abrioux <gabrioux@redhat.com>
          Tue, 13 Nov 2018 14:40:35 +0000 (15:40 +0100)
committer mergify[bot] <mergify[bot]@users.noreply.github.com>
          Wed, 21 Nov 2018 15:42:50 +0000 (15:42 +0000)
The `osd_pool_default_pg_num` parameter is set in the `ceph-mon` role.
When running ceph-ansible with `--limit` on a specific group of nodes, the
play fails as soon as it tries to access this variable, since it is never
defined on those hosts. Moving it to `ceph-defaults` makes it available on
every node.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1518696
Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
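
In practice, the change replaces a cross-host `hostvars` lookup with a plain
variable reference. A minimal before/after sketch, using the expressions from
the diff below:

    # before: resolved through the first mon's hostvars; undefined when
    # --limit keeps the mon role (which used to set the fact) out of the play
    pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"

    # after: a fact set on every host by ceph-defaults (see facts.yml below)
    pg_num: "{{ osd_pool_default_pg_num }}"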
18 files changed:
group_vars/all.yml.sample
group_vars/clients.yml.sample
group_vars/rhcs.yml.sample
roles/ceph-client/defaults/main.yml
roles/ceph-client/tasks/create_users_keys.yml
roles/ceph-defaults/defaults/main.yml
roles/ceph-defaults/tasks/facts.yml
roles/ceph-iscsi-gw/tasks/common.yml
roles/ceph-mds/tasks/create_mds_filesystems.yml
roles/ceph-mon/tasks/main.yml
roles/ceph-mon/tasks/set_osd_pool_default_pg_num.yml [deleted file]
roles/ceph-osd/tasks/openstack_config.yml
roles/ceph-rgw/tasks/main.yml
tests/functional/centos/7/cluster/group_vars/all
tests/functional/centos/7/cluster/group_vars/clients
tests/functional/centos/7/docker-collocation/group_vars/clients
tests/functional/centos/7/docker/group_vars/all
tests/functional/centos/7/docker/group_vars/clients

index 515469de7a1ea8b03a485a51ecb91943d9ad3680..a3e5281479c8d9fdbd2daf74f7dfe934c4b5c03a 100644 (file)
@@ -352,8 +352,8 @@ dummy:
 #cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
 
 #cephfs_pools:
-#  - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
-#  - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
+#  - { name: "{{ cephfs_data }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
+#  - { name: "{{ cephfs_metadata }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
 
 ## OSD options
 #
@@ -571,8 +571,8 @@ dummy:
 #openstack_config: false
 #openstack_glance_pool:
 #  name: "images"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -581,8 +581,8 @@ dummy:
 #  size: ""
 #openstack_cinder_pool:
 #  name: "volumes"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -591,8 +591,8 @@ dummy:
 #  size: ""
 #openstack_nova_pool:
 #  name: "vms"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -601,8 +601,8 @@ dummy:
 #  size: ""
 #openstack_cinder_backup_pool:
 #  name: "backups"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -611,8 +611,8 @@ dummy:
 #  size: ""
 #openstack_gnocchi_pool:
 #  name: "metrics"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -621,8 +621,8 @@ dummy:
 #  size: ""
 #openstack_cephfs_data_pool:
 #  name: "manila_data"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -631,8 +631,8 @@ dummy:
 #  size: ""
 #openstack_cephfs_metadata_pool:
 #  name: "manila_metadata"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
index 5bae33868e4170659d6e9a6df9615498565d135c..39a3c04b41c53450ec3fec82c454d38a425077df 100644 (file)
@@ -20,8 +20,8 @@ dummy:
 #user_config: false
 #test:
 #  name: "test"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -29,8 +29,8 @@ dummy:
 #  size: ""
 #test2:
 #  name: "test2"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
index ba3b529dc5504dd42debb5e1d39b5685771fa4d2..051a6c533df43b1fd2acd854d16899aad239fe2a 100644 (file)
@@ -352,8 +352,8 @@ ceph_rhcs_version: 3
 #cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
 
 #cephfs_pools:
-#  - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
-#  - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
+#  - { name: "{{ cephfs_data }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
+#  - { name: "{{ cephfs_metadata }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
 
 ## OSD options
 #
@@ -571,8 +571,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 #openstack_config: false
 #openstack_glance_pool:
 #  name: "images"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -581,8 +581,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 #  size: ""
 #openstack_cinder_pool:
 #  name: "volumes"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -591,8 +591,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 #  size: ""
 #openstack_nova_pool:
 #  name: "vms"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -601,8 +601,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 #  size: ""
 #openstack_cinder_backup_pool:
 #  name: "backups"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -611,8 +611,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 #  size: ""
 #openstack_gnocchi_pool:
 #  name: "metrics"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -621,8 +621,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 #  size: ""
 #openstack_cephfs_data_pool:
 #  name: "manila_data"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
@@ -631,8 +631,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
 #  size: ""
 #openstack_cephfs_metadata_pool:
 #  name: "manila_metadata"
-#  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-#  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+#  pg_num: "{{ osd_pool_default_pg_num }}"
+#  pgp_num: "{{ osd_pool_default_pg_num }}"
 #  rule_name: "replicated_rule"
 #  type: 1
 #  erasure_profile: ""
index 80f9a5552e7d29654e5f4539e49c713f7d3b2c87..ad9779cf50a3d53e63912453b993aa8ab0c5e3e0 100644 (file)
@@ -12,8 +12,8 @@ copy_admin_key: false
 user_config: false
 test:
   name: "test"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""
@@ -21,8 +21,8 @@ test:
   size: ""
 test2:
   name: "test2"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""
index 9158fcf2bcbcdbc0d8c9e4f164c1796c6abe9030..4f7be2b437da2f2b38f97b424c19732f12183408 100644 (file)
       command: >
         {{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
         osd pool create {{ item.0.name }}
-        {{ item.0.pg_num }}
-        {{ item.0.pgp_num }}
+        {{ item.0.pg_num | default(osd_pool_default_pg_num) }}
+        {{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) }}
         {{ 'replicated_rule' if not item.0.rule_name | default('replicated_rule') else item.0.rule_name | default('replicated_rule') }}
         {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }}
         {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile %}
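
With the fallback in place, a pool definition may omit `pg_num`/`pgp_num`
entirely; a minimal sketch (the `test3` pool is hypothetical):

    # pg_num/pgp_num omitted: the create command above falls back to
    # osd_pool_default_pg_num
    test3:
      name: "test3"
      rule_name: "replicated_rule"
      type: 1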
index ab24cc74b728e1aca2a8f002c0abebabfef30c87..033945dcfc5e36917dac8f228042ada28562991c 100644 (file)
@@ -344,8 +344,8 @@ cephfs_data: cephfs_data # name of the data pool for a given filesystem
 cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
 
 cephfs_pools:
-  - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
-  - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
+  - { name: "{{ cephfs_data }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
+  - { name: "{{ cephfs_metadata }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
 
 ## OSD options
 #
@@ -563,8 +563,8 @@ docker_pull_timeout: "300s"
 openstack_config: false
 openstack_glance_pool:
   name: "images"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""
@@ -573,8 +573,8 @@ openstack_glance_pool:
   size: ""
 openstack_cinder_pool:
   name: "volumes"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""
@@ -583,8 +583,8 @@ openstack_cinder_pool:
   size: ""
 openstack_nova_pool:
   name: "vms"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""
@@ -593,8 +593,8 @@ openstack_nova_pool:
   size: ""
 openstack_cinder_backup_pool:
   name: "backups"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""
@@ -603,8 +603,8 @@ openstack_cinder_backup_pool:
   size: ""
 openstack_gnocchi_pool:
   name: "metrics"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""
@@ -613,8 +613,8 @@ openstack_gnocchi_pool:
   size: ""
 openstack_cephfs_data_pool:
   name: "manila_data"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""
@@ -623,8 +623,8 @@ openstack_cephfs_data_pool:
   size: ""
 openstack_cephfs_metadata_pool:
   name: "manila_metadata"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "replicated_rule"
   type: 1
   erasure_profile: ""
index 548eeb634b5b1bb539180e9863214458ed0bc42f..2f63a4fba0d02c29006a60e4bdd57640dfdd1ebe 100644 (file)
     - ceph_current_status['servicemap']['services'] is defined
     - ceph_current_status['servicemap']['services']['rgw'] is defined
 
+- name: set_fact osd_pool_default_pg_num
+  set_fact:
+    osd_pool_default_pg_num: "{{ ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', ceph_osd_pool_default_pg_num) }}"
+
 - name: import_tasks set_monitor_address.yml
   import_tasks: set_monitor_address.yml
 
index d187ad1d0396b04589d97351767e19348022ddb2..7c8acc1ea62fc52e9726902ea37b80106107bb70 100644 (file)
 - name: rbd pool related tasks
   when: "'rbd' not in (rbd_pool_exists.stdout | from_json)"
   block:
-    - name: get default value for osd_pool_default_pg_num
-      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} config get osd_pool_default_pg_num"
-      changed_when: false
-      register: osd_pool_default_pg_num
-      delegate_to: "{{ groups[mon_group_name][0] }}"
-
     - name: create a rbd pool if it doesn't exist
-      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool create rbd {{ (osd_pool_default_pg_num.stdout | from_json).osd_pool_default_pg_num }}"
+      command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool create rbd {{ osd_pool_default_pg_num }}"
       changed_when: false
       delegate_to: "{{ groups[mon_group_name][0] }}"
 
index 733952aa4f796b41f3a9926c38d19f814ab85bfc..b94087f3361ed87ec6d2e46661438602f166fd2f 100644 (file)
@@ -2,7 +2,7 @@
 - name: filesystem pools related tasks
   block:
     - name: create filesystem pools
-      command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pgs }}"
+      command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pgs | default(osd_pool_default_pg_num) }}"
       changed_when: false
       delegate_to: "{{ groups[mon_group_name][0] }}"
       with_items:
index 3181879e8c5effe7e5512fc60c289624891decd7..4aa0b27391674fc62719b0542cbfeb996885527e 100644 (file)
@@ -33,6 +33,3 @@
   include_tasks: crush_rules.yml
   when:
     - crush_rule_config
-
-- name: include set_osd_pool_default_pg_num.yml
-  include_tasks: set_osd_pool_default_pg_num.yml
diff --git a/roles/ceph-mon/tasks/set_osd_pool_default_pg_num.yml b/roles/ceph-mon/tasks/set_osd_pool_default_pg_num.yml
deleted file mode 100644 (file)
index a620970..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-# NOTE(leseb): we add a conditional for backward compatibility
-# so people that had 'pool_default_pg_num' declared will get
-# the same behaviour
-#
-- name: get default value for osd_pool_default_pg_num
-  command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} daemon mon.{{ monitor_name }} config get osd_pool_default_pg_num"
-  failed_when: false
-  changed_when: false
-  run_once: true
-  register: default_pool_default_pg_num
-  when:
-    - pool_default_pg_num is not defined
-    - ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', False) == False
-
-- name: set_fact osd_pool_default_pg_num with pool_default_pg_num (backward compatibility)
-  set_fact:
-    osd_pool_default_pg_num: "{{ pool_default_pg_num }}"
-  when: pool_default_pg_num is defined
-
-- name: set_fact osd_pool_default_pg_num with default_pool_default_pg_num.stdout
-  set_fact:
-    osd_pool_default_pg_num: "{{ (default_pool_default_pg_num.stdout | from_json).osd_pool_default_pg_num }}"
-  when:
-    - default_pool_default_pg_num.get('rc') == 0
-
-- name: set_fact osd_pool_default_pg_num ceph_conf_overrides.global.osd_pool_default_pg_num
-  set_fact:
-    osd_pool_default_pg_num: "{{ ceph_conf_overrides.global.osd_pool_default_pg_num }}"
-  when:
-    - ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', False) != False
index 4b4422f46ccbaaba9054605f66d100d1338fa6c7..34e9e6aadc49c7195d6d33ed8d5472363a93fe9d 100644 (file)
@@ -25,8 +25,8 @@
       command: >
         {{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
         osd pool create {{ item.0.name }}
-        {{ item.0.pg_num }}
-        {{ item.0.pgp_num | default(item.0.pg_num) }}
+        {{ item.0.pg_num | default(osd_pool_default_pg_num) }}
+        {{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) }}
         {{ 'replicated_rule' if not item.0.rule_name | default('replicated_rule') else item.0.rule_name | default('replicated_rule') }}
         {{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }}
         {%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile %}
index 34cbce7155a4fe5f8fa9030fb0a5d1754eb1e356..447861c4f9be603a45b648e470011fef8d60b9ab 100644 (file)
@@ -27,7 +27,7 @@
     - rgw_create_pools is defined
   block:
     - name: create rgw pools if rgw_create_pools is defined
-      command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
+      command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num | default(osd_pool_default_pg_num) }}"
       changed_when: false
       with_dict: "{{ rgw_create_pools }}"
       delegate_to: "{{ groups[mon_group_name][0] }}"
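
Since the task now supplies a per-pool default, an `rgw_create_pools` entry
may omit `pg_num`; a minimal sketch (pool names hypothetical):

    rgw_create_pools:
      mypool:
        pg_num: 64      # an explicit value is still honoured
      myotherpool: {}   # pg_num omitted, falls back to osd_pool_default_pg_num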
index 5b0de72115f25b85ffe93a6845c2f9dd7f397d64..a7f7ff35301452483c556056bd369976795ff1f6 100644 (file)
@@ -14,8 +14,8 @@ nfs_ganesha_flavor: "ceph_master"
 openstack_config: True
 openstack_glance_pool:
   name: "images"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""
@@ -23,8 +23,8 @@ openstack_glance_pool:
   size: ""
 openstack_cinder_pool:
   name: "volumes"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""
index 55180053b1a941438abe11e4046feabb29f099a8..66371fdf1287c4f7b6da07671feb2d2f28dd1279 100644 (file)
@@ -3,8 +3,8 @@ copy_admin_key: True
 user_config: True
 test:
   name: "test"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""
@@ -12,8 +12,8 @@ test:
   size: ""
 test2:
   name: "test2"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""
index 0fcdc83f709a291a45cf7bfee86884db64a31766..1131a16c542bc1bf3700a2efa06ce2533ad6d548 100644 (file)
@@ -2,16 +2,16 @@
 user_config: True
 test:
   name: "test"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""
   expected_num_objects: ""
 test2:
   name: "test2"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""
index 9becef1923cc6be4f4cf40df8d1708f4a84f6b92..18d7321f1757d586dabea42d1c4412cb643e2fa4 100644 (file)
@@ -18,8 +18,8 @@ ceph_conf_overrides:
 openstack_config: True
 openstack_glance_pool:
   name: "images"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""
@@ -27,8 +27,8 @@ openstack_glance_pool:
   size: ""
 openstack_cinder_pool:
   name: "volumes"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""
index cbd665ce341a4e3675524168cd986fc989bfa324..a8ea366d04a907a8c0c3109907f28c7b9d438f8d 100644 (file)
@@ -3,16 +3,16 @@ user_config: True
 copy_admin_key: True
 test:
   name: "test"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""
   expected_num_objects: ""
 test2:
   name: "test2"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""