The `osd_pool_default_pg_num` parameter is set in `ceph-mon`.
When running ceph-ansible with `--limit` on a specific group of nodes,
the play fails when trying to access this variable, since it is only
ever defined on the monitor hosts.
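
For example, a deployment that previously relied on the monitor's
runtime value can now pin the PG count through `ceph_conf_overrides`
(a minimal usage sketch; 64 is an arbitrary illustrative value):

    # group_vars/all.yml
    ceph_conf_overrides:
      global:
        osd_pool_default_pg_num: 64

With this change each host resolves `osd_pool_default_pg_num` locally
from that override, falling back to the role default
`ceph_osd_pool_default_pg_num`, instead of running `ceph daemon
mon.<hostname> config get osd_pool_default_pg_num` against the first
monitor, which required that monitor's hostvars to be available.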
Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1518696
Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
#cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
#cephfs_pools:
-# - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
-# - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
+# - { name: "{{ cephfs_data }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
+# - { name: "{{ cephfs_metadata }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
## OSD options
#
#openstack_config: false
#openstack_glance_pool:
# name: "images"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# size: ""
#openstack_cinder_pool:
# name: "volumes"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# size: ""
#openstack_nova_pool:
# name: "vms"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# size: ""
#openstack_cinder_backup_pool:
# name: "backups"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# size: ""
#openstack_gnocchi_pool:
# name: "metrics"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# size: ""
#openstack_cephfs_data_pool:
# name: "manila_data"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# size: ""
#openstack_cephfs_metadata_pool:
# name: "manila_metadata"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
#user_config: false
#test:
# name: "test"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# size: ""
#test2:
# name: "test2"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
#cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
#cephfs_pools:
-# - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
-# - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
+# - { name: "{{ cephfs_data }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
+# - { name: "{{ cephfs_metadata }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
## OSD options
#
#openstack_config: false
#openstack_glance_pool:
# name: "images"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# size: ""
#openstack_cinder_pool:
# name: "volumes"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# size: ""
#openstack_nova_pool:
# name: "vms"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# size: ""
#openstack_cinder_backup_pool:
# name: "backups"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# size: ""
#openstack_gnocchi_pool:
# name: "metrics"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# size: ""
#openstack_cephfs_data_pool:
# name: "manila_data"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
# size: ""
#openstack_cephfs_metadata_pool:
# name: "manila_metadata"
-# pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-# pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+# pg_num: "{{ osd_pool_default_pg_num }}"
+# pgp_num: "{{ osd_pool_default_pg_num }}"
# rule_name: "replicated_rule"
# type: 1
# erasure_profile: ""
user_config: false
test:
name: "test"
- pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
- pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pg_num: "{{ osd_pool_default_pg_num }}"
+ pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "replicated_rule"
type: 1
erasure_profile: ""
size: ""
test2:
name: "test2"
- pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
- pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pg_num: "{{ osd_pool_default_pg_num }}"
+ pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "replicated_rule"
type: 1
erasure_profile: ""
command: >
{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }}
osd pool create {{ item.0.name }}
- {{ item.0.pg_num }}
- {{ item.0.pgp_num }}
+ {{ item.0.pg_num | default(osd_pool_default_pg_num) }}
+ {{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) }}
{{ 'replicated_rule' if not item.0.rule_name | default('replicated_rule') else item.0.rule_name | default('replicated_rule') }}
{{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }}
{%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile %}
cephfs_metadata: cephfs_metadata # name of the metadata pool for a given filesystem
cephfs_pools:
- - { name: "{{ cephfs_data }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
- - { name: "{{ cephfs_metadata }}", pgs: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}", size: "" }
+ - { name: "{{ cephfs_data }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
+ - { name: "{{ cephfs_metadata }}", pgs: "{{ osd_pool_default_pg_num }}", size: "" }
## OSD options
#
openstack_config: false
openstack_glance_pool:
name: "images"
- pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
- pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pg_num: "{{ osd_pool_default_pg_num }}"
+ pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "replicated_rule"
type: 1
erasure_profile: ""
size: ""
openstack_cinder_pool:
name: "volumes"
- pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
- pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pg_num: "{{ osd_pool_default_pg_num }}"
+ pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "replicated_rule"
type: 1
erasure_profile: ""
size: ""
openstack_nova_pool:
name: "vms"
- pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
- pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pg_num: "{{ osd_pool_default_pg_num }}"
+ pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "replicated_rule"
type: 1
erasure_profile: ""
size: ""
openstack_cinder_backup_pool:
name: "backups"
- pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
- pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pg_num: "{{ osd_pool_default_pg_num }}"
+ pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "replicated_rule"
type: 1
erasure_profile: ""
size: ""
openstack_gnocchi_pool:
name: "metrics"
- pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
- pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pg_num: "{{ osd_pool_default_pg_num }}"
+ pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "replicated_rule"
type: 1
erasure_profile: ""
size: ""
openstack_cephfs_data_pool:
name: "manila_data"
- pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
- pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pg_num: "{{ osd_pool_default_pg_num }}"
+ pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "replicated_rule"
type: 1
erasure_profile: ""
size: ""
openstack_cephfs_metadata_pool:
name: "manila_metadata"
- pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
- pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pg_num: "{{ osd_pool_default_pg_num }}"
+ pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "replicated_rule"
type: 1
erasure_profile: ""
- ceph_current_status['servicemap']['services'] is defined
- ceph_current_status['servicemap']['services']['rgw'] is defined
+- name: set_fact osd_pool_default_pg_num
+ set_fact:
+ osd_pool_default_pg_num: "{{ ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', ceph_osd_pool_default_pg_num) }}"
+
- name: import_tasks set_monitor_address.yml
import_tasks: set_monitor_address.yml
- name: rbd pool related tasks
when: "'rbd' not in (rbd_pool_exists.stdout | from_json)"
block:
- - name: get default value for osd_pool_default_pg_num
- command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} daemon mon.{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} config get osd_pool_default_pg_num"
- changed_when: false
- register: osd_pool_default_pg_num
- delegate_to: "{{ groups[mon_group_name][0] }}"
-
- name: create a rbd pool if it doesn't exist
- command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool create rbd {{ (osd_pool_default_pg_num.stdout | from_json).osd_pool_default_pg_num }}"
+ command: "{{ docker_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd pool create rbd {{ osd_pool_default_pg_num }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
- name: filesystem pools related tasks
block:
- name: create filesystem pools
- command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pgs }}"
+ command: "{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pgs | default(osd_pool_default_pg_num) }}"
changed_when: false
delegate_to: "{{ groups[mon_group_name][0] }}"
with_items:
include_tasks: crush_rules.yml
when:
- crush_rule_config
-
-- name: include set_osd_pool_default_pg_num.yml
- include_tasks: set_osd_pool_default_pg_num.yml
+++ /dev/null
-# NOTE(leseb): we add a conditional for backward compatibility
-# so people that had 'pool_default_pg_num' declared will get
-# the same behaviour
-#
-- name: get default value for osd_pool_default_pg_num
- command: "{{ docker_exec_cmd }} ceph --cluster {{ cluster }} daemon mon.{{ monitor_name }} config get osd_pool_default_pg_num"
- failed_when: false
- changed_when: false
- run_once: true
- register: default_pool_default_pg_num
- when:
- - pool_default_pg_num is not defined
- - ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', False) == False
-
-- name: set_fact osd_pool_default_pg_num with pool_default_pg_num (backward compatibility)
- set_fact:
- osd_pool_default_pg_num: "{{ pool_default_pg_num }}"
- when: pool_default_pg_num is defined
-
-- name: set_fact osd_pool_default_pg_num with default_pool_default_pg_num.stdout
- set_fact:
- osd_pool_default_pg_num: "{{ (default_pool_default_pg_num.stdout | from_json).osd_pool_default_pg_num }}"
- when:
- - default_pool_default_pg_num.get('rc') == 0
-
-- name: set_fact osd_pool_default_pg_num ceph_conf_overrides.global.osd_pool_default_pg_num
- set_fact:
- osd_pool_default_pg_num: "{{ ceph_conf_overrides.global.osd_pool_default_pg_num }}"
- when:
- - ceph_conf_overrides.get('global', {}).get('osd_pool_default_pg_num', False) != False
command: >
{{ hostvars[groups[mon_group_name][0]]['docker_exec_cmd'] | default('') }} ceph --cluster {{ cluster }}
osd pool create {{ item.0.name }}
- {{ item.0.pg_num }}
- {{ item.0.pgp_num | default(item.0.pg_num) }}
+ {{ item.0.pg_num | default(osd_pool_default_pg_num) }}
+ {{ item.0.pgp_num | default(item.0.pg_num) | default(osd_pool_default_pg_num) }}
{{ 'replicated_rule' if not item.0.rule_name | default('replicated_rule') else item.0.rule_name | default('replicated_rule') }}
{{ 1 if item.0.type|default(1) == 'replicated' else 3 if item.0.type|default(1) == 'erasure' else item.0.type|default(1) }}
{%- if (item.0.type | default("1") == '3' or item.0.type | default("1") == 'erasure') and item.0.erasure_profile %}
- rgw_create_pools is defined
block:
- name: create rgw pools if rgw_create_pools is defined
- command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num }}"
+ command: "{{ docker_exec_cmd }} ceph --connect-timeout 5 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num | default(osd_pool_default_pg_num) }}"
changed_when: false
with_dict: "{{ rgw_create_pools }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
openstack_config: True
openstack_glance_pool:
name: "images"
- pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
- pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pg_num: "{{ osd_pool_default_pg_num }}"
+ pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "HDD"
type: 1
erasure_profile: ""
size: ""
openstack_cinder_pool:
name: "volumes"
- pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
- pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pg_num: "{{ osd_pool_default_pg_num }}"
+ pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "HDD"
type: 1
erasure_profile: ""
user_config: True
test:
name: "test"
- pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
- pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pg_num: "{{ osd_pool_default_pg_num }}"
+ pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "HDD"
type: 1
erasure_profile: ""
size: ""
test2:
name: "test2"
- pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
- pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pg_num: "{{ osd_pool_default_pg_num }}"
+ pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "HDD"
type: 1
erasure_profile: ""
user_config: True
test:
name: "test"
- pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
- pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pg_num: "{{ osd_pool_default_pg_num }}"
+ pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "HDD"
type: 1
erasure_profile: ""
expected_num_objects: ""
test2:
name: "test2"
- pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
- pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pg_num: "{{ osd_pool_default_pg_num }}"
+ pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "HDD"
type: 1
erasure_profile: ""
openstack_config: True
openstack_glance_pool:
name: "images"
- pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
- pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pg_num: "{{ osd_pool_default_pg_num }}"
+ pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "HDD"
type: 1
erasure_profile: ""
size: ""
openstack_cinder_pool:
name: "volumes"
- pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
- pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pg_num: "{{ osd_pool_default_pg_num }}"
+ pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "HDD"
type: 1
erasure_profile: ""
copy_admin_key: True
test:
name: "test"
- pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
- pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pg_num: "{{ osd_pool_default_pg_num }}"
+ pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "HDD"
type: 1
erasure_profile: ""
expected_num_objects: ""
test2:
name: "test2"
- pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
- pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+ pg_num: "{{ osd_pool_default_pg_num }}"
+ pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "HDD"
type: 1
erasure_profile: ""