Bring the recent refactoring of `osd_pool_default_pg_num` and
`osd_pool_default_size` into the podman scenario as well.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
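
For context: the shortened `{{ osd_pool_default_pg_num }}` references in the diff below only resolve if that default is reachable as a regular variable in the play, rather than being looked up through `hostvars` of the first monitor. Below is a minimal sketch of group_vars that would satisfy this, with illustrative values only (ceph-ansible may in fact derive these defaults from `ceph_conf_overrides`; that detail is an assumption here, not part of this change):

# Hypothetical group_vars snippet -- values are illustrative,
# not the ones used by the real scenario.
osd_pool_default_pg_num: 8
osd_pool_default_size: 1

With a default size of 1, the explicit `size: 1` on the OpenStack pools keeps a single replica, which is usually enough for the small CI clusters these scenarios deploy.
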
 openstack_config: True
 openstack_glance_pool:
   name: "images"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""
   expected_num_objects: ""
-  size: ""
+  size: 1
 openstack_cinder_pool:
   name: "volumes"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""
   expected_num_objects: ""
-  size: ""
+  size: 1
 openstack_pools:
   - "{{ openstack_glance_pool }}"
   - "{{ openstack_cinder_pool }}"
 copy_admin_key: True
 test:
   name: "test"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""
   expected_num_objects: ""
 test2:
   name: "test2"
-  pg_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
-  pgp_num: "{{ hostvars[groups[mon_group_name][0]]['osd_pool_default_pg_num'] }}"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
   erasure_profile: ""