This commit moves the erasure pool creation testing from `all_daemons`
to `lvm_osds` so we can decrease the number of osd nodes we spawn so the
OVH Jenkins slaves are less overwhelmed when an `all_daemons` based
scenario is being tested.
Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit
8476beb5b1f673d8b0925293d9273041c99a9bac)
name: "images"
pg_num: "{{ osd_pool_default_pg_num }}"
pgp_num: "{{ osd_pool_default_pg_num }}"
- type: 3
+ type: 1
size: 1
pg_autoscale_mode: True
target_size_ratio: 0.2
pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "HDD"
type: 1
+ size: 1
test2:
name: "test2"
pg_num: "{{ osd_pool_default_pg_num }}"
pgp_num: "{{ osd_pool_default_pg_num }}"
- rule_name: "HDD"
type: 1
+ size: 1
pools:
- "{{ test }}"
- "{{ test2 }}"
osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
osd2 osd_crush_location="{ 'root': 'default', 'host': 'osd2' }"
-osd3 osd_crush_location="{ 'root': 'default', 'host': 'osd3' }"
-osd4 osd_crush_location="{ 'root': 'default', 'host': 'osd4' }"
[mdss]
mds0
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 3
-osd_vms: 5
+osd_vms: 3
mds_vms: 3
rgw_vms: 1
nfs_vms: 1
name: "images"
pg_num: "{{ osd_pool_default_pg_num }}"
pgp_num: "{{ osd_pool_default_pg_num }}"
- type: 3
+ type: 1
size: 1
application: rbd
pg_autoscale_mode: True
pgp_num: "{{ osd_pool_default_pg_num }}"
rule_name: "HDD"
type: 1
- size: "{{ osd_pool_default_size }}"
+ size: 1
test2:
name: "test2"
pg_num: "{{ osd_pool_default_pg_num }}"
pgp_num: "{{ osd_pool_default_pg_num }}"
- rule_name: "HDD"
type: 1
- size: "{{ osd_pool_default_size }}"
+ size: 1
pools:
- "{{ test }}"
- "{{ test2 }}"
osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
osd2 osd_crush_location="{ 'root': 'default', 'host': 'osd2' }"
-osd3 osd_crush_location="{ 'root': 'default', 'host': 'osd3' }"
-osd4 osd_crush_location="{ 'root': 'default', 'host': 'osd4' }"
[mdss]
mds0
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 3
-osd_vms: 5
+osd_vms: 3
mds_vms: 3
rgw_vms: 1
nfs_vms: 0
osd_pool_default_size: 1
dashboard_enabled: False
handler_health_mon_check_delay: 10
-handler_health_osd_check_delay: 10
\ No newline at end of file
+handler_health_osd_check_delay: 10
+openstack_config: True
+openstack_glance_pool:
+ name: "images"
+ pg_num: "{{ osd_pool_default_pg_num }}"
+ pgp_num: "{{ osd_pool_default_pg_num }}"
+ type: 3
+ size: 1
+ application: rbd
+ pg_autoscale_mode: True
+ target_size_ratio: 0.2
+openstack_cinder_pool:
+ name: "volumes"
+ pg_num: "{{ osd_pool_default_pg_num }}"
+ pgp_num: "{{ osd_pool_default_pg_num }}"
+ type: 1
+ size: 1
+ application: rbd
+openstack_pools:
+ - "{{ openstack_glance_pool }}"
+ - "{{ openstack_cinder_pool }}"
\ No newline at end of file
osd_pool_default_size: 1
dashboard_enabled: False
handler_health_mon_check_delay: 10
-handler_health_osd_check_delay: 10
\ No newline at end of file
+handler_health_osd_check_delay: 10
+openstack_config: True
+openstack_glance_pool:
+ name: "images"
+ pg_num: "{{ osd_pool_default_pg_num }}"
+ pgp_num: "{{ osd_pool_default_pg_num }}"
+ type: 3
+ size: 1
+ application: rbd
+ pg_autoscale_mode: True
+ target_size_ratio: 0.2
+openstack_cinder_pool:
+ name: "volumes"
+ pg_num: "{{ osd_pool_default_pg_num }}"
+ pgp_num: "{{ osd_pool_default_pg_num }}"
+ type: 1
+ size: 1
+ application: rbd
+openstack_pools:
+ - "{{ openstack_glance_pool }}"
+ - "{{ openstack_cinder_pool }}"
\ No newline at end of file