git.apps.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
tests: move erasure pool testing to lvm_osds
authorGuillaume Abrioux <gabrioux@redhat.com>
Tue, 11 Aug 2020 13:26:16 +0000 (15:26 +0200)
committerGuillaume Abrioux <gabrioux@redhat.com>
Thu, 20 Aug 2020 09:55:40 +0000 (11:55 +0200)
This commit moves the erasure pool creation testing from `all_daemons`
to `lvm_osds` so we can decrease the number of osd nodes we spawn, so the
OVH Jenkins slaves are less overwhelmed when an `all_daemons` based
scenario is being tested.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 8476beb5b1f673d8b0925293d9273041c99a9bac)

tests/functional/all_daemons/container/group_vars/all
tests/functional/all_daemons/container/group_vars/clients
tests/functional/all_daemons/container/hosts
tests/functional/all_daemons/container/vagrant_variables.yml
tests/functional/all_daemons/group_vars/all
tests/functional/all_daemons/group_vars/clients
tests/functional/all_daemons/hosts
tests/functional/all_daemons/vagrant_variables.yml
tests/functional/lvm-osds/container/group_vars/all
tests/functional/lvm-osds/group_vars/all

index 6b19bcbd89f805783a0e3882ffbbc483e497f2a5..55d59f33bde7f7a675cdbbab07a8db0666192e49 100644 (file)
@@ -22,7 +22,7 @@ openstack_glance_pool:
   name: "images"
   pg_num: "{{ osd_pool_default_pg_num }}"
   pgp_num: "{{ osd_pool_default_pg_num }}"
-  type: 3
+  type: 1
   size: 1
   pg_autoscale_mode: True
   target_size_ratio: 0.2
index 27248d7374513a908ff84700d4467b81215cd50c..f914459892f3e35bd6e9deafd6945b84fa8a88fb 100644 (file)
@@ -7,12 +7,13 @@ test:
   pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
+  size: 1
 test2:
   name: "test2"
   pg_num: "{{ osd_pool_default_pg_num }}"
   pgp_num: "{{ osd_pool_default_pg_num }}"
-  rule_name: "HDD"
   type: 1
+  size: 1
 pools:
   - "{{ test }}"
   - "{{ test2 }}"
index 1fde8ef1b5d85270308f75370e77c0b3c76dadaa..fe64c8e8b35d82c27944298358a3a7fe236e5845 100644 (file)
@@ -10,8 +10,6 @@ mgr0
 osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
 osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
 osd2 osd_crush_location="{ 'root': 'default', 'host': 'osd2' }"
-osd3 osd_crush_location="{ 'root': 'default', 'host': 'osd3' }"
-osd4 osd_crush_location="{ 'root': 'default', 'host': 'osd4' }"
 
 [mdss]
 mds0
index 08ca709679f0a55fd96d9b60167ead44d4d6e82c..5e199588f02473ab0b7d38a078c684b6987969ed 100644 (file)
@@ -5,7 +5,7 @@ docker: True
 
 # DEFINE THE NUMBER OF VMS TO RUN
 mon_vms: 3
-osd_vms: 5
+osd_vms: 3
 mds_vms: 3
 rgw_vms: 1
 nfs_vms: 1
index 8117ebc219b552908d0485336ec89b6f49980960..0faa4de8042fa8e2f65d89b8d751572b96f4b1a0 100644 (file)
@@ -14,7 +14,7 @@ openstack_glance_pool:
   name: "images"
   pg_num: "{{ osd_pool_default_pg_num }}"
   pgp_num: "{{ osd_pool_default_pg_num }}"
-  type: 3
+  type: 1
   size: 1
   application: rbd
   pg_autoscale_mode: True
index 64ff43313650b2cd6401bc072fccdeed49d377b4..106fcbf434223fe32a4343f66b9b93343c4a22e9 100644 (file)
@@ -7,14 +7,13 @@ test:
   pgp_num: "{{ osd_pool_default_pg_num }}"
   rule_name: "HDD"
   type: 1
-  size: "{{ osd_pool_default_size }}"
+  size: 1
 test2:
   name: "test2"
   pg_num: "{{ osd_pool_default_pg_num }}"
   pgp_num: "{{ osd_pool_default_pg_num }}"
-  rule_name: "HDD"
   type: 1
-  size: "{{ osd_pool_default_size }}"
+  size: 1
 pools:
   - "{{ test }}"
   - "{{ test2 }}"
index e0e64ba694c4aec5cfe6a66550bcf502477b1b70..641217d5254455a0aa246c3b18b90ca6aa0cfb9b 100644 (file)
@@ -10,8 +10,6 @@ mgr0
 osd0 osd_crush_location="{ 'root': 'HDD', 'rack': 'mon-rackkkk', 'pod': 'monpod', 'host': 'osd0' }"
 osd1 osd_crush_location="{ 'root': 'default', 'host': 'osd1' }"
 osd2 osd_crush_location="{ 'root': 'default', 'host': 'osd2' }"
-osd3 osd_crush_location="{ 'root': 'default', 'host': 'osd3' }"
-osd4 osd_crush_location="{ 'root': 'default', 'host': 'osd4' }"
 
 [mdss]
 mds0
index 3c4354d9add33f5e4bcb05fa568a5de7f11b4e62..f994ae0824e6bd3389a5f89a40a195bdbcb4485a 100644 (file)
@@ -5,7 +5,7 @@ docker: false
 
 # DEFINE THE NUMBER OF VMS TO RUN
 mon_vms: 3
-osd_vms: 5
+osd_vms: 3
 mds_vms: 3
 rgw_vms: 1
 nfs_vms: 0
index 3ea93c23332fbf11dffc235ff47d15a25c08ab2d..47a42455c15c894ee122da0b334d4c4a08b0da08 100644 (file)
@@ -22,4 +22,24 @@ ceph_conf_overrides:
     osd_pool_default_size: 1
 dashboard_enabled: False
 handler_health_mon_check_delay: 10
-handler_health_osd_check_delay: 10
\ No newline at end of file
+handler_health_osd_check_delay: 10
+openstack_config: True
+openstack_glance_pool:
+  name: "images"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  type: 3
+  size: 1
+  application: rbd
+  pg_autoscale_mode: True
+  target_size_ratio: 0.2
+openstack_cinder_pool:
+  name: "volumes"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  type: 1
+  size: 1
+  application: rbd
+openstack_pools:
+  - "{{ openstack_glance_pool }}"
+  - "{{ openstack_cinder_pool }}"
\ No newline at end of file
index 0cf11039ff3076216a7ffa7ce28ec2f44f065698..3b649fbeff3d35b3b8bf99473c6bab224dfff6f1 100644 (file)
@@ -17,4 +17,24 @@ ceph_conf_overrides:
     osd_pool_default_size: 1
 dashboard_enabled: False
 handler_health_mon_check_delay: 10
-handler_health_osd_check_delay: 10
\ No newline at end of file
+handler_health_osd_check_delay: 10
+openstack_config: True
+openstack_glance_pool:
+  name: "images"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  type: 3
+  size: 1
+  application: rbd
+  pg_autoscale_mode: True
+  target_size_ratio: 0.2
+openstack_cinder_pool:
+  name: "volumes"
+  pg_num: "{{ osd_pool_default_pg_num }}"
+  pgp_num: "{{ osd_pool_default_pg_num }}"
+  type: 1
+  size: 1
+  application: rbd
+openstack_pools:
+  - "{{ openstack_glance_pool }}"
+  - "{{ openstack_cinder_pool }}"
\ No newline at end of file