git.apps.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
tests: increase `mon_max_pg_per_osd`
author     Guillaume Abrioux <gabrioux@redhat.com>
           Wed, 10 Feb 2021 14:49:38 +0000 (15:49 +0100)
committer  Guillaume Abrioux <gabrioux@redhat.com>
           Thu, 11 Feb 2021 15:35:55 +0000 (16:35 +0100)
we aren't deploying enough OSD daemons, so it fails like the following:

```
  stderr: 'Error ERANGE: pool id 10 pg_num 256 size 2 would mean 1536 total pgs, which exceeds max 1500 (mon_max_pg_per_osd 250 * num_in_osds 6)'
```
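
The monitor multiplies `mon_max_pg_per_osd` by the number of in OSDs and
rejects any pool creation that would push the total number of PG replicas
past that budget. Breaking down the numbers from the error above:

```
allowed:   mon_max_pg_per_osd (250) * num_in_osds (6) = 1500
new pool:  pg_num (256) * size (2)                    =  512
existing:  1536 - 512                                 = 1024
total:     1024 + 512 = 1536 > 1500                   -> ERANGE
```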

Let's increase the value of `mon_max_pg_per_osd` to work around this
issue in the CI.
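
For reference, on an already-deployed cluster the same limit can be raised
at runtime with the config CLI instead of `ceph_conf_overrides` (a sketch
of the equivalent command, not part of this change):

```
# raise the per-OSD PG budget on the monitors at runtime
ceph config set mon mon_max_pg_per_osd 300
```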

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
tests/functional/all-in-one/container/group_vars/all
tests/functional/all-in-one/group_vars/all
tests/functional/all_daemons/ceph-override.json
tests/functional/all_daemons/container/group_vars/all
tests/functional/all_daemons/group_vars/all
tests/functional/collocation/container/group_vars/all
tests/functional/collocation/group_vars/all

diff --git a/tests/functional/all-in-one/container/group_vars/all b/tests/functional/all-in-one/container/group_vars/all
index dafb610c51f67f25c9df9c62391f7d8b1e66ea98..eb58e03278cfaa732b5e2a97b2922d7fee536ff0 100644
--- a/tests/functional/all-in-one/container/group_vars/all
+++ b/tests/functional/all-in-one/container/group_vars/all
@@ -20,6 +20,7 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
+    mon_max_pg_per_osd: 300
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 rgw_create_pools:
diff --git a/tests/functional/all-in-one/group_vars/all b/tests/functional/all-in-one/group_vars/all
index d0caa26c3a0b5cfd9b3fbcf8c2165b2c9f4eff21..0eef250985ef69e33053b57c4f935f672d5d3bde 100644
--- a/tests/functional/all-in-one/group_vars/all
+++ b/tests/functional/all-in-one/group_vars/all
@@ -17,6 +17,7 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
+    mon_max_pg_per_osd: 300
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 osd_objectstore: "bluestore"
diff --git a/tests/functional/all_daemons/ceph-override.json b/tests/functional/all_daemons/ceph-override.json
index f17dacc24626054207eb8e69f8bd95ef657c5da2..acf8b1217aff4d29e57644048e9f5028d46925cc 100644
--- a/tests/functional/all_daemons/ceph-override.json
+++ b/tests/functional/all_daemons/ceph-override.json
@@ -4,7 +4,8 @@
       "osd_pool_default_pg_num": 12,
       "osd_pool_default_size": 1,
       "mon_allow_pool_size_one": true,
-      "mon_warn_on_pool_no_redundancy": false
+      "mon_warn_on_pool_no_redundancy": false,
+      "mon_max_pg_per_osd": 300
     }
   },
   "cephfs_pools": [
diff --git a/tests/functional/all_daemons/container/group_vars/all b/tests/functional/all_daemons/container/group_vars/all
index d16abaf27b02bd74d060ffa9ab48fac6cf26cfa7..d3fb25ed9177ea867812a17e9a1e28dfca2eb5d0 100644
--- a/tests/functional/all_daemons/container/group_vars/all
+++ b/tests/functional/all_daemons/container/group_vars/all
@@ -16,6 +16,7 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
+    mon_max_pg_per_osd: 300
 openstack_config: True
 openstack_glance_pool:
   name: "images"
diff --git a/tests/functional/all_daemons/group_vars/all b/tests/functional/all_daemons/group_vars/all
index 15943304ddf2818969a295fe7d26fe9ebfe74cc8..ae3b46b05df67c1b4908856f4266b290f9b6d1f9 100644
--- a/tests/functional/all_daemons/group_vars/all
+++ b/tests/functional/all_daemons/group_vars/all
@@ -9,6 +9,7 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
+    mon_max_pg_per_osd: 300
 openstack_config: True
 openstack_glance_pool:
   name: "images"
diff --git a/tests/functional/collocation/container/group_vars/all b/tests/functional/collocation/container/group_vars/all
index 1bdc042a80dd8628d3216210f28c24f0111e0b8b..9b5c3365d8fb57d17dc58fa56fe41c52cfd8c37d 100644
--- a/tests/functional/collocation/container/group_vars/all
+++ b/tests/functional/collocation/container/group_vars/all
@@ -18,6 +18,7 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
+    mon_max_pg_per_osd: 300
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 dashboard_admin_password: $sX!cD$rYU6qR^B!
diff --git a/tests/functional/collocation/group_vars/all b/tests/functional/collocation/group_vars/all
index 440c0b781560d8c7f42886b190011f7d20265e48..cee908e23fe2f141ffd5c259f351c59ce469425e 100644
--- a/tests/functional/collocation/group_vars/all
+++ b/tests/functional/collocation/group_vars/all
@@ -15,6 +15,7 @@ ceph_conf_overrides:
     mon_allow_pool_size_one: true
     mon_warn_on_pool_no_redundancy: false
     osd_pool_default_size: 1
+    mon_max_pg_per_osd: 300
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
 dashboard_admin_password: $sX!cD$rYU6qR^B!