Refactor common tasks and allow loading mgr modules before the unit tests start.
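
The shared mgrmodules fragment exposes a sequential block that the common
tasks list runs (as a nested sequential task) before the unit tests, and
other fragments append their setup steps to it. As an illustration only,
a per-module fragment could append an enable step like the following; the
exec task, the mon.a role, and the crash module named here are placeholders
rather than part of this change:

    mgrmodules:
      sequential:
        - exec:
            mon.a:
              - ceph mgr module enable crash

Because the suite merge appends list entries under the same key, each such
snippet lands in the single mgrmodules sequence and runs ahead of the tests.
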
Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
--- /dev/null
+tasks:
+  - install:
--- /dev/null
+tasks:
+  - ceph:
+      # tests may leave mgrs broken, so don't try and call into them
+      # to invoke e.g. pg dump during teardown.
+      wait-for-scrub: false
+      log-ignorelist:
+        - overall HEALTH_
+        - \(MGR_DOWN\)
+        - \(PG_
+        - replacing it with standby
+        - No standby daemons available
+        - \(POOL_APP_NOT_ENABLED\)
--- /dev/null
+mgrmodules:
+  sequential:
+    - print: "Enabling mgr modules"
+    # other fragments append to this
+
+tasks:
+  - sequential:
+      - mgrmodules
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+overrides:
+  ceph:
+    log-ignorelist:
+      - \(RECENT_CRASH\)
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.mgr.test_crash
--- /dev/null
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.mgr.test_failover
--- /dev/null
+overrides:
+  ceph:
+    # tests may leave mgrs broken, so don't try and call into them
+    # to invoke e.g. pg dump during teardown.
+    wait-for-scrub: false
+    log-ignorelist:
+      - \(MGR_INSIGHTS_WARNING\)
+      - \(insights_health_check
+      - \(RECENT_CRASH\)
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.mgr.test_insights
--- /dev/null
+overrides:
+  ceph:
+    log-ignorelist:
+      - Reduced data availability
+      - Degraded data redundancy
+      - objects misplaced
+      - Synthetic exception in serve
+      - influxdb python module not found
+      - \(MGR_ZABBIX_
+      - foo bar
+      - Failed to open Telegraf
+      - evicting unresponsive client
+      - 1 mgr modules have recently crashed \(RECENT_MGR_MODULE_CRASH\)
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.mgr.test_module_selftest
+      fail_on_skip: false
--- /dev/null
+overrides:
+  check-counter:
+    counters:
+      mgr:
+        - name: "finisher-balancer.complete_latency.avgcount"
+          min: 1
+        - name: "finisher-balancer.queue_len"
+          expected_val: 0
+        - name: "finisher-crash.complete_latency.avgcount"
+          min: 2
+        - name: "finisher-crash.queue_len"
+          expected_val: 0
+        - name: "finisher-devicehealth.complete_latency.avgcount"
+          min: 1
+        - name: "finisher-devicehealth.queue_len"
+          expected_val: 0
+        - name: "finisher-iostat.complete_latency.avgcount"
+          min: 1
+        - name: "finisher-iostat.queue_len"
+          expected_val: 0
+        - name: "finisher-pg_autoscaler.complete_latency.avgcount"
+          min: 1
+        - name: "finisher-pg_autoscaler.queue_len"
+          expected_val: 0
+        - name: "finisher-progress.complete_latency.avgcount"
+          min: 2
+        - name: "finisher-progress.queue_len"
+          expected_val: 0
+        - name: "finisher-status.complete_latency.avgcount"
+          min: 2
+        - name: "finisher-status.queue_len"
+          expected_val: 0
+        - name: "finisher-telemetry.complete_latency.avgcount"
+          min: 2
+        - name: "finisher-telemetry.queue_len"
+          expected_val: 0
+tasks:
+  - workunit:
+      clients:
+        client.0:
+          - mgr/test_per_module_finisher.sh
--- /dev/null
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd mclock profile: high_recovery_ops
+      global:
+        osd pool default size: 3
+        osd pool default min size: 2
+    log-ignorelist:
+      - \(MDS_ALL_DOWN\)
+      - \(MDS_UP_LESS_THAN_MAX\)
+      - \(FS_WITH_FAILED_MDS\)
+      - \(FS_DEGRADED\)
+      - \(OSDMAP_FLAGS\)
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.mgr.test_progress
--- /dev/null
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.mgr.test_prometheus
--- /dev/null
+tasks:
+  - workunit:
+      clients:
+        client.0:
+          - mgr/test_localpool.sh
+++ /dev/null
-
-tasks:
-  - install:
-  - ceph:
-      # tests may leave mgrs broken, so don't try and call into them
-      # to invoke e.g. pg dump during teardown.
-      wait-for-scrub: false
-      log-ignorelist:
-        - overall HEALTH_
-        - \(MGR_DOWN\)
-        - \(PG_
-        - \(RECENT_CRASH\)
-        - replacing it with standby
-        - No standby daemons available
-        - \(POOL_APP_NOT_ENABLED\)
-  - cephfs_test_runner:
-      modules:
-        - tasks.mgr.test_crash
+++ /dev/null
-
-tasks:
-  - install:
-  - ceph:
-      # tests may leave mgrs broken, so don't try and call into them
-      # to invoke e.g. pg dump during teardown.
-      wait-for-scrub: false
-      log-ignorelist:
-        - overall HEALTH_
-        - \(MGR_DOWN\)
-        - \(PG_
-        - replacing it with standby
-        - No standby daemons available
-        - \(POOL_APP_NOT_ENABLED\)
-  - cephfs_test_runner:
-      modules:
-        - tasks.mgr.test_failover
+++ /dev/null
-
-tasks:
-  - install:
-  - ceph:
-      # tests may leave mgrs broken, so don't try and call into them
-      # to invoke e.g. pg dump during teardown.
-      wait-for-scrub: false
-      log-ignorelist:
-        - overall HEALTH_
-        - \(MGR_DOWN\)
-        - \(MGR_INSIGHTS_WARNING\)
-        - \(insights_health_check
-        - \(PG_
-        - \(RECENT_CRASH\)
-        - replacing it with standby
-        - No standby daemons available
-        - \(POOL_APP_NOT_ENABLED\)
-  - cephfs_test_runner:
-      modules:
-        - tasks.mgr.test_insights
+++ /dev/null
-
-tasks:
-  - install:
-  - ceph:
-      # tests may leave mgrs broken, so don't try and call into them
-      # to invoke e.g. pg dump during teardown.
-      wait-for-scrub: false
-      log-ignorelist:
-        - overall HEALTH_
-        - \(MGR_DOWN\)
-        - \(PG_
-        - replacing it with standby
-        - No standby daemons available
-        - Reduced data availability
-        - Degraded data redundancy
-        - objects misplaced
-        - Synthetic exception in serve
-        - influxdb python module not found
-        - \(MGR_ZABBIX_
-        - foo bar
-        - Failed to open Telegraf
-        - evicting unresponsive client
-        - 1 mgr modules have recently crashed \(RECENT_MGR_MODULE_CRASH\)
-        - \(POOL_APP_NOT_ENABLED\)
-  - cephfs_test_runner:
-      modules:
-        - tasks.mgr.test_module_selftest
-      fail_on_skip: false
+++ /dev/null
-tasks:
-  - install:
-  - ceph:
-      wait-for-scrub: false
-      log-ignorelist:
-        - \(POOL_APP_NOT_ENABLED\)
-  - check-counter:
-      counters:
-        mgr:
-          - name: "finisher-balancer.complete_latency.avgcount"
-            min: 1
-          - name: "finisher-balancer.queue_len"
-            expected_val: 0
-          - name: "finisher-crash.complete_latency.avgcount"
-            min: 2
-          - name: "finisher-crash.queue_len"
-            expected_val: 0
-          - name: "finisher-devicehealth.complete_latency.avgcount"
-            min: 1
-          - name: "finisher-devicehealth.queue_len"
-            expected_val: 0
-          - name: "finisher-iostat.complete_latency.avgcount"
-            min: 1
-          - name: "finisher-iostat.queue_len"
-            expected_val: 0
-          - name: "finisher-pg_autoscaler.complete_latency.avgcount"
-            min: 1
-          - name: "finisher-pg_autoscaler.queue_len"
-            expected_val: 0
-          - name: "finisher-progress.complete_latency.avgcount"
-            min: 2
-          - name: "finisher-progress.queue_len"
-            expected_val: 0
-          - name: "finisher-status.complete_latency.avgcount"
-            min: 2
-          - name: "finisher-status.queue_len"
-            expected_val: 0
-          - name: "finisher-telemetry.complete_latency.avgcount"
-            min: 2
-          - name: "finisher-telemetry.queue_len"
-            expected_val: 0
-  - workunit:
-      clients:
-        client.0:
-          - mgr/test_per_module_finisher.sh
+++ /dev/null
-overrides:
-  ceph:
-    conf:
-      osd:
-        osd mclock profile: high_recovery_ops
-tasks:
-  - install:
-  - ceph:
-      config:
-        global:
-          osd pool default size : 3
-          osd pool default min size : 2
-      # tests may leave mgrs broken, so don't try and call into them
-      # to invoke e.g. pg dump during teardown.
-      wait-for-scrub: false
-      log-ignorelist:
-        - overall HEALTH_
-        - \(MGR_DOWN\)
-        - \(MDS_ALL_DOWN\)
-        - \(MDS_UP_LESS_THAN_MAX\)
-        - \(FS_WITH_FAILED_MDS\)
-        - \(FS_DEGRADED\)
-        - \(PG_
-        - \(OSDMAP_FLAGS\)
-        - replacing it with standby
-        - No standby daemons available
-        - \(POOL_APP_NOT_ENABLED\)
-  - cephfs_test_runner:
-      modules:
-        - tasks.mgr.test_progress
+++ /dev/null
-
-tasks:
-  - install:
-  - ceph:
-      # tests may leave mgrs broken, so don't try and call into them
-      # to invoke e.g. pg dump during teardown.
-      wait-for-scrub: false
-      log-ignorelist:
-        - overall HEALTH_
-        - \(MGR_DOWN\)
-        - \(PG_
-        - replacing it with standby
-        - No standby daemons available
-        - \(POOL_APP_NOT_ENABLED\)
-  - cephfs_test_runner:
-      modules:
-        - tasks.mgr.test_prometheus
+++ /dev/null
-tasks:
-  - install:
-  - ceph:
-      # tests may leave mgrs broken, so don't try and call into them
-      # to invoke e.g. pg dump during teardown.
-      wait-for-scrub: false
-      log-ignorelist:
-        - overall HEALTH_
-        - \(MGR_DOWN\)
-        - \(PG_
-        - replacing it with standby
-        - No standby daemons available
-        - \(POOL_APP_NOT_ENABLED\)
-  - workunit:
-      clients:
-        client.0:
-          - mgr/test_localpool.sh