From: Yuri Weinstein
Date: Wed, 13 Jan 2021 16:22:25 +0000 (-0800)
Subject: qa/tests: added stress-split-erasure-code-no-cephadm tests (not using cephadm)
X-Git-Tag: v16.1.0~32^2~2
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=489e60b9ee4af006e8b763dedaf33c7b3f68ad42;p=ceph.git

qa/tests: added stress-split-erasure-code-no-cephadm tests (not using cephadm)

Signed-off-by: Yuri Weinstein
---

diff --git a/qa/suites/upgrade/octopus-x/parallel/workload/rbd_import_export.yaml b/qa/suites/upgrade/octopus-x/parallel/workload/rbd_import_export.yaml
index c465a19cba83..b76cd1ada7ba 100644
--- a/qa/suites/upgrade/octopus-x/parallel/workload/rbd_import_export.yaml
+++ b/qa/suites/upgrade/octopus-x/parallel/workload/rbd_import_export.yaml
@@ -3,7 +3,6 @@ meta:
    run basic import/export cli tests for rbd
 workload:
   full_sequential:
-  - print: "**** done start rbd_import_export.yaml"
   - workunit:
       branch: octopus
       clients:
diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/% b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/%
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/.qa b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/.qa
new file mode 120000
index 000000000000..fea2489fdf6d
--- /dev/null
+++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/.qa
@@ -0,0 +1 @@
+../.qa
\ No newline at end of file
diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/0-cluster b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/0-cluster
new file mode 120000
index 000000000000..c7da783ed576
--- /dev/null
+++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/0-cluster
@@ -0,0 +1 @@
+../stress-split-no-cephadm/0-cluster
\ No newline at end of file
diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/1-nautilus-install b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/1-nautilus-install
new file mode 120000
index 000000000000..078cb90069cf
--- /dev/null
+++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/1-nautilus-install
@@ -0,0 +1 @@
+../stress-split-no-cephadm/1-ceph-install
\ No newline at end of file
diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/1.1-pg-log-overrides/.qa b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/1.1-pg-log-overrides/.qa
new file mode 120000
index 000000000000..a602a0353e75
--- /dev/null
+++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/1.1-pg-log-overrides/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/1.1-pg-log-overrides/normal_pg_log.yaml b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/1.1-pg-log-overrides/normal_pg_log.yaml
new file mode 100644
index 000000000000..8b137891791f
--- /dev/null
+++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/1.1-pg-log-overrides/normal_pg_log.yaml
@@ -0,0 +1 @@
+
diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/1.1-pg-log-overrides/short_pg_log.yaml b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/1.1-pg-log-overrides/short_pg_log.yaml
new file mode 100644
index 000000000000..e31e37ba6e80
--- /dev/null
+++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/1.1-pg-log-overrides/short_pg_log.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd min pg log entries: 1
+        osd max pg log entries: 2
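The short_pg_log.yaml facet above deliberately caps the OSD PG log at one or two entries so that upgrade runs also exercise log trimming and backfill-heavy recovery, while the empty normal_pg_log.yaml leaves the defaults alone. For reference, a minimal sketch of applying the same override to a live throwaway cluster through the monitor config database (assumed equivalent of the conf override; only sensible on test clusters):

    # Sketch: mirror short_pg_log.yaml on a live throwaway cluster.
    ceph config set osd osd_min_pg_log_entries 1
    ceph config set osd osd_max_pg_log_entries 2
    # Confirm the override is visible to an OSD:
    ceph config get osd.0 osd_max_pg_log_entries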
diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/2-partial-upgrade b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/2-partial-upgrade
new file mode 120000
index 000000000000..920b144020ff
--- /dev/null
+++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/2-partial-upgrade
@@ -0,0 +1 @@
+../stress-split-no-cephadm/2-partial-upgrade
\ No newline at end of file
diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/3-thrash/.qa b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/3-thrash/.qa
new file mode 120000
index 000000000000..a602a0353e75
--- /dev/null
+++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/3-thrash/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/3-thrash/default.yaml b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/3-thrash/default.yaml
new file mode 100644
index 000000000000..82ab4ced37b6
--- /dev/null
+++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/3-thrash/default.yaml
@@ -0,0 +1,26 @@
+meta:
+- desc: |
+   randomly kill and revive osd
+   small chance to increase the number of pgs
+overrides:
+  ceph:
+    log-ignorelist:
+    - but it is still running
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    - log bound mismatch
+tasks:
+- parallel:
+  - stress-tasks
+stress-tasks:
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
+    chance_thrash_cluster_full: 0
+    chance_thrash_pg_upmap: 0
+    chance_thrash_pg_upmap_items: 0
+    disable_objectstore_tool_tests: true
+    chance_force_recovery: 0
+    aggressive_pg_num_changes: false
+- print: "**** done thrashosds default.yaml"
diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/3.1-objectstore b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/3.1-objectstore
new file mode 120000
index 000000000000..b7cd805a0d6d
--- /dev/null
+++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/3.1-objectstore
@@ -0,0 +1 @@
+../stress-split-no-cephadm/objectstore
\ No newline at end of file
diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/4-ec-workload/% b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/4-ec-workload/%
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/4-ec-workload/.qa b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/4-ec-workload/.qa
new file mode 120000
index 000000000000..a602a0353e75
--- /dev/null
+++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/4-ec-workload/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/4-ec-workload/rados-ec-workload.yaml b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/4-ec-workload/rados-ec-workload.yaml
new file mode 100644
index 000000000000..c89551e6b86c
--- /dev/null
+++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/4-ec-workload/rados-ec-workload.yaml
@@ -0,0 +1,22 @@
+meta:
+- desc: |
+   randomized correctness test for rados operations on an erasure coded pool
+stress-tasks:
+  - rados:
+      clients: [client.0]
+      ops: 4000
+      objects: 50
+      ec_pool: true
+      write_append_excl: false
+      op_weights:
+        read: 100
+        write: 0
+        append: 100
+        delete: 50
+        snap_create: 50
+        snap_remove: 50
+        rollback: 50
+        copy_from: 50
+        setattr: 25
+        rmattr: 25
+  - print: "**** done rados ec task"
diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/4-ec-workload/rbd-ec-workload.yaml b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/4-ec-workload/rbd-ec-workload.yaml
new file mode 100644
index 000000000000..d0e661dcaf0b
--- /dev/null
+++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/4-ec-workload/rbd-ec-workload.yaml
@@ -0,0 +1,31 @@
+meta:
+- desc: |
+   run rbd tests on EC pool
+   overrides => force bluestore since it's required for ec-overwrite
+   use an EC pool for rbd and run xfstests on top of it to verify correctness
+tasks:
+- exec:
+    client.0:
+    - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
+    - sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile
+    - sudo ceph osd pool set datapool allow_ec_overwrites true
+    - rbd pool init datapool
+- qemu:
+    all:
+      clone: true
+      type: block
+      disks: 3
+      test: qa/run_xfstests_qemu.sh
+- print: "**** done rbd/qemu ec task"
+exclude_arch: armv7l
+overrides:
+  thrashosds:
+    bdev_inject_crash: 2
+    bdev_inject_crash_probability: .5
+  ceph:
+    fs: xfs
+    conf:
+      client:
+        rbd default data pool: datapool
+      osd: # force bluestore since it's required for ec overwrites
+        osd objectstore: bluestore
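The exec block in rbd-ec-workload.yaml above is the standard recipe for putting RBD data on an erasure-coded pool: create an EC profile, create the pool, enable EC overwrites (which requires BlueStore OSDs, hence the objectstore override), and initialize the pool for RBD. A sketch of the equivalent manual steps, with illustrative profile/pool/image names and assuming a replicated pool named "rbd" already exists for image metadata:

    # Sketch: RBD backed by an EC data pool (names are illustrative).
    ceph osd erasure-code-profile set ecprofile k=2 m=1 crush-failure-domain=osd
    ceph osd pool create ecdata 4 4 erasure ecprofile
    ceph osd pool set ecdata allow_ec_overwrites true   # needs BlueStore
    rbd pool init ecdata
    # Metadata stays in the replicated "rbd" pool; data lands in ecdata.
    rbd create --size 1G --data-pool ecdata rbd/testimg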
diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/5-finish-upgrade.yaml b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/5-finish-upgrade.yaml
new file mode 120000
index 000000000000..9723ca018cea
--- /dev/null
+++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/5-finish-upgrade.yaml
@@ -0,0 +1 @@
+../stress-split-no-cephadm/5-finish-upgrade.yaml
\ No newline at end of file
diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/6-pacific.yaml b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/6-pacific.yaml
new file mode 120000
index 000000000000..1467fc88ecd2
--- /dev/null
+++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/6-pacific.yaml
@@ -0,0 +1 @@
+.qa/releases/pacific.yaml
\ No newline at end of file
diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/7-final-workload.yaml b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/7-final-workload.yaml
new file mode 100644
index 000000000000..31d5ac961204
--- /dev/null
+++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/7-final-workload.yaml
@@ -0,0 +1,36 @@
+#
+# k=3 implies a stripe_width of 1376*3 = 4128, which is different from
+# the default value of 4096. It is also not a multiple of 1024*1024 and
+# creates situations where rounding rules during recovery become
+# necessary.
+#
+meta:
+- desc: |
+   randomized correctness test for rados operations on an erasure coded pool
+   using the jerasure plugin with k=3 and m=1
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    ec_pool: true
+    write_append_excl: false
+    erasure_code_profile:
+      name: jerasure31profile
+      plugin: jerasure
+      k: 3
+      m: 1
+      technique: reed_sol_van
+      crush-failure-domain: osd
+    op_weights:
+      read: 100
+      write: 0
+      append: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
+      setattr: 25
+      rmattr: 25
+- print: "**** done rados ec 7-final-workload.yaml"
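The comment at the top of 7-final-workload.yaml compresses a bit of arithmetic: with k=3 the per-chunk size works out to 1376 bytes, so the stripe_width is 1376*3 = 4128, which neither equals the default 4096 nor divides 1 MiB evenly, and that mismatch is exactly what forces the recovery rounding paths the test wants to cover. A quick shell check using only the numbers stated in the comment:

    # Numbers from the comment above: chunk=1376, k=3.
    k=3; chunk=1376
    stripe_width=$((chunk * k))
    echo "$stripe_width"                      # 4128, not the default 4096
    echo $(( (1024 * 1024) % stripe_width ))  # 64 -> 1 MiB does not divide evenly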
diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/mon_election b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/mon_election
new file mode 120000
index 000000000000..3f331e6214c4
--- /dev/null
+++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/mon_election
@@ -0,0 +1 @@
+.qa/mon_election
\ No newline at end of file
diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/thrashosds-health.yaml b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/thrashosds-health.yaml
new file mode 120000
index 000000000000..9124eb1aa29a
--- /dev/null
+++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/thrashosds-health.yaml
@@ -0,0 +1 @@
+.qa/tasks/thrashosds-health.yaml
\ No newline at end of file
diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/ubuntu_18.04.yaml b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/ubuntu_18.04.yaml
new file mode 120000
index 000000000000..cfb85f10ef59
--- /dev/null
+++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/ubuntu_18.04.yaml
@@ -0,0 +1 @@
+.qa/distros/all/ubuntu_18.04.yaml
\ No newline at end of file
diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/radosbench.yaml b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/radosbench.yaml
index 115939e6c7b8..5832dfa511d4 100644
--- a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/radosbench.yaml
+++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/radosbench.yaml
@@ -49,4 +49,4 @@ stress-tasks:
 - radosbench:
     clients: [client.0]
     time: 90
-- print: "**** done radosbench 7-workload"
+- print: "**** done end radosbench.yaml"
diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/5-finish-upgrade.yaml b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/5-finish-upgrade.yaml
index 44c78c0f5f13..65d4ef455741 100644
--- a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/5-finish-upgrade.yaml
+++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/5-finish-upgrade.yaml
@@ -11,11 +11,11 @@ tasks:
       daemons: [mon.c, osd.8, osd.9, osd.10, osd.11, rgw.*]
       wait-for-healthy: false
       wait-for-osds-up: true
-- print: "**** restarted/upgrated => mon.c, osd.8, osd.9, osd.10, osd.11, rgw.*"
+- print: "**** done restarted/upgraded => mon.c, osd.8, osd.9, osd.10, osd.11, rgw.*"
 - exec:
     osd.0:
     - ceph osd set pglog_hardlimit
    - ceph osd dump --format=json-pretty | grep "flags"
     - ceph config set global mon_warn_on_msgr2_not_enabled false
-- print: "**** try to set pglog_hardlimit again, should succeed"
+- print: "**** done try to set pglog_hardlimit again, should succeed"
diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/8-final-workload/rbd-python.yaml b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/8-final-workload/rbd-python.yaml
index 42cc6c6636d2..d04fab7dfb0d 100644
--- a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/8-final-workload/rbd-python.yaml
+++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/8-final-workload/rbd-python.yaml
@@ -11,4 +11,4 @@ tasks:
       clients:
         client.0:
         - rbd/test_librbd_python.sh
-- print: "**** done rbd/test_librbd_python.sh 9-workload"
+- print: "**** done rbd/test_librbd_python.sh"