run basic import/export cli tests for rbd
workload:
  full_sequential:
  - print: "**** done start rbd_import_export.yaml"
  - workunit:
      branch: octopus
      clients:
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+../stress-split-no-cephadm/0-cluster
\ No newline at end of file
--- /dev/null
+../stress-split-no-cephadm/1-ceph-install
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd min pg log entries: 1
+        osd max pg log entries: 2
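Capping the PG log at one or two entries forces OSDs to fall back to backfill instead of log-based recovery, which is exactly what this override is meant to exercise during the upgrade. A minimal sketch, assuming a teuthology-style exec stanza that is not part of this suite, of how the effective values could be checked on a live OSD over its admin socket:

tasks:
- exec:
    osd.0:
    # query the running daemon directly; both options are standard OSD settings
    - sudo ceph daemon osd.0 config get osd_min_pg_log_entries
    - sudo ceph daemon osd.0 config get osd_max_pg_log_entries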
--- /dev/null
+../stress-split-no-cephadm/2-partial-upgrade
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+   randomly kill and revive osd
+   small chance to increase the number of pgs
+overrides:
+  ceph:
+    log-ignorelist:
+    - but it is still running
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    - log bound mismatch
+tasks:
+- parallel:
+  - stress-tasks
+stress-tasks:
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
+    chance_thrash_cluster_full: 0
+    chance_thrash_pg_upmap: 0
+    chance_thrash_pg_upmap_items: 0
+    disable_objectstore_tool_tests: true
+    chance_force_recovery: 0
+    aggressive_pg_num_changes: false
+- print: "**** done thrashosds default.yaml"
--- /dev/null
+../stress-split-no-cephadm/objectstore
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+   randomized correctness test for rados operations on an erasure coded pool
+stress-tasks:
+  - rados:
+      clients: [client.0]
+      ops: 4000
+      objects: 50
+      ec_pool: true
+      write_append_excl: false
+      op_weights:
+        read: 100
+        write: 0
+        append: 100
+        delete: 50
+        snap_create: 50
+        snap_remove: 50
+        rollback: 50
+        copy_from: 50
+        setattr: 25
+        rmattr: 25
+  - print: "**** done rados ec task"
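op_weights assigns relative weights to the randomized operation generator; write is weighted 0 because a plain erasure-coded pool without allow_ec_overwrites cannot service overwrites, which keeps this workload append-only. An annotated excerpt, with editorial comments:

op_weights:
  read: 100    # reads dominate, interleaved with appends
  write: 0     # overwrites are unsupported on an EC pool without ec_overwrites
  append: 100  # append-only writes are safe on a plain EC pool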
--- /dev/null
+meta:
+- desc: |
+   run rbd tests on EC pool
+   overrides => force bluestore since it's required for ec-overwrite
+   use an EC pool for rbd and run xfstests on top of it to verify correctness
+tasks:
+- exec:
+    client.0:
+    - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
+    - sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile
+    - sudo ceph osd pool set datapool allow_ec_overwrites true
+    - rbd pool init datapool
+- qemu:
+    all:
+      clone: true
+      type: block
+      disks: 3
+      test: qa/run_xfstests_qemu.sh
+- print: "**** done rbd/qemu ec task"
+exclude_arch: armv7l
+overrides:
+  thrashosds:
+    bdev_inject_crash: 2
+    bdev_inject_crash_probability: .5
+  ceph:
+    fs: xfs
+    conf:
+      client:
+        rbd default data pool: datapool
+      osd: # force bluestore since it's required for ec overwrites
+        osd objectstore: bluestore
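RBD can only keep data on an erasure-coded pool once allow_ec_overwrites is enabled, and EC overwrites in turn require BlueStore, hence the objectstore override above. A minimal sketch of a verification stanza, assumed and not part of the suite, using standard ceph CLI calls:

- exec:
    client.0:
    # confirm the overwrite flag and the profile actually took effect
    - sudo ceph osd pool get datapool allow_ec_overwrites
    - sudo ceph osd erasure-code-profile get teuthologyprofile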
--- /dev/null
+../stress-split-no-cephadm/5-finish-upgrade.yaml
\ No newline at end of file
--- /dev/null
+.qa/releases/pacific.yaml
\ No newline at end of file
--- /dev/null
+#
+# k=3 implies a stripe_width of 1376*3 = 4128, which is different from
+# the default value of 4096. It is also not a multiple of 1024*1024, and
+# it creates situations where rounding rules during recovery become
+# necessary.
+#
+meta:
+- desc: |
+   randomized correctness test for rados operations on an erasure coded pool
+   using the jerasure plugin with k=3 and m=1
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    ec_pool: true
+    write_append_excl: false
+    erasure_code_profile:
+      name: jerasure31profile
+      plugin: jerasure
+      k: 3
+      m: 1
+      technique: reed_sol_van
+      crush-failure-domain: osd
+    op_weights:
+      read: 100
+      write: 0
+      append: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
+      setattr: 25
+      rmattr: 25
+- print: "**** done rados ec 7-final-workload.yaml"
--- /dev/null
+.qa/mon_election
\ No newline at end of file
--- /dev/null
+.qa/tasks/thrashosds-health.yaml
\ No newline at end of file
--- /dev/null
+.qa/distros/all/ubuntu_18.04.yaml
\ No newline at end of file
- radosbench:
    clients: [client.0]
    time: 90
-- print: "**** done radosbench 7-workload"
+- print: "**** done end radosbench.yaml"
    daemons: [mon.c, osd.8, osd.9, osd.10, osd.11, rgw.*]
    wait-for-healthy: false
    wait-for-osds-up: true
-- print: "**** restarted/upgrated => mon.c, osd.8, osd.9, osd.10, osd.11, rgw.*"
+- print: "**** done restarted/upgrated => mon.c, osd.8, osd.9, osd.10, osd.11, rgw.*"
- exec:
    osd.0:
    - ceph osd set pglog_hardlimit
    - ceph osd dump --format=json-pretty | grep "flags"
    - ceph config set global mon_warn_on_msgr2_not_enabled false
-- print: "**** try to set pglog_hardlimit again, should succeed"
+- print: "**** done try to set pglog_hardlimit again, should succeed"
    clients:
      client.0:
      - rbd/test_librbd_python.sh
-- print: "**** done rbd/test_librbd_python.sh 9-workload"
+- print: "**** done rbd/test_librbd_python.sh"