--- /dev/null
+tasks:
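+# presumably run in preparation for the jewel step: sortbitwise switches
+# the osdmap to the new bitwise sort order, and use_gmt_hitset makes
+# cache-tier hitset archive names timezone independent across versions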
+- exec:
+ osd.0:
+ - ceph osd set sortbitwise
+ - for p in `ceph osd pool ls` ; do ceph osd pool set $p use_gmt_hitset true ; done
--- /dev/null
+meta:
+- desc: |
+   Run ceph on two nodes,
+   with clients 0 and 1 on a separate third node.
+   Use xfs beneath the osds.
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+- - mon.b
+ - mon.c
+ - osd.2
+ - osd.3
+- - client.0
+ - client.1
+overrides:
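+# note: "mon warn on legacy crush tunables: false" below presumably
+# silences the legacy-tunables health warning while the cluster runs
+# mixed versions during the upgrade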
+ ceph:
+ log-whitelist:
+ - scrub mismatch
+ - ScrubResult
+ - failed to encode map
+ - wrongly marked
+ conf:
+ mon:
+ mon warn on legacy crush tunables: false
+ fs: xfs
--- /dev/null
+meta:
+- desc: |
+ install ceph/infernalis latest
+ run workload and upgrade-sequence in parallel
+ upgrade the client node
+tasks:
+- install:
+ branch: infernalis
+- print: "**** done installing infernalis"
+- ceph:
+- print: "**** done ceph"
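+# the parallel task runs the named entries concurrently; "workload" and
+# "upgrade-sequence" are defined by the sibling yaml fragments that the
+# suite matrix folds into this job's final config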
+- parallel:
+ - workload
+ - upgrade-sequence
+- print: "**** done parallel"
+- install.upgrade:
+ client.0:
+- print: "**** done install.upgrade on client.0"
--- /dev/null
+meta:
+- desc: |
+   run a randomized correctness test for rados operations
+   on an erasure-coded pool
+tasks:
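+# a note on the op mix below: write is weighted 0 because ec pools of
+# this era have no partial-overwrite support, so append is exercised
+# instead; the weights are relative probabilities for the randomized op
+# generator, and write_append_excl is presumably disabled so ops can
+# target existing objects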
+- full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec task"
--- /dev/null
+meta:
+- desc: |
+ object class functional tests
+workload:
+ sequential:
+ - workunit:
+ branch: infernalis
+ clients:
+ client.0:
+ - cls
+ - print: "**** done cls 2-workload"
--- /dev/null
+meta:
+- desc: |
+ generate read/write load with rados objects ranging from 1MB to 25MB
+workload:
+ sequential:
+ - workunit:
+ branch: infernalis
+ clients:
+ client.0:
+ - rados/load-gen-big.sh
+ - print: "**** done rados/load-gen-big.sh 2-workload"
--- /dev/null
+meta:
+- desc: |
+ librbd C and C++ api tests
+workload:
+ sequential:
+ - workunit:
+ branch: infernalis
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ - print: "**** done rbd/test_librbd.sh 2-workload"
--- /dev/null
+meta:
+- desc: |
+ librbd python api tests
+workload:
+ sequential:
+ - workunit:
+ branch: infernalis
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+ - print: "**** done rbd/test_librbd_python.sh 2-workload"
--- /dev/null
+meta:
+- desc: |
+ upgrade the ceph cluster
+upgrade-sequence:
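+   # single-step variant: both nodes get the new packages first, then
+   # all daemons restart onto the new version in one pass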
+ sequential:
+ - install.upgrade:
+ mon.a:
+ mon.b:
+ - print: "**** done install.upgrade mon.a and mon.b"
+ - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
+ - print: "**** done ceph.restart all"
--- /dev/null
+meta:
+- desc: |
+   upgrade the ceph cluster in two steps
+   step one ordering: mon.a, osd.0, osd.1, mds.a
+   step two ordering: mon.b, mon.c, osd.2, osd.3
+   ceph is expected to be in a healthy state after each step
+upgrade-sequence:
+ sequential:
+ - install.upgrade:
+ mon.a:
+ - print: "**** done install.upgrade mon.a to the version from teuthology-suite arg"
+ - ceph.restart:
+ daemons: [mon.a]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.0, osd.1]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - print: "**** running mixed versions of osds and mons"
+ #do we need to use "ceph osd crush tunables hammer" ?
+ - exec:
+ mon.b:
+ - sudo ceph osd crush tunables hammer
+ - print: "**** done ceph osd crush tunables hammer"
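+   # presumably pinned to the hammer profile so crush mapping stays
+   # compatible with the daemons that have not been upgraded yet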
+ - install.upgrade:
+ mon.b:
+ - print: "**** done install.upgrade mon.b to the version from teuthology-suite arg"
+ - ceph.restart:
+ daemons: [mon.b, mon.c]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.2, osd.3]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
--- /dev/null
+../../../../releases/jewel.yaml
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshots
+tasks:
+ - rados:
+ clients: [client.1]
+ ops: 4000
+ objects: 50
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ - print: "**** done rados 4-final-workload"
--- /dev/null
+meta:
+- desc: |
+ generate read/write load with rados objects ranging from 1 byte to 1MB
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - rados/load-gen-mix.sh
+ - print: "**** done rados/load-gen-mix.sh 4-final-workload"
--- /dev/null
+meta:
+- desc: |
+   librados C and C++ api tests, run while thrashing monitors
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+tasks:
+ - mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+ - print: "**** done mon_thrash 4-final-workload"
+ - workunit:
+ clients:
+ client.1:
+ - rados/test.sh
+ - print: "**** done rados/test.sh 4-final-workload"
--- /dev/null
+meta:
+- desc: |
+ rbd object class functional tests
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - cls/test_cls_rbd.sh
+ - print: "**** done cls/test_cls_rbd.sh 4-final-workload"
--- /dev/null
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+tasks:
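+# RBD_CREATE_ARGS below selects --new-format, i.e. format 2 images, so
+# the import/export round-trip exercises the newer rbd image format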
+ - workunit:
+ clients:
+ client.1:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+ - print: "**** done rbd/import_export.sh 4-final-workload"
--- /dev/null
+meta:
+- desc: |
+ swift api tests for rgw
+overrides:
+ rgw:
+ frontend: civetweb
+tasks:
+ - rgw: [client.1]
+ - print: "**** done rgw 4-final-workload"
+ - swift:
+ client.1:
+ rgw_server: client.1
+ - print: "**** done swift 4-final-workload"
--- /dev/null
+../../../../distros/supported/
\ No newline at end of file
--- /dev/null
+../../../../distros/supported/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ Run ceph on two nodes, using one of them as a client,
+ with a separate client-only node.
+ Use xfs beneath the osds.
+ install ceph/infernalis v9.2.0 point version
+ run workload and upgrade-sequence in parallel
+ install ceph/infernalis latest version
+ run workload and upgrade-sequence in parallel
+ install ceph/-x version (jewel)
+ run workload and upgrade-sequence in parallel
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+ - scrub
+ - osd_map_max_advance
+ - failed to encode
+ - wrongly marked
+ fs: xfs
+ conf:
+ mon:
+ mon debug unsafe allow tier with nonempty snaps: true
+ osd:
+ osd map max advance: 1000
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+- - mon.b
+ - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+- - client.1
+tasks:
+- print: "**** v9.2.0 about to install"
+- install:
+ tag: v9.2.0
+- print: "**** done v9.2.0 install"
+- ceph:
+ fs: xfs
+- print: "**** done ceph xfs"
+- sequential:
+ - workload
+- print: "**** done workload v9.2.0"
+- parallel:
+ - workload_infernalis
+ - upgrade-sequence_infernalis
+- print: "**** done parallel infernalis branch"
+- install.upgrade:
+ client.1:
+ branch: infernalis
+- parallel:
+ - workload_x
+ - upgrade-sequence_x
+- print: "**** done parallel -x branch"
+# Run test.sh on the -x upgraded cluster
+- install.upgrade:
+ client.1:
+- workunit:
+ clients:
+ client.1:
+ - rados/test.sh
+ - cls
+- print: "**** done final test on -x cluster"
+#######################
+workload:
+   sequential:
+   - workunit:
+       clients:
+         client.0:
+          - suites/blogbench.sh
+workload_infernalis:
+ sequential:
+ - workunit:
+ branch: infernalis
+ clients:
+ client.1:
+ - rados/test.sh
+ - cls
+ - print: "**** done rados/test.sh & cls workload_infernalis"
+ - rgw: [client.0]
+ - print: "**** done rgw workload_infernalis"
+ - s3tests:
+ client.0:
+ force-branch: ceph-infernalis
+ rgw_server: client.0
+ - print: "**** done s3tests workload_infernalis"
+upgrade-sequence_infernalis:
+   sequential:
+   - install.upgrade:
+       mon.a:
+         branch: infernalis
+       mon.b:
+         branch: infernalis
+       # Note that client.1 IS NOT upgraded at this point
+       #client.1:
+         #branch: infernalis
+ - print: "**** done branch: infernalis install.upgrade"
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+   - print: "**** done ceph.restart all infernalis branch mds/osd/mon"
+workload_x:
+ sequential:
+ - workunit:
+ branch: infernalis
+ clients:
+ client.1:
+ #- rados/test-upgrade-v9.0.1.sh
+ - rados/test.sh
+ - cls
+ - print: "**** done rados/test.sh & cls workload_x NOT upgraded client"
+ - workunit:
+ clients:
+ client.0:
+ - rados/test.sh
+ - cls
+ - print: "**** done rados/test.sh & cls workload_x upgraded client"
+ - rgw: [client.1]
+ - print: "**** done rgw workload_x"
+ - s3tests:
+ client.1:
+ force-branch: ceph-infernalis
+ rgw_server: client.1
+ - print: "**** done s3tests workload_x"
+upgrade-sequence_x:
+   sequential:
+   - install.upgrade:
+       mon.a:
+       #branch: infernalis
+       mon.b:
+       #branch: infernalis
+       # Note that client.1 IS NOT upgraded here
+       #client.1:
+         #branch: infernalis
+ - print: "**** done branch: -x install.upgrade"
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - print: "**** done ceph.restart all -x branch mds/osd/mon"
--- /dev/null
+../stress-split/0-cluster/
\ No newline at end of file
--- /dev/null
+arch: x86_64
--- /dev/null
+../stress-split/1-infernalis-install/
\ No newline at end of file
--- /dev/null
+../stress-split/2-partial-upgrade/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ randomly kill and revive osd
+ small chance of increasing the number of pgs
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+ - failed to encode map e
+tasks:
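+# min_in: 4 below presumably matches the k=3, m=1 erasure profile used
+# later in this suite, which needs at least 4 osds in to stay writeable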
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ min_in: 4
+- print: "**** done thrashosds 3-thrash"
--- /dev/null
+../stress-split/4-mon/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ randomized correctness test for rados operations on an erasure coded pool
+tasks:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec task"
--- /dev/null
+../stress-split/6-next-mon/
\ No newline at end of file
--- /dev/null
+../stress-split/8-next-mon/
\ No newline at end of file
--- /dev/null
+#
+# k=3 implies a stripe_width of 1376*3 = 4128, which differs from the
+# default value of 4096. It is also not a multiple of 1024*1024, which
+# creates situations where rounding rules during recovery become
+# necessary.
+#
+meta:
+- desc: |
+ randomized correctness test for rados operations on an erasure coded pool
+ using the jerasure plugin with k=3 and m=1
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ erasure_code_profile:
+ name: jerasure31profile
+ plugin: jerasure
+ k: 3
+ m: 1
+ technique: reed_sol_van
+ ruleset-failure-domain: osd
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
--- /dev/null
+../stress-split/0-cluster/
\ No newline at end of file
--- /dev/null
+../stress-split/1-infernalis-install/
\ No newline at end of file
--- /dev/null
+../stress-split/2-partial-upgrade/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ randomly kill and revive osd
+ small chance to increase the number of pgs
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+ - failed to encode map e
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ min_in: 4
+- print: "**** done thrashosds 3-thrash"
--- /dev/null
+../stress-split/4-mon/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ randomized correctness test for rados operations on an erasure coded pool
+tasks:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec task"
--- /dev/null
+../stress-split/6-next-mon/
\ No newline at end of file
--- /dev/null
+../stress-split/8-next-mon/
\ No newline at end of file
--- /dev/null
+#
+# k=3 implies a stripe_width of 1376*3 = 4128, which differs from the
+# default value of 4096. It is also not a multiple of 1024*1024, which
+# creates situations where rounding rules during recovery become
+# necessary.
+#
+meta:
+- desc: |
+ randomized correctness test for rados operations on an erasure coded pool
+ using the jerasure plugin with k=3 and m=1
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ erasure_code_profile:
+ name: jerasure31profile
+ plugin: jerasure
+ k: 3
+ m: 1
+ technique: reed_sol_van
+ ruleset-failure-domain: osd
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
--- /dev/null
+../../../../distros/supported/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ Run ceph on two nodes,
+ with a separate client-only node.
+ Use xfs beneath the osds.
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon warn on legacy crush tunables: false
+ fs: xfs
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+- - client.0
--- /dev/null
+meta:
+- desc: install ceph/infernalis latest
+tasks:
+- install:
+ branch: infernalis
+- print: "**** done install infernalis"
+- ceph:
+- print: "**** done ceph"
--- /dev/null
+meta:
+- desc: |
+   install upgrade of ceph/-x on one node only
+   1st half
+   restart: osd.0,1,2,3,4,5
+tasks:
+- install.upgrade:
+ osd.0:
+- print: "**** done install.upgrade osd.0"
+- ceph.restart:
+ daemons: [osd.0, osd.1, osd.2, osd.3, osd.4, osd.5]
+- print: "**** done ceph.restart 1st half"
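+# note: only the node hosting osd.0 (which also carries osd.1 and osd.2)
+# gets new packages here; restarting all six osds therefore yields a
+# mixed-version cluster, with osd.3-5 coming back on the old release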
--- /dev/null
+meta:
+- desc: |
+ randomly kill and revive osd
+ small chance to increase the number of pgs
+overrides:
+ ceph:
+ log-whitelist:
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+ - failed to encode map e
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+- print: "**** done thrashosds 3-thrash"
--- /dev/null
+meta:
+- desc: |
+ restart mon.a so it is upgraded to -x
+tasks:
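+# wait-for-healthy is off below since the mid-upgrade cluster may still
+# be degraded; waiting for the osds to come up is enough to proceed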
+- ceph.restart:
+ daemons: [mon.a]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+- print: "**** done ceph.restart mon.a"
--- /dev/null
+meta:
+- desc: |
+ run basic cls tests for rbd
+tasks:
+- workunit:
+    branch: infernalis
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
+- print: "**** done cls/test_cls_rbd.sh 5-workload"
--- /dev/null
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+tasks:
+- workunit:
+    branch: infernalis
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+- print: "**** done rbd/import_export.sh 5-workload"
--- /dev/null
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool,
+ using only reads, writes, and deletes
+tasks:
+- full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 45
+ write: 45
+ delete: 10
+- print: "**** done rados/readwrite 5-workload"
--- /dev/null
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+tasks:
+- full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+- print: "**** done rados/snaps-few-objects 5-workload"
--- /dev/null
+meta:
+- desc: |
+ restart mon.b so it is upgraded to -x
+tasks:
+- ceph.restart:
+ daemons: [mon.b]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+- print: "**** done ceph.restart mon.b 6-next-mon"
--- /dev/null
+meta:
+- desc: |
+   generate write load with rados bench
+tasks:
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+- print: "**** done radosbench 7-workload"
--- /dev/null
+meta:
+- desc: |
+ librbd C and C++ api tests
+tasks:
+- workunit:
+    branch: infernalis
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+- print: "**** done rbd/test_librbd.sh 7-workload"
--- /dev/null
+meta:
+- desc: |
+   restart mon.c so it is upgraded to -x
+   as all mons are now upgraded, the ceph cluster is expected to reach quorum
+tasks:
+- ceph.restart:
+ daemons: [mon.c]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+- print: "**** done ceph.restart mon.c 8-next-mon"
+- ceph.wait_for_mon_quorum: [a, b, c]
+- print: "**** done wait_for_mon_quorum 8-next-mon"
--- /dev/null
+meta:
+- desc: |
+ librbd python api tests
+tasks:
+- workunit:
+    branch: infernalis
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+- print: "**** done rbd/test_librbd_python.sh 9-workload"
--- /dev/null
+meta:
+- desc: |
+ swift api tests for rgw
+tasks:
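+# default_idle_timeout is raised to 300s below, presumably so slow
+# swift requests against the mid-upgrade cluster are not disconnected
+# by the rgw frontend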
+- rgw:
+ client.0:
+ default_idle_timeout: 300
+- print: "**** done rgw 9-workload"
+- swift:
+ client.0:
+ rgw_server: client.0
+- print: "**** done swift 9-workload"
--- /dev/null
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
--- /dev/null
+../../../../distros/supported/
\ No newline at end of file