--- /dev/null
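+# Cluster layout: mon.a, mds.a and two OSDs on one node, mon.b, mon.c and two
+# OSDs on a second, and two clients on a third. The legacy crush tunables
+# warning is silenced because the upgrade sequence deliberately sets hammer
+# tunables while mixed versions are running.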
+roles:
+- - mon.a
+  - mds.a
+  - osd.0
+  - osd.1
+- - mon.b
+  - mon.c
+  - osd.2
+  - osd.3
+- - client.0
+  - client.1
+overrides:
+  ceph:
+    log-whitelist:
+    - scrub mismatch
+    - ScrubResult
+    conf:
+      mon:
+        mon warn on legacy crush tunables: false
--- /dev/null
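+# Install the hammer release, bring the cluster up on xfs, then run the client
+# workloads and the upgrade sequence concurrently via the parallel task.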
+tasks:
+- install:
+    branch: hammer
+- print: "**** done installing hammer"
+- ceph:
+    fs: xfs
+- print: "**** done ceph"
+- parallel:
+  - workload
+  - upgrade-sequence
+- print: "**** done parallel"
--- /dev/null
+../../../../../erasure-code/ec-rados-default.yaml
\ No newline at end of file
--- /dev/null
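+# 2-workload fragments each define a "workload" sequence of hammer-branch
+# workunits on client.0; the parallel task above runs them while the upgrade
+# sequence is in progress.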
+workload:
+  sequential:
+  - workunit:
+      branch: hammer
+      clients:
+        client.0:
+        - cls
+  - print: "**** done cls 2-workload"
--- /dev/null
+workload:
+  sequential:
+  - workunit:
+      branch: hammer
+      clients:
+        client.0:
+        - rados/load-gen-big.sh
+  - print: "**** done rados/load-gen-big.sh 2-workload"
--- /dev/null
+workload:
+  sequential:
+  - workunit:
+      branch: hammer
+      clients:
+        client.0:
+        - rbd/test_librbd.sh
+  - print: "**** done rbd/test_librbd.sh 2-workload"
--- /dev/null
+workload:
+  sequential:
+  - workunit:
+      branch: hammer
+      clients:
+        client.0:
+        - rbd/test_librbd_python.sh
+  - print: "**** done rbd/test_librbd_python.sh 2-workload"
--- /dev/null
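+# Upgrade-sequence variant: upgrade both nodes at once and restart every
+# daemon in a single step.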
+upgrade-sequence:
+  sequential:
+  - install.upgrade:
+      mon.a:
+      mon.b:
+  - print: "**** done install.upgrade mon.a and mon.b"
+  - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
+  - print: "**** done ceph.restart all"
--- /dev/null
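+# Upgrade-sequence variant: upgrade and restart mon.a and its OSDs first, run
+# for a while with mixed versions and crush tunables pinned to hammer, then
+# upgrade the remaining mons and OSDs. The sleeps presumably give the cluster
+# time to settle between restarts.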
+upgrade-sequence:
+  sequential:
+  - install.upgrade:
+      mon.a:
+  - print: "**** done install.upgrade mon.a to the version from teuthology-suite arg"
+  - ceph.restart:
+      daemons: [mon.a]
+      wait-for-healthy: true
+  - sleep:
+      duration: 60
+  - ceph.restart:
+      daemons: [osd.0, osd.1]
+      wait-for-healthy: true
+  - sleep:
+      duration: 60
+  - ceph.restart: [mds.a]
+  - sleep:
+      duration: 60
+  - print: "**** running mixed versions of osds and mons"
+# do we need to use "ceph osd crush tunables hammer" ?
+  - exec:
+      mon.b:
+      - ceph osd crush tunables hammer
+  - print: "**** done ceph osd crush tunables hammer"
+  - install.upgrade:
+      mon.b:
+  - print: "**** done install.upgrade mon.b to the version from teuthology-suite arg"
+  - ceph.restart:
+      daemons: [mon.b, mon.c]
+      wait-for-healthy: true
+  - sleep:
+      duration: 60
+  - ceph.restart:
+      daemons: [osd.2, osd.3]
+      wait-for-healthy: true
+  - sleep:
+      duration: 60
--- /dev/null
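+# 4-final-workload: snapshot-heavy rados workload driven from client.1 after
+# the upgrade has completed.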
+tasks:
+  - rados:
+      clients: [client.1]
+      ops: 4000
+      objects: 50
+      op_weights:
+        read: 100
+        write: 100
+        delete: 50
+        snap_create: 50
+        snap_remove: 50
+        rollback: 50
+  - print: "**** done rados 4-final-workload"
--- /dev/null
+tasks:
+  - workunit:
+      clients:
+        client.1:
+        - rados/load-gen-mix.sh
+  - print: "**** done rados/load-gen-mix.sh 4-final-workload"
--- /dev/null
+tasks:
+  - mon_thrash:
+      revive_delay: 20
+      thrash_delay: 1
+  - print: "**** done mon_thrash 4-final-workload"
+  - workunit:
+      clients:
+        client.1:
+        - rados/test.sh
+  - print: "**** done rados/test.sh 4-final-workload"
--- /dev/null
+tasks:
+  - workunit:
+      clients:
+        client.1:
+        - cls/test_cls_rbd.sh
+  - print: "**** done cls/test_cls_rbd.sh 4-final-workload"
--- /dev/null
+tasks:
+  - workunit:
+      clients:
+        client.1:
+        - rbd/import_export.sh
+      env:
+        RBD_CREATE_ARGS: --new-format
+  - print: "**** done rbd/import_export.sh 4-final-workload"
--- /dev/null
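+# Final workload: start radosgw on client.1 and run the Swift API tests
+# against it.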
+tasks:
+  - rgw: [client.1]
+  - print: "**** done rgw 4-final-workload"
+  - swift:
+      client.1:
+        rgw_server: client.1
+  - print: "**** done swift 4-final-workload"
--- /dev/null
+../../../../distros/supported
\ No newline at end of file
--- /dev/null
+../stress-split/0-cluster
\ No newline at end of file
--- /dev/null
+../stress-split/1-hammer-install
\ No newline at end of file
--- /dev/null
+../stress-split/2-partial-upgrade
\ No newline at end of file
--- /dev/null
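+# Thrash OSDs while the cluster is mid-upgrade. min_in: 4 keeps at least four
+# OSDs in at all times, presumably so the erasure-coded test pools never drop
+# below k+m available OSDs.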
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    - log bound mismatch
+tasks:
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
+    min_in: 4
+- print: "**** done thrashosds 3-thrash"
--- /dev/null
+../stress-split/4-mon
\ No newline at end of file
--- /dev/null
+../../../../../erasure-code/ec-rados-default.yaml
\ No newline at end of file
--- /dev/null
+../stress-split/6-next-mon
\ No newline at end of file
--- /dev/null
+../stress-split/8-next-mon
\ No newline at end of file
--- /dev/null
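+# Erasure-coded rados workload using the isa plugin (k=2, m=1). ISA-L is
+# x86_64-only, which is presumably why this sub-suite pins arch: x86_64 in its
+# distro fragments below.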
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    ec_pool: true
+    erasure_code_profile:
+      name: isaprofile
+      plugin: isa
+      k: 2
+      m: 1
+      technique: reed_sol_van
+      ruleset-failure-domain: osd
+    op_weights:
+      read: 100
+      write: 0
+      append: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
+      setattr: 25
+      rmattr: 25
+- print: "**** done ec-rados-plugin=isa-k=2-m=1 9-workload"
--- /dev/null
+os_type: rhel
+os_version: "7.0"
+arch: x86_64
--- /dev/null
+os_type: ubuntu
+os_version: "14.04"
+arch: x86_64
--- /dev/null
+../stress-split/0-cluster
\ No newline at end of file
--- /dev/null
+../stress-split/1-hammer-install/
\ No newline at end of file
--- /dev/null
+../stress-split/2-partial-upgrade
\ No newline at end of file
--- /dev/null
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    - log bound mismatch
+tasks:
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
+    min_in: 4
+- print: "**** done thrashosds 3-thrash"
--- /dev/null
+../stress-split/4-mon
\ No newline at end of file
--- /dev/null
+../../../../../erasure-code/ec-rados-default.yaml
\ No newline at end of file
--- /dev/null
+../stress-split/6-next-mon
\ No newline at end of file
--- /dev/null
+../stress-split/8-next-mon
\ No newline at end of file
--- /dev/null
+../../../../../erasure-code/ec-rados-plugin=jerasure-k=3-m=1.yaml
\ No newline at end of file
--- /dev/null
+../stress-split/distros
\ No newline at end of file
--- /dev/null
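+# stress-split layout: all three mons, the mds and osd.0-6 on one node,
+# osd.7-13 on a second node, and a single client. Only the first node is
+# upgraded in 2-partial-upgrade, so the second half stays on hammer.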
+overrides:
+  ceph:
+    conf:
+      mon:
+        mon warn on legacy crush tunables: false
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mds.a
+  - osd.0
+  - osd.1
+  - osd.2
+  - osd.3
+  - osd.4
+  - osd.5
+  - osd.6
+- - osd.7
+  - osd.8
+  - osd.9
+  - osd.10
+  - osd.11
+  - osd.12
+  - osd.13
+- - client.0
--- /dev/null
+tasks:
+- install:
+    branch: hammer
+- print: "**** done install hammer"
+- ceph:
+    fs: xfs
+- print: "**** done ceph"
--- /dev/null
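+# Partial upgrade: install the new packages on the osd.0 node only and restart
+# the first half of the OSDs, leaving the cluster with mixed versions.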
+tasks:
+- install.upgrade:
+    osd.0:
+- print: "**** done install.upgrade osd.0"
+- ceph.restart:
+    daemons: [osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, osd.6]
+- print: "**** done ceph.restart 1st half"
--- /dev/null
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    - log bound mismatch
+tasks:
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
+- print: "**** done thrashosds 3-thrash"
--- /dev/null
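+# Roll mon.a onto the new version. wait-for-healthy is off, presumably because
+# HEALTH_OK is not expected mid-upgrade; the task only waits for the OSDs to
+# come back up.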
+tasks:
+- ceph.restart:
+    daemons: [mon.a]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.a"
--- /dev/null
+tasks:
+- workunit:
+    branch: hammer
+    clients:
+      client.0:
+      - cls/test_cls_rbd.sh
+- print: "**** done cls/test_cls_rbd.sh 5-workload"
--- /dev/null
+tasks:
+- workunit:
+    branch: hammer
+    clients:
+      client.0:
+      - rbd/import_export.sh
+    env:
+      RBD_CREATE_ARGS: --new-format
+- print: "**** done rbd/import_export.sh 5-workload"
--- /dev/null
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 500
+    op_weights:
+      read: 45
+      write: 45
+      delete: 10
+- print: "**** done rados/readwrite 5-workload"
--- /dev/null
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+- print: "**** done rados/snaps-few-objects 5-workload"
--- /dev/null
+tasks:
+- ceph.restart:
+    daemons: [mon.b]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.b 6-next-mon"
--- /dev/null
+tasks:
+- radosbench:
+    clients: [client.0]
+    time: 1800
+- print: "**** done radosbench 7-workload"
--- /dev/null
+tasks:
+- workunit:
+    branch: hammer
+    clients:
+      client.0:
+      - rbd/test_librbd.sh
+- print: "**** done rbd/test_librbd.sh 7-workload"
--- /dev/null
+tasks:
+- ceph.restart:
+    daemons: [mon.c]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.c 8-next-mon"
+- ceph.wait_for_mon_quorum: [a, b, c]
+- print: "**** done wait_for_mon_quorum 8-next-mon"
--- /dev/null
+tasks:
+- workunit:
+    branch: hammer
+    clients:
+      client.0:
+      - rbd/test_librbd_python.sh
+- print: "**** done rbd/test_librbd_python.sh 9-workload"
--- /dev/null
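+# Final rgw check: run radosgw on client.0 with a 300-second idle timeout and
+# exercise it with the Swift API tests.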
+tasks:
+- rgw:
+    client.0:
+      default_idle_timeout: 300
+- print: "**** done rgw 9-workload"
+- swift:
+    client.0:
+      rgw_server: client.0
+- print: "**** done swift 9-workload"
--- /dev/null
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 500
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
--- /dev/null
+../../../../distros/supported
\ No newline at end of file