     fs: xfs
 - parallel:
    - workload
+   - upgrade-sequence
 - print: "**** done parallel"
--- /dev/null
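+# upgrade-sequence, all-at-once variant: upgrade the packages via mon.a and
+# mon.b to the version under test, then restart every mon, mds, and osd
+# daemon in a single pass.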
+upgrade-sequence:
+   sequential:
+   - install.upgrade:
+       mon.a:
+       mon.b:
+   - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
--- /dev/null
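+# Stage the cluster on firefly: upgrade mon.a, mon.b, and client.0 to the
+# firefly branch and restart all daemons, then run the firefly workloads
+# (workload2) in parallel with the upgrade-sequence to the version under
+# test, and finally bring client.0 to that version as well.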
+tasks:
+- install.upgrade:
+    mon.a:
+      branch: firefly
+    mon.b:
+      branch: firefly
+    client.0:
+      branch: firefly
+- print: "**** done install.upgrade"
+- ceph.restart:
+- print: "**** done restart"
+- parallel:
+   - workload2
+   - upgrade-sequence
+- print: "**** done parallel"
+- install.upgrade:
+    client.0:
+- print: "**** done install.upgrade client.0 to the version from teuthology-suite arg"
--- /dev/null
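+# workload2: firefly rados API tests (rados/test.sh) plus the cls workunits
+# on client.0, run in parallel with the upgrade-sequence.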
+workload2:
+   sequential:
+   - workunit:
+       branch: firefly
+       clients:
+         client.0:
+           - rados/test.sh
+           - cls
+   - print: "**** done rados/test.sh and cls 2"
--- /dev/null
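+# workload2: firefly rados big-object load generator on client.0.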
+workload2:
+   sequential:
+   - workunit:
+       branch: firefly
+       clients:
+         client.0:
+           - rados/load-gen-big.sh
+   - print: "**** done rados/load-gen-big.sh 2"
--- /dev/null
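+# workload2: firefly librbd C API tests on client.0.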
+workload2:
+   sequential:
+   - workunit:
+       branch: firefly
+       clients:
+         client.0:
+           - rbd/test_librbd.sh
+   - print: "**** done rbd/test_librbd.sh 2"
--- /dev/null
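+# workload2: firefly librbd Python binding tests on client.0.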
+workload2:
+   sequential:
+   - workunit:
+       branch: firefly
+       clients:
+         client.0:
+           - rbd/test_librbd_python.sh
+   - print: "**** done rbd/test_librbd_python.sh 2"
--- /dev/null
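+# upgrade-sequence, rolling variant: upgrade the mon.a node and restart
+# mon.a, osd.0, osd.1, and mds.a in turn, run for a while with mixed
+# versions, switch the crush tunables to firefly, then upgrade the mon.b
+# node and restart mon.b, mon.c, osd.2, and osd.3.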
+upgrade-sequence:
+   sequential:
+   - install.upgrade:
+       mon.a:
+   - print: "**** done install.upgrade mon.a to the version from teuthology-suite arg"
+   - ceph.restart:
+       daemons: [mon.a]
+       wait-for-healthy: true
+   - sleep:
+       duration: 60
+   - ceph.restart:
+       daemons: [osd.0, osd.1]
+       wait-for-healthy: true
+   - sleep:
+       duration: 60
+   - ceph.restart: [mds.a]
+   - sleep:
+       duration: 60
+   - print: "**** running mixed versions of osds and mons"
+   - exec:
+       mon.b:
+         - ceph osd crush tunables firefly
+   - install.upgrade:
+       mon.b:
+   - print: "**** done install.upgrade mon.b to the version from teuthology-suite arg"
+   - ceph.restart:
+       daemons: [mon.b, mon.c]
+       wait-for-healthy: true
+   - sleep:
+       duration: 60
+   - ceph.restart:
+       daemons: [osd.2, osd.3]
+       wait-for-healthy: true
+   - sleep:
+       duration: 60
+### removed to fix #9642
+#   - install.upgrade:
+#       client.0:
+#   - print: "*** client.0 upgraded"
--- /dev/null
+../../../../../erasure-code/ec-rados-plugin=jerasure-k=2-m=1.yaml
\ No newline at end of file
--- /dev/null
+../../../../../erasure-code/ec-rados-plugin=jerasure-k=3-m=1.yaml
\ No newline at end of file
--- /dev/null
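+# Final workload: rados snap create/remove and rollback stress on client.1
+# against the fully upgraded cluster.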
+tasks:
+- rados:
+    clients: [client.1]
+    ops: 4000
+    objects: 50
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
--- /dev/null
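+# Final workload: mixed rados load generator on client.1.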
+tasks:
+  - workunit:
+      clients:
+        client.1:
+          - rados/load-gen-mix.sh
--- /dev/null
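+# Final workload: thrash the monitors while the rados API tests run on
+# client.1.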
+tasks:
+  - sequential:
+      - mon_thrash:
+          revive_delay: 20
+          thrash_delay: 1
+      - workunit:
+          clients:
+            client.1:
+              - rados/test.sh
+  - print: "**** done rados/test.sh - 6-final-workload"
--- /dev/null
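+# Final workload: rbd cls (object class) tests on client.1.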
+tasks:
+- workunit:
+    clients:
+      client.1:
+        - cls/test_cls_rbd.sh
+
--- /dev/null
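+# Final workload: rbd import/export round trip on client.1 using format 2
+# (--new-format) images.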
+tasks:
+- workunit:
+    clients:
+      client.1:
+        - rbd/import_export.sh
+    env:
+      RBD_CREATE_ARGS: --new-format
--- /dev/null
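+# Final workload: bring up radosgw on client.1 and run the s3tests suite
+# against it.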
+tasks:
+- rgw: [client.1]
+- s3tests:
+    client.1:
+      rgw_server: client.1
--- /dev/null
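+# Final workload: run the swift API tests against the radosgw on client.1.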
+tasks:
+# Uncomment the next line if you have not already included rgw_s3tests.yaml in your test.
+# - rgw: [client.1]
+- swift:
+    client.1:
+      rgw_server: client.1