--- /dev/null
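+# Cluster layout: three mons, an mds, and osd.0-2 on the first node,
+# osd.3-5 on the second, and the client on the third. The override
+# silences the legacy crush tunables warning that is expected while
+# mixed versions run during the upgrade.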
+overrides:
+  ceph:
+    conf:
+      mon:
+        mon warn on legacy crush tunables: false
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mds.a
+  - osd.0
+  - osd.1
+  - osd.2
+- - osd.3
+  - osd.4
+  - osd.5
+- - client.0
--- /dev/null
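+# Install the hammer release and bring the cluster up on xfs.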
+tasks:
+- install:
+    branch: hammer
+- print: "**** done install hammer"
+- ceph:
+    fs: xfs
+- print: "**** done ceph"
--- /dev/null
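+# Upgrade the packages on the node hosting osd.0, then restart the
+# first half of the OSDs so they run the new version next to hammer.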
+tasks:
+- install.upgrade:
+    osd.0:
+- print: "**** done install.upgrade osd.0"
+- ceph.restart:
+    daemons: [osd.0, osd.1, osd.2, osd.3]
+- print: "**** done ceph.restart 1st half"
--- /dev/null
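+# Thrash OSDs while the cluster is mid-upgrade; the whitelisted log
+# messages are expected fallout from killing and reviving OSDs.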
+overrides:
+  ceph:
+    log-whitelist:
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    - log bound mismatch
+tasks:
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
+- print: "**** done thrashosds 3-thrash"
--- /dev/null
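+# Restart mon.a; the cluster is not expected to report healthy during
+# the upgrade, so only wait for the OSDs to come back up.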
+tasks:
+- ceph.restart:
+    daemons: [mon.a]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.a"
--- /dev/null
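+# Run the hammer cls_rbd object-class tests against the mixed cluster.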
+tasks:
+- workunit:
+    branch: hammer
+    clients:
+      client.0:
+      - cls/test_cls_rbd.sh
+- print: "**** done cls/test_cls_rbd.sh 5-workload"
--- /dev/null
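+# Exercise rbd import/export from the hammer branch, with the new
+# image format selected through RBD_CREATE_ARGS.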
+tasks:
+- workunit:
+    branch: hammer
+    clients:
+      client.0:
+      - rbd/import_export.sh
+    env:
+      RBD_CREATE_ARGS: --new-format
+- print: "**** done rbd/import_export.sh 5-workload"
--- /dev/null
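+# Plain read/write rados workload: 4000 ops over 500 objects, weighted
+# 45/45/10 across reads, writes, and deletes.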
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 500
+    write_append_excl: false
+    op_weights:
+      read: 45
+      write: 45
+      delete: 10
+- print: "**** done rados/readwrite 5-workload"
--- /dev/null
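+# Snapshot workload over a small object set, mixing reads and writes
+# with snap create/remove and rollback.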
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    write_append_excl: false
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+- print: "**** done rados/snaps-few-objects 5-workload"
--- /dev/null
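+# Restart the next monitor, mon.b, on the new version.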
+tasks:
+- ceph.restart:
+    daemons: [mon.b]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.b 6-next-mon"
--- /dev/null
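+# Run radosbench from the client for 1800 seconds.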
+tasks:
+- radosbench:
+    clients: [client.0]
+    time: 1800
+- print: "**** done radosbench 7-workload"
--- /dev/null
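+# Run the hammer librbd test suite.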
+tasks:
+- workunit:
+    branch: hammer
+    clients:
+      client.0:
+      - rbd/test_librbd.sh
+- print: "**** done rbd/test_librbd.sh 7-workload"
--- /dev/null
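+# Restart the last monitor, mon.c, then wait until all three mons have
+# formed quorum before moving on.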
+tasks:
+- ceph.restart:
+    daemons: [mon.c]
+    wait-for-healthy: false
+    wait-for-osds-up: true
+- print: "**** done ceph.restart mon.c 8-next-mon"
+- ceph.wait_for_mon_quorum: [a, b, c]
+- print: "**** done wait_for_mon_quorum 8-next-mon"
--- /dev/null
+#
+# k=3 implies a stripe_width of 1376*3 = 4128, which is different from
+# the default value of 4096. It is also not a multiple of 1024*1024,
+# which creates situations where rounding rules during recovery become
+# necessary.
+#
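+# A sketch of that arithmetic (the 32-byte chunk alignment below is an
+# assumption about the jerasure plugin, not stated here): the default
+# stripe_width of 4096 split across k=3 gives 4096/3 = 1365.33 bytes
+# per chunk, which rounds up to 1376, and 1376*3 = 4128.
+#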
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    ec_pool: true
+    write_append_excl: false
+    erasure_code_profile:
+      name: jerasure31profile
+      plugin: jerasure
+      k: 3
+      m: 1
+      technique: reed_sol_van
+      ruleset-failure-domain: osd
+    op_weights:
+      read: 100
+      write: 0
+      append: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
+      setattr: 25
+      rmattr: 25
--- /dev/null
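+# Run the hammer librbd Python binding tests.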
+tasks:
+- workunit:
+    branch: hammer
+    clients:
+      client.0:
+      - rbd/test_librbd_python.sh
+- print: "**** done rbd/test_librbd_python.sh 9-workload"
--- /dev/null
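+# Start radosgw on client.0 with a 300-second idle timeout and run the
+# Swift API tests against it.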
+tasks:
+- rgw:
+    client.0:
+      default_idle_timeout: 300
+- print: "**** done rgw 9-workload"
+- swift:
+    client.0:
+      rgw_server: client.0
+- print: "**** done swift 9-workload"
--- /dev/null
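+# Snapshot workload again, this time over a larger set of 500 objects.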
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 500
+    write_append_excl: false
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
--- /dev/null
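+# Build a writeback cache tier over a base pool and run a pool-snapshot
+# workload through it; the scrub message is whitelisted because the
+# tier agent cannot activate until the pool has been scrubbed.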
+overrides:
+  ceph:
+    log-whitelist:
+    - must scrub before tier agent can activate
+tasks:
+- sequential:
+  - exec:
+      client.0:
+      - ceph osd pool create base 4
+      - ceph osd pool create cache 4
+      - ceph osd tier add base cache
+      - ceph osd tier cache-mode cache writeback
+      - ceph osd tier set-overlay base cache
+      - ceph osd pool set cache hit_set_type bloom
+      - ceph osd pool set cache hit_set_count 8
+      - ceph osd pool set cache hit_set_period 3600
+      - ceph osd pool set cache target_max_objects 250
+  - rados:
+      clients: [client.0]
+      pools: [base]
+      ops: 4000
+      objects: 500
+      pool_snaps: true
+      op_weights:
+        read: 100
+        write: 100
+        delete: 50
+        copy_from: 50
+        flush: 50
+        try_flush: 50
+        evict: 50
+        snap_create: 50
+        snap_remove: 50
+        rollback: 50
+  - print: "**** done test_cache-pool-snaps 9-workload"