+++ /dev/null
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 30 # GB
+++ /dev/null
-meta:
-- desc: |
- Run ceph on two nodes,
- with clients 0-3 on a third node and client 4 on a fourth node.
- Use xfs beneath the osds.
- CephFS tests run on clients 2 and 3.
-roles:
-- - mon.a
- - mgr.x
- - mds.a
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
-- - client.0
- - client.1
- - client.2
- - client.3
-- - client.4
-overrides:
- ceph:
- log-whitelist:
- - scrub mismatch
- - ScrubResult
- - wrongly marked
- - \(POOL_APP_NOT_ENABLED\)
- - overall HEALTH_
- conf:
- global:
- enable experimental unrecoverable data corrupting features: "*"
- mon:
- mon warn on osd down out interval zero: false
- osd:
- osd_class_load_list: "cephfs hello journal lock log numops rbd refcount
- replica_log rgw sdk statelog timeindex user version"
- osd_class_default_list: "cephfs hello journal lock log numops rbd refcount
- replica_log rgw sdk statelog timeindex user version"
- fs: xfs
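
Each file removed here is a suite fragment: teuthology generates one job per combination of fragments and deep-merges them into a single job config. A minimal sketch of that merge, assuming teuthology's deep_merge behaviour of merging mappings key by key and concatenating lists:

```yaml
# fragment A (cluster layout, like the file above)
overrides:
  ceph:
    fs: xfs
---
# fragment B (a workload that whitelists one warning)
overrides:
  ceph:
    log-whitelist:
    - reached quota
---
# merged job config: mappings merged, lists concatenated (assumption)
overrides:
  ceph:
    fs: xfs
    log-whitelist:
    - reached quota
```
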
+++ /dev/null
-meta:
-- desc: |
- install ceph/luminous latest
- run workload and upgrade-sequence in parallel
- upgrade the client node
-tasks:
-- install:
- branch: luminous
-- print: "**** done installing luminous"
-- ceph:
- log-whitelist:
- - overall HEALTH_
- - \(FS_
- - \(MDS_
- - \(OSD_
- - \(MON_DOWN\)
- - \(CACHE_POOL_
- - \(POOL_
- - \(MGR_DOWN\)
- - \(PG_
- - \(SMALLER_PGP_NUM\)
- - Monitor daemon marked osd
- - Behind on trimming
- - Manager daemon
- conf:
- global:
- mon warn on pool no app: false
-- exec:
- osd.0:
- - ceph osd require-osd-release luminous
- - ceph osd set-require-min-compat-client luminous
-- print: "**** done ceph"
-- install.upgrade:
- mon.a:
- mon.b:
-- print: "**** done install.upgrade both hosts"
-- parallel:
- - workload
- - upgrade-sequence
-- print: "**** done parallel"
-- install.upgrade:
- client.0:
-- print: "**** done install.upgrade on client.0"
+++ /dev/null
-meta:
-- desc: |
- run a cephfs stress test
- mount ceph-fuse on client.2 before running workunit
-workload:
- full_sequential:
- - sequential:
- - ceph-fuse:
- - print: "**** done ceph-fuse 2-workload"
- - workunit:
- clients:
- client.2:
- - suites/blogbench.sh
- - print: "**** done suites/blogbench.sh 2-workload"
+++ /dev/null
-meta:
-- desc: |
- run randomized correctness test for rados operations
- on an erasure-coded pool
-workload:
- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- ec_pool: true
- write_append_excl: false
- op_weights:
- read: 100
- write: 0
- append: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
- setattr: 25
- rmattr: 25
- - print: "**** done rados ec task"
+++ /dev/null
-meta:
-- desc: |
- object class functional tests
-workload:
- full_sequential:
- - workunit:
- branch: luminous
- clients:
- client.0:
- - cls
- - print: "**** done cls 2-workload"
+++ /dev/null
-meta:
-- desc: |
- generate read/write load with rados objects ranging from 1MB to 25MB
-workload:
- full_sequential:
- - workunit:
- branch: luminous
- clients:
- client.0:
- - rados/load-gen-big.sh
- - print: "**** done rados/load-gen-big.sh 2-workload"
+++ /dev/null
-meta:
-- desc: |
- librbd C and C++ api tests
-workload:
- full_sequential:
- - workunit:
- branch: luminous
- clients:
- client.0:
- - rbd/test_librbd.sh
- - print: "**** done rbd/test_librbd.sh 2-workload"
+++ /dev/null
-meta:
-- desc: |
- librbd python api tests
-workload:
- full_sequential:
- - workunit:
- branch: luminous
- clients:
- client.0:
- - rbd/test_librbd_python.sh
- - print: "**** done rbd/test_librbd_python.sh 2-workload"
+++ /dev/null
-meta:
-- desc: |
- upgrade the ceph cluster
-upgrade-sequence:
- sequential:
- - ceph.restart:
- daemons: [mon.a, mon.b, mon.c, mgr.x]
- - ceph.restart:
- daemons: [osd.0, osd.1, osd.2, osd.3]
- wait-for-healthy: false
- wait-for-osds-up: true
- - ceph.restart:
- daemons: [mds.a]
- wait-for-healthy: false
- wait-for-osds-up: true
- - print: "**** done ceph.restart all"
+++ /dev/null
-meta:
-- desc: |
- upgrade the ceph cluster,
- upgrading in two steps
- step one ordering: mon.a, osd.0, osd.1, mds.a
- step two ordering: mon.b, mon.c, osd.2, osd.3
- ceph is expected to be healthy after each step
-upgrade-sequence:
- sequential:
- - ceph.restart:
- daemons: [mon.a]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [mon.b, mon.c, mgr.x]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [osd.0, osd.1]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [mds.a]
- - sleep:
- duration: 120
- - ceph.restart:
- daemons: [osd.2, osd.3]
- wait-for-healthy: false
- wait-for-osds-up: true
- - sleep:
- duration: 60
+++ /dev/null
-meta:
-- desc: |
- run a cephfs stress test
- mount ceph-fuse on client.3 before running workunit
-tasks:
-- sequential:
- - ceph-fuse:
- - print: "**** done ceph-fuse 5-final-workload"
- - workunit:
- clients:
- client.3:
- - suites/blogbench.sh
- - print: "**** done suites/blogbench.sh 5-final-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool with snapshots
-tasks:
- - rados:
- clients: [client.1]
- ops: 4000
- objects: 50
- write_append_excl: false
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- - print: "**** done rados 4-final-workload"
+++ /dev/null
-meta:
-- desc: |
- generate read/write load with rados objects ranging from 1 byte to 1MB
-tasks:
- - workunit:
- clients:
- client.1:
- - rados/load-gen-mix.sh
- - print: "**** done rados/load-gen-mix.sh 4-final-workload"
+++ /dev/null
-meta:
-- desc: |
- librados C and C++ api tests
-overrides:
- ceph:
- log-whitelist:
- - reached quota
-tasks:
- - mon_thrash:
- revive_delay: 20
- thrash_delay: 1
- - print: "**** done mon_thrash 4-final-workload"
- - workunit:
- branch: luminous
- clients:
- client.1:
- - rados/test.sh
- - print: "**** done rados/test.sh 4-final-workload"
+++ /dev/null
-meta:
-- desc: |
- rbd object class functional tests
-tasks:
- - workunit:
- clients:
- client.1:
- - cls/test_cls_rbd.sh
- - print: "**** done cls/test_cls_rbd.sh 4-final-workload"
+++ /dev/null
-meta:
-- desc: |
- run basic import/export cli tests for rbd
- on a non-upgraded client
-tasks:
- - workunit:
- branch: luminous
- clients:
- client.4:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
- - print: "**** done rbd/import_export.sh 4-final-workload on NO upgrated client"
+++ /dev/null
-meta:
-- desc: |
- run basic import/export cli tests for rbd
- on an upgraded client
-tasks:
- - workunit:
- clients:
- client.1:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
- - print: "**** done rbd/import_export.sh 4-final-workload on upgrated client"
+++ /dev/null
-meta:
-- desc: |
- swift api tests for rgw
-overrides:
- rgw:
- frontend: civetweb
-tasks:
- - rgw: [client.1]
- - print: "**** done rgw 4-final-workload"
- - swift:
- client.1:
- rgw_server: client.1
- - print: "**** done swift 4-final-workload"
+++ /dev/null
-../../../../distros/supported/
\ No newline at end of file
+++ /dev/null
-../stress-split/objectstore/
\ No newline at end of file
+++ /dev/null
-../stress-split/0-cluster/
\ No newline at end of file
+++ /dev/null
-../stress-split/1-ceph-install/
\ No newline at end of file
+++ /dev/null
-../stress-split/2-partial-upgrade/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- randomly kill and revive osd
- small chance to increase the number of pgs
-overrides:
- ceph:
- log-whitelist:
- - but it is still running
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- parallel:
- - stress-tasks
-stress-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
- min_in: 4
- chance_thrash_cluster_full: 0
- chance_thrash_pg_upmap: 0
- chance_thrash_pg_upmap_items: 0
- chance_force_recovery: 0
-- print: "**** done thrashosds 3-thrash"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on an erasure coded pool
-stress-tasks:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- ec_pool: true
- write_append_excl: false
- op_weights:
- read: 100
- write: 0
- append: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
- setattr: 25
- rmattr: 25
- - print: "**** done rados ec task"
+++ /dev/null
-../stress-split/5-finish-upgrade.yaml
\ No newline at end of file
+++ /dev/null
-#
-# k=3 implies a stripe_width of 1376*3 = 4128, which is different from
-# the default value of 4096. It is also not a multiple of 1024*1024 and
-# creates situations where rounding rules during recovery become
-# necessary.
-#
-meta:
-- desc: |
- randomized correctness test for rados operations on an erasure coded pool
- using the jerasure plugin with k=3 and m=1
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- ec_pool: true
- write_append_excl: false
- erasure_code_profile:
- name: jerasure31profile
- plugin: jerasure
- k: 3
- m: 1
- technique: reed_sol_van
- crush-failure-domain: osd
- op_weights:
- read: 100
- write: 0
- append: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
- setattr: 25
- rmattr: 25
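
Writing out the arithmetic behind the header comment (the alignment rounding is an assumption about the jerasure plugin; the 1376 and 4128 figures come from the comment itself):

```yaml
# target stripe_width      = 4096 bytes (the default)
# raw chunk size for k=3   = 4096 / 3 = 1365.33 bytes
# rounded up for alignment -> 1376 bytes per chunk (assumed rounding)
# effective stripe_width   = 1376 * 3 = 4128 bytes
# 4128 != 4096, and it is not a multiple of 1024*1024, so recovery
# has to apply the rounding rules the comment refers to
```
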
+++ /dev/null
-../../../../distros/supported/
\ No newline at end of file
+++ /dev/null
-../stress-split/objectstore/
\ No newline at end of file
+++ /dev/null
-../../../../tasks/thrashosds-health.yaml
\ No newline at end of file
+++ /dev/null
-openstack:
- - machine:
- disk: 100 # GB
- - volumes: # attached to each instance
- count: 3
- size: 30 # GB
+++ /dev/null
-meta:
-- desc: |
- Run ceph on two nodes,
- with a separate client-only node.
- Use xfs beneath the osds.
-overrides:
- ceph:
- fs: xfs
- log-whitelist:
- - overall HEALTH_
- - \(MON_DOWN\)
- - \(MGR_DOWN\)
- conf:
- global:
- enable experimental unrecoverable data corrupting features: "*"
- mon:
- mon warn on osd down out interval zero: false
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
-- - osd.3
- - osd.4
- - osd.5
-- - client.0
+++ /dev/null
-meta:
-- desc: install ceph/luminous latest
-tasks:
-- install:
- branch: luminous
-- print: "**** done install luminous"
-- ceph:
-- exec:
- osd.0:
- - ceph osd require-osd-release luminous
- - ceph osd set-require-min-compat-client luminous
-- print: "**** done ceph "
-overrides:
- ceph:
- conf:
- mon:
- mon warn on osd down out interval zero: false
+++ /dev/null
-meta:
-- desc: |
- install upgrade of ceph/-x on one node only
- (1st half):
- restart mon.a/b/c, mgr.x and osd.0,1,2
-tasks:
-- install.upgrade:
- osd.0:
-- print: "**** done install.upgrade osd.0"
-- ceph.restart:
- daemons: [mon.a, mon.b, mon.c, mgr.x, osd.0, osd.1, osd.2]
-- print: "**** done ceph.restart 1st half"
+++ /dev/null
-meta:
-- desc: |
- randomly kill and revive osd
- small chance to increase the number of pgs
-overrides:
- ceph:
- log-whitelist:
- - but it is still running
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- parallel:
- - stress-tasks
-stress-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
- chance_thrash_cluster_full: 0
- chance_thrash_pg_upmap: 0
- chance_thrash_pg_upmap_items: 0
- disable_objectstore_tool_tests: true
- chance_force_recovery: 0
-- print: "**** done thrashosds 3-thrash"
+++ /dev/null
-meta:
-- desc: |
- run randomized correctness test for rados operations
- generate write load with rados bench
-stress-tasks:
-- full_sequential:
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
-- print: "**** done radosbench 7-workload"
+++ /dev/null
-meta:
-- desc: |
- run basic cls tests for rbd
-stress-tasks:
-- workunit:
- branch: luminous
- clients:
- client.0:
- - cls/test_cls_rbd.sh
-- print: "**** done cls/test_cls_rbd.sh 5-workload"
+++ /dev/null
-meta:
-- desc: |
- run basic import/export cli tests for rbd
-stress-tasks:
-- workunit:
- branch: luminous
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
-- print: "**** done rbd/import_export.sh 5-workload"
+++ /dev/null
-meta:
-- desc: |
- librbd C and C++ api tests
-stress-tasks:
-- workunit:
- branch: luminous
- clients:
- client.0:
- - rbd/test_librbd.sh
-- print: "**** done rbd/test_librbd.sh 7-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool,
- using only reads, writes, and deletes
-stress-tasks:
-- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- write_append_excl: false
- op_weights:
- read: 45
- write: 45
- delete: 10
-- print: "**** done rados/readwrite 5-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool with snapshot operations
-stress-tasks:
-- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- write_append_excl: false
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
-- print: "**** done rados/snaps-few-objects 5-workload"
+++ /dev/null
-tasks:
-- install.upgrade:
- osd.3:
- client.0:
-- ceph.restart:
- daemons: [osd.3, osd.4, osd.5]
- wait-for-healthy: false
- wait-for-osds-up: true
-
+++ /dev/null
-meta:
-- desc: |
- librbd python api tests
-tasks:
-- workunit:
- branch: luminous
- clients:
- client.0:
- - rbd/test_librbd_python.sh
-- print: "**** done rbd/test_librbd_python.sh 9-workload"
+++ /dev/null
-meta:
-- desc: |
- swift api tests for rgw
-tasks:
-- rgw:
- client.0:
-- print: "**** done rgw 9-workload"
-- swift:
- client.0:
- rgw_server: client.0
-- print: "**** done swift 9-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool with snapshot operations
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- write_append_excl: false
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
+++ /dev/null
-../../../../distros/supported/
\ No newline at end of file
+++ /dev/null
-../../../../../objectstore/bluestore.yaml
\ No newline at end of file
+++ /dev/null
-../../../../../objectstore/filestore-xfs.yaml
\ No newline at end of file
+++ /dev/null
-../../../../tasks/thrashosds-health.yaml
\ No newline at end of file