--- /dev/null
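+# Require mimic OSD features and raise the minimum client compat level,
+# then wait for the cluster to report healthy.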
+tasks:
+- exec:
+ osd.0:
+ - ceph osd require-osd-release mimic
+ - ceph osd set-require-min-compat-client mimic
+- ceph.healthy:
+++ /dev/null
-openstack:
- - machine:
- disk: 100 # GB
- - volumes: # attached to each instance
- count: 3
- size: 30 # GB
+++ /dev/null
-meta:
-- desc: |
- Run ceph on two nodes,
- with a separate client-only node.
- Use xfs beneath the osds.
-overrides:
- ceph:
- conf:
- mon:
- mon warn on legacy crush tunables: false
- fs: xfs
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - mgr.x
- - mgr.y
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-- - osd.3
- - osd.4
- - osd.5
-- - client.0
+++ /dev/null
-meta:
-- desc: install ceph/jewel latest
-tasks:
-- install:
- branch: jewel
- exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
-- print: "**** done install jewel"
-- ceph:
- skip_mgr_daemons: true
- add_osds_to_crush: true
- log-whitelist:
- - required past_interval bounds are empty
-- print: "**** done ceph"
+++ /dev/null
-meta:
-- desc: |
- install upgrade ceph/-x on one node only
- 1st half
- restart : osd.0,1,2,3,4,5
-tasks:
-- install.upgrade:
- osd.0:
-- print: "**** done install.upgrade osd.0"
-- ceph.restart:
- daemons: [mon.a, mon.b, mon.c]
- wait-for-healthy: false
-- ceph.restart:
- daemons: [osd.0, osd.1, osd.2]
- wait-for-healthy: false
-- print: "**** done ceph.restart 1st half"
+++ /dev/null
-meta:
-- desc: |
- randomly kill and revive osd
- small chance to increase the number of pgs
-overrides:
- ceph:
- log-whitelist:
- - but it is still running
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- parallel:
- - split_tasks
-split_tasks:
- sequential:
- - thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
- chance_thrash_cluster_full: 0
- chance_thrash_pg_upmap: 0
- chance_thrash_pg_upmap_items: 0
- chance_force_recovery: 0
- - print: "**** done thrashosds 3-thrash"
+++ /dev/null
-meta:
-- desc: |
- run basic cls tests for rbd
-split_tasks:
- sequential:
- - workunit:
- branch: jewel
- clients:
- client.0:
- - cls/test_cls_rbd.sh
- - print: "**** done cls/test_cls_rbd.sh 5-workload"
+++ /dev/null
-meta:
-- desc: |
- run basic import/export cli tests for rbd
-split_tasks:
- sequential:
- - workunit:
- branch: jewel
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
- - print: "**** done rbd/import_export.sh 5-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool,
- using only reads, writes, and deletes
-split_tasks:
- sequential:
- - full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- write_append_excl: false
- op_weights:
- read: 45
- write: 45
- delete: 10
- - print: "**** done rados/readwrite 5-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool with snapshot operations
-split_tasks:
- sequential:
- - full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- write_append_excl: false
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- - print: "**** done rados/snaps-few-objects 5-workload"
+++ /dev/null
-meta:
-- desc: |
- run randomized correctness test for rados operations
- generate write load with rados bench
-split_tasks:
- sequential:
- - full_sequential:
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - print: "**** done radosbench 7-workload"
+++ /dev/null
-meta:
-- desc: |
- librbd C and C++ api tests
-split_tasks:
- sequential:
- - workunit:
- branch: jewel
- clients:
- client.0:
- - rbd/test_librbd.sh
- - print: "**** done rbd/test_librbd.sh 7-workload"
+++ /dev/null
-meta:
-- desc: |
- install upgrade on remaining node
- restartin remaining osds
-overrides:
- ceph:
- log-whitelist:
- - overall HEALTH_
- - \(FS_DEGRADED\)
- - \(MDS_
-tasks:
-- install.upgrade:
- osd.3:
-- ceph.restart:
- daemons: [osd.3, osd.4, osd.5]
- wait-for-up: true
- wait-for-healthy: false
-- ceph.restart:
- daemons: [mds.a]
- wait-for-up: true
- wait-for-healthy: false
-- install.upgrade:
- client.0:
+++ /dev/null
-../../../../releases/luminous.yaml
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- librbd python api tests
-tasks:
-- workunit:
- clients:
- client.0:
- - rbd/test_librbd_python.sh
-- print: "**** done rbd/test_librbd_python.sh 9-workload"
+++ /dev/null
-meta:
-- desc: |
- swift api tests for rgw
-tasks:
-- rgw:
- client.0:
-- print: "**** done rgw 9-workload"
-- swift:
- client.0:
- rgw_server: client.0
-- print: "**** done swift 9-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool with snapshot operations
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- write_append_excl: false
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
+++ /dev/null
-../../../../tasks/thrashosds-health.yaml
\ No newline at end of file
--- /dev/null
+openstack:
+ - machine:
+ disk: 100 # GB
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
--- /dev/null
+meta:
+- desc: |
+ Run ceph on two nodes,
+ with a separate client-only node.
+ Use xfs beneath the osds.
+overrides:
+ ceph:
+ fs: xfs
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - mgr.y
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+- - client.0
--- /dev/null
+meta:
+- desc: install ceph/luminous latest
+tasks:
+- install:
+ branch: luminous
+- print: "**** done install luminous"
+- ceph:
+- print: "**** done ceph"
--- /dev/null
+meta:
+- desc: |
+ install upgrade ceph/-x on one node only
+ 1st half
+ restart : mon.a,b,c and osd.0,1,2
+tasks:
+- install.upgrade:
+ osd.0:
+- print: "**** done install.upgrade osd.0"
+- ceph.restart:
+ daemons: [mon.a, mon.b, mon.c]
+ wait-for-healthy: false
+- ceph.restart:
+ daemons: [osd.0, osd.1, osd.2]
+ wait-for-healthy: false
+- print: "**** done ceph.restart 1st half"
--- /dev/null
+meta:
+- desc: |
+ randomly kill and revive osd
+ small chance to increase the number of pgs
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- parallel:
+ - split_tasks
+split_tasks:
+ sequential:
+ - thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ - print: "**** done thrashosds 3-thrash"
--- /dev/null
+meta:
+- desc: |
+ run basic cls tests for rbd
+split_tasks:
+ sequential:
+ - workunit:
+ branch: luminous
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
+ - print: "**** done cls/test_cls_rbd.sh 5-workload"
--- /dev/null
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+split_tasks:
+ sequential:
+ - workunit:
+ branch: luminous
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+ - print: "**** done rbd/import_export.sh 5-workload"
--- /dev/null
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool,
+ using only reads, writes, and deletes
+split_tasks:
+ sequential:
+ - full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 45
+ write: 45
+ delete: 10
+ - print: "**** done rados/readwrite 5-workload"
--- /dev/null
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+split_tasks:
+ sequential:
+ - full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ - print: "**** done rados/snaps-few-objects 5-workload"
--- /dev/null
+meta:
+- desc: |
+ run randomized correctness test for rados operations
+ generate write load with rados bench
+split_tasks:
+ sequential:
+ - full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - print: "**** done radosbench 7-workload"
--- /dev/null
+meta:
+- desc: |
+ librbd C and C++ api tests
+split_tasks:
+ sequential:
+ - workunit:
+ branch: luminous
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ - print: "**** done rbd/test_librbd.sh 7-workload"
--- /dev/null
+meta:
+- desc: |
+ install upgrade on remaining node
+ restarting remaining osds
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(FS_DEGRADED\)
+ - \(MDS_
+tasks:
+- install.upgrade:
+ osd.3:
+- ceph.restart:
+ daemons: [osd.3, osd.4, osd.5]
+ wait-for-up: true
+ wait-for-healthy: false
+- ceph.restart:
+ daemons: [mds.a]
+ wait-for-up: true
+ wait-for-healthy: false
+- install.upgrade:
+ client.0:
--- /dev/null
+../../../../releases/mimic.yaml
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ librbd python api tests
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+- print: "**** done rbd/test_librbd_python.sh 9-workload"
--- /dev/null
+meta:
+- desc: |
+ swift api tests for rgw
+tasks:
+- rgw:
+ client.0:
+- print: "**** done rgw 9-workload"
+- swift:
+ client.0:
+ rgw_server: client.0
+- print: "**** done swift 9-workload"
--- /dev/null
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
--- /dev/null
+../../../../tasks/thrashosds-health.yaml
\ No newline at end of file