+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-openstack:
- - machine:
- disk: 100 # GB
- - volumes: # attached to each instance
- count: 3
- size: 30 # GB
+++ /dev/null
-meta:
-- desc: |
- Run ceph on two nodes,
- with a separate client-only node.
- Use xfs beneath the osds.
-overrides:
- ceph:
- mon_bind_addrvec: false
- mon_bind_msgr2: false
- fs: xfs
- conf:
- global:
- ms dump corrupt message level: 0
- ms bind msgr2: false
- mds:
- debug ms: 1
- debug mds: 20
-roles:
-- - mon.a
- - mgr.x
- - mds.a
- - osd.0
- - osd.1
- - osd.2
- - osd.3
-- - mon.b
- - osd.4
- - osd.5
- - osd.6
- - osd.7
-- - mon.c
- - mgr.y
- - osd.8
- - osd.9
- - osd.10
- - osd.11
-- - client.0
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - \(MON_DOWN\)
- - \(MGR_DOWN\)
- - slow request
-meta:
-- desc: install ceph/luminous latest
-tasks:
-- install:
- branch: luminous
- exclude_packages:
- - librados3
- - ceph-mgr-dashboard
- - ceph-mgr-diskprediction-local
- - ceph-mgr-diskprediction-cloud
- - ceph-mgr-rook
- - ceph-mgr-ssh
- extra_packages: ['librados2']
-- print: "**** done install luminous"
-- ceph:
-- print: "**** done ceph"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- install upgrade ceph/-x on one node only
- 1st half
- restart : osd.0,1,2,3,4,5
-tasks:
-- install.upgrade:
- mon.a:
- mon.b:
-- print: "**** done install.upgrade osd.0"
-- ceph.restart:
- daemons: [mon.a, mon.b]
- wait-for-healthy: false
- mon-health-to-clog: false
-- ceph.restart:
- daemons: [osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, osd.6, osd.7]
- wait-for-healthy: false
-- print: "**** done ceph.restart 1st 2/3s"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- randomly kill and revive osd
- small chance to increase the number of pgs
-overrides:
- ceph:
- log-whitelist:
- - but it is still running
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- parallel:
- - split_tasks
-split_tasks:
- sequential:
- - thrashosds:
- disable_objectstore_tool_tests: true
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
- aggressive_pg_num_changes: false
- - print: "**** done thrashosds 3-thrash"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- run basic cls tests for rbd
-split_tasks:
- sequential:
- - workunit:
- branch: luminous
- clients:
- client.0:
- - cls/test_cls_rbd.sh
- - print: "**** done cls/test_cls_rbd.sh 5-workload"
+++ /dev/null
-meta:
-- desc: |
- run basic import/export cli tests for rbd
-split_tasks:
- sequential:
- - workunit:
- branch: luminous
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
- - print: "**** done rbd/import_export.sh 5-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool,
- using only reads, writes, and deletes
-split_tasks:
- sequential:
- - full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- write_append_excl: false
- op_weights:
- read: 45
- write: 45
- delete: 10
- - print: "**** done rados/readwrite 5-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool with snapshot operations
-split_tasks:
- sequential:
- - full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- write_append_excl: false
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- - print: "**** done rados/snaps-few-objects 5-workload"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- run randomized correctness test for rados operations
- generate write load with rados bench
-split_tasks:
- sequential:
- - full_sequential:
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - print: "**** done radosbench 7-workload"
+++ /dev/null
-meta:
-- desc: |
- librbd C and C++ api tests
-split_tasks:
- sequential:
- - workunit:
- branch: luminous
- clients:
- client.0:
- - rbd/test_librbd.sh
- - print: "**** done rbd/test_librbd.sh 7-workload"
+++ /dev/null
-meta:
-- desc: |
- install upgrade on remaining node
- restartin remaining osds
-overrides:
- ceph:
- log-whitelist:
- - overall HEALTH_
- - \(FS_DEGRADED\)
- - \(MDS_
-tasks:
-- install.upgrade:
- mon.c:
-- ceph.restart:
- daemons: [mon.c, mgr.x, mgr.y]
- wait-for-up: true
- wait-for-healthy: false
-- ceph.restart:
- daemons: [osd.8, osd.9, osd.10, osd.11]
- wait-for-up: true
- wait-for-healthy: false
-- ceph.restart:
- daemons: [mds.a]
- wait-for-up: true
- wait-for-healthy: false
-- exec:
- mon.a:
- - ceph mon enable-msgr2
-- install.upgrade:
- client.0:
+++ /dev/null
-.qa/releases/nautilus.yaml
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- librbd python api tests
-tasks:
-- workunit:
- clients:
- client.0:
- - rbd/test_librbd_python.sh
-- print: "**** done rbd/test_librbd_python.sh 9-workload"
+++ /dev/null
-meta:
-- desc: |
- swift api tests for rgw
-tasks:
-- rgw:
- client.0:
-- print: "**** done rgw 9-workload"
-- swift:
- client.0:
- rgw_server: client.0
-- print: "**** done swift 9-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool with snapshot operations
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- write_append_excl: false
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
+++ /dev/null
-.qa/distros/supported-random-distro$
\ No newline at end of file
+++ /dev/null
-.qa/tasks/thrashosds-health.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+openstack:
+ - machine:
+ disk: 100 # GB
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
--- /dev/null
+meta:
+- desc: |
+    Run ceph on three nodes,
+ with a separate client-only node.
+ Use xfs beneath the osds.
+overrides:
+ ceph:
+ mon_bind_addrvec: false
+ mon_bind_msgr2: false
+ fs: xfs
+ conf:
+ global:
+ ms dump corrupt message level: 0
+ ms bind msgr2: false
+ mds:
+ debug ms: 1
+ debug mds: 20
+roles:
+- - mon.a
+ - mgr.x
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+- - mon.b
+ - osd.4
+ - osd.5
+ - osd.6
+ - osd.7
+- - mon.c
+ - mgr.y
+ - osd.8
+ - osd.9
+ - osd.10
+ - osd.11
+- - client.0
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ log-whitelist:
+ - \(MON_DOWN\)
+ - \(MGR_DOWN\)
+ - slow request
+meta:
+- desc: install ceph/mimic latest
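+# note: the excluded packages only exist in newer releases (librados3 and
+# the separately packaged mgr modules), so excluding them lets the pinned
+# mimic install succeed; librados2 is requested explicitly since newer
+# builds ship librados3 instead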
+tasks:
+- install:
+ branch: mimic
+ exclude_packages:
+ - librados3
+ - ceph-mgr-dashboard
+ - ceph-mgr-diskprediction-local
+ - ceph-mgr-diskprediction-cloud
+ - ceph-mgr-rook
+ - ceph-mgr-ssh
+ extra_packages: ['librados2']
+- print: "**** done install luminous"
+- ceph:
+- print: "**** done ceph"
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+    install upgrade ceph/-x on the first two nodes only
+    restart : mon.a, mon.b and osd.0-7
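+# packages are upgraded on the mon.a and mon.b nodes only; mons restart
+# before their osds, and wait-for-healthy is false since a mixed-version
+# cluster may not reach HEALTH_OK until the upgrade finishes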
+tasks:
+- install.upgrade:
+ mon.a:
+ mon.b:
+- print: "**** done install.upgrade osd.0"
+- ceph.restart:
+ daemons: [mon.a, mon.b]
+ wait-for-healthy: false
+ mon-health-to-clog: false
+- ceph.restart:
+ daemons: [osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, osd.6, osd.7]
+ wait-for-healthy: false
+- print: "**** done ceph.restart 1st 2/3s"
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ randomly kill and revive osd
+ small chance to increase the number of pgs
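+# conservative thrashing for a mid-upgrade cluster: a 20-minute cap, a small
+# chance of pg_num growth, and objectstore tool tests disabled (the tool
+# cannot be assumed to work against both osd versions); note that the
+# workload fragments that follow contribute further entries to the same
+# split_tasks collection when teuthology merges the yaml fragments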
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- parallel:
+ - split_tasks
+split_tasks:
+ sequential:
+ - thrashosds:
+ disable_objectstore_tool_tests: true
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ aggressive_pg_num_changes: false
+ - print: "**** done thrashosds 3-thrash"
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ run basic cls tests for rbd
+split_tasks:
+ sequential:
+ - workunit:
+      branch: mimic
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
+ - print: "**** done cls/test_cls_rbd.sh 5-workload"
--- /dev/null
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+split_tasks:
+ sequential:
+ - workunit:
+      branch: mimic
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+ - print: "**** done rbd/import_export.sh 5-workload"
--- /dev/null
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool,
+ using only reads, writes, and deletes
+split_tasks:
+ sequential:
+ - full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 45
+ write: 45
+ delete: 10
+ - print: "**** done rados/readwrite 5-workload"
--- /dev/null
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+split_tasks:
+ sequential:
+ - full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ - print: "**** done rados/snaps-few-objects 5-workload"
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ run randomized correctness test for rados operations
+ generate write load with rados bench
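+# eleven consecutive 150-second rados bench runs; full_sequential runs them
+# back to back, keeping a steady write load on the cluster for the duration
+# of the upgrade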
+split_tasks:
+ sequential:
+ - full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ - print: "**** done radosbench 7-workload"
--- /dev/null
+meta:
+- desc: |
+ librbd C and C++ api tests
+split_tasks:
+ sequential:
+ - workunit:
+      branch: mimic
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ - print: "**** done rbd/test_librbd.sh 7-workload"
--- /dev/null
+meta:
+- desc: |
+    install upgrade on the remaining node
+    restarting the remaining mon, the mgrs, osd.8-11 and the mds
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(FS_DEGRADED\)
+ - \(MDS_
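+# bring the rest of the cluster onto the new version: the remaining mon and
+# both mgrs first, then the last osds, then the mds; msgr2 is enabled only
+# after every daemon has been restarted, and the client packages are
+# upgraded last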
+tasks:
+- install.upgrade:
+ mon.c:
+- ceph.restart:
+ daemons: [mon.c, mgr.x, mgr.y]
+ wait-for-up: true
+ wait-for-healthy: false
+- ceph.restart:
+ daemons: [osd.8, osd.9, osd.10, osd.11]
+ wait-for-up: true
+ wait-for-healthy: false
+- ceph.restart:
+ daemons: [mds.a]
+ wait-for-up: true
+ wait-for-healthy: false
+- exec:
+ mon.a:
+ - ceph mon enable-msgr2
+- install.upgrade:
+ client.0:
--- /dev/null
+.qa/releases/nautilus.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ librbd python api tests
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+- print: "**** done rbd/test_librbd_python.sh 9-workload"
--- /dev/null
+meta:
+- desc: |
+ swift api tests for rgw
+tasks:
+- rgw:
+ client.0:
+- print: "**** done rgw 9-workload"
+- swift:
+ client.0:
+ rgw_server: client.0
+- print: "**** done swift 9-workload"
--- /dev/null
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
--- /dev/null
+.qa/distros/supported-random-distro$
\ No newline at end of file
--- /dev/null
+.qa/tasks/thrashosds-health.yaml
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 30 # GB
+++ /dev/null
-meta:
-- desc: |
- Run ceph on two nodes,
- with a separate client 0,1,2 third node.
- Use xfs beneath the osds.
- CephFS tests running on client 2,3
-roles:
-- - mon.a
- - mgr.x
- - mds.a
- - osd.0
- - osd.1
- - osd.2
- - osd.3
-- - mon.b
- - osd.4
- - osd.5
- - osd.6
- - osd.7
-- - mon.c
- - osd.8
- - osd.9
- - osd.10
- - osd.11
-- - client.0
- - client.1
- - client.2
- - client.3
-overrides:
- ceph:
- mon_bind_msgr2: false
- mon_bind_addrvec: false
- log-whitelist:
- - scrub mismatch
- - ScrubResult
- - wrongly marked
- - \(POOL_APP_NOT_ENABLED\)
- - \(SLOW_OPS\)
- - overall HEALTH_
- - slow request
- - \(MON_MSGR2_NOT_ENABLED\)
- conf:
- global:
- enable experimental unrecoverable data corrupting features: "*"
- mon:
- mon warn on osd down out interval zero: false
- osd:
- osd class load list: "*"
- osd class default list: "*"
- fs: xfs
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- install ceph/luminous latest
- run workload and upgrade-sequence in parallel
- upgrade the client node
-tasks:
-- install:
- branch: luminous
- exclude_packages:
- - librados3
- - ceph-mgr-dashboard
- - ceph-mgr-diskprediction-local
- - ceph-mgr-diskprediction-cloud
- - ceph-mgr-rook
- - ceph-mgr-ssh
- extra_packages: ['librados2']
-- print: "**** done installing luminous"
-- ceph:
- log-whitelist:
- - overall HEALTH_
- - \(FS_
- - \(MDS_
- - \(OSD_
- - \(MON_DOWN\)
- - \(CACHE_POOL_
- - \(POOL_
- - \(MGR_DOWN\)
- - \(PG_
- - \(SMALLER_PGP_NUM\)
- - Monitor daemon marked osd
- - Behind on trimming
- - Manager daemon
- conf:
- global:
- mon warn on pool no app: false
-- exec:
- osd.0:
- - ceph osd require-osd-release luminous
- - ceph osd set-require-min-compat-client luminous
-- print: "**** done ceph"
-- install.upgrade:
- mon.a:
- mon.b:
- mon.c:
-- print: "**** done install.upgrade non-client hosts"
-- parallel:
- - workload
- - upgrade-sequence
-- print: "**** done parallel"
-- install.upgrade:
- client.0:
-- print: "**** done install.upgrade on client.0"
+++ /dev/null
-overrides:
- ceph:
- conf:
- osd:
- osd min pg log entries: 1
- osd max pg log entries: 2
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- run a cephfs stress test
- mount ceph-fuse on client.2 before running workunit
-workload:
- full_sequential:
- - sequential:
- - ceph-fuse:
- - print: "**** done ceph-fuse 2-workload"
- - workunit:
- clients:
- client.2:
- - suites/blogbench.sh
- - print: "**** done suites/blogbench.sh 2-workload"
+++ /dev/null
-meta:
-- desc: |
- run run randomized correctness test for rados operations
- on an erasure-coded pool
-workload:
- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- ec_pool: true
- write_append_excl: false
- op_weights:
- read: 100
- write: 0
- append: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
- setattr: 25
- rmattr: 25
- - print: "**** done rados ec task"
+++ /dev/null
-meta:
-- desc: |
- object class functional tests
-workload:
- full_sequential:
- - workunit:
- branch: luminous
- clients:
- client.0:
- - cls
- - print: "**** done cls 2-workload"
+++ /dev/null
-meta:
-- desc: |
- generate read/write load with rados objects ranging from 1MB to 25MB
-workload:
- full_sequential:
- - workunit:
- branch: luminous
- clients:
- client.0:
- - rados/load-gen-big.sh
- - print: "**** done rados/load-gen-big.sh 2-workload"
+++ /dev/null
-meta:
-- desc: |
- rgw ragweed prepare
-workload:
- full_sequential:
- - sequential:
- - rgw:
- - client.1
- - ragweed:
- client.1:
- default-branch: ceph-master
- rgw_server: client.1
- stages: prepare
- - print: "**** done rgw ragweed prepare 2-workload"
+++ /dev/null
-meta:
-- desc: |
- librbd C and C++ api tests
-workload:
- full_sequential:
- - workunit:
- branch: luminous
- clients:
- client.0:
- - rbd/test_librbd.sh
- - print: "**** done rbd/test_librbd.sh 2-workload"
+++ /dev/null
-meta:
-- desc: |
- librbd python api tests
-workload:
- full_sequential:
- - workunit:
- branch: luminous
- clients:
- client.0:
- - rbd/test_librbd_python.sh
- - print: "**** done rbd/test_librbd_python.sh 2-workload"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- upgrade the ceph cluster
-upgrade-sequence:
- sequential:
- - ceph.restart:
- daemons: [mon.a, mon.b, mon.c, mgr.x]
- mon-health-to-clog: false
- wait-for-healthy: false
- - exec:
- mon.a:
- - ceph config set global mon_warn_on_msgr2_not_enabled false
- - ceph.healthy:
- - ceph.restart:
- daemons: [osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, osd.6, osd.7, osd.8, osd.9, osd.10, osd.11]
- wait-for-healthy: false
- wait-for-osds-up: true
- - ceph.restart:
- daemons: [mds.a]
- wait-for-healthy: false
- wait-for-osds-up: true
- - print: "**** done ceph.restart all"
+++ /dev/null
-meta:
-- desc: |
- upgrade the ceph cluster,
- upgrate in two steps
- step one ordering: mon.a, osd.0, osd.1, mds.a
- step two ordering: mon.b, mon.c, osd.2, osd.3
- ceph expected to be healthy state after each step
-upgrade-sequence:
- sequential:
- - ceph.restart:
- daemons: [mon.a]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [mon.b, mgr.x]
- wait-for-healthy: true
- mon-health-to-clog: false
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [mon.c]
- wait-for-healthy: false
- mon-health-to-clog: false
- - exec:
- mon.a:
- - ceph config set global mon_warn_on_msgr2_not_enabled false
- - ceph.healthy:
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [osd.0, osd.1, osd.2, osd.3]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [osd.4, osd.5, osd.6, osd.7]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [osd.8, osd.9, osd.10, osd.11]
- wait-for-healthy: true
- - sleep:
- duration: 60
+++ /dev/null
-tasks:
-- exec:
- mon.a:
- - ceph mon enable-msgr2
- - ceph config rm global mon_warn_on_msgr2_not_enabled
+++ /dev/null
-.qa/releases/nautilus.yaml
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- run a cephfs stress test
- mount ceph-fuse on client.3 before running workunit
-tasks:
-- sequential:
- - ceph-fuse:
- - print: "**** done ceph-fuse 4-final-workload"
- - workunit:
- clients:
- client.3:
- - suites/blogbench.sh
- - print: "**** done suites/blogbench.sh 4-final-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool with snapshots
-tasks:
- - rados:
- clients: [client.1]
- ops: 4000
- objects: 50
- write_append_excl: false
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- - print: "**** done rados 4-final-workload"
+++ /dev/null
-meta:
-- desc: |
- generate read/write load with rados objects ranging from 1 byte to 1MB
-tasks:
- - workunit:
- clients:
- client.1:
- - rados/load-gen-mix.sh
- - print: "**** done rados/load-gen-mix.sh 4-final-workload"
+++ /dev/null
-meta:
-- desc: |
- librados C and C++ api tests
-overrides:
- ceph:
- log-whitelist:
- - reached quota
- - \(REQUEST_SLOW\)
-tasks:
- - mon_thrash:
- revive_delay: 20
- thrash_delay: 1
- - print: "**** done mon_thrash 4-final-workload"
- - workunit:
- clients:
- client.1:
- - rados/test.sh
- - print: "**** done rados/test.sh 4-final-workload"
+++ /dev/null
-meta:
-- desc: |
- rbd object class functional tests
-tasks:
- - workunit:
- clients:
- client.1:
- - cls/test_cls_rbd.sh
- - print: "**** done cls/test_cls_rbd.sh 4-final-workload"
+++ /dev/null
-meta:
-- desc: |
- run basic import/export cli tests for rbd
-tasks:
- - workunit:
- clients:
- client.1:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
- - print: "**** done rbd/import_export.sh 4-final-workload"
+++ /dev/null
-overrides:
- rgw:
- frontend: civetweb
-tasks:
- - sequential:
- - rgw: [client.1]
- - print: "**** done rgw 4-final-workload"
- - rgw-final-workload
+++ /dev/null
-meta:
-- desc: |
- ragweed check for rgw
-rgw-final-workload:
- full_sequential:
- - ragweed:
- client.1:
- default-branch: ceph-master
- rgw_server: client.1
- stages: check
- - print: "**** done ragweed check 4-final-workload"
+++ /dev/null
-meta:
-- desc: |
- swift api tests for rgw
-rgw-final-workload:
- full_sequential:
- - swift:
- client.1:
- force-branch: ceph-master
- rgw_server: client.1
- - print: "**** done swift 4-final-workload"
+++ /dev/null
-../stress-split/objectstore/
\ No newline at end of file
+++ /dev/null
-.qa/distros/supported-all-distro
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../stress-split/0-cluster/
\ No newline at end of file
+++ /dev/null
-../stress-split/1-ceph-install/
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- osd:
- osd min pg log entries: 1
- osd max pg log entries: 2
+++ /dev/null
-../stress-split/2-partial-upgrade/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- randomly kill and revive osd
- small chance to increase the number of pgs
-overrides:
- ceph:
- log-whitelist:
- - but it is still running
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- parallel:
- - stress-tasks
-stress-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
- min_in: 4
- chance_thrash_cluster_full: 0
- chance_thrash_pg_upmap: 0
- chance_thrash_pg_upmap_items: 0
- chance_force_recovery: 0
- aggressive_pg_num_changes: false
- disable_objectstore_tool_tests: true
-- print: "**** done thrashosds 3-thrash"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on an erasure coded pool
-stress-tasks:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- ec_pool: true
- write_append_excl: false
- op_weights:
- read: 100
- write: 0
- append: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
- setattr: 25
- rmattr: 25
- - print: "**** done rados ec task"
+++ /dev/null
-../stress-split/5-finish-upgrade.yaml
\ No newline at end of file
+++ /dev/null
-#
-# k=3 implies a stripe_width of 1376*3 = 4128 which is different from
-# the default value of 4096 It is also not a multiple of 1024*1024 and
-# creates situations where rounding rules during recovery becomes
-# necessary.
-#
-meta:
-- desc: |
- randomized correctness test for rados operations on an erasure coded pool
- using the jerasure plugin with k=3 and m=1
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- ec_pool: true
- write_append_excl: false
- erasure_code_profile:
- name: jerasure31profile
- plugin: jerasure
- k: 3
- m: 1
- technique: reed_sol_van
- crush-failure-domain: osd
- op_weights:
- read: 100
- write: 0
- append: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
- setattr: 25
- rmattr: 25
+++ /dev/null
-../stress-split/objectstore/
\ No newline at end of file
+++ /dev/null
-.qa/distros/supported-all-distro
\ No newline at end of file
+++ /dev/null
-.qa/tasks/thrashosds-health.yaml
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-openstack:
- - machine:
- disk: 100 # GB
- - volumes: # attached to each instance
- count: 4
- size: 30 # GB
+++ /dev/null
-meta:
-- desc: |
- Run ceph on two nodes,
- with a separate client-only node.
- Use xfs beneath the osds.
-overrides:
- ceph:
- mon_bind_msgr2: false
- mon_bind_addrvec: false
- fs: xfs
- log-whitelist:
- - overall HEALTH_
- - \(MON_DOWN\)
- - \(MGR_DOWN\)
- - slow request
- - \(MON_MSGR2_NOT_ENABLED\)
- conf:
- global:
- enable experimental unrecoverable data corrupting features: "*"
- mon warn on msgr2 not enabled: false
- mon:
- mon warn on osd down out interval zero: false
-roles:
-- - mon.a
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
- - osd.3
-- - mon.b
- - osd.4
- - osd.5
- - osd.6
- - osd.7
-- - mon.c
-- - osd.8
- - osd.9
- - osd.10
- - osd.11
-- - client.0
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: install ceph/luminous latest
-tasks:
-- install:
- branch: luminous
- exclude_packages:
- - librados3
- - ceph-mgr-dashboard
- - ceph-mgr-diskprediction-local
- - ceph-mgr-diskprediction-cloud
- - ceph-mgr-rook
- - ceph-mgr-ssh
- extra_packages: ['librados2']
-- print: "**** done install luminous"
-- ceph:
-- exec:
- osd.0:
- - ceph osd require-osd-release luminous
- - ceph osd set-require-min-compat-client luminous
-- print: "**** done ceph"
-overrides:
- ceph:
- conf:
- mon:
- mon warn on osd down out interval zero: false
+++ /dev/null
-overrides:
- ceph:
- conf:
- osd:
- osd min pg log entries: 1
- osd max pg log entries: 2
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- install upgrade ceph/-x on 2/3 of cluster
- restart : mons, osd.0-7
-tasks:
-- install.upgrade:
- mon.a:
- mon.b:
- mon.c:
-- print: "**** done install.upgrade of first 3 nodes"
-- ceph.restart:
- daemons: [mon.a,mon.b,mgr.x,osd.0,osd.1,osd.2,osd.3,osd.4,osd.5,osd.6,osd.7]
- mon-health-to-clog: false
-- print: "**** done ceph.restart of all mons and 2/3 of osds"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- randomly kill and revive osd
- small chance to increase the number of pgs
-overrides:
- ceph:
- log-whitelist:
- - but it is still running
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- parallel:
- - stress-tasks
-stress-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
- chance_thrash_cluster_full: 0
- chance_thrash_pg_upmap: 0
- chance_thrash_pg_upmap_items: 0
- disable_objectstore_tool_tests: true
- chance_force_recovery: 0
- aggressive_pg_num_changes: false
-- print: "**** done thrashosds 3-thrash"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- run randomized correctness test for rados operations
- generate write load with rados bench
-stress-tasks:
-- full_sequential:
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
-- print: "**** done radosbench 7-workload"
+++ /dev/null
-meta:
-- desc: |
- run basic cls tests for rbd
-stress-tasks:
-- workunit:
- branch: luminous
- clients:
- client.0:
- - cls/test_cls_rbd.sh
-- print: "**** done cls/test_cls_rbd.sh 5-workload"
+++ /dev/null
-meta:
-- desc: |
- run basic import/export cli tests for rbd
-stress-tasks:
-- workunit:
- branch: luminous
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
-- print: "**** done rbd/import_export.sh 5-workload"
+++ /dev/null
-meta:
-- desc: |
- librbd C and C++ api tests
-stress-tasks:
-- workunit:
- branch: luminous
- clients:
- client.0:
- - rbd/test_librbd.sh
-- print: "**** done rbd/test_librbd.sh 7-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool,
- using only reads, writes, and deletes
-stress-tasks:
-- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- write_append_excl: false
- op_weights:
- read: 45
- write: 45
- delete: 10
-- print: "**** done rados/readwrite 5-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool with snapshot operations
-stress-tasks:
-- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- write_append_excl: false
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
-- print: "**** done rados/snaps-few-objects 5-workload"
+++ /dev/null
-tasks:
-- install.upgrade:
- osd.8:
- client.0:
-- ceph.restart:
- daemons: [mon.c, osd.8, osd.9, osd.10, osd.11]
- wait-for-healthy: false
- wait-for-osds-up: true
-- exec:
- osd.0:
- - ceph osd set pglog_hardlimit
- - ceph osd dump --format=json-pretty | grep "flags"
- - ceph config set global mon_warn_on_msgr2_not_enabled false
-- print: "**** try to set pglog_hardlimit again, should succeed"
-
+++ /dev/null
-tasks:
-- exec:
- mon.a:
- - ceph mon enable-msgr2
- - ceph config rm global mon_warn_on_msgr2_not_enabled
-- ceph.healthy:
+++ /dev/null
-.qa/releases/nautilus.yaml
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- librbd python api tests
-overrides:
- ceph:
- conf:
- client:
- rbd default clone format: 1
-tasks:
-- workunit:
- branch: luminous
- clients:
- client.0:
- - rbd/test_librbd_python.sh
-- print: "**** done rbd/test_librbd_python.sh 9-workload"
+++ /dev/null
-meta:
-- desc: |
- swift api tests for rgw
-tasks:
-- rgw:
- client.0:
-- print: "**** done rgw 9-workload"
-- swift:
- client.0:
- rgw_server: client.0
-- print: "**** done swift 9-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool with snapshot operations
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- write_append_excl: false
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-.qa/objectstore/bluestore-bitmap.yaml
\ No newline at end of file
+++ /dev/null
-.qa/objectstore/filestore-xfs.yaml
\ No newline at end of file
+++ /dev/null
-.qa/distros/supported-all-distro
\ No newline at end of file
+++ /dev/null
-.qa/tasks/thrashosds-health.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
--- /dev/null
+meta:
+- desc: |
+    Run ceph on three nodes,
+    with a separate node for clients 0-3.
+    Use xfs beneath the osds.
+    CephFS tests run on clients 2 and 3.
+roles:
+- - mon.a
+ - mgr.x
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+- - mon.b
+ - osd.4
+ - osd.5
+ - osd.6
+ - osd.7
+- - mon.c
+ - osd.8
+ - osd.9
+ - osd.10
+ - osd.11
+- - client.0
+ - client.1
+ - client.2
+ - client.3
+overrides:
+ ceph:
+ mon_bind_msgr2: false
+ mon_bind_addrvec: false
+ log-whitelist:
+ - scrub mismatch
+ - ScrubResult
+ - wrongly marked
+ - \(POOL_APP_NOT_ENABLED\)
+ - \(SLOW_OPS\)
+ - overall HEALTH_
+ - slow request
+ - \(MON_MSGR2_NOT_ENABLED\)
+ conf:
+ global:
+ enable experimental unrecoverable data corrupting features: "*"
+ mon:
+ mon warn on osd down out interval zero: false
+ osd:
+ osd class load list: "*"
+ osd class default list: "*"
+ fs: xfs
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+    install ceph/nautilus latest
+ run workload and upgrade-sequence in parallel
+ upgrade the client node
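+# overall flow: install the starting release on all nodes, bring the cluster
+# up, upgrade packages on the non-client hosts, then run the "workload" and
+# "upgrade-sequence" collections at the same time via the parallel task;
+# client.0 is upgraded only after both finish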
+tasks:
+- install:
+ branch: nautilus
+- print: "**** done installing nautilus"
+- ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(FS_
+ - \(MDS_
+ - \(OSD_
+ - \(MON_DOWN\)
+ - \(CACHE_POOL_
+ - \(POOL_
+ - \(MGR_DOWN\)
+ - \(PG_
+ - \(SMALLER_PGP_NUM\)
+ - Monitor daemon marked osd
+ - Behind on trimming
+ - Manager daemon
+ conf:
+ global:
+ mon warn on pool no app: false
+- exec:
+ osd.0:
+ - ceph osd require-osd-release nautilus
+ - ceph osd set-require-min-compat-client nautilus
+- print: "**** done ceph"
+- install.upgrade:
+ mon.a:
+ mon.b:
+ mon.c:
+- print: "**** done install.upgrade non-client hosts"
+- parallel:
+ - workload
+ - upgrade-sequence
+- print: "**** done parallel"
+- install.upgrade:
+ client.0:
+- print: "**** done install.upgrade on client.0"
--- /dev/null
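+# keep the pg log very short (1-2 entries); presumably this pushes recovery
+# into the backfill path rather than log-based recovery while daemons
+# restart during the upgrade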
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd min pg log entries: 1
+ osd max pg log entries: 2
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ run a cephfs stress test
+ mount ceph-fuse on client.2 before running workunit
+workload:
+ full_sequential:
+ - sequential:
+ - ceph-fuse:
+ - print: "**** done ceph-fuse 2-workload"
+ - workunit:
+ clients:
+ client.2:
+ - suites/blogbench.sh
+ - print: "**** done suites/blogbench.sh 2-workload"
--- /dev/null
+meta:
+- desc: |
+    run randomized correctness test for rados operations
+ on an erasure-coded pool
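+# write is weighted 0 while append is 100: the test drives the ec pool with
+# append-style i/o, since overwrites are not generally supported on
+# erasure-coded pools without the ec-overwrites feature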
+workload:
+ full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec task"
--- /dev/null
+meta:
+- desc: |
+ object class functional tests
+workload:
+ full_sequential:
+ - workunit:
+      branch: nautilus
+ clients:
+ client.0:
+ - cls
+ - print: "**** done cls 2-workload"
--- /dev/null
+meta:
+- desc: |
+ generate read/write load with rados objects ranging from 1MB to 25MB
+workload:
+ full_sequential:
+ - workunit:
+      branch: nautilus
+ clients:
+ client.0:
+ - rados/load-gen-big.sh
+ - print: "**** done rados/load-gen-big.sh 2-workload"
--- /dev/null
+meta:
+- desc: |
+ rgw ragweed prepare
+workload:
+ full_sequential:
+ - sequential:
+ - rgw:
+ - client.1
+ - ragweed:
+ client.1:
+ default-branch: ceph-master
+ rgw_server: client.1
+ stages: prepare
+ - print: "**** done rgw ragweed prepare 2-workload"
--- /dev/null
+meta:
+- desc: |
+ librbd C and C++ api tests
+workload:
+ full_sequential:
+ - workunit:
+      branch: nautilus
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ - print: "**** done rbd/test_librbd.sh 2-workload"
--- /dev/null
+meta:
+- desc: |
+ librbd python api tests
+workload:
+ full_sequential:
+ - workunit:
+      branch: nautilus
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+ - print: "**** done rbd/test_librbd_python.sh 2-workload"
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ upgrade the ceph cluster
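+# single-pass restart: mons and mgr together, then all osds, then the mds;
+# the msgr2-not-enabled warning is silenced here because msgr2 is only
+# enabled by a later fragment, once the whole cluster is upgraded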
+upgrade-sequence:
+ sequential:
+ - ceph.restart:
+ daemons: [mon.a, mon.b, mon.c, mgr.x]
+ mon-health-to-clog: false
+ wait-for-healthy: false
+ - exec:
+ mon.a:
+ - ceph config set global mon_warn_on_msgr2_not_enabled false
+ - ceph.healthy:
+ - ceph.restart:
+ daemons: [osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, osd.6, osd.7, osd.8, osd.9, osd.10, osd.11]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - ceph.restart:
+ daemons: [mds.a]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - print: "**** done ceph.restart all"
--- /dev/null
+meta:
+- desc: |
+    upgrade the ceph cluster in steps
+    mon ordering: mon.a, then mon.b with mgr.x, then mon.c
+    osd ordering: osd.0-3, restart mds.a, then osd.4-7 and osd.8-11
+    ceph expected to be in a healthy state after each step
+upgrade-sequence:
+ sequential:
+ - ceph.restart:
+ daemons: [mon.a]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [mon.b, mgr.x]
+ wait-for-healthy: true
+ mon-health-to-clog: false
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [mon.c]
+ wait-for-healthy: false
+ mon-health-to-clog: false
+ - exec:
+ mon.a:
+ - ceph config set global mon_warn_on_msgr2_not_enabled false
+ - ceph.healthy:
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.0, osd.1, osd.2, osd.3]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.4, osd.5, osd.6, osd.7]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.8, osd.9, osd.10, osd.11]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
--- /dev/null
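+# every mon now runs the new release, so msgr2 can be enabled (the mons
+# begin binding the v2 port) and the temporary warning override is removed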
+tasks:
+- exec:
+ mon.a:
+ - ceph mon enable-msgr2
+ - ceph config rm global mon_warn_on_msgr2_not_enabled
--- /dev/null
+.qa/releases/nautilus.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ run a cephfs stress test
+ mount ceph-fuse on client.3 before running workunit
+tasks:
+- sequential:
+ - ceph-fuse:
+ - print: "**** done ceph-fuse 4-final-workload"
+ - workunit:
+ clients:
+ client.3:
+ - suites/blogbench.sh
+ - print: "**** done suites/blogbench.sh 4-final-workload"
--- /dev/null
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshots
+tasks:
+ - rados:
+ clients: [client.1]
+ ops: 4000
+ objects: 50
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ - print: "**** done rados 4-final-workload"
--- /dev/null
+meta:
+- desc: |
+ generate read/write load with rados objects ranging from 1 byte to 1MB
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - rados/load-gen-mix.sh
+ - print: "**** done rados/load-gen-mix.sh 4-final-workload"
--- /dev/null
+meta:
+- desc: |
+ librados C and C++ api tests
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+ - \(REQUEST_SLOW\)
+tasks:
+ - mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+ - print: "**** done mon_thrash 4-final-workload"
+ - workunit:
+ clients:
+ client.1:
+ - rados/test.sh
+ - print: "**** done rados/test.sh 4-final-workload"
--- /dev/null
+meta:
+- desc: |
+ rbd object class functional tests
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - cls/test_cls_rbd.sh
+ - print: "**** done cls/test_cls_rbd.sh 4-final-workload"
--- /dev/null
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+ - print: "**** done rbd/import_export.sh 4-final-workload"
--- /dev/null
+overrides:
+ rgw:
+ frontend: civetweb
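+# start rgw on client.1, then run the rgw-final-workload collection (the
+# ragweed check and swift fragments that follow) against it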
+tasks:
+ - sequential:
+ - rgw: [client.1]
+ - print: "**** done rgw 4-final-workload"
+ - rgw-final-workload
--- /dev/null
+meta:
+- desc: |
+ ragweed check for rgw
+rgw-final-workload:
+ full_sequential:
+ - ragweed:
+ client.1:
+ default-branch: ceph-master
+ rgw_server: client.1
+ stages: check
+ - print: "**** done ragweed check 4-final-workload"
--- /dev/null
+meta:
+- desc: |
+ swift api tests for rgw
+rgw-final-workload:
+ full_sequential:
+ - swift:
+ client.1:
+ force-branch: ceph-master
+ rgw_server: client.1
+ - print: "**** done swift 4-final-workload"
--- /dev/null
+../stress-split/objectstore/
\ No newline at end of file
--- /dev/null
+.qa/distros/supported-all-distro
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+../stress-split/0-cluster/
\ No newline at end of file
--- /dev/null
+../stress-split/1-ceph-install/
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd min pg log entries: 1
+ osd max pg log entries: 2
--- /dev/null
+../stress-split/2-partial-upgrade/
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ randomly kill and revive osd
+ small chance to increase the number of pgs
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- parallel:
+ - stress-tasks
+stress-tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ min_in: 4
+ chance_thrash_cluster_full: 0
+ chance_thrash_pg_upmap: 0
+ chance_thrash_pg_upmap_items: 0
+ chance_force_recovery: 0
+ aggressive_pg_num_changes: false
+ disable_objectstore_tool_tests: true
+- print: "**** done thrashosds 3-thrash"
--- /dev/null
+meta:
+- desc: |
+ randomized correctness test for rados operations on an erasure coded pool
+stress-tasks:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec task"
--- /dev/null
+../stress-split/5-finish-upgrade.yaml
\ No newline at end of file
--- /dev/null
+#
+# k=3 implies a stripe_width of 1376*3 = 4128 which is different from
+# the default value of 4096. It is also not a multiple of 1024*1024 and
+# creates situations where rounding rules during recovery become
+# necessary.
+#
+meta:
+- desc: |
+ randomized correctness test for rados operations on an erasure coded pool
+ using the jerasure plugin with k=3 and m=1
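+# with only three osd-hosting nodes in this layout, a host-level failure
+# domain could not separate the four shards (k=3, m=1), so the profile
+# places shards per osd instead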
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ erasure_code_profile:
+ name: jerasure31profile
+ plugin: jerasure
+ k: 3
+ m: 1
+ technique: reed_sol_van
+ crush-failure-domain: osd
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
--- /dev/null
+../stress-split/objectstore/
\ No newline at end of file
--- /dev/null
+.qa/distros/supported-all-distro
\ No newline at end of file
--- /dev/null
+.qa/tasks/thrashosds-health.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+openstack:
+ - machine:
+ disk: 100 # GB
+ - volumes: # attached to each instance
+ count: 4
+ size: 30 # GB
--- /dev/null
+meta:
+- desc: |
+    Run ceph on four nodes,
+ with a separate client-only node.
+ Use xfs beneath the osds.
+overrides:
+ ceph:
+ mon_bind_msgr2: false
+ mon_bind_addrvec: false
+ fs: xfs
+ log-whitelist:
+ - overall HEALTH_
+ - \(MON_DOWN\)
+ - \(MGR_DOWN\)
+ - slow request
+ - \(MON_MSGR2_NOT_ENABLED\)
+ conf:
+ global:
+ enable experimental unrecoverable data corrupting features: "*"
+ mon warn on msgr2 not enabled: false
+ mon:
+ mon warn on osd down out interval zero: false
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+- - mon.b
+ - osd.4
+ - osd.5
+ - osd.6
+ - osd.7
+- - mon.c
+- - osd.8
+ - osd.9
+ - osd.10
+ - osd.11
+- - client.0
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: install ceph/nautilus latest
+tasks:
+- install:
+ branch: nautilus
+- print: "**** done install nautilus"
+- ceph:
+- exec:
+ osd.0:
+ - ceph osd require-osd-release nautilus
+ - ceph osd set-require-min-compat-client nautilus
+- print: "**** done ceph"
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon warn on osd down out interval zero: false
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd min pg log entries: 1
+ osd max pg log entries: 2
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ install upgrade ceph/-x on 2/3 of cluster
+    restart : mon.a, mon.b, mgr.x and osd.0-7
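+# packages are upgraded on all three mon nodes, but only mon.a, mon.b,
+# mgr.x and osd.0-7 are restarted here; mon.c and osd.8-11 keep running the
+# old binaries, so the thrash and workload phases run against a mixed
+# cluster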
+tasks:
+- install.upgrade:
+ mon.a:
+ mon.b:
+ mon.c:
+- print: "**** done install.upgrade of first 3 nodes"
+- ceph.restart:
+ daemons: [mon.a,mon.b,mgr.x,osd.0,osd.1,osd.2,osd.3,osd.4,osd.5,osd.6,osd.7]
+ mon-health-to-clog: false
+- print: "**** done ceph.restart of all mons and 2/3 of osds"
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ randomly kill and revive osd
+ small chance to increase the number of pgs
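+# upmap, cluster-full and forced-recovery thrashing are disabled (chances
+# set to 0), presumably because the not-yet-upgraded osds cannot be relied
+# on to handle those operations mid-upgrade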
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- parallel:
+ - stress-tasks
+stress-tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ chance_thrash_cluster_full: 0
+ chance_thrash_pg_upmap: 0
+ chance_thrash_pg_upmap_items: 0
+ disable_objectstore_tool_tests: true
+ chance_force_recovery: 0
+ aggressive_pg_num_changes: false
+- print: "**** done thrashosds 3-thrash"
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ run randomized correctness test for rados operations
+ generate write load with rados bench
+stress-tasks:
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+- print: "**** done radosbench 7-workload"
--- /dev/null
+meta:
+- desc: |
+ run basic cls tests for rbd
+stress-tasks:
+- workunit:
+    branch: nautilus
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
+- print: "**** done cls/test_cls_rbd.sh 5-workload"
--- /dev/null
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+stress-tasks:
+- workunit:
+    branch: nautilus
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+- print: "**** done rbd/import_export.sh 5-workload"
--- /dev/null
+meta:
+- desc: |
+ librbd C and C++ api tests
+stress-tasks:
+- workunit:
+    branch: nautilus
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+- print: "**** done rbd/test_librbd.sh 7-workload"
--- /dev/null
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool,
+ using only reads, writes, and deletes
+stress-tasks:
+- full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 45
+ write: 45
+ delete: 10
+- print: "**** done rados/readwrite 5-workload"
--- /dev/null
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+stress-tasks:
+- full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+- print: "**** done rados/snaps-few-objects 5-workload"
--- /dev/null
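+# upgrade the remaining node (osd.8-11) and the client, restart mon.c and
+# the last osds, then set the pglog_hardlimit flag (it can only be set once
+# every osd runs the new release) and verify it appears in the osdmap flags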
+tasks:
+- install.upgrade:
+ osd.8:
+ client.0:
+- ceph.restart:
+ daemons: [mon.c, osd.8, osd.9, osd.10, osd.11]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+- exec:
+ osd.0:
+ - ceph osd set pglog_hardlimit
+ - ceph osd dump --format=json-pretty | grep "flags"
+ - ceph config set global mon_warn_on_msgr2_not_enabled false
+- print: "**** try to set pglog_hardlimit again, should succeed"
+
--- /dev/null
+tasks:
+- exec:
+ mon.a:
+ - ceph mon enable-msgr2
+ - ceph config rm global mon_warn_on_msgr2_not_enabled
+- ceph.healthy:
--- /dev/null
+.qa/releases/nautilus.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ librbd python api tests
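+# the default rbd clone format is pinned to 1, presumably so clones created
+# by the pinned-branch test scripts behave as those scripts expect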
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd default clone format: 1
+tasks:
+- workunit:
+  branch: nautilus
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+- print: "**** done rbd/test_librbd_python.sh 9-workload"
--- /dev/null
+meta:
+- desc: |
+ swift api tests for rgw
+tasks:
+- rgw:
+ client.0:
+- print: "**** done rgw 9-workload"
+- swift:
+ client.0:
+ rgw_server: client.0
+- print: "**** done swift 9-workload"
--- /dev/null
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/objectstore/bluestore-bitmap.yaml
\ No newline at end of file
--- /dev/null
+.qa/objectstore/filestore-xfs.yaml
\ No newline at end of file
--- /dev/null
+.qa/distros/supported-all-distro
\ No newline at end of file
--- /dev/null
+.qa/tasks/thrashosds-health.yaml
\ No newline at end of file