+++ /dev/null
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 30 # GB
+++ /dev/null
-meta:
-- desc: |
- Run ceph on two nodes,
-  with clients 0-3 on a separate third node.
- Use xfs beneath the osds.
-  CephFS tests run on clients 2 and 3.
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
-- - client.0
- - client.1
- - client.2
- - client.3
-overrides:
- ceph:
- log-whitelist:
- - scrub mismatch
- - ScrubResult
- - failed to encode map
- - wrongly marked
- conf:
- mon:
- mon warn on legacy crush tunables: false
- fs: xfs
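-# mon warn on legacy crush tunables is disabled above because the cluster
-# deliberately runs old and mixed versions during the upgrade.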
+++ /dev/null
-meta:
-- desc: |
- install ceph/infernalis latest
- run workload and upgrade-sequence in parallel
- upgrade the client node
-tasks:
-- install:
- branch: infernalis
-- print: "**** done installing infernalis"
-- ceph:
-- print: "**** done ceph"
-- install.upgrade:
- mon.a:
- mon.b:
-- print: "**** done install.upgrade mon.a and mon.b"
-- parallel:
- - workload
- - upgrade-sequence
-- print: "**** done parallel"
-- install.upgrade:
- client.0:
-- print: "**** done install.upgrade on client.0"
+++ /dev/null
-meta:
-- desc: |
- run a cephfs stress test
- mount ceph-fuse on client.2 before running workunit
-workload:
- full_sequential:
- - sequential:
- - ceph-fuse:
- - print: "**** done ceph-fuse 2-workload"
- - workunit:
- clients:
- client.2:
- - suites/blogbench.sh
- - print: "**** done suites/blogbench.sh 2-workload"
+++ /dev/null
-meta:
-- desc: |
-  run randomized correctness test for rados operations
- on an erasure-coded pool
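-# note: write: 0 with append: 100 below reflects that ec pools of this
-# vintage do not support in-place overwrites; objects can only be appended.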
-workload:
- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- ec_pool: true
- write_append_excl: false
- op_weights:
- read: 100
- write: 0
- append: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
- setattr: 25
- rmattr: 25
- - print: "**** done rados ec task"
+++ /dev/null
-meta:
-- desc: |
- object class functional tests
-workload:
- full_sequential:
- - workunit:
- branch: infernalis
- clients:
- client.0:
- - cls
- - print: "**** done cls 2-workload"
+++ /dev/null
-meta:
-- desc: |
- generate read/write load with rados objects ranging from 1MB to 25MB
-workload:
- full_sequential:
- - workunit:
- branch: infernalis
- clients:
- client.0:
- - rados/load-gen-big.sh
- - print: "**** done rados/load-gen-big.sh 2-workload"
+++ /dev/null
-meta:
-- desc: |
- librbd C and C++ api tests
-workload:
- full_sequential:
- - workunit:
- branch: infernalis
- clients:
- client.0:
- - rbd/test_librbd.sh
- - print: "**** done rbd/test_librbd.sh 2-workload"
+++ /dev/null
-meta:
-- desc: |
- librbd python api tests
-workload:
- full_sequential:
- - workunit:
- branch: infernalis
- clients:
- client.0:
- - rbd/test_librbd_python.sh
- - print: "**** done rbd/test_librbd_python.sh 2-workload"
+++ /dev/null
-meta:
-- desc: |
- upgrade the ceph cluster
-upgrade-sequence:
- sequential:
- - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
- - print: "**** done ceph.restart all"
+++ /dev/null
-meta:
-- desc: |
- upgrade the ceph cluster,
-  upgrade in two steps
- step one ordering: mon.a, osd.0, osd.1, mds.a
- step two ordering: mon.b, mon.c, osd.2, osd.3
-  ceph is expected to be in a healthy state after each step
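-# wait-for-healthy: true makes each restart below block until the cluster
-# reports healthy again, matching the desc above.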
-upgrade-sequence:
- sequential:
- - ceph.restart:
- daemons: [mon.a]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [osd.0, osd.1]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - print: "**** running mixed versions of osds and mons"
-  # do we need to use "ceph osd crush tunables hammer"?
- - exec:
- mon.b:
- - sudo ceph osd crush tunables hammer
- - print: "**** done ceph osd crush tunables hammer"
- - ceph.restart:
- daemons: [mon.b, mon.c]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [osd.2, osd.3]
- wait-for-healthy: true
- - sleep:
- duration: 60
+++ /dev/null
-../../../../releases/jewel.yaml
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- run a cephfs stress test
- mount ceph-fuse on client.3 before running workunit
-tasks:
-- sequential:
- - ceph-fuse:
- - print: "**** done ceph-fuse 5-final-workload"
- - workunit:
- clients:
- client.3:
- - suites/blogbench.sh
- - print: "**** done suites/blogbench.sh 5-final-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool with snapshots
-tasks:
- - rados:
- clients: [client.1]
- ops: 4000
- objects: 50
- write_append_excl: false
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- - print: "**** done rados 4-final-workload"
+++ /dev/null
-meta:
-- desc: |
- generate read/write load with rados objects ranging from 1 byte to 1MB
-tasks:
- - workunit:
- clients:
- client.1:
- - rados/load-gen-mix.sh
- - print: "**** done rados/load-gen-mix.sh 4-final-workload"
+++ /dev/null
-meta:
-- desc: |
-  librados C and C++ api tests, run while thrashing monitors
-overrides:
- ceph:
- log-whitelist:
- - reached quota
-tasks:
- - mon_thrash:
- revive_delay: 20
- thrash_delay: 1
- - print: "**** done mon_thrash 4-final-workload"
- - workunit:
- clients:
- client.1:
- - rados/test.sh
- - print: "**** done rados/test.sh 4-final-workload"
+++ /dev/null
-meta:
-- desc: |
- rbd object class functional tests
-tasks:
- - workunit:
- clients:
- client.1:
- - cls/test_cls_rbd.sh
- - print: "**** done cls/test_cls_rbd.sh 4-final-workload"
+++ /dev/null
-meta:
-- desc: |
- run basic import/export cli tests for rbd
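-# RBD_CREATE_ARGS: --new-format below makes the workunit create format 2
-# (layering-capable) rbd images instead of the old format 1 default.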
-tasks:
- - workunit:
- clients:
- client.1:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
- - print: "**** done rbd/import_export.sh 4-final-workload"
+++ /dev/null
-meta:
-- desc: |
- swift api tests for rgw
-overrides:
- rgw:
- frontend: civetweb
-tasks:
- - rgw: [client.1]
- - print: "**** done rgw 4-final-workload"
- - swift:
- client.1:
- rgw_server: client.1
- - print: "**** done swift 4-final-workload"
+++ /dev/null
-../../../../distros/supported/
\ No newline at end of file
+++ /dev/null
-../../../../distros/supported/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- Run ceph on two nodes, using one of them as a client,
- with a separate client-only node.
- Use xfs beneath the osds.
-  install ceph/infernalis v9.2.0 point version
-  run workload
-  upgrade to ceph/infernalis latest version
-  run workload and upgrade-sequence in parallel
-  upgrade to ceph/-x version (jewel)
-  run workload and upgrade-sequence in parallel
-overrides:
- ceph:
- log-whitelist:
- - reached quota
- - scrub
- - osd_map_max_advance
- - failed to encode
- - wrongly marked
- fs: xfs
- conf:
- mon:
- mon debug unsafe allow tier with nonempty snaps: true
- osd:
- osd map max advance: 1000
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - mon.c
- - osd.3
- - osd.4
- - osd.5
- - client.0
-- - client.1
-openstack:
-- volumes: # attached to each instance
- count: 3
- size: 30 # GB
-tasks:
-- print: "**** v9.2.0 about to install"
-- install:
- tag: v9.2.0
-- print: "**** done v9.2.0 install"
-- ceph:
- fs: xfs
-- print: "**** done ceph xfs"
-- sequential:
- - workload
-- print: "**** done workload v9.2.0"
-- install.upgrade:
- mon.a:
- branch: infernalis
- mon.b:
- branch: infernalis
-  # Note that client.1 IS NOT upgraded at this point
- #client.1:
- #branch: hammer
-- parallel:
- - workload_infernalis
- - upgrade-sequence_infernalis
-- print: "**** done parallel infernalis branch"
-- install.upgrade:
- client.1:
- branch: infernalis
-- print: "**** done branch: infernalis install.upgrade on client.1"
-- install.upgrade:
- mon.a:
- #branch: infernalis
- mon.b:
- #branch: infernalis
-- print: "**** done branch: -x install.upgrade on mon.a and mon.b"
-- parallel:
- - workload_x
- - upgrade-sequence_x
-- print: "**** done parallel -x branch"
-# Run test.sh on the -x upgraded cluster
-- install.upgrade:
- client.1:
-- workunit:
- clients:
- client.1:
- - rados/test.sh
- - cls
-- print: "**** done final test on -x cluster"
-#######################
-workload:
- sequential:
- - workunit:
- clients:
- client.0:
- - suites/blogbench.sh
-workload_infernalis:
- full_sequential:
- - workunit:
- branch: infernalis
- clients:
- client.1:
- - rados/test.sh
- - cls
- - print: "**** done rados/test.sh & cls workload_infernalis"
- - sequential:
- - rgw: [client.0]
- - print: "**** done rgw workload_infernalis"
- - s3tests:
- client.0:
- force-branch: ceph-infernalis
- rgw_server: client.0
- - print: "**** done s3tests workload_infernalis"
-upgrade-sequence_infernalis:
- sequential:
- - print: "**** done branch: infernalis install.upgrade"
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
-  - print: "**** done ceph.restart all infernalis branch mds/osd/mon"
-workload_x:
- sequential:
- - workunit:
- branch: infernalis
- clients:
- client.1:
- - rados/test-upgrade-from-9.2.sh
- - cls
-  - print: "**** done rados/test-upgrade-from-9.2.sh & cls workload_x NOT upgraded client"
- - workunit:
- clients:
- client.0:
- - rados/test.sh
- - cls
- - print: "**** done rados/test.sh & cls workload_x upgraded client"
- - rgw: [client.1]
- - print: "**** done rgw workload_x"
- - s3tests:
- client.1:
- force-branch: ceph-infernalis
- rgw_server: client.1
- - print: "**** done s3tests workload_x"
-upgrade-sequence_x:
- sequential:
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - print: "**** done ceph.restart all -x branch mds/osd/mon"
+++ /dev/null
-../stress-split/0-cluster/
\ No newline at end of file
+++ /dev/null
-arch: x86_64
+++ /dev/null
-../stress-split/1-infernalis-install/
\ No newline at end of file
+++ /dev/null
-../stress-split/2-partial-upgrade/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- randomly kill and revive osd
- small chance of increasing the number of pgs
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
- - failed to encode map e
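-# a hedged reading of the thrashosds knobs: timeout bounds each thrash
-# cycle, chance_pgnum_grow/chance_pgpnum_fix weight pg_num growth and
-# pgp_num realignment, and min_in: 4 keeps at least four osds in, enough
-# shards for the k=3 m=1 ec pool exercised later in this suite.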
-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
- min_in: 4
-- print: "**** done thrashosds 3-thrash"
+++ /dev/null
-../stress-split/4-mon/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on an erasure coded pool
-tasks:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- ec_pool: true
- write_append_excl: false
- op_weights:
- read: 100
- write: 0
- append: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
- setattr: 25
- rmattr: 25
- - print: "**** done rados ec task"
+++ /dev/null
-../stress-split/6-next-mon/
\ No newline at end of file
+++ /dev/null
-../stress-split/8-next-mon/
\ No newline at end of file
+++ /dev/null
-#
-# k=3 implies a stripe_width of 1376*3 = 4128, which is different from
-# the default value of 4096. It is also not a multiple of 1024*1024 and
-# creates situations where rounding rules during recovery become
-# necessary.
-#
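-# ruleset-failure-domain is set to osd rather than the default host
-# because the cluster spans only a few hosts; k=3 m=1 also needs all
-# four shards, presumably why the thrash fragment keeps min_in: 4.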
-meta:
-- desc: |
- randomized correctness test for rados operations on an erasure coded pool
- using the jerasure plugin with k=3 and m=1
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- ec_pool: true
- write_append_excl: false
- erasure_code_profile:
- name: jerasure31profile
- plugin: jerasure
- k: 3
- m: 1
- technique: reed_sol_van
- ruleset-failure-domain: osd
- op_weights:
- read: 100
- write: 0
- append: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
- setattr: 25
- rmattr: 25
+++ /dev/null
-../stress-split/0-cluster/
\ No newline at end of file
+++ /dev/null
-../stress-split/1-infernalis-install/
\ No newline at end of file
+++ /dev/null
-../stress-split/2-partial-upgrade/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- randomly kill and revive osd
- small chance to increase the number of pgs
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
- - failed to encode map e
-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
- min_in: 4
-- print: "**** done thrashosds 3-thrash"
+++ /dev/null
-../stress-split/4-mon/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on an erasure coded pool
-tasks:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- ec_pool: true
- write_append_excl: false
- op_weights:
- read: 100
- write: 0
- append: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
- setattr: 25
- rmattr: 25
- - print: "**** done rados ec task"
+++ /dev/null
-../stress-split/6-next-mon/
\ No newline at end of file
+++ /dev/null
-../stress-split/8-next-mon/
\ No newline at end of file
+++ /dev/null
-#
-# k=3 implies a stripe_width of 1376*3 = 4128, which is different from
-# the default value of 4096. It is also not a multiple of 1024*1024 and
-# creates situations where rounding rules during recovery become
-# necessary.
-#
-meta:
-- desc: |
- randomized correctness test for rados operations on an erasure coded pool
- using the jerasure plugin with k=3 and m=1
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- ec_pool: true
- write_append_excl: false
- erasure_code_profile:
- name: jerasure31profile
- plugin: jerasure
- k: 3
- m: 1
- technique: reed_sol_van
- ruleset-failure-domain: osd
- op_weights:
- read: 100
- write: 0
- append: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
- setattr: 25
- rmattr: 25
+++ /dev/null
-../../../../distros/supported/
\ No newline at end of file
+++ /dev/null
-openstack:
- - machine:
- disk: 100 # GB
- - volumes: # attached to each instance
- count: 3
- size: 30 # GB
+++ /dev/null
-meta:
-- desc: |
- Run ceph on two nodes,
- with a separate client-only node.
- Use xfs beneath the osds.
-overrides:
- ceph:
- conf:
- mon:
- mon warn on legacy crush tunables: false
- fs: xfs
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-- - osd.3
- - osd.4
- - osd.5
-- - client.0
+++ /dev/null
-meta:
-- desc: install ceph/infernalis latest
-tasks:
-- install:
- branch: infernalis
-- print: "**** done install infernalis"
-- ceph:
-- print: "**** done ceph"
+++ /dev/null
-meta:
-- desc: |
-  install the ceph/-x upgrade on one node only (1st half),
-  then restart osd.0,1,2,3,4,5
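-# install.upgrade is per-node: naming osd.0 upgrades every package on the
-# node hosting it (per 0-cluster, also mon.a-c and mds.a), so the restart
-# brings osd.0-2 up on -x while osd.3-5 come back on the old release.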
-tasks:
-- install.upgrade:
- osd.0:
-- print: "**** done install.upgrade osd.0"
-- ceph.restart:
- daemons: [osd.0, osd.1, osd.2, osd.3, osd.4, osd.5]
-- print: "**** done ceph.restart 1st half"
+++ /dev/null
-meta:
-- desc: |
- randomly kill and revive osd
- small chance to increase the number of pgs
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
- - failed to encode map e
-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
-- print: "**** done thrashosds 3-thrash"
+++ /dev/null
-meta:
-- desc: |
- restart mon.a so it is upgraded to -x
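-# wait-for-healthy is false here since health may not settle while the
-# mons run mixed versions mid-upgrade; wait-for-osds-up still gates on
-# all osds rejoining before the run continues.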
-tasks:
-- ceph.restart:
- daemons: [mon.a]
- wait-for-healthy: false
- wait-for-osds-up: true
-- print: "**** done ceph.restart mon.a"
+++ /dev/null
-meta:
-- desc: |
- run basic cls tests for rbd
-tasks:
-- workunit:
- branch: infernalis
- clients:
- client.0:
- - cls/test_cls_rbd.sh
-- print: "**** done cls/test_cls_rbd.sh 5-workload"
+++ /dev/null
-meta:
-- desc: |
- run basic import/export cli tests for rbd
-tasks:
-- workunit:
- branch: infernalis
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
-- print: "**** done rbd/import_export.sh 5-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool,
- using only reads, writes, and deletes
-tasks:
-- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- write_append_excl: false
- op_weights:
- read: 45
- write: 45
- delete: 10
-- print: "**** done rados/readwrite 5-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool with snapshot operations
-tasks:
-- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- write_append_excl: false
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
-- print: "**** done rados/snaps-few-objects 5-workload"
+++ /dev/null
-meta:
-- desc: |
- restart mon.b so it is upgraded to -x
-tasks:
-- ceph.restart:
- daemons: [mon.b]
- wait-for-healthy: false
- wait-for-osds-up: true
-- print: "**** done ceph.restart mon.b 6-next-mon"
+++ /dev/null
-meta:
-- desc: |
-  generate write load with rados bench
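-# eleven back-to-back 150s bench runs (~27.5 minutes of sustained writes)
-# keep client load on the cluster while the remaining upgrade steps run.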
-tasks:
-- full_sequential:
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
-- print: "**** done radosbench 7-workload"
+++ /dev/null
-meta:
-- desc: |
- librbd C and C++ api tests
-tasks:
-- workunit:
- branch: infernalis
- clients:
- client.0:
- - rbd/test_librbd.sh
-- print: "**** done rbd/test_librbd.sh 7-workload"
+++ /dev/null
-meta:
-- desc: |
- restart mon.c so it is upgraded to -x
-  as all mons are now upgraded, the ceph cluster is expected to reach quorum
-tasks:
-- ceph.restart:
- daemons: [mon.c]
- wait-for-healthy: false
- wait-for-osds-up: true
-- print: "**** done ceph.restart mon.c 8-next-mon"
-- ceph.wait_for_mon_quorum: [a, b, c]
-- print: "**** done wait_for_mon_quorum 8-next-mon"
+++ /dev/null
-meta:
-- desc: |
- librbd python api tests
-tasks:
-- workunit:
- branch: infernalis
- clients:
- client.0:
- - rbd/test_librbd_python.sh
-- print: "**** done rbd/test_librbd_python.sh 9-workload"
+++ /dev/null
-meta:
-- desc: |
- swift api tests for rgw
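-# default_idle_timeout is raised to 300s below, presumably so slow swift
-# test connections are not dropped by the gateway mid-run.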
-tasks:
-- rgw:
- client.0:
- default_idle_timeout: 300
-- print: "**** done rgw 9-workload"
-- swift:
- client.0:
- rgw_server: client.0
-- print: "**** done swift 9-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool with snapshot operations
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- write_append_excl: false
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
+++ /dev/null
-../../../../distros/supported/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- Run ceph on two nodes, using one of them as a client,
- with a separate client-only node.
- Use xfs beneath the osds.
-overrides:
- ceph:
- log-whitelist:
- - scrub
- - scrub mismatch
- - ScrubResult
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
- - failed to encode map
- - soft lockup
- - detected stalls on CPUs
- fs: xfs
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - mon.c
- - osd.3
- - osd.4
- - osd.5
- - client.0
-- - client.1
- - client.2
+++ /dev/null
-meta:
-- desc: |
- install ceph/hammer latest release
- run workload and upgrade-sequence in parallel
-tasks:
-- install:
- branch: hammer
-- print: "**** done latest hammer install"
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
-- print: "**** done parallel hammer"
+++ /dev/null
-meta:
-- desc: |
- install ceph/infernalis v9.2.0
- run workload and upgrade-sequence in parallel
-tasks:
-- install:
- tag: v9.2.0
-- print: "**** done v9.2.0 install"
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
-- print: "**** done parallel v9.2.0"
+++ /dev/null
-meta:
-- desc: |
- install ceph/infernalis v9.2.1
- run workload and upgrade-sequence in parallel
-tasks:
-- install:
- tag: v9.2.1
-- print: "**** done v9.2.1 install"
-- ceph:
-- parallel:
- - workload
- - upgrade-sequence
-- print: "**** done parallel v9.2.1"
+++ /dev/null
-meta:
-- desc: |
- run a cephfs stress test
- mount ceph-fuse on client.0 before running workunit
-workload:
- sequential:
- - ceph-fuse:
- - print: "**** done ceph-fuse 2-workload"
- - workunit:
- clients:
- client.0:
- - suites/blogbench.sh
- - print: "**** done suites/blogbench.sh 2-workload"
+++ /dev/null
-meta:
-- desc: |
- run basic import/export cli tests for rbd
-workload:
- sequential:
- - workunit:
- clients:
- client.1:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
- - print: "**** done rbd/import_export.sh 2-workload"
+++ /dev/null
-meta:
-- desc: |
-  run s3tests on client.0 against an rgw server on client.1
-workload:
- sequential:
- - rgw: [client.1]
- - s3tests:
- client.0:
- force-branch: hammer
- rgw_server: client.1
+++ /dev/null
-meta:
-- desc: |
- run randomized correctness test for rados operations
-workload:
- sequential:
- - rados:
- clients: [client.0]
- ops: 2000
- objects: 50
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
+++ /dev/null
-meta:
-- desc: |
- upgrade the ceph cluster,
- ordering: mon, mds, osd
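-# restarts are staggered with sleeps so each daemon can settle (quorum
-# for mons, up/in for osds) before the next one is bounced.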
-upgrade-sequence:
- sequential:
- - install.upgrade:
- mon.a:
- mon.b:
- - print: "**** done install.upgrade infernalis"
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 30
- - print: "**** done ceph.restart all"
+++ /dev/null
-meta:
-- desc: |
- upgrade the ceph cluster,
- ordering: osd, mon, mds
-upgrade-sequence:
- sequential:
- - install.upgrade:
- mon.a:
- mon.b:
- - print: "**** done install.upgrade infernalis"
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - print: "**** done ceph.restart all"
+++ /dev/null
-meta:
-- desc: |
- randomly kill and revive monitors
-tasks:
-- mon_thrash:
- revive_delay: 20
- thrash_delay: 1
-- print: "**** done mon_thrash 4-workload"
-- ceph-fuse:
-- print: "**** done ceph-fuse 4-workload"
-- workunit:
- clients:
- client.0:
- - suites/dbench.sh
-- print: "**** done suites/dbench.sh 4-workload"
-
+++ /dev/null
-meta:
-- desc: |
- randomly kill and revive osds
- increasing the number of pgs at random times
- run cephfs stress test
-tasks:
-- sequential:
- - thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
- - print: "**** done thrashosds 4-workload"
- - workunit:
- clients:
- client.0:
- - suites/iogen.sh
- - print: "**** done suites/iogen.sh 4-workload"
-
+++ /dev/null
-meta:
-- desc: |
- run randomized correctness test for rados operations
-tasks:
-- sequential:
- - rados:
- clients: [client.0]
- ops: 2000
- objects: 50
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
+++ /dev/null
-tasks:
-- sequential:
- - rgw: [client.2]
- - print: "**** done rgw: [client.2] 4-workload"
- - s3tests:
- client.2:
- force-branch: hammer
- rgw_server: client.2
- - print: "**** done s3tests 4-workload"
+++ /dev/null
-../../../../distros/supported/
\ No newline at end of file
+++ /dev/null
-../../../../distros/supported
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - reached quota
- - scrub
- - osd_map_max_advance
- fs: xfs
- conf:
- mon:
- mon debug unsafe allow tier with nonempty snaps: true
- osd:
- osd map max advance: 1000
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-- - mon.b
- - mon.c
- - osd.3
- - osd.4
- - osd.5
- - client.0
-- - client.1
-tasks:
-- print: "**** v9.2.0 about to install"
-- install:
- tag: v9.2.0
-- print: "**** done v9.2.0 install"
-- ceph:
- fs: xfs
-- print: "**** done ceph xfs"
-- sequential:
- - workload
-- print: "**** done workload v9.2.0"
-
-- parallel:
- - workload1
- - upgrade-sequence1
-- print: "**** done parallel v9.2.1"
-
-###- parallel:
-### - workloadX
-### - upgrade-sequenceX
-###- print: "**** done parallel v9.2.X"
-
-- parallel:
- - workload_infernalis
- - upgrade-sequence_infernalis
-- print: "**** done parallel infernalis branch"
-#######################
-workload:
- sequential:
- - workunit:
- clients:
- client.0:
- - suites/blogbench.sh
- - print: "**** done suites/blogbench.sh workload"
-
-workload1:
- sequential:
- - workunit:
- clients:
- client.0:
- - rados/load-gen-big.sh
- - print: "**** done rados/load-gen-big.sh workload1"
- - workunit:
- clients:
- client.0:
- - rados/test.sh
- - cls
- - print: "**** done rados/test.sh & cls workload1"
- - workunit:
- clients:
- client.0:
- - rbd/test_librbd.sh
- - print: "**** done rbd/test_librbd.sh workload1"
-upgrade-sequence1:
- sequential:
- - install.upgrade:
- mon.a:
- tag: v9.2.1
- mon.b:
- tag: v9.2.1
- client.1:
- tag: v9.2.1
- - print: "**** done v9.2.1 install.upgrade"
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 30
-  - print: "**** done ceph.restart all mon/mds/osd v9.2.1"
-
-
-### =====
-###workloadX:
-### sequential:
-### - workunit:
-### clients:
-### client.0:
-### - rados/load-gen-big.sh
-### - print: "**** done rados/load-gen-big.sh workload1"
-### - workunit:
-### clients:
-### client.0:
-### - rados/test.sh
-### - cls
-### - print: "**** done rados/test.sh & cls workload1"
-### - workunit:
-### clients:
-### client.0:
-### - rbd/test_librbd.sh
-### - print: "**** done rbd/test_librbd.sh workload1"
-###upgrade-sequenceX:
-### sequential:
-### - install.upgrade:
-### mon.a:
-### tag: v9.2.X
-### mon.b:
-### tag: v9.2.X
-### client.1:
-### tag: v9.2.X
-### - print: "**** done v9.2.X install.upgrade"
-### - ceph.restart: [mon.a]
-### - sleep:
-### duration: 60
-### - ceph.restart: [mon.b]
-### - sleep:
-### duration: 60
-### - ceph.restart: [mon.c]
-### - sleep:
-### duration: 60
-### - ceph.restart: [mds.a]
-### - sleep:
-### duration: 60
-### - ceph.restart: [osd.0]
-### - sleep:
-### duration: 30
-### - ceph.restart: [osd.1]
-### - sleep:
-### duration: 30
-### - ceph.restart: [osd.2]
-### - sleep:
-### duration: 30
-### - ceph.restart: [osd.3]
-### - sleep:
-### duration: 30
-### - ceph.restart: [osd.4]
-### - sleep:
-### duration: 30
-### - ceph.restart: [osd.5]
-### - sleep:
-### duration: 30
-### - print: "**** done ceph.restart all mon/mds/osd v9.2.X"
-
-workload_infernalis:
- sequential:
- - rgw: [client.0]
- - print: "**** done rgw workload_infernalis"
- - s3tests:
- client.0:
-      # for s3tests the ceph-infernalis branch should be used
-      # for new similar tests, check with Yehuda about the branch name
- force-branch: ceph-infernalis
- rgw_server: client.0
- - print: "**** done s3tests workload_infernalis"
-upgrade-sequence_infernalis:
- sequential:
- - install.upgrade:
- mon.a:
- branch: infernalis
- mon.b:
- branch: infernalis
- client.1:
- branch: infernalis
- - print: "**** done branch: infernalis install.upgrade"
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - print: "**** done ceph.restart all infernalis current branch mds/osd/mon"