+++ /dev/null
-roles:
-- [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, client.0]
-tasks:
-- install:
- branch: firefly
-- ceph:
- fs: xfs
- log-whitelist:
- - reached quota
+++ /dev/null
-tasks:
-- workunit:
- branch: firefly
- clients:
- client.0:
- - rados/test.sh
+++ /dev/null
-tasks:
-- ceph.stop: [mon.a, mon.b, mon.c]
-- ceph.stop: [osd.0, osd.1, osd.2]
-- install.upgrade:
- mon.a:
-- ceph.restart:
- daemons: [mon.a, mon.b, mon.c]
- wait-for-healthy: false
- wait-for-osds-up: false
-- exec:
- mon.a:
- - ceph osd down 0 1 2
-- ceph.restart: [osd.0, osd.1, osd.2]
-- sleep:
- duration: 10
+++ /dev/null
-../../../../releases/infernalis.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.0:
- - rados/test.sh
+++ /dev/null
-Verify that we can upgrade straight from firefly to x without ever installing
-hammer.
-
-This would be an offline upgrade, though: all OSDs have to be restarted!
+++ /dev/null
-../../../../distros/supported/
\ No newline at end of file
+++ /dev/null
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
-- - client.0
- - client.1
- - client.2
- - client.3
-overrides:
- ceph:
- log-whitelist:
- - scrub mismatch
- - ScrubResult
- - failed to encode map
- - wrongly marked me down
- - soft lockup
- - detected stalls on CPUs
- conf:
- mon:
- mon warn on legacy crush tunables: false
+++ /dev/null
-../../../../timezone/eastern.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
- branch: hammer
-- print: "**** done installing hammer"
-- ceph:
- fs: xfs
-- print: "**** done ceph"
-- install.upgrade:
- mon.a:
- mon.b:
-- print: "**** done install.upgrade mon.a and mon.b"
-- parallel:
- - workload
- - upgrade-sequence
-- print: "**** done parallel"
-- install.upgrade:
- client.0:
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- run a cephfs stress test
- mount ceph-fuse on client.2 before running the workunit
-workload:
- full_sequential:
- - sequential:
- - ceph-fuse:
- - print: "**** done ceph-fuse 2-workload"
- - workunit:
- clients:
- client.2:
- - suites/blogbench.sh
- - print: "**** done suites/blogbench.sh 2-workload"
+++ /dev/null
-workload:
- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- ec_pool: true
- write_append_excl: false
- op_weights:
- read: 100
- write: 0
- append: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
- setattr: 25
- rmattr: 25
- - print: "**** done rados ec task"
+++ /dev/null
-workload:
- full_sequential:
- - workunit:
- branch: hammer
- clients:
- client.0:
- - cls
- - print: "**** done cls 2-workload"
+++ /dev/null
-workload:
- full_sequential:
- - workunit:
- branch: hammer
- clients:
- client.0:
- - rados/load-gen-big.sh
- - print: "**** done rados/load-gen-big.sh 2-workload"
+++ /dev/null
-workload:
- full_sequential:
- - workunit:
- branch: hammer
- clients:
- client.0:
- - rbd/test_librbd.sh
- - print: "**** done rbd/test_librbd.sh 2-workload"
+++ /dev/null
-workload:
- full_sequential:
- - workunit:
- branch: hammer
- clients:
- client.0:
- - rbd/test_librbd_python.sh
- - print: "**** done rbd/test_librbd_python.sh 2-workload"
+++ /dev/null
-upgrade-sequence:
- sequential:
- - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
- - print: "**** done ceph.restart all"
+++ /dev/null
-upgrade-sequence:
- sequential:
- - ceph.restart:
- daemons: [mon.a]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [osd.0, osd.1]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - print: "**** running mixed versions of osds and mons"
-# do we need to use "ceph osd crush tunables hammer"?
- - exec:
- mon.b:
- - sudo ceph osd crush tunables hammer
- - print: "**** done ceph osd crush tunables hammer"
- - ceph.restart:
- daemons: [mon.b, mon.c]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [osd.2, osd.3]
- wait-for-healthy: true
- - sleep:
- duration: 60
+++ /dev/null
-../../../../releases/infernalis.yaml
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- run a cephfs stress test
- mount ceph-fuse on client.3 before running the workunit
-tasks:
-- sequential:
- - ceph-fuse:
- - print: "**** done ceph-fuse 5-final-workload"
- - workunit:
- clients:
- client.3:
- - suites/blogbench.sh
- - print: "**** done suites/blogbench.sh 5-final-workload"
+++ /dev/null
-tasks:
- - rados:
- clients: [client.1]
- ops: 4000
- objects: 50
- write_append_excl: false
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- - print: "**** done rados 4-final-workload"
+++ /dev/null
-tasks:
- - workunit:
- clients:
- client.1:
- - rados/load-gen-mix.sh
- - print: "**** done rados/load-gen-mix.sh 4-final-workload"
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - reached quota
-tasks:
- - mon_thrash:
- revive_delay: 20
- thrash_delay: 1
- - print: "**** done mon_thrash 4-final-workload"
- - workunit:
- clients:
- client.1:
- - rados/test.sh
- - print: "**** done rados/test.sh 4-final-workload"
+++ /dev/null
-tasks:
- - workunit:
- clients:
- client.1:
- - cls/test_cls_rbd.sh
- - print: "**** done cls/test_cls_rbd.sh 4-final-workload"
+++ /dev/null
-tasks:
- - workunit:
- clients:
- client.1:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
- - print: "**** done rbd/import_export.sh 4-final-workload"
+++ /dev/null
-overrides:
- rgw:
- frontend: civetweb
-tasks:
- - rgw: [client.1]
- - print: "**** done rgw 4-final-workload"
- - swift:
- client.1:
- rgw_server: client.1
- - print: "**** done swift 4-final-workload"
+++ /dev/null
-../../../../distros/supported
\ No newline at end of file
+++ /dev/null
-../stress-split/0-cluster
\ No newline at end of file
+++ /dev/null
-../../../../timezone/eastern.yaml
\ No newline at end of file
+++ /dev/null
-arch: x86_64
+++ /dev/null
-../stress-split/1-hammer-install
\ No newline at end of file
+++ /dev/null
-../stress-split/2-partial-upgrade
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- fs: xfs
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
- - failed to encode map e
-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
- min_in: 4
-- print: "**** done thrashosds 3-thrash"
+++ /dev/null
-../stress-split/4-mon
\ No newline at end of file
+++ /dev/null
-../../../../../erasure-code/ec-rados-default.yaml
\ No newline at end of file
+++ /dev/null
-../stress-split/6-next-mon
\ No newline at end of file
+++ /dev/null
-../stress-split/8-next-mon
\ No newline at end of file
+++ /dev/null
-../../../../../erasure-code/ec-rados-plugin=isa-k=2-m=1.yaml
\ No newline at end of file
+++ /dev/null
-../stress-split/0-cluster
\ No newline at end of file
+++ /dev/null
-../../../../timezone/eastern.yaml
\ No newline at end of file
+++ /dev/null
-../stress-split/1-hammer-install/
\ No newline at end of file
+++ /dev/null
-../stress-split/2-partial-upgrade
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- fs: xfs
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
- - failed to encode map e
- - soft lockup
- - detected stalls on CPUs
-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
- min_in: 4
-- print: "**** done thrashosds 3-thrash"
+++ /dev/null
-../stress-split/4-mon
\ No newline at end of file
+++ /dev/null
-../../../../../erasure-code/ec-rados-default.yaml
\ No newline at end of file
+++ /dev/null
-../stress-split/6-next-mon
\ No newline at end of file
+++ /dev/null
-../stress-split/8-next-mon
\ No newline at end of file
+++ /dev/null
-#
-# The shec plugin cannot be used because some OSDs are not upgraded
-# yet and would crash.
-#
-tasks:
-- exec:
- mon.a:
- - |-
- sudo ceph osd erasure-code-profile set profile-shec k=2 m=1 c=1 plugin=shec 2>&1 | grep "unsupported by"
+++ /dev/null
-../../../../../erasure-code/ec-rados-plugin=jerasure-k=3-m=1.yaml
\ No newline at end of file
+++ /dev/null
-../stress-split/distros
\ No newline at end of file
+++ /dev/null
-openstack:
- - machine:
- disk: 40 # GB
+++ /dev/null
-overrides:
- ceph:
- conf:
- mon:
- mon warn on legacy crush tunables: false
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - mds.a
- - osd.0
- - osd.1
- - osd.2
-- - osd.3
- - osd.4
- - osd.5
-- - client.0
+++ /dev/null
-../../../../timezone/eastern.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
- branch: hammer
-- print: "**** done install hammer"
-- ceph:
- fs: xfs
-- print: "**** done ceph"
+++ /dev/null
-tasks:
-- install.upgrade:
- osd.0:
-- print: "**** done install.upgrade osd.0"
-- ceph.restart:
- daemons: [osd.0, osd.1, osd.2, osd.3, osd.4, osd.5]
-- print: "**** done ceph.restart 1st half"
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
- - failed to encode map e
- - soft lockup
- - detected stalls on CPUs
-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
-- print: "**** done thrashosds 3-thrash"
+++ /dev/null
-tasks:
-- ceph.restart:
- daemons: [mon.a]
- wait-for-healthy: false
- wait-for-osds-up: true
-- print: "**** done ceph.restart mon.a"
+++ /dev/null
-tasks:
-- workunit:
- branch: hammer
- clients:
- client.0:
- - cls/test_cls_rbd.sh
-- print: "**** done cls/test_cls_rbd.sh 5-workload"
+++ /dev/null
-tasks:
-- workunit:
- branch: hammer
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
-- print: "**** done rbd/import_export.sh 5-workload"
+++ /dev/null
-tasks:
-- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- write_append_excl: false
- op_weights:
- read: 45
- write: 45
- delete: 10
-- print: "**** done rados/readwrite 5-workload"
+++ /dev/null
-tasks:
-- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- write_append_excl: false
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
-- print: "**** done rados/snaps-few-objects 5-workload"
+++ /dev/null
-tasks:
-- ceph.restart:
- daemons: [mon.b]
- wait-for-healthy: false
- wait-for-osds-up: true
-- print: "**** done ceph.restart mon.b 6-next-mon"
+++ /dev/null
-tasks:
-- full_sequential:
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
-- print: "**** done radosbench 7-workload"
+++ /dev/null
-tasks:
-- workunit:
- branch: hammer
- clients:
- client.0:
- - rbd/test_librbd.sh
-- print: "**** done rbd/test_librbd.sh 7-workload"
+++ /dev/null
-tasks:
-- ceph.restart:
- daemons: [mon.c]
- wait-for-healthy: false
- wait-for-osds-up: true
-- print: "**** done ceph.restart mon.c 8-next-mon"
-- ceph.wait_for_mon_quorum: [a, b, c]
-- print: "**** done wait_for_mon_quorum 8-next-mon"
+++ /dev/null
-tasks:
-- workunit:
- branch: hammer
- clients:
- client.0:
- - rbd/test_librbd_python.sh
-- print: "**** done rbd/test_librbd_python.sh 9-workload"
+++ /dev/null
-tasks:
-- rgw:
- client.0:
- default_idle_timeout: 300
-- print: "**** done rgw 9-workload"
-- swift:
- client.0:
- rgw_server: client.0
-- print: "**** done swift 9-workload"
+++ /dev/null
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- write_append_excl: false
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
+++ /dev/null
-../../../../distros/supported
\ No newline at end of file
+++ /dev/null
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - mds.a
- - osd.0
- - osd.1
-- - osd.2
- - osd.3
-- - client.0
-overrides:
- ceph:
- log-whitelist:
- - scrub mismatch
- - ScrubResult
- - failed to encode map
- - wrongly marked me down
- - soft lockup
- - detected stalls on CPUs
- conf:
- mon:
- mon warn on legacy crush tunables: false
+++ /dev/null
-tasks:
-- install:
- branch: hammer
-- print: "**** done install hammer"
-- ceph:
- fs: xfs
-- print: "**** done ceph"
+++ /dev/null
-tasks:
-- exec:
- client.0:
- - ceph osd erasure-code-profile set teuthologyprofile ruleset-failure-domain=osd k=2 m=1
- - ceph osd pool create base-pool 4 4 erasure teuthologyprofile
+++ /dev/null
-tasks:
-- exec:
- client.0:
- - ceph osd pool create base-pool 4
+++ /dev/null
-tasks:
-- exec:
- client.0:
- - ceph osd pool create cache-pool 4
- - ceph osd tier add base-pool cache-pool
- - ceph osd tier cache-mode cache-pool writeback
- - ceph osd tier set-overlay base-pool cache-pool
- - ceph osd pool set cache-pool hit_set_type bloom
- - ceph osd pool set cache-pool hit_set_count 8
- - ceph osd pool set cache-pool hit_set_period 5
+++ /dev/null
-tasks:
-- parallel:
- - workload-when-upgrading
- - upgrade-sequence
-- print: "**** done upgrade"
-
-workload-when-upgrading:
- sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- pools: [base-pool]
- write_append_excl: false
- op_weights:
- read: 100
- write: 0
- append: 100
- delete: 50
- copy_from: 50
- setattr: 25
- rmattr: 25
- - print: "**** done rados when upgrading"
-
-upgrade-sequence:
- sequential:
- - upgrade-first-half
- - flip-but-fail
- - upgrade-second-half
-
-upgrade-first-half:
- sequential:
- - install.upgrade:
- mon.a:
- - print: "**** done install.upgrade mon.{a,b,c} and osd.{0,1}"
- - ceph.restart:
- daemons: [mon.a]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [osd.0]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [osd.1]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [mon.b]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [mon.c]
- wait-for-healthy: true
- - sleep:
- duration: 60
-
-upgrade-second-half:
- sequential:
- - install.upgrade:
- osd.2:
- - print: "**** done install.upgrade osd.{2,3}"
- - ceph.restart:
- daemons: [osd.2]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [osd.3]
- wait-for-healthy: true
- - sleep:
- duration: 60
-
-flip-but-fail:
- sequential:
- - exec:
- mon.a:
- - |-
- ceph osd set sortbitwise 2>&1 | grep "not all up OSDs have OSD_BITWISE_HOBJ_SORT feature"
- - print: "**** done flip-but-fail"
+++ /dev/null
-tasks:
-- parallel:
- - workload-2
- - flip-and-success
-
-workload-2:
- sequential:
- - rados:
- clients: [client.0]
- ops: 1000
- objects: 50
- pools: [base-pool]
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- - print: "**** done rados after upgrading"
-
-flip-and-success:
- sequential:
- - exec:
- client.0:
- - ceph osd set sortbitwise
- - ceph osd pool set cache-pool use_gmt_hitset true
- - print: "**** done flip-and-success"
+++ /dev/null
-../../../../distros/supported/
\ No newline at end of file
+++ /dev/null
-../../../../distros/supported/
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - scrub mismatch
- - ScrubResult
- - failed to encode map
- conf:
- mon:
- mon warn on legacy crush tunables: false
+++ /dev/null
-#
-# Test the expected behavior of the
-#
-# CEPH_FEATURE_HAMMER_0_94_4
-#
-# feature that forbids a cluster with a mix of
-# OSDs < v0.94.4 and OSDs >= v0.94.4
-#
-roles:
-- - mon.a
- - osd.0
- - osd.1
-- - osd.2
-tasks:
-- print: "**** Install version lower than v0.94.4"
-- install:
- tag: v0.94.3
-- ceph:
- fs: xfs
-
-- print: "*** Upgrade the target that runs osd.0 and osd.1 to -x while the target that runs osd.2 stays v0.94.3"
-- install.upgrade:
- osd.0:
-
-- print: "*** Restart the mon.a so that it is post-hammer v0.94.4 and implements the CEPH_FEATURE_HAMMER_0_94_4 feature"
-- ceph.restart:
- daemons: [mon.a]
-
-- print: "*** Verify that osd.0 cannot restart because osd.1 and osd.2 are still < v0.94.4"
-- ceph.restart:
- daemons: [osd.0]
- wait-for-healthy: false
-- exec:
- osd.0:
- - |-
- set -x
- success=false
- for delay in 1 2 4 8 16 32 64 128 256 512 1024 ; do
- if ceph daemon osd.0 log flush ; then
- if grep "one or more pre-v0.94.4 hammer" /var/log/ceph/ceph-osd.0.log ; then
- success=true
- break
- fi
- fi
- sleep $delay
- done
- $success || exit 1
-
-- print: "*** Stop all OSDs and restart osd.0 and osd.1 which are >= v0.94.4"
-- ceph.stop:
- daemons: [osd.0, osd.1, osd.2]
-- exec:
- mon.a:
- - |-
- set -x
- ceph osd down osd.0
- ceph osd down osd.1
- ceph osd down osd.2
-- ceph.restart:
- daemons: [osd.0, osd.1]
- wait-for-healthy: false
-- exec:
- mon.a:
- - |-
- set -x
- success=false
- for delay in 1 2 4 8 16 32 64 128 256 512 1024 ; do
- if ceph osd dump | grep 'osd.1 up' && ceph osd dump | grep 'osd.0 up' ; then
- success=true
- break
- fi
- ceph osd dump
- sleep $delay
- done
- $success || exit 1
- ceph osd dump | grep 'osd.2 down' || exit 1
-
-- print: "*** Verify that osd.2 cannot restart because it is < v0.94.4 and all other OSDs are >= v0.94.4"
-- ceph.restart:
- daemons: [osd.2]
- wait-for-healthy: false
-- exec:
- mon.a:
- - |-
- set -x
- success=false
- for delay in 1 2 4 8 16 32 64 128 256 512 1024 ; do
- ceph daemon mon.a log flush
- if grep "disallowing boot of pre-hammer v0.94.4 OSD" /var/log/ceph/*.log ; then
- success=true
- break
- fi
- sleep $delay
- ceph osd dump
- done
- $success || exit 1
-
-- print: "*** Upgrade the target that runs osd.2 to -x and verify the cluster is back to being healthy"
-- install.upgrade:
- osd.2:
-- ceph.restart:
- daemons: [osd.2]