Merge hammer-x (jewel branch) and jewel-x (master branch).
Signed-off-by: Sage Weil <sage@redhat.com>
--- /dev/null
+meta:
+- desc: |
+ run a cephfs stress test
+ mount ceph-fuse on client.2 before running workunit
+workload:
+ full_sequential:
+ - sequential:
+ - ceph-fuse:
+ - print: "**** done ceph-fuse 2-workload"
+ - workunit:
+ clients:
+ client.2:
+ - suites/blogbench.sh
+ - print: "**** done suites/blogbench.sh 2-workload"
--- /dev/null
+workload:
+ full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec task"
workload:
- sequential:
- - workunit:
- branch: hammer
- clients:
- client.0:
- #- rados/test-upgrade-v9.0.1.sh
- - cls
- - print: "**** done 2-workload/rados_api.yaml"
+ full_sequential:
+ - workunit:
+ branch: hammer
+ clients:
+ client.0:
+ - cls
+ - print: "**** done cls 2-workload"
workload:
- sequential:
- - workunit:
- branch: hammer
- clients:
- client.0:
- - rados/load-gen-big.sh
- - print: "**** done rados/load-gen-big.sh"
+ full_sequential:
+ - workunit:
+ branch: hammer
+ clients:
+ client.0:
+ - rados/load-gen-big.sh
+ - print: "**** done rados/load-gen-big.sh 2-workload"
-#workload:
-# sequential:
-# - workunit:
-# branch: hammer
-# clients:
-# client.0:
-# - rbd/test_librbd.sh
-# - print: "**** done rbd/test_librbd.sh"
+workload:
+ full_sequential:
+ - workunit:
+ branch: hammer
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ - print: "**** done rbd/test_librbd.sh 2-workload"
-#workload:
-# sequential:
-# - workunit:
-# branch: hammer
-# clients:
-# client.0:
-# - rbd/test_librbd_python.sh
-# - print: "**** done rbd/test_librbd_python.sh"
+workload:
+ full_sequential:
+ - workunit:
+ branch: hammer
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+ - print: "**** done rbd/test_librbd_python.sh 2-workload"
upgrade-sequence:
sequential:
- - install.upgrade:
+ - ceph.restart:
+ daemons: [osd.0, osd.1, osd.2, osd.3]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - ceph.restart:
+ daemons: [mon.a, mon.b, mon.c]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - print: "**** done ceph.restart do not wait for healthy"
+ - exec:
mon.a:
- branch: jewel
- mon.b:
- branch: jewel
- - print: "**** done install.upgrade mon.a & mon.b to branch: jewel"
- - ceph.restart: [osd.0, osd.1, osd.2, osd.3]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a, mon.b, mon.c, mds.a]
- - print: "**** done ceph.restart all"
+ - sleep 300 # http://tracker.ceph.com/issues/17808
+ - ceph osd set require_jewel_osds
+ - ceph.healthy:
+ - print: "**** done ceph.healthy"
+++ /dev/null
-upgrade-sequence:
- sequential:
- - install.upgrade:
- mon.a:
- branch: jewel
- - print: "**** done install.upgrade mon.a to branch: jewel"
- - ceph.restart:
- daemons: [mon.a]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [osd.0, osd.1]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - print: "**** done ceph.restart [mon.a] [osd.0, osd.1] [mds.a]"
- - exec:
- mon.b:
- # is this command valid?
- - ceph osd crush tunables hammer
- - install.upgrade:
- mon.b:
- branch: jewel
- - print: "**** done install.upgrade mon.b to branch: jewel"
- - ceph.restart:
- daemons: [osd.2, osd.3]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [mon.b, mon.c]
- wait-for-healthy: true
- - print: "**** done ceph.restart [osd.2, osd.3] & [mon.b, mon.c]"
--- /dev/null
+upgrade-sequence:
+ sequential:
+ - ceph.restart:
+ daemons: [osd.0, osd.1]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.2, osd.3]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [mon.a]
+ wait-for-healthy: false
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - print: "**** running mixed versions of osds and mons"
+#do we need to use "ceph osd crush tunables hammer" ?
+ - exec:
+ mon.b:
+ - sudo ceph osd crush tunables hammer
+ - print: "**** done ceph osd crush tunables hammer"
+ - ceph.restart:
+ daemons: [mon.b, mon.c]
+ wait-for-healthy: false
+ - sleep:
+ duration: 30
+ - exec:
+ osd.0:
+ - sleep 300 # http://tracker.ceph.com/issues/17808
+ - ceph osd set require_jewel_osds
+ - ceph.healthy:
+ - sleep:
+ duration: 60
+++ /dev/null
-tasks:
- - parallel:
- - workload2
- - upgrade-sequence2
- - print: "**** done parallel workload2 and upgrade-sequence2"
- - install.upgrade:
- client.0:
- - print: "**** done install.upgrade client.0 to the version from teuthology-suite arg"
--- /dev/null
+../../../../releases/jewel.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+ - parallel:
+ - workload2
+ - upgrade-sequence2
+ - print: "**** done parallel workload2 and upgrade-sequence2"
+ - install.upgrade:
+ client.0:
+ - print: "**** done install.upgrade client.0 to the version from teuthology-suite arg"
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - reached quota
-workload2:
- sequential:
- - workunit:
- branch: jewel
- clients:
- client.0:
- - rados/test-upgrade-v11.0.0.sh
- - cls
- - print: "**** done rados/test-upgrade-v11.0.0.sh and cls"
+++ /dev/null
-workload2:
- sequential:
- - workunit:
- branch: jewel
- clients:
- client.0:
- - rados/load-gen-big.sh
- - print: "**** done rados/load-gen-big.sh 2"
+++ /dev/null
-#workload2:
-# sequential:
-# - workunit:
-# branch: hammer
-# clients:
-# client.0:
-# - rbd/test_librbd.sh
-# - print: "**** done rbd/test_librbd.sh 2"
+++ /dev/null
-#workload2:
-# sequential:
-# - workunit:
-# branch: hammer
-# clients:
-# client.0:
-# - rbd/test_librbd_python.sh
-# - print: "**** done rbd/test_librbd_python.sh 2"
+++ /dev/null
-upgrade-sequence2:
- sequential:
- - install.upgrade:
- mon.a:
- mon.b:
- - print: "**** done install.upgrade mon.a and mon.b"
- - ceph.restart: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
- - print: "**** done ceph.restart all"
- - exec:
- mon.b:
- - ceph osd set require_jewel_osds
- - print: "**** done exec 'ceph osd set require_jewel_osds'"
+++ /dev/null
-upgrade-sequence2:
- sequential:
- - install.upgrade:
- mon.a:
- - print: "**** done install.upgrade mon.a to the version from teuthology-suite arg"
- - ceph.restart:
- daemons: [mon.a]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [osd.0, osd.1]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - print: "**** running mixed versions of osds and mons"
- - exec:
- mon.b:
- - ceph osd crush tunables jewel
- - print: "**** done exec 'ceph osd crush tunables jewel'"
- - install.upgrade:
- mon.b:
- - print: "**** done install.upgrade mon.b to the version from teuthology-suite arg"
- - ceph.restart:
- daemons: [mon.b, mon.c]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [osd.2, osd.3]
- #wait-for-healthy: true
- - sleep:
- duration: 60
- - exec:
- mon.b:
- - ceph osd set require_jewel_osds
- - print: "**** done exec 'ceph osd set require_jewel_osds'"
--- /dev/null
+meta:
+- desc: |
+ run a cephfs stress test
+ mount ceph-fuse on client.2 before running workunit
+workload:
+ full_sequential:
+ - sequential:
+ - ceph-fuse:
+ - print: "**** done ceph-fuse 2-workload"
+ - workunit:
+ clients:
+ client.2:
+ - suites/blogbench.sh
+ - print: "**** done suites/blogbench.sh 2-workload"
--- /dev/null
+meta:
+- desc: |
+ run randomized correctness test for rados operations
+ on an erasure-coded pool
+workload:
+ full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec task"
--- /dev/null
+meta:
+- desc: |
+ object class functional tests
+workload:
+ full_sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - cls
+ - print: "**** done cls 2-workload"
--- /dev/null
+meta:
+- desc: |
+ generate read/write load with rados objects ranging from 1MB to 25MB
+workload:
+ full_sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rados/load-gen-big.sh
+ - print: "**** done rados/load-gen-big.sh 2-workload"
--- /dev/null
+meta:
+- desc: |
+ librbd C and C++ api tests
+workload:
+ full_sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ - print: "**** done rbd/test_librbd.sh 2-workload"
--- /dev/null
+meta:
+- desc: |
+ librbd python api tests
+workload:
+ full_sequential:
+ - workunit:
+ branch: jewel
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+ - print: "**** done rbd/test_librbd_python.sh 2-workload"
+++ /dev/null
-../../../../../erasure-code/ec-rados-plugin=jerasure-k=2-m=1.yaml
\ No newline at end of file
+++ /dev/null
-../../../../../erasure-code/ec-rados-plugin=jerasure-k=3-m=1.yaml
\ No newline at end of file
+++ /dev/null
-tasks:
-- rados:
- clients: [client.1]
- ops: 4000
- objects: 50
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
-- print: "**** done 7-final-workload/rados-snaps-few-objects.yaml"
+++ /dev/null
-tasks:
- - workunit:
- clients:
- client.1:
- - rados/load-gen-mix.sh
- - print: "**** done 7-final-workload/rados_loadgenmix.yaml"
+++ /dev/null
-tasks:
- - sequential:
- - mon_thrash:
- revive_delay: 20
- thrash_delay: 1
- - workunit:
- branch: jewel
- clients:
- client.1:
- - rados/test-upgrade-v11.0.0.sh
- - print: "**** done rados/test-upgrade-v11.0.0.sh from 7-final-workload"
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.1:
- - cls/test_cls_rbd.sh
-- print: "**** done 7-final-workload/rbd_cls.yaml"
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.1:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
-- print: "**** done rbd/import_export.sh from 7-final-workload"
+++ /dev/null
-tasks:
-- rgw: [client.1]
-- s3tests:
- client.1:
- rgw_server: client.1
-- print: "**** done rgw_server from 7-final-workload"
-overrides:
- ceph:
- conf:
- client:
- rgw lc debug interval: 10
--- /dev/null
+meta:
+- desc: |
+ upgrade the ceph cluster
+upgrade-sequence:
+ sequential:
+ - ceph.restart:
+ daemons: [mon.a, mon.b, mon.c, mds.a, osd.0, osd.1, osd.2, osd.3]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - exec:
+ mon.a:
+ - ceph osd set require_kraken_osds
+ - ceph.restart:
+ daemons: [osd.0]
+ - print: "**** done ceph.restart all"
--- /dev/null
+meta:
+- desc: |
+ upgrade the ceph cluster,
+ upgrade in two steps
+ step one ordering: mon.a, osd.0, osd.1, mds.a
+ step two ordering: mon.b, mon.c, osd.2, osd.3
+ ceph expected to be healthy state after each step
+upgrade-sequence:
+ sequential:
+ - ceph.restart:
+ daemons: [mon.a]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.0, osd.1]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - print: "**** running mixed versions of osds and mons"
+ - exec:
+ mon.b:
+ - sudo ceph osd crush tunables jewel
+ - print: "**** done ceph osd crush tunables jewel"
+ - ceph.restart:
+ daemons: [mon.b, mon.c]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.2, osd.3]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - exec:
+ mon.a:
+ - ceph osd set require_kraken_osds
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 60
--- /dev/null
+../../../../releases/kraken.yaml
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ run a cephfs stress test
+ mount ceph-fuse on client.3 before running workunit
+tasks:
+- sequential:
+ - ceph-fuse:
+ - print: "**** done ceph-fuse 5-final-workload"
+ - workunit:
+ clients:
+ client.3:
+ - suites/blogbench.sh
+ - print: "**** done suites/blogbench.sh 5-final-workload"
--- /dev/null
+tasks:
+- rados:
+ clients: [client.1]
+ ops: 4000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+- print: "**** done 7-final-workload/rados-snaps-few-objects.yaml"
--- /dev/null
+tasks:
+ - workunit:
+ clients:
+ client.1:
+ - rados/load-gen-mix.sh
+ - print: "**** done 7-final-workload/rados_loadgenmix.yaml"
--- /dev/null
+tasks:
+ - sequential:
+ - mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+ - workunit:
+ branch: jewel
+ clients:
+ client.1:
+ - rados/test-upgrade-v11.0.0.sh
+ - print: "**** done rados/test-upgrade-v11.0.0.sh from 7-final-workload"
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ client.1:
+ - cls/test_cls_rbd.sh
+- print: "**** done 7-final-workload/rbd_cls.yaml"
--- /dev/null
+tasks:
+- workunit:
+ clients:
+ client.1:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+- print: "**** done rbd/import_export.sh from 7-final-workload"
--- /dev/null
+tasks:
+- rgw: [client.1]
+- s3tests:
+ client.1:
+ rgw_server: client.1
+- print: "**** done rgw_server from 7-final-workload"
+overrides:
+ ceph:
+ conf:
+ client:
+ rgw lc debug interval: 10