+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-roles:
-- - mon.a
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
-- - client.0
-overrides:
- ceph:
- log-whitelist:
- - failed to encode map
- fs: xfs
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
- branch: hammer
- exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
-- print: "**** done install hammer"
-upgrade_workload:
- sequential:
- - install.upgrade:
- exclude_packages: ['ceph-test-dbg']
- client.0:
- - print: "**** done install.upgrade client.0"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- client:
- rbd default features: 13
-tasks:
-- exec:
- client.0:
- - "cp $(which ceph_test_librbd_api) $TESTDIR/ceph_test_librbd_api"
-- sequential:
- - upgrade_workload
-- ceph:
-- print: "**** done ceph"
-- exec:
- client.0:
- - "cp --force $TESTDIR/ceph_test_librbd_api $(which ceph_test_librbd_api)"
- - "rm -rf $TESTDIR/ceph_test_librbd_api"
-- print: "**** done reverting to hammer ceph_test_librbd_api"
-- workunit:
- branch: hammer
- clients:
- client.0:
- - rbd/test_librbd_api.sh
- env:
- RBD_FEATURES: "13"
-- print: "**** done rbd/test_librbd_api.sh"
+++ /dev/null
-tasks:
-- sequential:
- - upgrade_workload
-- ceph:
-- print: "**** done ceph"
-- workunit:
- branch: hammer
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --image-feature layering,exclusive-lock,object-map
-- print: "**** done rbd/import_export.sh"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - osd.0
- - osd.1
- - osd.2
- - client.0
-- - client.1
-overrides:
- ceph:
- log-whitelist:
- - failed to encode map
- fs: xfs
- conf:
- client:
- rbd default features: 1
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
- branch: hammer
- exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
-- print: "**** done install hammer"
-- install.upgrade:
- exclude_packages: ['ceph-test-dbg']
- client.1:
-- print: "**** done install.upgrade client.1"
-- ceph:
-- print: "**** done ceph"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-tasks:
-- workunit:
- branch: hammer
- clients:
- client.0:
- - rbd/notify_master.sh
- client.1:
- - rbd/notify_slave.sh
- env:
- RBD_FEATURES: "13"
-- print: "**** done rbd: old librbd -> new librbd"
-- workunit:
- branch: hammer
- clients:
- client.0:
- - rbd/notify_slave.sh
- client.1:
- - rbd/notify_master.sh
- env:
- RBD_FEATURES: "13"
-- print: "**** done rbd: new librbd -> old librbd"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - osd.0
- - osd.1
- - osd.2
-- - client.0
-overrides:
- ceph:
- log-whitelist:
- - failed to encode map
- fs: xfs
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
- branch: jewel
- exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
-- print: "**** done install jewel"
-upgrade_workload:
- sequential:
- - install.upgrade:
- exclude_packages: ['ceph-test', 'ceph-test-dbg']
- client.0:
- - print: "**** done install.upgrade to -x on client.0"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-tasks:
-- exec:
- client.0:
- - "cp $(which ceph_test_librbd_api) $TESTDIR/ceph_test_librbd_api"
-- sequential:
- - upgrade_workload
-- ceph:
-- print: "**** done ceph"
-- exec:
- client.0:
- - "cp --force $TESTDIR/ceph_test_librbd_api $(which ceph_test_librbd_api)"
- - "rm -rf $TESTDIR/ceph_test_librbd_api"
-- print: "**** done reverting to jewel ceph_test_librbd_api"
-- workunit:
- branch: kraken
- clients:
- client.0:
- - rbd/test_librbd_api.sh
- env:
- RBD_FEATURES: "13"
-- print: "**** done rbd/test_librbd_api.sh"
+++ /dev/null
-tasks:
-- sequential:
- - upgrade_workload
-- ceph:
-- print: "**** done ceph"
-- workunit:
- branch: jewel
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --image-feature layering,exclusive-lock,object-map
-- print: "**** done rbd/import_export.sh"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - osd.0
- - osd.1
- - osd.2
- - client.0
-- - client.1
-overrides:
- ceph:
- log-whitelist:
- - failed to encode map
- fs: xfs
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
- branch: jewel
- exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
-- print: "**** done install jewel"
-- install.upgrade:
- exclude_packages: ['ceph-test', 'ceph-test-dbg']
- client.1:
-- print: "**** done install.upgrade to -x on client.0"
-- ceph:
-- print: "**** done ceph task"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- client:
- rbd default features: 61
-
+++ /dev/null
-overrides:
- ceph:
- conf:
- client:
- rbd default features: 1
-
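For reference, rbd default features is a bitmask of librbd feature bits. From memory (treat the exact values as an assumption): layering=1, striping=2, exclusive-lock=4, object-map=8, fast-diff=16, deep-flatten=32, journaling=64. Decoding the values used in these fragments:

    echo $((1 + 4 + 8))            # 13: layering + exclusive-lock + object-map
    echo $((1 + 4 + 8 + 16 + 32))  # 61: 13 plus fast-diff and deep-flatten
    echo $((1))                    #  1: layering only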
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-tasks:
-- workunit:
- branch: jewel
- clients:
- client.0:
- - rbd/notify_master.sh
- client.1:
- - rbd/notify_slave.sh
- env:
- RBD_FEATURES: "13"
-- print: "**** done rbd: old librbd -> new librbd"
-- workunit:
- branch: jewel
- clients:
- client.0:
- - rbd/notify_slave.sh
- client.1:
- - rbd/notify_master.sh
- env:
- RBD_FEATURES: "13"
-- print: "**** done rbd: new librbd -> old librbd"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- mon:
- mon warn on legacy crush tunables: false
- mon debug unsafe allow tier with nonempty snaps: true
- log-whitelist:
- - but it is still running
- - wrongly marked me down
- - reached quota
-roles:
-- - mon.a
- - osd.0
- - osd.1
- - mgr.x
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
-- - client.0
- - client.1
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
- branch: hammer
- exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
-- print: "**** done hammer"
-- ceph:
- fs: xfs
- skip_mgr_daemons: true
- add_osds_to_crush: true
-- install.upgrade:
- exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
- osd.0:
- branch: jewel
- osd.2:
- branch: jewel
-- print: "*** client.0 upgraded packages to jewel"
-- parallel:
- - workload
- - upgrade-sequence
-- print: "**** done parallel"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-workload:
- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- ec_pool: true
- write_append_excl: false
- op_weights:
- read: 100
- write: 0
- append: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
- setattr: 25
- rmattr: 25
- - print: "**** done rados ec task"
+++ /dev/null
-workload:
- full_sequential:
- - workunit:
- branch: hammer
- clients:
- client.0:
- - cls
- - print: "**** done cls 2-workload"
+++ /dev/null
-workload:
- full_sequential:
- - workunit:
- branch: hammer
- clients:
- client.0:
- - rados/load-gen-big.sh
- - print: "**** done rados/load-gen-big.sh 2-workload"
+++ /dev/null
-workload:
- full_sequential:
- - workunit:
- branch: hammer
- clients:
- client.0:
- - rbd/test_librbd.sh
- - print: "**** done rbd/test_librbd.sh 2-workload"
+++ /dev/null
-workload:
- full_sequential:
- - workunit:
- branch: hammer
- clients:
- client.0:
- - rbd/test_librbd_python.sh
- - print: "**** done rbd/test_librbd_python.sh 2-workload"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-upgrade-sequence:
- sequential:
- - ceph.restart:
- daemons: [osd.0, osd.1, osd.2, osd.3]
- wait-for-healthy: false
- wait-for-osds-up: true
- - ceph.restart:
- daemons: [mon.a, mon.b, mon.c]
- wait-for-healthy: false
- wait-for-osds-up: true
- - print: "**** done ceph.restart do not wait for healthy"
- - exec:
- mon.a:
- - sleep 300 # http://tracker.ceph.com/issues/17808
- - ceph osd set sortbitwise
- - ceph osd set require_jewel_osds
- - ceph.healthy:
- - print: "**** done ceph.healthy"
+++ /dev/null
-upgrade-sequence:
- sequential:
- - ceph.restart:
- daemons: [osd.0, osd.1]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [osd.2, osd.3]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [mon.a]
- wait-for-healthy: false
- - sleep:
- duration: 60
- - print: "**** running mixed versions of osds and mons"
-# do we need to use "ceph osd crush tunables hammer"?
- - exec:
- mon.b:
- - sudo ceph osd crush tunables hammer
- - print: "**** done ceph osd crush tunables hammer"
- - ceph.restart:
- daemons: [mon.b, mon.c]
- wait-for-healthy: false
- - sleep:
- duration: 30
- - exec:
- osd.0:
- - sleep 300 # http://tracker.ceph.com/issues/17808
- - ceph osd set sortbitwise
- - ceph osd set require_jewel_osds
- - ceph.healthy:
- - sleep:
- duration: 60
+++ /dev/null
-tasks:
-- install.upgrade:
- exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
- client.0:
- branch: jewel
+++ /dev/null
-.qa/releases/jewel.yaml
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-tasks:
- - install.upgrade:
- exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
- client.0:
- branch: jewel
- - print: "**** done install.upgrade client.0 to jewel"
- - install.upgrade:
- osd.0:
- osd.2:
- - print: "**** done install.upgrade daemons to x"
- - parallel:
- - workload2
- - upgrade-sequence2
- - print: "**** done parallel workload2 and upgrade-sequence2"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- run randomized correctness test for rados operations
- on an erasure-coded pool
-workload2:
- full_sequential:
- - rados:
- erasure_code_profile:
- name: teuthologyprofile2
- k: 2
- m: 1
- crush-failure-domain: osd
- clients: [client.0]
- ops: 4000
- objects: 50
- ec_pool: true
- write_append_excl: false
- op_weights:
- read: 100
- write: 0
- append: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
- setattr: 25
- rmattr: 25
- - print: "**** done rados ec task"
+++ /dev/null
-meta:
-- desc: |
- object class functional tests
-workload2:
- full_sequential:
- - workunit:
- branch: jewel
- clients:
- client.0:
- - cls
- - print: "**** done cls 2-workload"
+++ /dev/null
-meta:
-- desc: |
- generate read/write load with rados objects ranging from 1MB to 25MB
-workload2:
- full_sequential:
- - workunit:
- branch: jewel
- clients:
- client.0:
- - rados/load-gen-big.sh
- - print: "**** done rados/load-gen-big.sh 2-workload"
+++ /dev/null
-meta:
-- desc: |
- librbd C and C++ api tests
-workload2:
- full_sequential:
- - workunit:
- branch: jewel
- clients:
- client.0:
- - rbd/test_librbd.sh
- - print: "**** done rbd/test_librbd.sh 2-workload"
+++ /dev/null
-meta:
-- desc: |
- librbd python api tests
-workload2:
- full_sequential:
- - workunit:
- branch: jewel
- clients:
- client.0:
- - rbd/test_librbd_python.sh
- - print: "**** done rbd/test_librbd_python.sh 2-workload"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- upgrade the ceph cluster
-upgrade-sequence2:
- sequential:
- - ceph.restart:
- daemons: [mon.a, mon.b, mon.c, osd.0, osd.1, osd.2, osd.3]
- wait-for-healthy: false
- wait-for-osds-up: true
- - print: "**** done ceph.restart all"
+++ /dev/null
-meta:
-- desc: |
- upgrade the ceph cluster,
- upgrade in two steps
- step one ordering: mon.a, mon.b, mon.c, osd.0, osd.1
- step two ordering: osd.2, osd.3
- ceph expected to be in a healthy state after each step
-upgrade-sequence2:
- sequential:
- - ceph.restart:
- daemons: [mon.a, mon.b, mon.c]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [osd.0, osd.1]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - print: "**** running mixed versions of osds and mons"
- - exec:
- mon.b:
- - sudo ceph osd crush tunables jewel
- - print: "**** done ceph osd crush tunables jewel"
- - ceph.restart:
- daemons: [osd.2, osd.3]
- wait-for-healthy: false
- wait-for-osds-up: true
- - sleep:
- duration: 60
+++ /dev/null
-.qa/releases/luminous.yaml
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-tasks:
-- rados:
- clients: [client.1]
- ops: 4000
- objects: 50
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
-- print: "**** done 7-final-workload/rados-snaps-few-objects.yaml"
+++ /dev/null
-tasks:
- - workunit:
- clients:
- client.1:
- - rados/load-gen-mix.sh
- - print: "**** done 7-final-workload/rados_loadgenmix.yaml"
+++ /dev/null
-tasks:
- - sequential:
- - mon_thrash:
- revive_delay: 20
- thrash_delay: 1
- - workunit:
- branch: jewel
- clients:
- client.1:
- - rados/test-upgrade-v11.0.0.sh
- - print: "**** done rados/test-upgrade-v11.0.0.sh from 7-final-workload"
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.1:
- - cls/test_cls_rbd.sh
-- print: "**** done 7-final-workload/rbd_cls.yaml"
+++ /dev/null
-tasks:
-- workunit:
- clients:
- client.1:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
-- print: "**** done rbd/import_export.sh from 7-final-workload"
+++ /dev/null
-tasks:
-- rgw: [client.1]
-- s3tests:
- client.1:
- rgw_server: client.1
-- print: "**** done rgw_server from 7-final-workload"
-overrides:
- ceph:
- conf:
- client:
- rgw lc debug interval: 10
+++ /dev/null
-.qa/distros/supported
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../../jewel-x/stress-split/0-cluster
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
- branch: hammer
- exclude_packages:
- - ceph-mgr
- - libcephfs2
- - libcephfs-devel
- - libcephfs-dev
-- print: '**** done hammer'
-- ceph:
- fs: xfs
- skip_mgr_daemons: true
- add_osds_to_crush: true
-- install.upgrade:
- exclude_packages:
- - ceph-mgr
- - libcephfs2
- - libcephfs-devel
- - libcephfs-dev
- osd.0:
- branch: jewel
- osd.3:
- branch: jewel
-- print: '**** done install.upgrade osd.0 and osd.3 to jewel'
-- parallel:
- - workload-h-j
- - upgrade-sequence-h-j
-- print: '**** done parallel'
-- install.upgrade:
- client.0:
- branch: jewel
- exclude_packages:
- - ceph-mgr
- - libcephfs2
- - libcephfs-devel
- - libcephfs-dev
-- exec:
- osd.0:
- - ceph osd set sortbitwise
- - ceph osd set require_jewel_osds
- - for p in `ceph osd pool ls` ; do ceph osd pool set $p use_gmt_hitset true ;
- done
-- print: '**** done install.upgrade client.0 to jewel'
-upgrade-sequence-h-j:
- sequential:
- - ceph.restart:
- daemons:
- - osd.0
- - osd.1
- - osd.2
- - osd.3
- - osd.4
- - osd.5
- wait-for-healthy: false
- wait-for-osds-up: true
- - ceph.restart:
- daemons:
- - mon.a
- - mon.b
- - mon.c
- wait-for-healthy: false
- wait-for-osds-up: true
- - print: '**** done ceph.restart do not wait for healthy'
- - exec:
- mon.a:
- - sleep 300
- - ceph osd set require_jewel_osds
- - ceph.healthy: null
- - print: '**** done ceph.healthy'
-workload-h-j:
- full_sequential:
- - workunit:
- branch: hammer
- clients:
- client.0:
- - cls
- - print: "**** done cls 2-workload"
- - workunit:
- branch: hammer
- clients:
- client.0:
- - rbd/test_librbd.sh
- - print: "**** done rbd/test_librbd.sh 2-workload"
+++ /dev/null
-../../jewel-x/stress-split/2-partial-upgrade/
\ No newline at end of file
+++ /dev/null
-../../jewel-x/stress-split/3-thrash/
\ No newline at end of file
+++ /dev/null
-../../jewel-x/stress-split/4-workload
\ No newline at end of file
+++ /dev/null
-../../jewel-x/stress-split/5-finish-upgrade.yaml
\ No newline at end of file
+++ /dev/null
-.qa/releases/luminous.yaml
\ No newline at end of file
+++ /dev/null
-../../jewel-x/stress-split/7-final-workload/
\ No newline at end of file
+++ /dev/null
-.qa/distros/supported
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- mon:
- mon warn on legacy crush tunables: false
- log-whitelist:
- - but it is still running
- - wrongly marked me down
-roles:
-- - mon.a
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
-- - client.0
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
- branch: hammer
- exclude_packages:
- - ceph-mgr
- - libcephfs2
- - libcephfs-devel
- - libcephfs-dev
-- print: '**** done hammer'
-- ceph:
- fs: xfs
- skip_mgr_daemons: true
- add_osds_to_crush: true
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-tasks:
-- exec:
- client.0:
- - ceph osd erasure-code-profile set t-profile crush-failure-domain=osd k=2 m=1
- - ceph osd pool create base-pool 4 4 erasure t-profile
- - ceph osd pool application enable base-pool rados
+++ /dev/null
-tasks:
-- exec:
- client.0:
- - ceph osd pool create base-pool 4
- - ceph osd pool application enable base-pool rados
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - must scrub before tier agent can activate
-tasks:
-- exec:
- client.0:
- - ceph osd pool create cache-pool 4
- - ceph osd tier add base-pool cache-pool
- - ceph osd tier cache-mode cache-pool writeback
- - ceph osd tier set-overlay base-pool cache-pool
- - ceph osd pool set cache-pool hit_set_type bloom
- - ceph osd pool set cache-pool hit_set_count 8
- - ceph osd pool set cache-pool hit_set_period 5
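In the writeback tier configured above, client I/O is redirected to cache-pool and the tiering agent flushes and evicts objects back to base-pool based on the bloom hit sets (8 sets, 5-second period here). For completeness, a hedged sketch of tearing such a tier back down, using commands believed to exist in jewel-era ceph:

    rados -p cache-pool cache-flush-evict-all   # drain dirty objects to base-pool
    ceph osd tier remove-overlay base-pool      # stop redirecting client I/O
    ceph osd tier remove base-pool cache-pool   # detach the cache tier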
+++ /dev/null
-tasks:
-- parallel:
- - workload
- - upgrade-sequence
-- print: "**** done parallel"
-
-workload:
- sequential:
- - rados:
- clients: [client.0]
- pools: [base-pool]
- ops: 4000
- objects: 500
- op_weights:
- read: 100
- write: 100
- delete: 50
- copy_from: 50
- cache_flush: 50
- cache_try_flush: 50
- cache_evict: 50
- - print: "**** done rados"
-
-upgrade-sequence:
- sequential:
- - install.upgrade:
- exclude_packages:
- - ceph-mgr
- - libcephfs2
- - libcephfs-devel
- - libcephfs-dev
- osd.0:
- branch: jewel
- osd.2:
- branch: jewel
- - print: "*** done install.upgrade osd.0 and osd.2"
- - ceph.restart:
- daemons: [osd.0, osd.1, osd.2, osd.3]
- wait-for-healthy: false
- wait-for-osds-up: true
- - ceph.restart:
- daemons: [mon.a, mon.b, mon.c]
- wait-for-healthy: false
- wait-for-osds-up: true
- - print: "**** done ceph.restart do not wait for healthy"
- - exec:
- mon.a:
- - sleep 300 # http://tracker.ceph.com/issues/17808
- - ceph osd set sortbitwise
- - ceph osd set require_jewel_osds
- - ceph.healthy:
- - print: "**** done ceph.healthy"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-.qa/distros/supported/centos_latest.yaml
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- Set up a 4-node ceph cluster using ceph-deploy, with latest
- stable jewel as the initial release. Upgrade to stable luminous,
- setting up mgr nodes after the upgrade, and check that the cluster
- reaches a healthy state; then run a kernel tar/untar task. Finally,
- upgrade to the master dev branch, wait for a healthy state, and run
- the systemd and mixed-load gen tasks. This test will detect ceph
- upgrade and systemd issues.
-overrides:
- ceph-deploy:
- fs: xfs
- conf:
- global:
- mon pg warn min per osd: 2
- osd:
- osd pool default size: 2
- osd objectstore: filestore
- osd sloppy crc: true
- client:
- rbd default features: 5
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
- - osd.2
- - mgr.x
-- - mon.b
- - mgr.y
-- - mon.c
- - osd.3
- - osd.4
- - osd.5
-- - osd.6
- - osd.7
- - osd.8
- - client.0
-tasks:
-- ssh-keys:
-- ceph-deploy:
- branch:
- stable: jewel
- skip-mgr: True
-- ceph-deploy.upgrade:
- branch:
- stable: luminous
- setup-mgr-node: True
- check-for-healthy: True
- roles:
- - mon.a
- - mon.b
- - mon.c
-- workunit:
- clients:
- all:
- - kernel_untar_build.sh
-- ceph-deploy.upgrade:
- setup-mgr-node: False
- check-for-healthy: True
- roles:
- - mon.a
- - mon.b
- - mon.c
-- systemd:
-- workunit:
- clients:
- all:
- - rados/load-gen-mix.sh
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 30 # GB
+++ /dev/null
-meta:
-- desc: |
- Run ceph on two nodes,
- with clients 0-3 on a separate third node.
- Use xfs beneath the osds.
- CephFS tests run on clients 2 and 3.
-roles:
-- - mon.a
- - mds.a
- - mgr.x
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
-- - client.0
- - client.1
- - client.2
- - client.3
-overrides:
- ceph:
- log-whitelist:
- - scrub mismatch
- - ScrubResult
- - wrongly marked
- - \(MDS_FAILED\)
- - \(OBJECT_
- - is unresponsive
- conf:
- fs: xfs
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- install ceph/jewel latest
- run workload and upgrade-sequence in parallel
- upgrade the client node
-tasks:
-- install:
- branch: jewel
- exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
-- print: "**** done installing jewel"
-- ceph:
- skip_mgr_daemons: true
- add_osds_to_crush: true
- log-whitelist:
- - overall HEALTH_
- - \(FS_
- - \(MDS_
- - \(OSD_
- - \(MON_DOWN\)
- - \(CACHE_POOL_
- - \(POOL_
- - \(MGR_DOWN\)
- - \(PG_
- - Monitor daemon marked osd
- - Behind on trimming
- conf:
- global:
- mon warn on pool no app: false
-- print: "**** done ceph"
-- install.upgrade:
- mon.a:
- mon.b:
-- print: "**** done install.upgrade mon.a and mon.b"
-- parallel:
- - workload
- - upgrade-sequence
-- print: "**** done parallel"
-- install.upgrade:
- client.0:
-- print: "**** done install.upgrade on client.0"
+++ /dev/null
-# do not require luminous osds at mkfs time; only set flag at
-# the end of the test run, then do a final scrub (to convert any
-# legacy snapsets), and verify we are healthy.
-tasks:
-- full_sequential_finally:
- - ceph.osd_scrub_pgs:
- cluster: ceph
- - exec:
- mon.a:
- - ceph pg dump -f json-pretty
- - "ceph pg dump sum -f json-pretty | grep num_legacy_snapsets | head -1 | grep ': 0'"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- run a cephfs stress test
- mount ceph-fuse on client.2 before running workunit
-workload:
- full_sequential:
- - sequential:
- - ceph-fuse:
- - print: "**** done ceph-fuse 2-workload"
- - workunit:
- clients:
- client.2:
- - suites/blogbench.sh
- - print: "**** done suites/blogbench.sh 2-workload"
+++ /dev/null
-overrides:
- ceph:
- log-whitelist:
- - must scrub before tier agent can activate
-tasks:
-workload:
- full_sequential:
- - sequential:
- - exec:
- client.0:
- - sudo ceph osd pool create base 4
- - sudo ceph osd pool create cache 4
- - sudo ceph osd tier add base cache
- - sudo ceph osd tier cache-mode cache writeback
- - sudo ceph osd tier set-overlay base cache
- - sudo ceph osd pool set cache hit_set_type bloom
- - sudo ceph osd pool set cache hit_set_count 8
- - sudo ceph osd pool set cache hit_set_period 3600
- - sudo ceph osd pool set cache target_max_objects 250
- - sudo ceph osd pool set cache min_read_recency_for_promote 0
- - sudo ceph osd pool set cache min_write_recency_for_promote 0
- - rados:
- clients: [client.0]
- pools: [base]
- ops: 4000
- objects: 500
- pool_snaps: true
- op_weights:
- read: 100
- write: 100
- delete: 50
- copy_from: 50
- cache_flush: 50
- cache_try_flush: 50
- cache_evict: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
-openstack:
- - machine:
- ram: 15000 # MB
+++ /dev/null
-meta:
-- desc: |
- run randomized correctness test for rados operations
- on an erasure-coded pool
-workload:
- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- ec_pool: true
- write_append_excl: false
- op_weights:
- read: 100
- write: 0
- append: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
- setattr: 25
- rmattr: 25
- - print: "**** done rados ec task"
+++ /dev/null
-meta:
-- desc: |
- object class functional tests
-workload:
- full_sequential:
- - workunit:
- branch: jewel
- clients:
- client.0:
- - cls
- - print: "**** done cls 2-workload"
+++ /dev/null
-meta:
-- desc: |
- librbd C and C++ api tests
-workload:
- full_sequential:
- - workunit:
- branch: jewel
- clients:
- client.0:
- - rbd/test_librbd.sh
- - print: "**** done rbd/test_librbd.sh 2-workload"
+++ /dev/null
-meta:
-- desc: |
- librbd python api tests
-workload:
- full_sequential:
- - workunit:
- branch: jewel
- clients:
- client.0:
- - rbd/test_librbd_python.sh
- - print: "**** done rbd/test_librbd_python.sh 2-workload"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- upgrade the ceph cluster
-upgrade-sequence:
- sequential:
- - ceph.restart:
- daemons: [mon.a, mon.b, mon.c]
- - ceph.restart:
- daemons: [mds.a, osd.0, osd.1, osd.2, osd.3]
- wait-for-healthy: false
- wait-for-osds-up: true
- - print: "**** done ceph.restart all"
+++ /dev/null
-meta:
-- desc: |
- upgrade the ceph cluster,
- upgrade in two steps
- step one ordering: mon.a, osd.0, osd.1, mds.a
- step two ordering: mon.b, mon.c, osd.2, osd.3
- ceph expected to be in a healthy state after each step
-upgrade-sequence:
- sequential:
- - ceph.restart:
- daemons: [mon.a]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [mon.b, mon.c]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [osd.0, osd.1]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - print: "**** running mixed versions of osds and mons"
- - exec:
- mon.b:
- - sudo ceph osd crush tunables jewel
- - print: "**** done ceph osd crush tunables jewel"
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [osd.2, osd.3]
- wait-for-healthy: false
- wait-for-osds-up: true
+++ /dev/null
-.qa/releases/luminous.yaml
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- run a cephfs stress test
- mount ceph-fuse on client.3 before running workunit
-tasks:
-- sequential:
- - ceph-fuse:
- - print: "**** done ceph-fuse 5-final-workload"
- - workunit:
- clients:
- client.3:
- - suites/blogbench.sh
- - print: "**** done suites/blogbench.sh 5-final-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool with snapshots
-tasks:
- - rados:
- clients: [client.1]
- ops: 4000
- objects: 50
- write_append_excl: false
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- - print: "**** done rados 4-final-workload"
+++ /dev/null
-meta:
-- desc: |
- generate read/write load with rados objects ranging from 1 byte to 1MB
-tasks:
- - workunit:
- clients:
- client.1:
- - rados/load-gen-mix.sh
- - print: "**** done rados/load-gen-mix.sh 4-final-workload"
+++ /dev/null
-meta:
-- desc: |
- librados C and C++ api tests
-overrides:
- ceph:
- log-whitelist:
- - reached quota
-tasks:
- - mon_thrash:
- revive_delay: 20
- thrash_delay: 1
- - print: "**** done mon_thrash 4-final-workload"
- - workunit:
- branch: jewel
- clients:
- client.1:
- - rados/test-upgrade-v11.0.0.sh
- - print: "**** done rados/test-upgrade-v11.0.0.sh 4-final-workload"
+++ /dev/null
-meta:
-- desc: |
- rbd object class functional tests
-tasks:
- - workunit:
- clients:
- client.1:
- - cls/test_cls_rbd.sh
- - print: "**** done cls/test_cls_rbd.sh 4-final-workload"
+++ /dev/null
-meta:
-- desc: |
- run basic import/export cli tests for rbd
-tasks:
- - workunit:
- clients:
- client.1:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
- - print: "**** done rbd/import_export.sh 4-final-workload"
+++ /dev/null
-meta:
-- desc: |
- swift api tests for rgw
-overrides:
- rgw:
- frontend: civetweb
-tasks:
- - rgw: [client.1]
- - print: "**** done rgw 4-final-workload"
- - swift:
- client.1:
- rgw_server: client.1
- - print: "**** done swift 4-final-workload"
+++ /dev/null
-.qa/distros/supported
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../stress-split/0-cluster/
\ No newline at end of file
+++ /dev/null
-../stress-split/1-jewel-install/
\ No newline at end of file
+++ /dev/null
-../parallel/1.5-final-scrub.yaml
\ No newline at end of file
+++ /dev/null
-../stress-split/2-partial-upgrade/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- randomly kill and revive osd
- small chance to increase the number of pgs
-overrides:
- ceph:
- log-whitelist:
- - but it is still running
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- parallel:
- - stress-tasks
-stress-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
- min_in: 4
- chance_thrash_cluster_full: 0
- chance_thrash_pg_upmap: 0
- chance_thrash_pg_upmap_items: 0
- chance_force_recovery: 0
-- print: "**** done thrashosds 3-thrash"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on an erasure coded pool
-stress-tasks:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- ec_pool: true
- write_append_excl: false
- op_weights:
- read: 100
- write: 0
- append: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
- setattr: 25
- rmattr: 25
- - print: "**** done rados ec task"
+++ /dev/null
-../stress-split/5-finish-upgrade.yaml
\ No newline at end of file
+++ /dev/null
-../stress-split/6-luminous.yaml
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-#
-# k=3 implies a stripe_width of 1376*3 = 4128, which is different from
-# the default value of 4096. It is also not a multiple of 1024*1024 and
-# creates situations where rounding rules during recovery become
-# necessary.
-#
-meta:
-- desc: |
- randomized correctness test for rados operations on an erasure coded pool
- using the jerasure plugin with k=3 and m=1
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- ec_pool: true
- write_append_excl: false
- erasure_code_profile:
- name: jerasure31profile
- plugin: jerasure
- k: 3
- m: 1
- technique: reed_sol_van
- crush-failure-domain: osd
- op_weights:
- read: 100
- write: 0
- append: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
- setattr: 25
- rmattr: 25
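The stripe_width arithmetic from the header comment can be reproduced directly: a 4096-byte stripe unit split across k=3 data chunks is rounded up to the plugin's chunk alignment (assumed here to be 32 bytes), giving 1376 bytes per chunk and 1376*3 = 4128:

    echo $(( (4096 / 3 / 32 + 1) * 32 ))   # per-chunk size: 1376 (4096/3 is not 32-aligned)
    echo $(( 1376 * 3 ))                   # stripe_width: 4128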
+++ /dev/null
-.qa/distros/supported
\ No newline at end of file
+++ /dev/null
-.qa/tasks/thrashosds-health.yaml
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-openstack:
- - machine:
- disk: 100 # GB
- - volumes: # attached to each instance
- count: 3
- size: 30 # GB
+++ /dev/null
-meta:
-- desc: |
- Run ceph on two nodes,
- with a separate client-only node.
- Use xfs beneath the osds.
-overrides:
- ceph:
- fs: xfs
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
-- - osd.3
- - osd.4
- - osd.5
-- - client.0
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: install ceph/jewel latest
-tasks:
-- install:
- branch: jewel
- exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
-- print: "**** done install jewel"
-- ceph:
- skip_mgr_daemons: true
- add_osds_to_crush: true
- log-whitelist:
- - required past_interval bounds are empty
-- print: "**** done ceph"
+++ /dev/null
-../parallel/1.5-final-scrub.yaml
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- install upgrade ceph/-x on one node only
- 1st half
- restart: osd.0,1,2
-tasks:
-- install.upgrade:
- osd.0:
-- print: "**** done install.upgrade osd.0"
-- ceph.restart:
- daemons: [mon.a,mon.b,mon.c,osd.0, osd.1, osd.2]
-- print: "**** done ceph.restart 1st half"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- randomly kill and revive osd
- small chance to increase the number of pgs
-overrides:
- ceph:
- log-whitelist:
- - but it is still running
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- parallel:
- - stress-tasks
-stress-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
- chance_thrash_cluster_full: 0
- chance_thrash_pg_upmap: 0
- chance_thrash_pg_upmap_items: 0
- disable_objectstore_tool_tests: true
- chance_force_recovery: 0
-- print: "**** done thrashosds 3-thrash"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- run randomized correctness test for rados operations
- generate write load with rados bench
-stress-tasks:
-- full_sequential:
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
-- print: "**** done radosbench 7-workload"
+++ /dev/null
-meta:
-- desc: |
- run basic cls tests for rbd
-stress-tasks:
-- workunit:
- branch: jewel
- clients:
- client.0:
- - cls/test_cls_rbd.sh
-- print: "**** done cls/test_cls_rbd.sh 5-workload"
+++ /dev/null
-meta:
-- desc: |
- run basic import/export cli tests for rbd
-stress-tasks:
-- workunit:
- branch: jewel
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
-- print: "**** done rbd/import_export.sh 5-workload"
+++ /dev/null
-meta:
-- desc: |
- librbd C and C++ api tests
-stress-tasks:
-- workunit:
- branch: jewel
- clients:
- client.0:
- - rbd/test_librbd.sh
-- print: "**** done rbd/test_librbd.sh 7-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool,
- using only reads, writes, and deletes
-stress-tasks:
-- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- write_append_excl: false
- op_weights:
- read: 45
- write: 45
- delete: 10
-- print: "**** done rados/readwrite 5-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool with snapshot operations
-stress-tasks:
-- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- write_append_excl: false
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
-- print: "**** done rados/snaps-few-objects 5-workload"
+++ /dev/null
-tasks:
-- install.upgrade:
- osd.3:
- client.0:
-- ceph.restart:
- daemons: [osd.3, osd.4, osd.5]
- wait-for-healthy: false
- wait-for-osds-up: true
-
+++ /dev/null
-.qa/releases/luminous.yaml
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- librbd python api tests
-tasks:
-- workunit:
- clients:
- client.0:
- - rbd/test_librbd_python.sh
-- print: "**** done rbd/test_librbd_python.sh 9-workload"
+++ /dev/null
-meta:
-- desc: |
- swift api tests for rgw
-tasks:
-- rgw:
- client.0:
-- print: "**** done rgw 9-workload"
-- swift:
- client.0:
- rgw_server: client.0
-- print: "**** done swift 9-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool with snapshot operations
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- write_append_excl: false
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
+++ /dev/null
-.qa/distros/supported
\ No newline at end of file
+++ /dev/null
-.qa/tasks/thrashosds-health.yaml
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- Set up a 4-node ceph cluster using ceph-deploy, with latest
- stable kraken as the initial release. Upgrade to stable luminous,
- setting up mgr nodes after the upgrade, and check that the cluster
- reaches a healthy state; then run a kernel tar/untar task. Finally,
- upgrade to the current master branch, wait for a healthy state, and
- run the systemd and mixed-load gen tasks. This test will detect ceph
- upgrade and systemd issues.
-overrides:
- ceph-deploy:
- fs: xfs
- conf:
- global:
- mon pg warn min per osd: 2
- osd:
- osd pool default size: 2
- osd objectstore: filestore
- osd sloppy crc: true
- client:
- rbd default features: 5
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
- - osd.2
- - mgr.x
-- - mon.b
- - mgr.y
-- - mon.c
- - osd.3
- - osd.4
- - osd.5
-- - osd.6
- - osd.7
- - osd.8
- - client.0
-tasks:
-- ssh-keys:
-- ceph-deploy:
- branch:
- stable: kraken
- skip-mgr: True
-- ceph-deploy.upgrade:
- branch:
- stable: luminous
- setup-mgr-node: True
- check-for-healthy: True
- roles:
- - mon.a
- - mon.b
- - mon.c
-- workunit:
- clients:
- all:
- - kernel_untar_build.sh
-- ceph-deploy.upgrade:
- setup-mgr-node: False
- check-for-healthy: True
- roles:
- - mon.a
- - mon.b
- - mon.c
-- systemd:
-- workunit:
- clients:
- all:
- - rados/load-gen-mix.sh
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-openstack:
- - volumes: # attached to each instance
- count: 3
- size: 30 # GB
+++ /dev/null
-meta:
-- desc: |
- Run ceph on two nodes,
- with clients 0-3 on a separate third node.
- Use xfs beneath the osds.
- CephFS tests run on clients 2 and 3.
-roles:
-- - mon.a
- - mgr.x
- - mds.a
- - osd.0
- - osd.1
-- - mon.b
- - mon.c
- - osd.2
- - osd.3
-- - client.0
- - client.1
- - client.2
- - client.3
-overrides:
- ceph:
- log-whitelist:
- - scrub mismatch
- - ScrubResult
- - wrongly marked
- - \(POOL_APP_NOT_ENABLED\)
- - overall HEALTH_
- conf:
- global:
- enable experimental unrecoverable data corrupting features: "*"
- fs: xfs
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- install ceph/kraken latest
- run workload and upgrade-sequence in parallel
- upgrade the client node
-tasks:
-- install:
- branch: kraken
-- print: "**** done installing kraken"
-- ceph:
- log-whitelist:
- - overall HEALTH_
- - \(FS_
- - \(MDS_
- - \(OSD_
- - \(MON_DOWN\)
- - \(CACHE_POOL_
- - \(POOL_
- - \(MGR_DOWN\)
- - \(PG_
- - \(SMALLER_PGP_NUM\)
- - Monitor daemon marked osd
- - Behind on trimming
- - Manager daemon
- conf:
- global:
- mon warn on pool no app: false
-- print: "**** done ceph"
-- install.upgrade:
- mon.a:
- mon.b:
-- print: "**** done install.upgrade both hosts"
-- parallel:
- - workload
- - upgrade-sequence
-- print: "**** done parallel"
-- install.upgrade:
- client.0:
-- print: "**** done install.upgrade on client.0"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- run a cephfs stress test
- mount ceph-fuse on client.2 before running workunit
-workload:
- full_sequential:
- - sequential:
- - ceph-fuse:
- - print: "**** done ceph-fuse 2-workload"
- - workunit:
- clients:
- client.2:
- - suites/blogbench.sh
- - print: "**** done suites/blogbench.sh 2-workload"
+++ /dev/null
-meta:
-- desc: |
- run randomized correctness test for rados operations
- on an erasure-coded pool
-workload:
- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- ec_pool: true
- write_append_excl: false
- op_weights:
- read: 100
- write: 0
- append: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
- setattr: 25
- rmattr: 25
- - print: "**** done rados ec task"
+++ /dev/null
-meta:
-- desc: |
- object class functional tests
-workload:
- full_sequential:
- - workunit:
- branch: kraken
- clients:
- client.0:
- - cls
- - print: "**** done cls 2-workload"
+++ /dev/null
-meta:
-- desc: |
- generate read/write load with rados objects ranging from 1MB to 25MB
-workload:
- full_sequential:
- - workunit:
- branch: kraken
- clients:
- client.0:
- - rados/load-gen-big.sh
- - print: "**** done rados/load-gen-big.sh 2-workload"
+++ /dev/null
-meta:
-- desc: |
- librbd C and C++ api tests
-workload:
- full_sequential:
- - workunit:
- branch: kraken
- clients:
- client.0:
- - rbd/test_librbd.sh
- - print: "**** done rbd/test_librbd.sh 2-workload"
+++ /dev/null
-meta:
-- desc: |
- librbd python api tests
-workload:
- full_sequential:
- - workunit:
- branch: kraken
- clients:
- client.0:
- - rbd/test_librbd_python.sh
- - print: "**** done rbd/test_librbd_python.sh 2-workload"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- upgrade the ceph cluster
-upgrade-sequence:
- sequential:
- - ceph.restart:
- daemons: [mon.a, mon.b, mon.c, mgr.x]
- - ceph.restart:
- daemons: [osd.0, osd.1, osd.2, osd.3]
- wait-for-healthy: false
- wait-for-osds-up: true
- - ceph.restart:
- daemons: [mds.a]
- wait-for-healthy: false
- wait-for-osds-up: true
- - print: "**** done ceph.restart all"
+++ /dev/null
-meta:
-- desc: |
- upgrade the ceph cluster,
- upgrade in two steps
- step one ordering: mon.a, osd.0, osd.1, mds.a
- step two ordering: mon.b, mon.c, osd.2, osd.3
- ceph expected to be in a healthy state after each step
-upgrade-sequence:
- sequential:
- - ceph.restart:
- daemons: [mon.a]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [mon.b, mon.c, mgr.x]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [osd.0, osd.1]
- wait-for-healthy: true
- - sleep:
- duration: 60
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - sleep:
- duration: 60
- - ceph.restart:
- daemons: [osd.2, osd.3]
- wait-for-healthy: false
- wait-for-osds-up: true
- - sleep:
- duration: 60
+++ /dev/null
-.qa/releases/luminous-with-mgr.yaml
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- run a cephfs stress test
- mount ceph-fuse on client.3 before running workunit
-tasks:
-- sequential:
- - ceph-fuse:
- - print: "**** done ceph-fuse 5-final-workload"
- - workunit:
- clients:
- client.3:
- - suites/blogbench.sh
- - print: "**** done suites/blogbench.sh 5-final-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool with snapshots
-tasks:
- - rados:
- clients: [client.1]
- ops: 4000
- objects: 50
- write_append_excl: false
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- - print: "**** done rados 4-final-workload"
+++ /dev/null
-meta:
-- desc: |
- generate read/write load with rados objects ranging from 1 byte to 1MB
-tasks:
- - workunit:
- clients:
- client.1:
- - rados/load-gen-mix.sh
- - print: "**** done rados/load-gen-mix.sh 4-final-workload"
+++ /dev/null
-meta:
-- desc: |
- librados C and C++ api tests
-overrides:
- ceph:
- log-whitelist:
- - reached quota
-tasks:
- - mon_thrash:
- revive_delay: 20
- thrash_delay: 1
- - print: "**** done mon_thrash 4-final-workload"
- - workunit:
- branch: kraken
- clients:
- client.1:
- - rados/test.sh
- - print: "**** done rados/test.sh 4-final-workload"
+++ /dev/null
-meta:
-- desc: |
- rbd object class functional tests
-tasks:
- - workunit:
- clients:
- client.1:
- - cls/test_cls_rbd.sh
- - print: "**** done cls/test_cls_rbd.sh 4-final-workload"
+++ /dev/null
-meta:
-- desc: |
- run basic import/export cli tests for rbd
-tasks:
- - workunit:
- clients:
- client.1:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
- - print: "**** done rbd/import_export.sh 4-final-workload"
+++ /dev/null
-meta:
-- desc: |
- swift api tests for rgw
-overrides:
- rgw:
- frontend: civetweb
-tasks:
- - rgw: [client.1]
- - print: "**** done rgw 4-final-workload"
- - swift:
- client.1:
- rgw_server: client.1
- - print: "**** done swift 4-final-workload"
+++ /dev/null
-.qa/distros/supported
\ No newline at end of file
+++ /dev/null
-../stress-split/objectstore/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../stress-split/0-cluster/
\ No newline at end of file
+++ /dev/null
-../stress-split/1-kraken-install/
\ No newline at end of file
+++ /dev/null
-../stress-split/2-partial-upgrade/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- randomly kill and revive osd
- small chance to increase the number of pgs
-overrides:
- ceph:
- log-whitelist:
- - but it is still running
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- parallel:
- - stress-tasks
-stress-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
- min_in: 4
- chance_thrash_cluster_full: 0
- chance_thrash_pg_upmap: 0
- chance_thrash_pg_upmap_items: 0
- chance_force_recovery: 0
-- print: "**** done thrashosds 3-thrash"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on an erasure coded pool
-stress-tasks:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- ec_pool: true
- write_append_excl: false
- op_weights:
- read: 100
- write: 0
- append: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
- setattr: 25
- rmattr: 25
- - print: "**** done rados ec task"
+++ /dev/null
-../stress-split/5-finish-upgrade.yaml
\ No newline at end of file
+++ /dev/null
-../stress-split/6-luminous-with-mgr.yaml
\ No newline at end of file
+++ /dev/null
-#
-# k=3 implies a stripe_width of 1376*3 = 4128, which is different from
-# the default value of 4096. It is also not a multiple of 1024*1024 and
-# creates situations where rounding rules during recovery become
-# necessary.
-#
-meta:
-- desc: |
- randomized correctness test for rados operations on an erasure coded pool
- using the jerasure plugin with k=3 and m=1
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- ec_pool: true
- write_append_excl: false
- erasure_code_profile:
- name: jerasure31profile
- plugin: jerasure
- k: 3
- m: 1
- technique: reed_sol_van
- crush-failure-domain: osd
- op_weights:
- read: 100
- write: 0
- append: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
- setattr: 25
- rmattr: 25
+++ /dev/null
-.qa/distros/supported
\ No newline at end of file
+++ /dev/null
-../stress-split/objectstore/
\ No newline at end of file
+++ /dev/null
-.qa/tasks/thrashosds-health.yaml
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-openstack:
- - machine:
- disk: 100 # GB
- - volumes: # attached to each instance
- count: 3
- size: 30 # GB
+++ /dev/null
-meta:
-- desc: |
- Run ceph on two nodes,
- with a separate client-only node.
- Use xfs beneath the osds.
-overrides:
- ceph:
- fs: xfs
- log-whitelist:
- - overall HEALTH_
- - \(MON_DOWN\)
- - \(MGR_DOWN\)
- conf:
- global:
- enable experimental unrecoverable data corrupting features: "*"
-roles:
-- - mon.a
- - mon.b
- - mon.c
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
-- - osd.3
- - osd.4
- - osd.5
-- - client.0
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: install ceph/kraken latest
-tasks:
-- install:
- branch: kraken
-- print: "**** done install kraken"
-- ceph:
-- print: "**** done ceph"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- install upgrade ceph/-x on one node only
- 1st half
- restart: osd.0,1,2
-tasks:
-- install.upgrade:
- osd.0:
-- print: "**** done install.upgrade osd.0"
-- ceph.restart:
- daemons: [mon.a,mon.b,mon.c,mgr.x,osd.0,osd.1,osd.2]
-- print: "**** done ceph.restart 1st half"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- randomly kill and revive osd
- small chance to increase the number of pgs
-overrides:
- ceph:
- log-whitelist:
- - but it is still running
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- parallel:
- - stress-tasks
-stress-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
- chance_thrash_cluster_full: 0
- chance_thrash_pg_upmap: 0
- chance_thrash_pg_upmap_items: 0
- disable_objectstore_tool_tests: true
- chance_force_recovery: 0
-- print: "**** done thrashosds 3-thrash"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- run randomized correctness test for rados operations
- generate write load with rados bench
-stress-tasks:
-- full_sequential:
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
- - radosbench:
- clients: [client.0]
- time: 150
-- print: "**** done radosbench 7-workload"
+++ /dev/null
-meta:
-- desc: |
- run basic cls tests for rbd
-stress-tasks:
-- workunit:
- branch: kraken
- clients:
- client.0:
- - cls/test_cls_rbd.sh
-- print: "**** done cls/test_cls_rbd.sh 5-workload"
+++ /dev/null
-meta:
-- desc: |
- run basic import/export cli tests for rbd
-stress-tasks:
-- workunit:
- branch: kraken
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
-- print: "**** done rbd/import_export.sh 5-workload"
+++ /dev/null
-meta:
-- desc: |
- librbd C and C++ api tests
-stress-tasks:
-- workunit:
- branch: kraken
- clients:
- client.0:
- - rbd/test_librbd.sh
-- print: "**** done rbd/test_librbd.sh 7-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool,
- using only reads, writes, and deletes
-stress-tasks:
-- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- write_append_excl: false
- op_weights:
- read: 45
- write: 45
- delete: 10
-- print: "**** done rados/readwrite 5-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool with snapshot operations
-stress-tasks:
-- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- write_append_excl: false
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
-- print: "**** done rados/snaps-few-objects 5-workload"
+++ /dev/null
-tasks:
-- install.upgrade:
- osd.3:
- client.0:
-- ceph.restart:
- daemons: [osd.3, osd.4, osd.5]
- wait-for-healthy: false
- wait-for-osds-up: true
-
+++ /dev/null
-.qa/releases/luminous-with-mgr.yaml
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- librbd python api tests
-tasks:
-- workunit:
- branch: kraken
- clients:
- client.0:
- - rbd/test_librbd_python.sh
-- print: "**** done rbd/test_librbd_python.sh 9-workload"
+++ /dev/null
-meta:
-- desc: |
- swift api tests for rgw
-tasks:
-- rgw:
- client.0:
-- print: "**** done rgw 9-workload"
-- swift:
- client.0:
- rgw_server: client.0
-- print: "**** done swift 9-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool with snapshot operations
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- write_append_excl: false
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
+++ /dev/null
-.qa/distros/supported
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-.qa/objectstore/bluestore.yaml
\ No newline at end of file
+++ /dev/null
-.qa/objectstore/filestore-xfs.yaml
\ No newline at end of file
+++ /dev/null
-.qa/tasks/thrashosds-health.yaml
\ No newline at end of file