--- /dev/null
+meta:
+- desc: |
+ Run ceph on two nodes, using one of them as a client,
+ with a separate client-only node.
+ Use xfs beneath the osds.
+ install ceph/mimic v13.2.0 point version
+ run workload and upgrade-sequence in parallel
+ install ceph/mimic v13.2.1 point version
+ run workload and upgrade-sequence in parallel
+ install ceph/mimic v13.2.2 point version
+ run workload and upgrade-sequence in parallel
+ install ceph/mimic v13.2.x point version
+ (every point release should be tested)
+ run workload and upgrade-sequence in parallel
+ install ceph/mimic latest version
+ run workload and upgrade-sequence in parallel
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+ - scrub
+ - osd_map_max_advance
+ - wrongly marked
+ - FS_DEGRADED
+ - POOL_APP_NOT_ENABLED
+ - CACHE_POOL_NO_HIT_SET
+ - POOL_FULL
+ - SMALLER_PG
+ - pool\(s\) full
+ - OSD_DOWN
+ - missing hit_sets
+ - CACHE_POOL_NEAR_FULL
+ - PG_AVAILABILITY
+ - PG_DEGRADED
+ - application not enabled
+ - cache pools at or near target size
+ - filesystem is degraded
+ - OBJECT_MISPLACED
+ fs: xfs
+ conf:
+ global:
+ mon_warn_on_pool_no_app: false
+ mon:
+ mon debug unsafe allow tier with nonempty snaps: true
+ osd:
+ osd map max advance: 1000
+ osd_class_load_list: "cephfs hello journal lock log numops rbd refcount
+ replica_log rgw sdk statelog timeindex user version otp"
+ osd_class_default_list: "cephfs hello journal lock log numops rbd refcount
+ replica_log rgw sdk statelog timeindex user version otp"
+ client:
+ rgw_crypt_require_ssl: false
+ rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
+roles:
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - mgr.x
+- - mon.b
+ - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+- - client.1
+openstack:
+- volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
+tasks:
+- print: "**** mimic v13.2.0 about to install"
+- install:
+ tag: v13.2.0
+ # line below can be removed; it's left over from the jewel test
+ #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
+- print: "**** done v13.2.0 install"
+- ceph:
+ fs: xfs
+ add_osds_to_crush: true
+- print: "**** done ceph xfs"
+- sequential:
+ - workload
+- print: "**** done workload v13.2.0"
+
+####### upgrade to v13.2.1
+- install.upgrade:
+ #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+ mon.a:
+ tag: v13.2.1
+ mon.b:
+ tag: v13.2.1
+ # Note that client.a IS NOT upgraded at this point
+- parallel:
+ - workload_mimic
+ - upgrade-sequence_mimic
+- print: "**** done parallel mimic v13.2.1"
+- parallel:
+ - workload_mimic
+ - upgrade-sequence_mimic
+- print: "**** done workload v13.2.1"
+
+####### upgrade to v13.2.2
+- install.upgrade:
+ #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+ mon.a:
+ tag: v13.2.2
+ mon.b:
+ tag: v13.2.2
+ # Note that client.a IS NOT upgraded at this point
+- parallel:
+ - workload_mimic
+ - upgrade-sequence_mimic
+- print: "**** done parallel mimic v13.2.2"
+- parallel:
+ - workload_mimic
+ - upgrade-sequence_mimic
+- print: "**** done workload v13.2.2"
+
+
+####### upgrade to v13.2.x TO BE ADDED for future point releases
+### - install.upgrade:
+### #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+### mon.a:
+### tag: v13.2.x
+### mon.b:
+### tag: v13.2.x
+### # Note that client.a IS NOT upgraded at this point
+###- parallel:
+### - workload_mimic
+### - upgrade-sequence_mimic
+###- print: "**** done parallel mimic v13.2.x"
+###- parallel:
+### - workload_mimic
+### - upgrade-sequence_mimic
+###- print: "**** done workload v13.2.x"
+
+
+#### upgrade to latest mimic
+- install.upgrade:
+ #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+ mon.a:
+ mon.b:
+ # Note that client.a IS NOT upgraded at this point
+- parallel:
+ - workload_mimic
+ - upgrade-sequence_mimic
+- print: "**** done parallel mimic branch"
+
+#######################
+workload:
+ sequential:
+ - workunit:
+ clients:
+ client.0:
+ - suites/blogbench.sh
+workload_mimic:
+ full_sequential:
+ - workunit:
+ tag: v13.2.0
+ clients:
+ client.1:
+ - rados/test.sh
+ - cls
+ env:
+ CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.snapshots_namespaces'
+ - print: "**** done rados/test.sh & cls workload_mimic"
+ - sequential:
+ - rgw: [client.0]
+ - print: "**** done rgw workload_mimic"
+ - s3tests:
+ client.0:
+ force-branch: ceph-mimic
+ rgw_server: client.0
+ scan_for_encryption_keys: false
+ - print: "**** done s3tests workload_mimic"
+upgrade-sequence_mimic:
+ sequential:
+ - print: "**** done branch: mimic install.upgrade"
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - print: "**** done ceph.restart all mimic branch mds/osd/mon"
--- /dev/null
+../../../../distros/supported-all-distro/
\ No newline at end of file
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+openstack:
+ - machine:
+ disk: 100 # GB
+ - volumes: # attached to each instance
+ count: 4
+ size: 30 # GB
--- /dev/null
+meta:
+- desc: |
+ Run ceph on two nodes,
+ with a separate client-only node.
+ Use xfs beneath the osds.
+overrides:
+ ceph:
+ fs: xfs
+ log-whitelist:
+ - overall HEALTH_
+ - \(MON_DOWN\)
+ - \(MGR_DOWN\)
+ conf:
+ global:
+ enable experimental unrecoverable data corrupting features: "*"
+ mon:
+ mon warn on osd down out interval zero: false
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+- - osd.4
+ - osd.5
+ - osd.6
+ - osd.7
+- - client.0
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+meta:
+- desc: install ceph/mimic latest
+tasks:
+- install:
+ tag: v13.2.4
+ exclude_packages: ['librados3']
+ extra_packages: ['librados2']
+- print: "**** done install mimic latest"
+- ceph:
+- exec:
+ osd.0:
+ - ceph osd require-osd-release mimic
+ - ceph osd set-require-min-compat-client mimic
+- print: "**** done ceph"
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon warn on osd down out interval zero: false
--- /dev/null
+../../luminous-x/parallel/1.1-pg-log-overrides/
\ No newline at end of file
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ install upgrade ceph/-x on one node only
+ 1st half
+ restart : osd.0,1,2,3
+tasks:
+- install.upgrade:
+ osd.0:
+- print: "**** done install.upgrade osd.0"
+- ceph.restart:
+ daemons: [mon.a,mon.b,mon.c,mgr.x,osd.0,osd.1,osd.2,osd.3]
+ mon-health-to-clog: false
+- print: "**** done ceph.restart 1st half"
+- exec:
+ osd.0:
+ - ceph osd set pglog_hardlimit && exit 1 || true
+ - ceph osd dump --format=json-pretty | grep "flags"
+- print: "**** try to set pglog_hardlimit, should not succeed"
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ randomly kill and revive osd
+ small chance to increase the number of pgs
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+tasks:
+- parallel:
+ - stress-tasks
+stress-tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ chance_thrash_cluster_full: 0
+ chance_thrash_pg_upmap: 0
+ chance_thrash_pg_upmap_items: 0
+ disable_objectstore_tool_tests: true
+ chance_force_recovery: 0
+- print: "**** done thrashosds 3-thrash"
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ run randomized correctness test for rados operations
+ generate write load with rados bench
+stress-tasks:
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+- print: "**** done radosbench 4-workload"
--- /dev/null
+meta:
+- desc: |
+ run basic cls tests for rbd
+stress-tasks:
+- workunit:
+ branch: mimic
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
+- print: "**** done cls/test_cls_rbd.sh 4-workload"
--- /dev/null
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+stress-tasks:
+- workunit:
+ branch: mimic
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+- print: "**** done rbd/import_export.sh 4-workload"
--- /dev/null
+meta:
+- desc: |
+ librbd C and C++ api tests
+stress-tasks:
+- workunit:
+ branch: mimic
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+- print: "**** done rbd/test_librbd.sh 4-workload"
--- /dev/null
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool,
+ using only reads, writes, and deletes
+stress-tasks:
+- full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 45
+ write: 45
+ delete: 10
+- print: "**** done rados/readwrite 4-workload"
--- /dev/null
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+stress-tasks:
+- full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+- print: "**** done rados/snaps-few-objects 4-workload"
--- /dev/null
+tasks:
+- install.upgrade:
+ osd.4:
+ client.0:
+- ceph.restart:
+ daemons: [osd.4, osd.5, osd.6, osd.7]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+- exec:
+ osd.0:
+ - ceph osd set pglog_hardlimit
+ - ceph osd dump --format=json-pretty | grep "flags"
+- print: "**** try to set pglog_hardlimit again, should succeed"
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ librbd python api tests
+tasks:
+- workunit:
+ branch: mimic
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+- print: "**** done rbd/test_librbd_python.sh 7-workload"
--- /dev/null
+meta:
+- desc: |
+ swift api tests for rgw
+tasks:
+- rgw:
+ client.0:
+- print: "**** done rgw 9-workload"
+- swift:
+ client.0:
+ rgw_server: client.0
+- print: "**** done swift 9-workload"
--- /dev/null
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+../../../../../objectstore/bluestore.yaml
\ No newline at end of file
--- /dev/null
+../../../../../objectstore/filestore-xfs.yaml
\ No newline at end of file
--- /dev/null
+../../../../distros/supported-all-distro/
\ No newline at end of file
--- /dev/null
+../../../../tasks/thrashosds-health.yaml
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- Run ceph on two nodes, using one of them as a client,
- with a separate client-only node.
- Use xfs beneath the osds.
- install ceph/mimic v13.2.0 point version
- run workload and upgrade-sequence in parallel
- install ceph/mimic v13.2.1 point version
- run workload and upgrade-sequence in parallel
- install ceph/mimic v13.2.2 point version
- run workload and upgrade-sequence in parallel
- install ceph/mimic v13.2.x point version
- (every point reslease should be tested)
- run workload and upgrade-sequence in parallel
- install ceph/mimic latest version
- run workload and upgrade-sequence in parallel
-overrides:
- ceph:
- log-whitelist:
- - reached quota
- - scrub
- - osd_map_max_advance
- - wrongly marked
- - FS_DEGRADED
- - POOL_APP_NOT_ENABLED
- - CACHE_POOL_NO_HIT_SET
- - POOL_FULL
- - SMALLER_PG
- - pool\(s\) full
- - OSD_DOWN
- - missing hit_sets
- - CACHE_POOL_NEAR_FULL
- - PG_AVAILABILITY
- - PG_DEGRADED
- - application not enabled
- - cache pools at or near target size
- - filesystem is degraded
- - OBJECT_MISPLACED
- fs: xfs
- conf:
- global:
- mon_warn_on_pool_no_app: false
- mon:
- mon debug unsafe allow tier with nonempty snaps: true
- osd:
- osd map max advance: 1000
- osd_class_load_list: "cephfs hello journal lock log numops rbd refcount
- replica_log rgw sdk statelog timeindex user version otp"
- osd_class_default_list: "cephfs hello journal lock log numops rbd refcount
- replica_log rgw sdk statelog timeindex user version otp"
- client:
- rgw_crypt_require_ssl: false
- rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
-roles:
-- - mon.a
- - mds.a
- - osd.0
- - osd.1
- - osd.2
- - mgr.x
-- - mon.b
- - mon.c
- - osd.3
- - osd.4
- - osd.5
- - client.0
-- - client.1
-openstack:
-- volumes: # attached to each instance
- count: 3
- size: 30 # GB
-tasks:
-- print: "**** mimic v13.2.0 about to install"
-- install:
- tag: v13.2.0
- # line below can be removed its from jewel test
- #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
-- print: "**** done v13.2.0 install"
-- ceph:
- fs: xfs
- add_osds_to_crush: true
-- print: "**** done ceph xfs"
-- sequential:
- - workload
-- print: "**** done workload v13.2.0"
-
-####### upgrade to v13.2.1
-- install.upgrade:
- #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
- mon.a:
- tag: v13.2.1
- mon.b:
- tag: v13.2.1
- # Note that client.a IS NOT upgraded at this point
-- parallel:
- - workload_mimic
- - upgrade-sequence_mimic
-- print: "**** done parallel mimic v13.2.1"
-- parallel:
- - workload_mimic
- - upgrade-sequence_mimic
-- print: "**** done workload v13.2.1"
-
-####### upgrade to v13.2.2
-- install.upgrade:
- #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
- mon.a:
- tag: v13.2.2
- mon.b:
- tag: v13.2.2
- # Note that client.a IS NOT upgraded at this point
-- parallel:
- - workload_mimic
- - upgrade-sequence_mimic
-- print: "**** done parallel mimic v13.2.2"
-- parallel:
- - workload_mimic
- - upgrade-sequence_mimic
-- print: "**** done workload v13.2.2"
-
-
-####### upgrade to v13.2.x TO BE ADDED for future point releases
-### - install.upgrade:
-### #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
-### mon.a:
-### tag: v13.2.x
-### mon.b:
-### tag: v13.2.x
-### # Note that client.a IS NOT upgraded at this point
-###- parallel:
-### - workload_mimic
-### - upgrade-sequence_mimic
-###- print: "**** done parallel mimic v13.2.x"
-###- parallel:
-### - workload_mimic
-### - upgrade-sequence_mimic
-###- print: "**** done workload v13.2.x"
-
-
-#### upgrade to latest mimic
-- install.upgrade:
- #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
- mon.a:
- mon.b:
- # Note that client.a IS NOT upgraded at this point
-- parallel:
- - workload_mimic
- - upgrade-sequence_mimic
-- print: "**** done parallel mimic branch"
-
-#######################
-workload:
- sequential:
- - workunit:
- clients:
- client.0:
- - suites/blogbench.sh
-workload_mimic:
- full_sequential:
- - workunit:
- tag: v13.2.0
- clients:
- client.1:
- - rados/test.sh
- - cls
- env:
- CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.snapshots_namespaces'
- - print: "**** done rados/test.sh & cls workload_mimic"
- - sequential:
- - rgw: [client.0]
- - print: "**** done rgw workload_mimic"
- - s3tests:
- client.0:
- force-branch: ceph-mimic
- rgw_server: client.0
- scan_for_encryption_keys: false
- - print: "**** done s3tests workload_mimic"
-upgrade-sequence_mimic:
- sequential:
- - print: "**** done branch: mimic install.upgrade"
- - ceph.restart: [mds.a]
- - sleep:
- duration: 60
- - ceph.restart: [osd.0]
- - sleep:
- duration: 30
- - ceph.restart: [osd.1]
- - sleep:
- duration: 30
- - ceph.restart: [osd.2]
- - sleep:
- duration: 30
- - ceph.restart: [osd.3]
- - sleep:
- duration: 30
- - ceph.restart: [osd.4]
- - sleep:
- duration: 30
- - ceph.restart: [osd.5]
- - sleep:
- duration: 60
- - ceph.restart: [mon.a]
- - sleep:
- duration: 60
- - ceph.restart: [mon.b]
- - sleep:
- duration: 60
- - ceph.restart: [mon.c]
- - sleep:
- duration: 60
- - print: "**** done ceph.restart all mimic branch mds/osd/mon"
+++ /dev/null
-../../../distros/supported-all-distro/
\ No newline at end of file