--------
- [ ] create qa/suites/upgrade/(X-1)-x
-- [ ] remove qa/suites/upgrade/(X-3)-x-*
+- [x] remove qa/suites/upgrade/(X-3)-x-*
- [ ] remove qa/suites/rados/upgrade/(X-3)-x-singleton symlink
-- [ ] create qa/releases/X.yaml
-- [ ] create qa/suites/rados/cephadm/thrash-old-clients/1-install/(X-1).yaml
+- [x] create qa/releases/X.yaml
+- [x] create qa/suites/rados/thrash-old-clients/1-install/(X-1).yaml
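For reference, the file operations behind these checklist items map onto ordinary git commands. A minimal sketch, assuming X=reef (so X-1=quincy and X-3=octopus, matching the fragments below); paths follow the checklist wording, not this PR:

```bash
# Sketch only: assumes X=reef, X-1=quincy, X-3=octopus.
cd ceph

# create qa/suites/upgrade/(X-1)-x
mkdir qa/suites/upgrade/quincy-x

# remove qa/suites/upgrade/(X-3)-x-* (the deletions in this PR)
git rm -r qa/suites/upgrade/octopus-x*

# remove the qa/suites/rados/upgrade/(X-3)-x-singleton symlink
git rm qa/suites/rados/upgrade/octopus-x-singleton
```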
--- /dev/null
+tasks:
+- exec:
+ osd.0:
+ - ceph osd require-osd-release reef
+ - ceph osd set-require-min-compat-client reef
+- ceph.healthy:
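The new qa/releases/X.yaml above only issues the two release-gating commands. Outside of teuthology, the same flags can be sanity-checked on a live cluster; a quick manual check, assuming X=reef:

```bash
# Confirm the flags set by the exec task above took effect (assumes reef).
ceph osd dump | grep require_osd_release         # expect "require_osd_release reef"
ceph osd dump | grep require_min_compat_client   # expect "require_min_compat_client reef"
ceph health detail                               # should be HEALTH_OK, matching ceph.healthy
```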
--- /dev/null
+overrides:
+ ceph:
+ log-ignorelist:
+ - \(MON_DOWN\)
+tasks:
+- install:
+ branch: quincy
+ exclude_packages:
+ - ceph-mgr-dashboard
+ - ceph-mgr-diskprediction-local
+ - ceph-mgr-rook
+ - ceph-mgr-cephadm
+ - ceph-base-debuginfo
+ - ceph-common-debuginfo
+ - ceph-immutable-object-cache-debuginfo
+ - ceph-radosgw-debuginfo
+ - ceph-test-debuginfo
+ - ceph-base-debuginfo
+ - ceph-mgr-debuginfo
+ - ceph-mds-debuginfo
+ - ceph-mon-debuginfo
+ - ceph-osd-debuginfo
+ - ceph-fuse-debuginfo
+ - librados-devel-debuginfo
+ - libcephfs2-debuginfo
+ - librados2-debuginfo
+ - librbd1-debuginfo
+ - python3-cephfs-debuginfo
+ - python3-rados-debuginfo
+ - python3-rbd-debuginfo
+ - python3-rgw-debuginfo
+ - rbd-fuse-debuginfo
+ - rbd-mirror-debuginfo
+ - rbd-nbd-debuginfo
+ - ceph-volume
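Before merging, the reworked thrash-old-clients fragment can be smoke-tested by scheduling the suite. A hypothetical invocation (branch, machine type, and subset are illustrative placeholders, not values from this PR):

```bash
# Hypothetical scheduling run; flag values are illustrative only.
teuthology-suite \
  --ceph main \
  --suite rados/thrash-old-clients \
  --machine-type smithi \
  --subset 1/10 \
  --dry-run   # drop --dry-run to actually schedule
```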
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-.qa/distros/container-hosts
\ No newline at end of file
+++ /dev/null
-roles:
-- - mon.a
- - mon.c
- - mgr.y
- - osd.0
- - osd.1
- - osd.2
- - osd.3
- - client.0
- - node-exporter.a
- - alertmanager.a
-- - mon.b
- - mgr.x
- - osd.4
- - osd.5
- - osd.6
- - osd.7
- - client.1
- - prometheus.a
- - grafana.a
- - node-exporter.b
-openstack:
-- volumes: # attached to each instance
- count: 4
- size: 10 # GB
-overrides:
- ceph:
- create_rbd_pool: true
- conf:
- osd:
- osd shutdown pgref assert: true
- bluestore fsck quick fix on mount: true
+++ /dev/null
-tasks:
-- install:
- branch: octopus
- exclude_packages:
- - ceph-volume
-- print: "**** done install task..."
-- print: "**** done start installing octopus cephadm ..."
-- cephadm:
- image: quay.io/ceph/daemon-base:latest-octopus
- cephadm_branch: octopus
- cephadm_git_url: https://github.com/ceph/ceph
- conf:
- osd:
- #set config option for which cls modules are allowed to be loaded / used
- osd_class_load_list: "*"
- osd_class_default_list: "*"
- # deploy additional mons the "old" (octopus) way
- add_mons_via_daemon_add: true
- avoid_pacific_features: true
-- print: "**** done end installing octopus cephadm ..."
-
-- cephadm.shell:
- mon.a:
- - ceph fs volume create foo
-- ceph.healthy:
-
-- print: "**** done creating new fs"
-
-- cephadm.shell:
- mon.a:
- - ceph config set mgr mgr/cephadm/use_repo_digest true --force
- - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force
- - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force
-
-- print: "**** verifying SMART data exists"
-
-- cephadm.shell:
- mon.a:
- # debugging
- - ceph device ls
- # ensure something is scraped
- - ceph device scrape-health-metrics
- # more than 0 devices
- - ceph device ls --format=json | jq -e '. | length > 0'
- # dump metrics
- - "for devid in $(ceph device ls --format=json | jq -r '.[].devid'); do ceph device get-health-metrics $devid; done"
- # check scraped sanity
- - "for devid in $(ceph device ls --format=json | jq -r '.[].devid'); do ceph device get-health-metrics $devid | jq -e '. | length > 0'; done"
- # check device_health_metrics pool exists
- - rados --pool=device_health_metrics ls | wc -l
-
-- print: "**** done cephadm.shell ceph config set mgr..."
-
-- print: "**** done start parallel"
-- parallel:
- - workload
- - upgrade-sequence
-- print: "**** done end parallel"
+++ /dev/null
-.qa/mon_election
\ No newline at end of file
+++ /dev/null
-# renamed tasks: to upgrade-sequence:
-upgrade-sequence:
- sequential:
- - print: "**** done start upgrade, wait"
- - cephadm.shell:
- env: [sha1]
- mon.a:
- - ceph config set global log_to_journald false --force
- - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
- - while ceph orch upgrade status | jq '.in_progress' | grep true ; do ceph orch ps ; ceph versions ; sleep 30 ; done
- - ceph orch ps
- - ceph versions
- - ceph versions | jq -e '.overall | length == 1'
- - ceph versions | jq -e '.overall | keys' | grep $sha1
- - print: "**** done end upgrade, wait..."
-
- - print: "**** verifying SMART data upgrade"
-
- - cephadm.shell:
- mon.a:
- # check device_health_metrics pool is gone
- - rados --pool=device_health_metrics ls && exit 1 || true
- # check device_health_metrics pool is now .mgr
- - rados --pool=.mgr --all ls | wc -l
- # debugging
- - ceph device ls
- # more than 0 devices
- - ceph device ls --format=json | jq -e '. | length > 0'
- # dump metrics
- - "for devid in $(ceph device ls --format=json | jq -r '.[].devid'); do ceph device get-health-metrics $devid; done"
- # check scraped sanity
- - "for devid in $(ceph device ls --format=json | jq -r '.[].devid'); do ceph device get-health-metrics $devid | jq '. | length > 0'; done"
-
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- run randomized correctness test for rados operations
- on an erasure-coded pool
-workload:
- full_sequential:
- - print: "**** done start ec-rados-default.yaml"
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- ec_pool: true
- write_append_excl: false
- op_weights:
- read: 100
- write: 0
- append: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
- setattr: 25
- rmattr: 25
- - print: "**** done end ec-rados-default.yaml"
+++ /dev/null
-meta:
-- desc: |
- object class functional tests
-workload:
- full_sequential:
- - print: "**** done start rados_api.yaml"
- - workunit:
- branch: octopus
- clients:
- client.0:
- - cls
- - print: "**** done end rados_api.yaml"
+++ /dev/null
-meta:
-- desc: |
- generate read/write load with rados objects ranging from 1MB to 25MB
-workload:
- full_sequential:
- - print: "**** done start rados_loadgenbig.yaml"
- - workunit:
- branch: octopus
- clients:
- client.0:
- - rados/load-gen-big.sh
- - print: "**** done end rados_loadgenbig.yaml"
+++ /dev/null
-meta:
-- desc: |
- run basic import/export cli tests for rbd
-workload:
- full_sequential:
- - print: "**** done start rbd_import_export.yaml"
- - workunit:
- branch: octopus
- clients:
- client.1:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
- - print: "**** done end rbd_import_export.yaml"
+++ /dev/null
-meta:
-- desc: |
- librbd C and C++ api tests
-workload:
- full_sequential:
- - print: "**** done start test_rbd_api.yaml"
- - workunit:
- branch: octopus
- clients:
- client.0:
- - rbd/test_librbd.sh
- - print: "**** done end test_rbd_api.yaml"
+++ /dev/null
-
-
-##### This is disabled due to https://tracker.ceph.com/issues/48759
-
-
-meta:
-- desc: |
- librbd python api tests
-workload:
- full_sequential:
- - print: "**** done start test_rbd_python.yaml"
- - workunit:
- branch: octopus
- clients:
- client.0:
- - rbd/test_librbd_python.sh
- - print: "**** done end test_rbd_python.yaml"
-
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-roles:
-- [c1.mon.a, c1.mgr.x, c1.osd.0, c1.osd.1, c1.osd.2, c1.client.0]
-- [c2.mon.a, c2.mgr.x, c2.osd.0, c2.osd.1, c2.osd.2, c2.client.0, c2.client.1]
+++ /dev/null
-.qa/rgw_frontend/beast.yaml
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- log-ignorelist:
- - \(PG_AVAILABILITY\)
- wait-for-scrub: false
- conf:
- mon:
- mon_warn_on_insecure_global_id_reclaim: false
- mon_warn_on_insecure_global_id_reclaim_allowed: false
- client:
- setuser: ceph
- setgroup: ceph
- debug rgw: 20
- rgw crypt s3 kms backend: testing
- rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo=
- rgw crypt require ssl: false
- rgw sync log trim interval: 0
- rgw curl low speed time: 300
- rgw md log max shards: 4
- rgw data log num shards: 4
- rgw sync obj etag verify: true
- rgw:
- compression type: random
- install:
- ceph:
- branch: octopus
- rgw-multisite-tests:
- branch: octopus # run octopus branch of tests
- repo: https://github.com/ceph/ceph.git
+++ /dev/null
-overrides:
- rgw-multisite:
- realm:
- name: test-realm
- is default: true
- zonegroups:
- - name: test-zonegroup
- is_master: true
- is_default: true
- endpoints: [c1.client.0]
- zones:
- - name: test-zone1
- is_master: true
- is_default: true
- endpoints: [c1.client.0]
- - name: test-zone2
- is_default: true
- endpoints: [c2.client.0]
- rgw-multisite-tests:
- args: [tests.py]
+++ /dev/null
-tasks:
-- install:
- exclude_packages:
- - ceph-volume
-- ceph: {cluster: c1}
-- ceph: {cluster: c2}
-- parallel:
- - upgrade-task
-- rgw:
- c1.client.0:
- port: 8000
- c2.client.0:
- port: 8000
- c2.client.1:
- port: 8001
-- rgw-multisite:
-- rgw-multisite-tests:
- config:
- reconfigure_delay: 60
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-upgrade-task:
-- install.upgrade: # leave c2 on octopus, upgrade c1 to current
- exclude_packages:
- - ceph-volume
- c1.mon.a:
- c1.mgr.x:
- c1.osd.0:
- c1.osd.1:
- c1.osd.2:
- c1.client.0:
+++ /dev/null
-upgrade-task:
-- install.upgrade: # leave c1 on octopus, upgrade c2 to current
- exclude_packages:
- - ceph-volume
- c2.mon.a:
- c2.mgr.x:
- c2.osd.0:
- c2.osd.1:
- c2.osd.2:
- c2.client.0:
- c2.client.1:
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../stress-split-no-cephadm/0-cluster
\ No newline at end of file
+++ /dev/null
-../stress-split-no-cephadm/1-ceph-install
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- osd:
- osd min pg log entries: 1
- osd max pg log entries: 2
+++ /dev/null
-../stress-split-no-cephadm/2-partial-upgrade
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- randomly kill and revive osd
- small chance to increase the number of pgs
-overrides:
- ceph:
- log-ignorelist:
- - but it is still running
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- parallel:
- - stress-tasks
-stress-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
- chance_thrash_cluster_full: 0
- chance_thrash_pg_upmap: 0
- chance_thrash_pg_upmap_items: 0
- disable_objectstore_tool_tests: true
- chance_force_recovery: 0
- aggressive_pg_num_changes: false
-- print: "**** done thrashosds default.yaml"
+++ /dev/null
-../stress-split-no-cephadm/objectstore
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on an erasure coded pool
-stress-tasks:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- ec_pool: true
- write_append_excl: false
- op_weights:
- read: 100
- write: 0
- append: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
- setattr: 25
- rmattr: 25
- - print: "**** done rados ec task"
+++ /dev/null
-meta:
-- desc: |
- run rbd tests on EC pool
- overrides => force bluestore since it's required for ec-overwrite
- use an EC pool for rbd and run xfstests on top of it to verify correctness
-tasks:
-- exec:
- client.0:
- - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
- - sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile
- - sudo ceph osd pool set datapool allow_ec_overwrites true
- - rbd pool init datapool
-- qemu:
- all:
- clone: true
- type: block
- disks: 3
- test: qa/run_xfstests_qemu.sh
-- print: "**** done rbd/qemu ec task"
-exclude_arch: armv7l
-overrides:
- thrashosds:
- bdev_inject_crash: 2
- bdev_inject_crash_probability: .5
- ceph:
- fs: xfs
- conf:
- client:
- rbd default data pool: datapool
- osd: # force bluestore since it's required for ec overwrites
- osd objectstore: bluestore
+++ /dev/null
-../stress-split-no-cephadm/5-finish-upgrade.yaml
\ No newline at end of file
+++ /dev/null
-.qa/releases/quincy.yaml
\ No newline at end of file
+++ /dev/null
-#
-# k=3 implies a stripe_width of 1376*3 = 4128, which is different from
-# the default value of 4096. It is also not a multiple of 1024*1024,
-# which creates situations where rounding rules become necessary
-# during recovery.
-#
-meta:
-- desc: |
- randomized correctness test for rados operations on an erasure coded pool
- using the jerasure plugin with k=3 and m=1
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- ec_pool: true
- write_append_excl: false
- erasure_code_profile:
- name: jerasure31profile
- plugin: jerasure
- k: 3
- m: 1
- technique: reed_sol_van
- crush-failure-domain: osd
- op_weights:
- read: 100
- write: 0
- append: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
- setattr: 25
- rmattr: 25
-- print: "**** done rados ec 7-final-workload.yaml"
+++ /dev/null
-.qa/mon_election
\ No newline at end of file
+++ /dev/null
-.qa/tasks/thrashosds-health.yaml
\ No newline at end of file
+++ /dev/null
-.qa/distros/all/ubuntu_20.04.yaml
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-openstack:
- - machine:
- disk: 100 # GB
- - volumes: # attached to each instance
- count: 4
- size: 30 # GB
+++ /dev/null
-meta:
-- desc: |
- Run ceph on four cluster nodes,
- with a separate client-only node.
- Use xfs beneath the osds.
-overrides:
- ceph:
- mon_bind_msgr2: false
- mon_bind_addrvec: false
- fs: xfs
- log-ignorelist:
- - overall HEALTH_
- - \(MON_DOWN\)
- - \(MGR_DOWN\)
- - slow request
- - \(MON_MSGR2_NOT_ENABLED\)
- conf:
- global:
- enable experimental unrecoverable data corrupting features: "*"
- mon warn on msgr2 not enabled: false
- mon:
- mon warn on osd down out interval zero: false
-roles:
-- - mon.a
- - mgr.x
- - osd.0
- - osd.1
- - osd.2
- - osd.3
-- - mon.b
- - osd.4
- - osd.5
- - osd.6
- - osd.7
-- - mon.c
-- - osd.8
- - osd.9
- - osd.10
- - osd.11
-- - client.0
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: install ceph/octopus latest
-tasks:
-- install:
- exclude_packages:
- - ceph-mgr-cephadm
- - cephadm
- - libcephfs-dev
- - ceph-volume
- branch: octopus
-- print: "**** done install octopus"
-- ceph:
- conf:
- global:
- bluestore_warn_on_legacy_statfs: false
- bluestore warn on no per pool omap: false
- mon pg warn min per osd: 0
- mon:
- mon_warn_on_insecure_global_id_reclaim: false
- mon_warn_on_insecure_global_id_reclaim_allowed: false
- log-ignorelist:
- - Not found or unloadable
- - evicting unresponsive client
-- exec:
- osd.0:
- - ceph osd require-osd-release octopus
- - ceph osd set-require-min-compat-client octopus
-- print: "**** done ceph"
-- rgw:
- - client.0
-- print: "**** done => started rgw client.0"
-overrides:
- ceph:
- conf:
- mon:
- mon warn on osd down out interval zero: false
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- osd:
- osd min pg log entries: 1
- osd max pg log entries: 2
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- install/upgrade ceph -x (the version under test) on 2/3 of the cluster
- restart: mons, osd.0-7
-tasks:
-- install.upgrade:
- mon.a:
- mon.b:
- mon.c:
-- print: "**** done install.upgrade of first 3 nodes"
-- ceph.restart:
- daemons: [mon.a,mon.b,mgr.x,osd.0,osd.1,osd.2,osd.3,osd.4,osd.5,osd.6,osd.7]
- mon-health-to-clog: false
-- print: "**** done ceph.restart of all mons and 2/3 of osds"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- randomly kill and revive osd
- small chance to increase the number of pgs
-overrides:
- ceph:
- log-ignorelist:
- - but it is still running
- - wrongly marked me down
- - objects unfound and apparently lost
- - log bound mismatch
-tasks:
-- parallel:
- - stress-tasks
-stress-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
- chance_thrash_cluster_full: 0
- chance_thrash_pg_upmap: 0
- chance_thrash_pg_upmap_items: 0
- disable_objectstore_tool_tests: true
- chance_force_recovery: 0
- aggressive_pg_num_changes: false
-- print: "**** done thrashosds 3-thrash"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- run randomized correctness test for rados operations
- generate write load with rados bench
-stress-tasks:
-- full_sequential:
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
-- print: "**** done end radosbench.yaml"
+++ /dev/null
-meta:
-- desc: |
- run basic cls tests for rbd
-stress-tasks:
-- workunit:
- branch: octopus
- clients:
- client.0:
- - cls/test_cls_rbd.sh
-- print: "**** done cls/test_cls_rbd.sh 5-workload"
+++ /dev/null
-meta:
-- desc: |
- run basic import/export cli tests for rbd
-stress-tasks:
-- workunit:
- branch: octopus
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
-- print: "**** done rbd/import_export.sh 5-workload"
+++ /dev/null
-meta:
-- desc: |
- librbd C and C++ api tests
-stress-tasks:
-- workunit:
- branch: octopus
- clients:
- client.0:
- - rbd/test_librbd.sh
-- print: "**** done rbd/test_librbd.sh 7-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool,
- using only reads, writes, and deletes
-stress-tasks:
-- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- write_append_excl: false
- op_weights:
- read: 45
- write: 45
- delete: 10
-- print: "**** done rados/readwrite 5-workload"
+++ /dev/null
-meta:
-- desc: |
- rgw ragweed prepare before upgrade
-stress-tasks:
- - full_sequential:
- - sequential:
- - ragweed:
- client.0:
- default-branch: ceph-octopus
- rgw_server: client.0
- stages: prepare
- - print: "**** done rgw ragweed prepare 4-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool with snapshot operations
-stress-tasks:
-- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- write_append_excl: false
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
-- print: "**** done rados/snaps-few-objects 5-workload"
+++ /dev/null
-tasks:
-- install.upgrade:
- osd.8:
- client.0:
- extra_packages:
- - python3-rados
- - python3-rgw
- - python3-rbd
- - python3-cephfs
- - ceph-volume
-- ceph.restart:
- daemons: [mon.c, osd.8, osd.9, osd.10, osd.11, rgw.*]
- wait-for-healthy: false
- wait-for-osds-up: true
-- print: "**** done restarted/upgraded => mon.c, osd.8, osd.9, osd.10, osd.11, rgw.*"
-
+++ /dev/null
-.qa/releases/quincy.yaml
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- librbd python api tests
-overrides:
- ceph:
- conf:
- client:
- rbd default clone format: 1
-tasks:
-- workunit:
- clients:
- client.0:
- - rbd/test_librbd_python.sh
-- print: "**** done rbd/test_librbd_python.sh"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool with snapshot operations
-tasks:
-- rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- write_append_excl: false
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
+++ /dev/null
-.qa/mon_election
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-.qa/objectstore_debug/bluestore-hybrid.yaml
\ No newline at end of file
+++ /dev/null
-.qa/objectstore_debug/filestore-xfs.yaml
\ No newline at end of file
+++ /dev/null
-.qa/tasks/thrashosds-health.yaml
\ No newline at end of file
+++ /dev/null
-.qa/distros/all/ubuntu_20.04.yaml
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-.qa/distros/container-hosts
\ No newline at end of file
+++ /dev/null
-roles:
-- - mon.a
- - mon.c
- - mgr.y
- - osd.0
- - osd.1
- - osd.2
- - osd.3
- - client.0
- - node-exporter.a
- - alertmanager.a
-- - mon.b
- - mgr.x
- - osd.4
- - osd.5
- - osd.6
- - osd.7
- - client.1
- - prometheus.a
- - grafana.a
- - node-exporter.b
-openstack:
-- volumes: # attached to each instance
- count: 4
- size: 10 # GB
-overrides:
- ceph:
- create_rbd_pool: true
- conf:
- osd:
- osd shutdown pgref assert: true
- bluestore fsck quick fix on mount: true
+++ /dev/null
-tasks:
-- install:
- branch: octopus
- exclude_packages:
- - ceph-volume
-
-- cephadm:
- image: quay.io/ceph/daemon-base:latest-octopus
- cephadm_branch: octopus
- cephadm_git_url: https://github.com/ceph/ceph
- conf:
- osd:
- #set config option for which cls modules are allowed to be loaded / used
- osd_class_load_list: "*"
- osd_class_default_list: "*"
- # deploy additional mons the "old" (octopus) way
- add_mons_via_daemon_add: true
- avoid_pacific_features: true
-
-- cephadm.shell:
- mon.a:
- - ceph fs volume create foo
- - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force
- - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force
-
-- ceph.healthy:
-
-- print: "**** upgrading first half of cluster, with stress ****"
-- parallel:
- - first-half-tasks
- - first-half-sequence
-- print: "**** done upgrading first half of cluster ****"
-
-- ceph.healthy:
-
-- print: "**** applying stress + thrashing to mixed-version cluster ****"
-
-- parallel:
- - stress-tasks
-
-- ceph.healthy:
-
-- print: "**** finishing upgrade ****"
-- parallel:
- - second-half-tasks
- - second-half-sequence
-
-- ceph.healthy:
-
-
-#################
-
-first-half-sequence:
-- cephadm.shell:
- env: [sha1]
- mon.a:
- - ceph config set mgr mgr/cephadm/daemon_cache_timeout 60
- - ceph config set global log_to_journald false --force
-
- - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
- - ceph orch ps
-
- - echo wait for minority of mons to upgrade
- - while ! ceph mon versions | grep $sha1 ; do sleep 2 ; done
- - ceph orch ps
- - ceph orch upgrade pause
- - sleep 60
- - ceph orch upgrade resume
-
- - echo wait for majority of mons to upgrade
- - "while ! ceph mon versions | grep $sha1 | egrep ': [23]' ; do sleep 2 ; done"
- - ceph orch ps
- - ceph orch upgrade pause
- - sleep 60
- - ceph orch upgrade resume
-
- - echo wait for all mons to upgrade
- - "while ! ceph mon versions | grep $sha1 | grep ': 3' ; do sleep 2 ; done"
- - ceph orch ps
- - ceph orch upgrade pause
- - sleep 60
- - ceph orch upgrade resume
-
- - echo wait for half of osds to upgrade
- - "while ! ceph osd versions | grep $sha1 | egrep ': [45678]'; do sleep 2 ; done"
- - ceph orch upgrade pause
- - ceph orch ps
-
- - ceph orch ps
- - ceph versions
-
-
-#################
-
-stress-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
- chance_thrash_cluster_full: 0
- chance_thrash_pg_upmap: 0
- chance_thrash_pg_upmap_items: 0
- disable_objectstore_tool_tests: true
- chance_force_recovery: 0
- aggressive_pg_num_changes: false
-
-
-#################
-
-second-half-sequence:
- sequential:
- - cephadm.shell:
- env: [sha1]
- mon.a:
- - ceph orch upgrade resume
- - sleep 60
-
- - echo wait for upgrade to complete
- - while ceph orch upgrade status | jq '.in_progress' | grep true ; do ceph orch ps ; ceph versions ; sleep 30 ; done
-
- - echo upgrade complete
- - ceph orch ps
- - ceph versions
- - ceph versions | jq -e '.overall | length == 1'
- - ceph versions | jq -e '.overall | keys' | grep $sha1
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- run randomized correctness test for rados operations
- generate write load with rados bench
-first-half-tasks:
-- full_sequential:
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
-- print: "**** done end radosbench.yaml"
+++ /dev/null
-meta:
-- desc: |
- run basic cls tests for rbd
-first-half-tasks:
-- workunit:
- branch: octopus
- clients:
- client.0:
- - cls/test_cls_rbd.sh
-- print: "**** done cls/test_cls_rbd.sh 5-workload"
+++ /dev/null
-meta:
-- desc: |
- run basic import/export cli tests for rbd
-first-half-tasks:
-- workunit:
- branch: octopus
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
-- print: "**** done rbd/import_export.sh 5-workload"
+++ /dev/null
-meta:
-- desc: |
- librbd C and C++ api tests
-first-half-tasks:
-- workunit:
- branch: octopus
- clients:
- client.0:
- - rbd/test_librbd.sh
-- print: "**** done rbd/test_librbd.sh 7-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool,
- using only reads, writes, and deletes
-first-half-tasks:
-- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- write_append_excl: false
- op_weights:
- read: 45
- write: 45
- delete: 10
-- print: "**** done rados/readwrite 5-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool with snapshot operations
-first-half-tasks:
-- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- write_append_excl: false
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
-- print: "**** done rados/snaps-few-objects 5-workload"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- run randomized correctness test for rados operations
- generate write load with rados bench
-stress-tasks:
-- full_sequential:
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
-- print: "**** done end radosbench.yaml"
+++ /dev/null
-meta:
-- desc: |
- run basic cls tests for rbd
-stress-tasks:
-- workunit:
- branch: octopus
- clients:
- client.0:
- - cls/test_cls_rbd.sh
-- print: "**** done cls/test_cls_rbd.sh 5-workload"
+++ /dev/null
-meta:
-- desc: |
- run basic import/export cli tests for rbd
-stress-tasks:
-- workunit:
- branch: octopus
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
-- print: "**** done rbd/import_export.sh 5-workload"
+++ /dev/null
-meta:
-- desc: |
- librbd C and C++ api tests
-stress-tasks:
-- workunit:
- branch: octopus
- clients:
- client.0:
- - rbd/test_librbd.sh
-- print: "**** done rbd/test_librbd.sh 7-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool,
- using only reads, writes, and deletes
-stress-tasks:
-- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- write_append_excl: false
- op_weights:
- read: 45
- write: 45
- delete: 10
-- print: "**** done rados/readwrite 5-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool with snapshot operations
-stress-tasks:
-- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- write_append_excl: false
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
-- print: "**** done rados/snaps-few-objects 5-workload"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- run randomized correctness test for rados operations
- generate write load with rados bench
-second-half-tasks:
-- full_sequential:
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
-- print: "**** done end radosbench.yaml"
+++ /dev/null
-meta:
-- desc: |
- run basic import/export cli tests for rbd
-second-half-tasks:
-- workunit:
- branch: octopus
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
-- print: "**** done rbd/import_export.sh 5-workload"
+++ /dev/null
-.qa/mon_election
\ No newline at end of file