-../../upgrade/reef-x/parallel
\ No newline at end of file
+../../upgrade/squid-x/parallel
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-.qa/distros/supported-container-hosts
\ No newline at end of file
+++ /dev/null
-roles:
-- - mon.a
- - mon.c
- - mgr.y
- - mds.a
- - osd.0
- - osd.1
- - osd.2
- - osd.3
- - client.0
- - node-exporter.a
- - alertmanager.a
-- - mon.b
- - mds.b
- - mgr.x
- - osd.4
- - osd.5
- - osd.6
- - osd.7
- - client.1
- - prometheus.a
- - grafana.a
- - node-exporter.b
-openstack:
-- volumes: # attached to each instance
- count: 4
- size: 10 # GB
-overrides:
- ceph:
- create_rbd_pool: true
- conf:
- osd:
- osd shutdown pgref assert: true
- log-ignorelist:
- - do not have an application enabled
- - application not enabled
- - or freeform for custom applications
- - POOL_APP_NOT_ENABLED
- - is down
- - OSD_DOWN
- - mons down
- - mon down
- - MON_DOWN
- - out of quorum
- - PG_AVAILABILITY
- - PG_DEGRADED
- - Reduced data availability
- - Degraded data redundancy
- - pg .* is stuck inactive
- - pg .* is .*degraded
- - FS_DEGRADED
- - OSDMAP_FLAGS
- - OSD_UPGRADE_FINISHED
- - filesystem is degraded
- - filesystem is offline
- - osds down
- - osd down
- - OBJECT_UNFOUND
+++ /dev/null
-overrides:
- ceph:
- log-ignorelist:
- - Telemetry requires re-opt-in
- - telemetry module includes new collections
-tasks:
-- install:
- branch: reef
- exclude_packages:
- - ceph-osd-classic
- - ceph-volume
- - ceph-osd-crimson
-- print: "**** done install task..."
-- print: "**** done start installing reef cephadm ..."
-- cephadm:
- image: quay.ceph.io/ceph-ci/ceph:reef
- compiled_cephadm_branch: reef
- conf:
- osd:
- #set config option for which cls modules are allowed to be loaded / used
- osd_class_load_list: "*"
- osd_class_default_list: "*"
-- print: "**** done end installing reef cephadm ..."
-
-- print: "**** done start cephadm.shell ceph config set mgr..."
-- cephadm.shell:
- mon.a:
- - ceph config set mgr mgr/cephadm/use_repo_digest true --force
-- print: "**** done cephadm.shell ceph config set mgr..."
-
-- print: "**** done start telemetry reef..."
-- workunit:
- clients:
- client.0:
- - test_telemetry_reef.sh
-- print: "**** done end telemetry reef..."
-
-- print: "**** done start parallel"
-- parallel:
- - workload
- - upgrade-sequence
-- print: "**** done end parallel"
-
-- print: "**** done start telemetry x..."
-- workunit:
- clients:
- client.0:
- - test_telemetry_reef_x.sh
-- print: "**** done end telemetry x..."
+++ /dev/null
-.qa/mon_election
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- log-ignorelist:
- - MDS_ALL_DOWN
- - MDS_UP_LESS_THAN_MAX
- - OSD_SLOW_PING_TIME
- - reached quota
- - running out of quota
- - overall HEALTH_
- - CACHE_POOL_NO_HIT_SET
- - pool\(s\) full
- - POOL_FULL
- - SMALLER_PGP_NUM
- - SLOW_OPS
- - CACHE_POOL_NEAR_FULL
- - OBJECT_MISPLACED
- - slow request
- - noscrub
- - nodeep-scrub
- - osds down
+++ /dev/null
-# renamed tasks: to upgrade-sequence:
-upgrade-sequence:
- sequential:
- - print: "**** done start upgrade, wait"
- - cephadm.shell:
- env: [sha1]
- mon.a:
- - ceph config set global log_to_journald false --force
- - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
- - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
- - ceph orch ps
- - ceph versions
- - ceph versions | jq -e '.overall | length == 1'
- - ceph versions | jq -e '.overall | keys' | grep $sha1
- - print: "**** done end upgrade, wait..."
-
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- run run randomized correctness test for rados operations
- on an erasure-coded pool
-workload:
- full_sequential:
- - print: "**** done start ec-rados-default.yaml"
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- ec_pool: true
- write_append_excl: false
- op_weights:
- read: 100
- write: 0
- append: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
- setattr: 25
- rmattr: 25
- - print: "**** done end ec-rados-default.yaml"
+++ /dev/null
-meta:
-- desc: |
- object class functional tests
-workload:
- full_sequential:
- - print: "**** done start rados_api.yaml"
- - workunit:
- branch: reef
- clients:
- client.0:
- - cls
- env:
- CLS_RBD_GTEST_FILTER: '-TestClsRbd.group_snap_set:TestClsRbd.group_snap_remove'
- - print: "**** done end rados_api.yaml"
+++ /dev/null
-meta:
-- desc: |
- generate read/write load with rados objects ranging from 1MB to 25MB
-workload:
- full_sequential:
- - print: "**** done start rados_loadgenbig.yaml"
- - workunit:
- branch: reef
- clients:
- client.0:
- - rados/load-gen-big.sh
- - print: "**** done end rados_loadgenbig.yaml"
+++ /dev/null
-meta:
-- desc: |
- run basic import/export cli tests for rbd
-workload:
- full_sequential:
- - print: "**** done start rbd_import_export.yaml"
- - workunit:
- branch: reef
- clients:
- client.1:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
- - print: "**** done end rbd_import_export.yaml"
+++ /dev/null
-meta:
-- desc: |
- librbd C and C++ api tests
-workload:
- full_sequential:
- - print: "**** done start test_rbd_api.yaml"
- - workunit:
- branch: reef
- clients:
- client.0:
- - rbd/test_librbd.sh
- env:
- RBD_FEATURES: "61"
- - print: "**** done end test_rbd_api.yaml"
+++ /dev/null
-meta:
-- desc: |
- librbd python api tests
-overrides:
- install:
- ceph:
- extra_system_packages:
- - python3-pytest
-workload:
- full_sequential:
- - print: "**** done start test_rbd_python.yaml"
- - workunit:
- branch: reef
- clients:
- client.0:
- - rbd/test_librbd_python.sh
- env:
- RBD_FEATURES: "61"
- - print: "**** done end test_rbd_python.yaml"
-
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-.qa/distros/supported-container-hosts
\ No newline at end of file
+++ /dev/null
-roles:
-- - mon.a
- - mon.c
- - mgr.y
- - osd.0
- - osd.1
- - osd.2
- - osd.3
- - client.0
- - node-exporter.a
- - alertmanager.a
-- - mon.b
- - mgr.x
- - osd.4
- - osd.5
- - osd.6
- - osd.7
- - client.1
- - prometheus.a
- - grafana.a
- - node-exporter.b
-openstack:
-- volumes: # attached to each instance
- count: 4
- size: 10 # GB
-overrides:
- ceph:
- create_rbd_pool: true
- conf:
- osd:
- osd shutdown pgref assert: true
+++ /dev/null
-overrides:
- ceph:
- log-ignorelist:
- - do not have an application enabled
- - application not enabled
- - or freeform for custom applications
- - POOL_APP_NOT_ENABLED
- - is down
- - OSD_DOWN
- - mons down
- - mon down
- - MON_DOWN
- - out of quorum
- - PG_AVAILABILITY
- - PG_DEGRADED
- - Reduced data availability
- - Degraded data redundancy
- - pg .* is stuck inactive
- - pg .* is .*degraded
- - FS_DEGRADED
- - OSDMAP_FLAGS
- - OSD_UPGRADE_FINISHED
- - Replacing daemon mds
- - MDS_ALL_DOWN
- - MDS_UP_LESS_THAN_MAX
- - filesystem is offline
- - with fewer MDS than max_mds
- - filesystem is degraded
- - osds down
- - osd down
- - OBJECT_UNFOUND
-tasks:
-- install:
- branch: reef
- exclude_packages:
- - ceph-volume
- - ceph-osd-classic
-
-- cephadm:
- image: quay.ceph.io/ceph-ci/ceph:reef
- compiled_cephadm_branch: reef
- conf:
- osd:
- #set config option for which cls modules are allowed to be loaded / used
- osd_class_load_list: "*"
- osd_class_default_list: "*"
-
-- cephadm.shell:
- mon.a:
- - ceph fs volume create foo
- - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force
- - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force
-
-- ceph.healthy:
-
-- print: "**** upgrading first half of cluster, with stress ****"
-- parallel:
- - first-half-tasks
- - first-half-sequence
-- print: "**** done upgrading first half of cluster ****"
-
-- ceph.healthy:
-
-- print: "**** applying stress + thrashing to mixed-version cluster ****"
-
-- parallel:
- - stress-tasks
-
-- ceph.healthy:
-
-- print: "**** finishing upgrade ****"
-- parallel:
- - second-half-tasks
- - second-half-sequence
-
-- ceph.healthy:
-
-
-#################
-
-first-half-sequence:
-- cephadm.shell:
- env: [sha1]
- mon.a:
- - ceph config set mgr mgr/cephadm/daemon_cache_timeout 60
- - ceph config set global log_to_journald false --force
-
- - echo wait for mgr daemons to upgrade
- # upgrade the mgr daemons first
- - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr
- - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
-
- - echo wait for minority of mons to upgrade
- # upgrade 1 of 3 mon daemons, then wait 60 seconds
- - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mon --limit 1
- - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
- - sleep 60
-
- - echo wait for majority of mons to upgrade
- # upgrade one more mon daemon (to get us to 2/3 upgraded) and wait 60 seconds
- - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mon --limit 1
- - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
- - sleep 60
-
- - echo wait for all mons to upgrade
- # upgrade final mon daemon and wait 60 seconds
- - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mon
- - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
- - sleep 60
-
- - echo wait for half of osds to upgrade
- # upgrade 4 of the 8 OSDs
- - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types osd --limit 4
- - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
-
-
-#################
-
-stress-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
- chance_thrash_cluster_full: 0
- chance_thrash_pg_upmap: 0
- chance_thrash_pg_upmap_items: 0
- disable_objectstore_tool_tests: true
- chance_force_recovery: 0
- aggressive_pg_num_changes: false
-
-
-#################
-
-second-half-sequence:
- sequential:
- - cephadm.shell:
- env: [sha1]
- mon.a:
- - sleep 60
-
- - echo wait for upgrade to complete
- # upgrade whatever is left
- - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
- - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
-
- - echo upgrade complete
- - ceph orch ps
- - ceph versions
- - ceph versions | jq -e '.overall | length == 1'
- - ceph versions | jq -e '.overall | keys' | grep $sha1
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- run randomized correctness test for rados operations
- generate write load with rados bench
-first-half-tasks:
-- full_sequential:
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
-- print: "**** done end radosbench.yaml"
+++ /dev/null
-meta:
-- desc: |
- run basic cls tests for rbd
-first-half-tasks:
-- workunit:
- branch: reef
- clients:
- client.0:
- - cls/test_cls_rbd.sh
- env:
- CLS_RBD_GTEST_FILTER: '-TestClsRbd.group_snap_set:TestClsRbd.group_snap_remove'
-- print: "**** done cls/test_cls_rbd.sh 5-workload"
+++ /dev/null
-meta:
-- desc: |
- run basic import/export cli tests for rbd
-first-half-tasks:
-- workunit:
- branch: reef
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
-- print: "**** done rbd/import_export.sh 5-workload"
+++ /dev/null
-meta:
-- desc: |
- librbd C and C++ api tests
-first-half-tasks:
-- workunit:
- branch: reef
- clients:
- client.0:
- - rbd/test_librbd.sh
- env:
- RBD_FEATURES: "61"
-- print: "**** done rbd/test_librbd.sh 7-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool,
- using only reads, writes, and deletes
-first-half-tasks:
-- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- write_append_excl: false
- op_weights:
- read: 45
- write: 45
- delete: 10
-- print: "**** done rados/readwrite 5-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool with snapshot operations
-first-half-tasks:
-- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- write_append_excl: false
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
-- print: "**** done rados/snaps-few-objects 5-workload"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- run randomized correctness test for rados operations
- generate write load with rados bench
-stress-tasks:
-- full_sequential:
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
-- print: "**** done end radosbench.yaml"
+++ /dev/null
-meta:
-- desc: |
- run basic cls tests for rbd
-stress-tasks:
-- workunit:
- branch: reef
- clients:
- client.0:
- - cls/test_cls_rbd.sh
- env:
- CLS_RBD_GTEST_FILTER: '-TestClsRbd.group_snap_set:TestClsRbd.group_snap_remove'
-- print: "**** done cls/test_cls_rbd.sh 5-workload"
+++ /dev/null
-meta:
-- desc: |
- run basic import/export cli tests for rbd
-stress-tasks:
-- workunit:
- branch: reef
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
-- print: "**** done rbd/import_export.sh 5-workload"
+++ /dev/null
-meta:
-- desc: |
- librbd C and C++ api tests
-stress-tasks:
-- workunit:
- branch: reef
- clients:
- client.0:
- - rbd/test_librbd.sh
- env:
- RBD_FEATURES: "61"
-- print: "**** done rbd/test_librbd.sh 7-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool,
- using only reads, writes, and deletes
-stress-tasks:
-- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 500
- write_append_excl: false
- op_weights:
- read: 45
- write: 45
- delete: 10
-- print: "**** done rados/readwrite 5-workload"
+++ /dev/null
-meta:
-- desc: |
- randomized correctness test for rados operations on a replicated pool with snapshot operations
-stress-tasks:
-- full_sequential:
- - rados:
- clients: [client.0]
- ops: 4000
- objects: 50
- write_append_excl: false
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
-- print: "**** done rados/snaps-few-objects 5-workload"
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-meta:
-- desc: |
- run randomized correctness test for rados operations
- generate write load with rados bench
-second-half-tasks:
-- full_sequential:
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
- - radosbench:
- clients: [client.0]
- time: 90
-- print: "**** done end radosbench.yaml"
+++ /dev/null
-meta:
-- desc: |
- run basic import/export cli tests for rbd
-second-half-tasks:
-- workunit:
- branch: reef
- clients:
- client.0:
- - rbd/import_export.sh
- env:
- RBD_CREATE_ARGS: --new-format
-- print: "**** done rbd/import_export.sh 5-workload"
+++ /dev/null
-.qa/mon_election
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- log-ignorelist:
- - MDS_ALL_DOWN
- - MDS_UP_LESS_THAN_MAX
- - OSD_SLOW_PING_TIME
- - reached quota
- - running out of quota
- - overall HEALTH_
- - CACHE_POOL_NO_HIT_SET
- - pool\(s\) full
- - POOL_FULL
- - SMALLER_PGP_NUM
- - SLOW_OPS
- - CACHE_POOL_NEAR_FULL
- - OBJECT_MISPLACED
- - slow request
- - noscrub
- - nodeep-scrub
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-.qa/distros/supported-container-hosts
\ No newline at end of file
+++ /dev/null
-roles:
-- - mon.a
- - mon.c
- - mgr.y
- - mds.a
- - osd.0
- - osd.1
- - osd.2
- - osd.3
- - client.0
- - node-exporter.a
- - alertmanager.a
-- - mon.b
- - mds.b
- - mgr.x
- - osd.4
- - osd.5
- - osd.6
- - osd.7
- - client.1
- - prometheus.a
- - grafana.a
- - node-exporter.b
-openstack:
-- volumes: # attached to each instance
- count: 4
- size: 10 # GB
-overrides:
- ceph:
- create_rbd_pool: true
- conf:
- osd:
- osd shutdown pgref assert: true
+++ /dev/null
-overrides:
- ceph:
- log-ignorelist:
- - mons down
- - mon down
- - MON_DOWN
- - out of quorum
- - PG_
- - OSD_DOWN
- - POOL_APP_NOT_ENABLED
- - pgs degraded
- - pg degraded
- - object degraded
- - objects degraded
- - FS_DEGRADED
- - MDS_ALL_DOWN
- - OSD_UPGRADE_FINISHED
- - do not have an application enabled
- - is down
- - TELEMETRY_CHANGED
- - pg .*? is .*?degraded.*?, acting
- - pg .* is stuck peering
- - filesystem is degraded
- - filesystem is offline
- - osds down
- - osd down
- - OBJECT_UNFOUND
-tasks:
-- install:
- branch: reef
- exclude_packages:
- - ceph-volume
-- print: "**** done install task..."
-- print: "**** done start installing reef cephadm ..."
-- cephadm:
- image: quay.ceph.io/ceph-ci/ceph:reef
- compiled_cephadm_branch: reef
- conf:
- osd:
- #set config option for which cls modules are allowed to be loaded / used
- osd_class_load_list: "*"
- osd_class_default_list: "*"
-- print: "**** done end installing reef cephadm ..."
-
-- print: "**** done start cephadm.shell ceph config set mgr..."
-- cephadm.shell:
- mon.a:
- - ceph config set mgr mgr/cephadm/use_repo_digest true --force
-- print: "**** done cephadm.shell ceph config set mgr..."
-
-- print: "**** done start telemetry reef..."
-- workunit:
- clients:
- client.0:
- - test_telemetry_reef.sh
-- print: "**** done end telemetry reef..."
-
-- print: "**** done start upgrade sequence..."
-- sequential:
- - print: "**** done start upgrade..."
- - cephadm.shell:
- env: [sha1]
- mon.a:
- - ceph config set global log_to_journald false --force
- - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
- - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
- - ceph orch ps
- - ceph versions
- - ceph versions | jq -e '.overall | length == 1'
- - ceph versions | jq -e '.overall | keys' | grep $sha1
- - print: "**** done end upgrade..."
-
- - print: "**** done set require-osd-release ..."
- - cephadm.shell:
- env: [sha1]
- mon.a:
- - ceph osd require-osd-release tentacle
-
- - print: "**** done start telemetry x..."
- - workunit:
- clients:
- client.0:
- - test_telemetry_reef_x.sh
- - print: "**** done end telemetry x..."
-- print: "**** done end upgrade sequence..."
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/distros/supported-container-hosts
\ No newline at end of file
--- /dev/null
+roles:
+- - mon.a
+ - mon.c
+ - mgr.y
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+ - client.0
+ - node-exporter.a
+ - alertmanager.a
+- - mon.b
+ - mds.b
+ - mgr.x
+ - osd.4
+ - osd.5
+ - osd.6
+ - osd.7
+ - client.1
+ - prometheus.a
+ - grafana.a
+ - node-exporter.b
+openstack:
+- volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+overrides:
+ ceph:
+ create_rbd_pool: true
+ conf:
+ osd:
+ osd shutdown pgref assert: true
--- /dev/null
+overrides:
+ ceph:
+ log-ignorelist:
+ - mons down
+ - mon down
+ - MON_DOWN
+ - out of quorum
+ - PG_
+ - OSD_DOWN
+ - POOL_APP_NOT_ENABLED
+ - pgs degraded
+ - pg degraded
+ - object degraded
+ - objects degraded
+ - FS_DEGRADED
+ - MDS_ALL_DOWN
+ - OSD_UPGRADE_FINISHED
+ - do not have an application enabled
+ - is down
+ - TELEMETRY_CHANGED
+ - pg .*? is .*?degraded.*?, acting
+ - pg .* is stuck peering
+ - filesystem is degraded
+ - filesystem is offline
+ - osds down
+ - osd down
+ - OBJECT_UNFOUND
+tasks:
+- install:
+ branch: tentacle
+ exclude_packages:
+ - ceph-volume
+- print: "**** done install task..."
+- print: "**** done start installing tentacle cephadm ..."
+- cephadm:
+ image: quay.ceph.io/ceph-ci/ceph:tentacle
+ compiled_cephadm_branch: tentacle
+ conf:
+ osd:
+ #set config option for which cls modules are allowed to be loaded / used
+ osd_class_load_list: "*"
+ osd_class_default_list: "*"
+- print: "**** done end installing tentacle cephadm ..."
+
+- print: "**** done start cephadm.shell ceph config set mgr..."
+- cephadm.shell:
+ mon.a:
+ - ceph config set mgr mgr/cephadm/use_repo_digest true --force
+- print: "**** done cephadm.shell ceph config set mgr..."
+
+- print: "**** done start telemetry tentacle..."
+- workunit:
+ clients:
+ client.0:
+ - test_telemetry_tentacle.sh
+- print: "**** done end telemetry tentacle..."
+
+- print: "**** done start upgrade sequence..."
+- sequential:
+ - print: "**** done start upgrade..."
+ - cephadm.shell:
+ env: [sha1]
+ mon.a:
+ - ceph config set global log_to_journald false --force
+ - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
+ - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
+ - ceph orch ps
+ - ceph versions
+ - ceph versions | jq -e '.overall | length == 1'
+ - ceph versions | jq -e '.overall | keys' | grep $sha1
+ - print: "**** done end upgrade..."
+
+ - print: "**** done set require-osd-release ..."
+ - cephadm.shell:
+ env: [sha1]
+ mon.a:
+ - ceph osd require-osd-release tentacle
+
+ - print: "**** done start telemetry x..."
+ - workunit:
+ clients:
+ client.0:
+ - test_telemetry_tentacle_x.sh
+ - print: "**** done end telemetry x..."
+- print: "**** done end upgrade sequence..."
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/distros/supported-container-hosts
\ No newline at end of file
--- /dev/null
+roles:
+- - mon.a
+ - mon.c
+ - mgr.y
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+ - client.0
+ - node-exporter.a
+ - alertmanager.a
+- - mon.b
+ - mds.b
+ - mgr.x
+ - osd.4
+ - osd.5
+ - osd.6
+ - osd.7
+ - client.1
+ - prometheus.a
+ - grafana.a
+ - node-exporter.b
+openstack:
+- volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+overrides:
+ ceph:
+ create_rbd_pool: true
+ conf:
+ osd:
+ osd shutdown pgref assert: true
+ log-ignorelist:
+ - do not have an application enabled
+ - application not enabled
+ - or freeform for custom applications
+ - POOL_APP_NOT_ENABLED
+ - is down
+ - OSD_DOWN
+ - mons down
+ - mon down
+ - MON_DOWN
+ - out of quorum
+ - PG_AVAILABILITY
+ - PG_DEGRADED
+ - Reduced data availability
+ - Degraded data redundancy
+ - pg .* is stuck inactive
+ - pg .* is .*degraded
+ - FS_DEGRADED
+ - OSDMAP_FLAGS
+ - OSD_UPGRADE_FINISHED
+ - filesystem is degraded
+ - filesystem is offline
+ - osds down
+ - osd down
+ - OBJECT_UNFOUND
+ - PG_DEGRADED
--- /dev/null
+overrides:
+ ceph:
+ log-ignorelist:
+ - Telemetry requires re-opt-in
+ - telemetry module includes new collections
+tasks:
+- install:
+ branch: tentacle
+ exclude_packages:
+ - ceph-osd-classic
+ - ceph-volume
+ - ceph-osd-crimson
+- print: "**** done install task..."
+- print: "**** done start installing tentacle cephadm ..."
+- cephadm:
+ image: quay.ceph.io/ceph-ci/ceph:tentacle
+ compiled_cephadm_branch: tentacle
+ conf:
+ osd:
+ #set config option for which cls modules are allowed to be loaded / used
+ osd_class_load_list: "*"
+ osd_class_default_list: "*"
+- print: "**** done end installing tentacle cephadm ..."
+
+- print: "**** done start cephadm.shell ceph config set mgr..."
+- cephadm.shell:
+ mon.a:
+ - ceph config set mgr mgr/cephadm/use_repo_digest true --force
+- print: "**** done cephadm.shell ceph config set mgr..."
+
+- print: "**** done start telemetry tentacle..."
+- workunit:
+ clients:
+ client.0:
+ - test_telemetry_tentacle.sh
+- print: "**** done end telemetry tentacle..."
+
+- print: "**** done start parallel"
+- parallel:
+ - workload
+ - upgrade-sequence
+- print: "**** done end parallel"
+
+- print: "**** done start telemetry x..."
+- workunit:
+ clients:
+ client.0:
+ - test_telemetry_tentacle_x.sh
+- print: "**** done end telemetry x..."
--- /dev/null
+.qa/mon_election
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ log-ignorelist:
+ - \(MDS_ALL_DOWN\)
+ - \(MDS_UP_LESS_THAN_MAX\)
+ - \(OSD_SLOW_PING_TIME
+ - reached quota
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(POOL_FULL\)
+ - \(SMALLER_PGP_NUM\)
+ - \(SLOW_OPS\)
+ - \(CACHE_POOL_NEAR_FULL\)
+ - \(POOL_APP_NOT_ENABLED\)
+ - \(PG_AVAILABILITY\)
+ - \(OBJECT_MISPLACED\)
+ - slow request
+ - \(MON_DOWN\)
+ - noscrub
+ - nodeep-scrub
+ - osds down
--- /dev/null
+# renamed tasks: to upgrade-sequence:
+upgrade-sequence:
+ sequential:
+ - print: "**** done start upgrade, wait"
+ - cephadm.shell:
+ env: [sha1]
+ mon.a:
+ - ceph config set global log_to_journald false --force
+ - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
+ - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
+ - ceph orch ps
+ - ceph versions
+ - ceph versions | jq -e '.overall | length == 1'
+ - ceph versions | jq -e '.overall | keys' | grep $sha1
+ - print: "**** done end upgrade, wait..."
+
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ run run randomized correctness test for rados operations
+ on an erasure-coded pool
+workload:
+ full_sequential:
+ - print: "**** done start ec-rados-default.yaml"
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done end ec-rados-default.yaml"
--- /dev/null
+meta:
+- desc: |
+ object class functional tests
+workload:
+ full_sequential:
+ - print: "**** done start rados_api.yaml"
+ - workunit:
+ branch: tentacle
+ clients:
+ client.0:
+ - cls
+ env:
+ CLS_RBD_GTEST_FILTER: '-TestClsRbd.group_snap_set:TestClsRbd.group_snap_remove'
+ - print: "**** done end rados_api.yaml"
--- /dev/null
+meta:
+- desc: |
+ generate read/write load with rados objects ranging from 1MB to 25MB
+workload:
+ full_sequential:
+ - print: "**** done start rados_loadgenbig.yaml"
+ - workunit:
+ branch: tentacle
+ clients:
+ client.0:
+ - rados/load-gen-big.sh
+ - print: "**** done end rados_loadgenbig.yaml"
--- /dev/null
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+workload:
+ full_sequential:
+ - print: "**** done start rbd_import_export.yaml"
+ - workunit:
+ branch: tentacle
+ clients:
+ client.1:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+ - print: "**** done end rbd_import_export.yaml"
--- /dev/null
+meta:
+- desc: |
+ librbd C and C++ api tests
+workload:
+ full_sequential:
+ - print: "**** done start test_rbd_api.yaml"
+ - workunit:
+ branch: tentacle
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ env:
+ RBD_FEATURES: "61"
+ - print: "**** done end test_rbd_api.yaml"
--- /dev/null
+meta:
+- desc: |
+ librbd python api tests
+overrides:
+ install:
+ ceph:
+ extra_system_packages:
+ - python3-pytest
+workload:
+ full_sequential:
+ - print: "**** done start test_rbd_python.yaml"
+ - workunit:
+ branch: tentacle
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+ env:
+ RBD_FEATURES: "61"
+ - print: "**** done end test_rbd_python.yaml"
+
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/distros/supported-container-hosts
\ No newline at end of file
--- /dev/null
+roles:
+- - mon.a
+ - mon.c
+ - mgr.y
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+ - client.0
+ - node-exporter.a
+ - alertmanager.a
+- - mon.b
+ - mgr.x
+ - osd.4
+ - osd.5
+ - osd.6
+ - osd.7
+ - client.1
+ - prometheus.a
+ - grafana.a
+ - node-exporter.b
+openstack:
+- volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+overrides:
+ ceph:
+ create_rbd_pool: true
+ conf:
+ osd:
+ osd shutdown pgref assert: true
--- /dev/null
+overrides:
+ ceph:
+ log-ignorelist:
+ - do not have an application enabled
+ - application not enabled
+ - or freeform for custom applications
+ - POOL_APP_NOT_ENABLED
+ - is down
+ - OSD_DOWN
+ - mons down
+ - mon down
+ - MON_DOWN
+ - out of quorum
+ - PG_AVAILABILITY
+ - PG_DEGRADED
+ - Reduced data availability
+ - Degraded data redundancy
+ - pg .* is stuck inactive
+ - pg .* is .*degraded
+ - FS_DEGRADED
+ - OSDMAP_FLAGS
+ - OSD_UPGRADE_FINISHED
+ - Replacing daemon mds
+ - MDS_ALL_DOWN
+ - MDS_UP_LESS_THAN_MAX
+ - filesystem is offline
+ - with fewer MDS than max_mds
+ - filesystem is degraded
+ - osds down
+ - osd down
+ - OBJECT_UNFOUND
+tasks:
+- install:
+ branch: tentacle
+ exclude_packages:
+ - ceph-osd-classic
+ - ceph-volume
+ - ceph-osd-crimson
+
+- cephadm:
+ image: quay.ceph.io/ceph-ci/ceph:tentacle
+ compiled_cephadm_branch: tentacle
+ conf:
+ osd:
+ #set config option for which cls modules are allowed to be loaded / used
+ osd_class_load_list: "*"
+ osd_class_default_list: "*"
+
+- cephadm.shell:
+ mon.a:
+ - ceph fs volume create foo
+ - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force
+ - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force
+
+- ceph.healthy:
+
+- print: "**** upgrading first half of cluster, with stress ****"
+- parallel:
+ - first-half-tasks
+ - first-half-sequence
+- print: "**** done upgrading first half of cluster ****"
+
+- ceph.healthy:
+
+- print: "**** applying stress + thrashing to mixed-version cluster ****"
+
+- parallel:
+ - stress-tasks
+
+- ceph.healthy:
+
+- print: "**** finishing upgrade ****"
+- parallel:
+ - second-half-tasks
+ - second-half-sequence
+
+- ceph.healthy:
+
+
+#################
+
+first-half-sequence:
+- cephadm.shell:
+ env: [sha1]
+ mon.a:
+ - ceph config set mgr mgr/cephadm/daemon_cache_timeout 60
+ - ceph config set global log_to_journald false --force
+
+ - echo wait for mgr daemons to upgrade
+ # upgrade the mgr daemons first
+ - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mgr
+ - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
+
+ - echo wait for minority of mons to upgrade
+ # upgrade 1 of 3 mon daemons, then wait 60 seconds
+ - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mon --limit 1
+ - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
+ - sleep 60
+
+ - echo wait for majority of mons to upgrade
+ # upgrade one more mon daemon (to get us to 2/3 upgraded) and wait 60 seconds
+ - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mon --limit 1
+ - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
+ - sleep 60
+
+ - echo wait for all mons to upgrade
+ # upgrade final mon daemon and wait 60 seconds
+ - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types mon
+ - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
+ - sleep 60
+
+ - echo wait for half of osds to upgrade
+ # upgrade 4 of the 8 OSDs
+ - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 --daemon-types osd --limit 4
+ - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
+
+
+#################
+
+stress-tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ chance_thrash_cluster_full: 0
+ chance_thrash_pg_upmap: 0
+ chance_thrash_pg_upmap_items: 0
+ disable_objectstore_tool_tests: true
+ chance_force_recovery: 0
+ aggressive_pg_num_changes: false
+
+
+#################
+
+second-half-sequence:
+ sequential:
+ - cephadm.shell:
+ env: [sha1]
+ mon.a:
+ - sleep 60
+
+ - echo wait for upgrade to complete
+ # upgrade whatever is left
+ - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
+ - while ceph orch upgrade status | jq '.in_progress' | grep true && ! ceph orch upgrade status | jq '.message' | grep Error ; do ceph orch ps ; ceph versions ; ceph orch upgrade status ; sleep 30 ; done
+
+ - echo upgrade complete
+ - ceph orch ps
+ - ceph versions
+ - ceph versions | jq -e '.overall | length == 1'
+ - ceph versions | jq -e '.overall | keys' | grep $sha1
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ run randomized correctness test for rados operations
+ generate write load with rados bench
+first-half-tasks:
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+- print: "**** done end radosbench.yaml"
--- /dev/null
+meta:
+- desc: |
+ run basic cls tests for rbd
+first-half-tasks:
+- workunit:
+ branch: tentacle
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
+ env:
+ CLS_RBD_GTEST_FILTER: '-TestClsRbd.group_snap_set:TestClsRbd.group_snap_remove'
+- print: "**** done cls/test_cls_rbd.sh 5-workload"
--- /dev/null
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+first-half-tasks:
+- workunit:
+ branch: tentacle
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+- print: "**** done rbd/import_export.sh 5-workload"
--- /dev/null
+meta:
+- desc: |
+ librbd C and C++ api tests
+first-half-tasks:
+- workunit:
+ branch: tentacle
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ env:
+ RBD_FEATURES: "61"
+- print: "**** done rbd/test_librbd.sh 7-workload"
--- /dev/null
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool,
+ using only reads, writes, and deletes
+first-half-tasks:
+- full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 45
+ write: 45
+ delete: 10
+- print: "**** done rados/readwrite 5-workload"
--- /dev/null
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+first-half-tasks:
+- full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+- print: "**** done rados/snaps-few-objects 5-workload"
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ run randomized correctness test for rados operations
+ generate write load with rados bench
+stress-tasks:
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+- print: "**** done end radosbench.yaml"
--- /dev/null
+meta:
+- desc: |
+ run basic cls tests for rbd
+stress-tasks:
+- workunit:
+ branch: tentacle
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
+ env:
+ CLS_RBD_GTEST_FILTER: '-TestClsRbd.group_snap_set:TestClsRbd.group_snap_remove'
+- print: "**** done cls/test_cls_rbd.sh 5-workload"
--- /dev/null
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+stress-tasks:
+- workunit:
+ branch: tentacle
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+- print: "**** done rbd/import_export.sh 5-workload"
--- /dev/null
+meta:
+- desc: |
+ librbd C and C++ api tests
+stress-tasks:
+- workunit:
+ branch: tentacle
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+ env:
+ RBD_FEATURES: "61"
+- print: "**** done rbd/test_librbd.sh 7-workload"
--- /dev/null
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool,
+ using only reads, writes, and deletes
+stress-tasks:
+- full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 45
+ write: 45
+ delete: 10
+- print: "**** done rados/readwrite 5-workload"
--- /dev/null
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+stress-tasks:
+- full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+- print: "**** done rados/snaps-few-objects 5-workload"
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ run randomized correctness test for rados operations
+ generate write load with rados bench
+second-half-tasks:
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+- print: "**** done end radosbench.yaml"
--- /dev/null
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+second-half-tasks:
+- workunit:
+ branch: tentacle
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
+- print: "**** done rbd/import_export.sh 5-workload"
--- /dev/null
+.qa/mon_election
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ log-ignorelist:
+ - \(MDS_ALL_DOWN\)
+ - \(MDS_UP_LESS_THAN_MAX\)
+ - \(OSD_SLOW_PING_TIME
+ - reached quota
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(POOL_FULL\)
+ - \(SMALLER_PGP_NUM\)
+ - \(SLOW_OPS\)
+ - \(CACHE_POOL_NEAR_FULL\)
+ - \(POOL_APP_NOT_ENABLED\)
+ - \(PG_AVAILABILITY\)
+ - \(OBJECT_MISPLACED\)
+ - slow request
+ - \(MON_DOWN\)
+ - noscrub
+ - nodeep-scrub
+++ /dev/null
-#!/bin/bash -ex
-
-# Set up ident details for cluster
-ceph config set mgr mgr/telemetry/channel_ident true
-ceph config set mgr mgr/telemetry/organization 'ceph-qa'
-ceph config set mgr mgr/telemetry/description 'upgrade test cluster'
-
-#Run preview commands
-ceph telemetry preview
-ceph telemetry preview-device
-ceph telemetry preview-all
-
-# Opt in to new collections right away to avoid "TELEMETRY_CHANGED"
-# warning (see https://tracker.ceph.com/issues/64458)
-ceph telemetry on --license sharing-1-0
-ceph telemetry enable channel perf
-
-# The last_opt_revision remains at 1 since last_opt_revision
-# was phased out for fresh installs of quincy.
-LAST_OPT_REVISION=$(ceph config get mgr mgr/telemetry/last_opt_revision)
-if [ $LAST_OPT_REVISION -ne 1 ]; then
- echo "last_opt_revision is incorrect"
- exit 1
-fi
-
-# Check the warning:
-ceph -s
-
-# Verify collections
-REPORTED_COLLECTIONS=$(ceph telemetry collection ls)
-NUM_REPORTED_COLLECTIONS=$(echo "$REPORTED_COLLECTIONS" | awk '/^NAME/ {flag=1; next} flag' | wc -l)
-KNOWN_COLLECTIONS=("basic_base" "basic_mds_metadata" "basic_pool_options_bluestore"
- "basic_pool_usage" "basic_rook_v01" "basic_usage_by_class" "crash_base"
- "device_base" "ident_base" "perf_memory_metrics" "perf_perf")
-
-if ! [[ $NUM_REPORTED_COLLECTIONS == "${#KNOWN_COLLECTIONS[@]}" ]];
-then
- echo "Number of reported collections ($NUM_REPORTED_COLLECTIONS) does not match KNOWN_COLLECTIONS ("${#KNOWN_COLLECTIONS[@]}")."
- exit 1
-fi
-
-for col in ${KNOWN_COLLECTIONS[@]}; do
- if ! [[ $REPORTED_COLLECTIONS == *$col* ]];
- then
- echo "COLLECTIONS does not contain" "'"$col"'."
- exit 1
- fi
-done
-
-#Run preview commands
-ceph telemetry preview
-ceph telemetry preview-device
-ceph telemetry preview-all
-
-# Run show commands
-ceph telemetry show
-ceph telemetry show-device
-ceph telemetry show-all
-
-echo OK
+++ /dev/null
-#!/bin/bash -ex
-
-# Opt in to new collections right away to avoid "TELEMETRY_CHANGED"
-# warning (see https://tracker.ceph.com/issues/64458)
-ceph telemetry on --license sharing-1-0
-
-# The last_opt_revision remains at 1 since last_opt_revision
-# was phased out for fresh installs of quincy.
-LAST_OPT_REVISION=$(ceph config get mgr mgr/telemetry/last_opt_revision)
-if [ $LAST_OPT_REVISION -ne 1 ]; then
- echo "last_opt_revision is incorrect"
- exit 1
-fi
-
-# Check the warning:
-ceph -s
-
-# Verify collections
-REPORTED_COLLECTIONS=$(ceph telemetry collection ls)
-NUM_REPORTED_COLLECTIONS=$(echo "$REPORTED_COLLECTIONS" | awk '/^NAME/ {flag=1; next} flag' | wc -l)
-KNOWN_COLLECTIONS=("basic_base" "basic_mds_metadata" "basic_pool_flags" "basic_pool_options_bluestore"
- "basic_pool_usage" "basic_rook_v01" "basic_usage_by_class" "crash_base" "device_base"
- "ident_base" "perf_memory_metrics" "perf_perf" "basic_stretch_cluster")
-
-if ! [[ $NUM_REPORTED_COLLECTIONS == "${#KNOWN_COLLECTIONS[@]}" ]];
-then
- echo "Number of reported collections ($NUM_REPORTED_COLLECTIONS) does not match KNOWN_COLLECTIONS ("${#KNOWN_COLLECTIONS[@]}")."
- exit 1
-fi
-
-for col in ${KNOWN_COLLECTIONS[@]}; do
- if ! [[ $REPORTED_COLLECTIONS == *$col* ]];
- then
- echo "COLLECTIONS does not contain" "'"$col"'."
- exit 1
- fi
-done
-
-#Run preview commands
-ceph telemetry preview
-ceph telemetry preview-device
-ceph telemetry preview-all
-
-# Run show commands
-ceph telemetry show
-ceph telemetry show-device
-ceph telemetry show-all
-
-# Opt out
-ceph telemetry off
-
-echo OK
REPORTED_COLLECTIONS=$(ceph telemetry collection ls)
NUM_REPORTED_COLLECTIONS=$(echo "$REPORTED_COLLECTIONS" | awk '/^NAME/ {flag=1; next} flag' | wc -l)
KNOWN_COLLECTIONS=("basic_base" "basic_mds_metadata" "basic_pool_flags" "basic_pool_options_bluestore"
- "basic_pool_usage" "basic_rook_v01" "basic_usage_by_class" "crash_base"
+ "basic_pool_usage" "basic_rook_v01" "basic_stretch_cluster" "basic_usage_by_class" "crash_base"
"device_base" "ident_base" "perf_memory_metrics" "perf_perf")
if ! [[ $NUM_REPORTED_COLLECTIONS == "${#KNOWN_COLLECTIONS[@]}" ]];
--- /dev/null
+#!/bin/bash -ex
+
+# Set up ident details for cluster
+ceph config set mgr mgr/telemetry/channel_ident true
+ceph config set mgr mgr/telemetry/organization 'ceph-qa'
+ceph config set mgr mgr/telemetry/description 'upgrade test cluster'
+
+# Run preview commands
+ceph telemetry preview
+ceph telemetry preview-device
+ceph telemetry preview-all
+
+# Opt in to new collections right away to avoid "TELEMETRY_CHANGED"
+# warning (see https://tracker.ceph.com/issues/64458)
+ceph telemetry on --license sharing-1-0
+ceph telemetry enable channel perf
+
+# The last_opt_revision remains at 1 since last_opt_revision
+# was phased out for fresh installs of quincy.
+LAST_OPT_REVISION=$(ceph config get mgr mgr/telemetry/last_opt_revision)
+if [ $LAST_OPT_REVISION -ne 1 ]; then
+ echo "last_opt_revision is incorrect"
+ exit 1
+fi
+
+# Check the warning:
+ceph -s
+
+# Verify collections
+REPORTED_COLLECTIONS=$(ceph telemetry collection ls)
+NUM_REPORTED_COLLECTIONS=$(echo "$REPORTED_COLLECTIONS" | awk '/^NAME/ {flag=1; next} flag' | wc -l)
+KNOWN_COLLECTIONS=("basic_base" "basic_mds_metadata" "basic_pool_flags" "basic_pool_options_bluestore"
+ "basic_pool_usage" "basic_rook_v01" "basic_usage_by_class" "crash_base"
+ "device_base" "ident_base" "perf_memory_metrics" "perf_perf" "basic_stretch_cluster")
+
+if ! [[ $NUM_REPORTED_COLLECTIONS == "${#KNOWN_COLLECTIONS[@]}" ]];
+then
+ echo "Number of reported collections ($NUM_REPORTED_COLLECTIONS) does not match KNOWN_COLLECTIONS ("${#KNOWN_COLLECTIONS[@]}")."
+ exit 1
+fi
+
+for col in ${KNOWN_COLLECTIONS[@]}; do
+ if ! [[ $REPORTED_COLLECTIONS == *$col* ]];
+ then
+ echo "COLLECTIONS does not contain" "'"$col"'."
+ exit 1
+ fi
+done
+
+# Run preview commands
+ceph telemetry preview
+ceph telemetry preview-device
+ceph telemetry preview-all
+
+# Run show commands
+ceph telemetry show
+ceph telemetry show-device
+ceph telemetry show-all
+
+echo OK
--- /dev/null
+#!/bin/bash -ex
+
+# Opt in to new collections right away to avoid "TELEMETRY_CHANGED"
+# warning (see https://tracker.ceph.com/issues/64458)
+ceph telemetry on --license sharing-1-0
+
+# The last_opt_revision remains at 1 since last_opt_revision
+# was phased out for fresh installs of quincy.
+LAST_OPT_REVISION=$(ceph config get mgr mgr/telemetry/last_opt_revision)
+if [ $LAST_OPT_REVISION -ne 1 ]; then
+ echo "last_opt_revision is incorrect"
+ exit 1
+fi
+
+# Check the warning:
+ceph -s
+
+# Verify collections
+REPORTED_COLLECTIONS=$(ceph telemetry collection ls)
+NUM_REPORTED_COLLECTIONS=$(echo "$REPORTED_COLLECTIONS" | awk '/^NAME/ {flag=1; next} flag' | wc -l)
+KNOWN_COLLECTIONS=("basic_base" "basic_mds_metadata" "basic_pool_flags" "basic_pool_options_bluestore"
+ "basic_pool_usage" "basic_rook_v01" "basic_usage_by_class" "crash_base" "device_base"
+ "ident_base" "perf_memory_metrics" "perf_perf" "basic_stretch_cluster")
+
+if ! [[ $NUM_REPORTED_COLLECTIONS == "${#KNOWN_COLLECTIONS[@]}" ]];
+then
+ echo "Number of reported collections ($NUM_REPORTED_COLLECTIONS) does not match KNOWN_COLLECTIONS ("${#KNOWN_COLLECTIONS[@]}")."
+ exit 1
+fi
+
+for col in ${KNOWN_COLLECTIONS[@]}; do
+ if ! [[ $REPORTED_COLLECTIONS == *$col* ]];
+ then
+ echo "COLLECTIONS does not contain" "'"$col"'."
+ exit 1
+ fi
+done
+
+# Run preview commands
+ceph telemetry preview
+ceph telemetry preview-device
+ceph telemetry preview-all
+
+# Run show commands
+ceph telemetry show
+ceph telemetry show-device
+ceph telemetry show-all
+
+# Opt out
+ceph telemetry off
+
+echo OK