--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
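+# teuthology provisions three 30 GB volumes per openstack instance;
+# by suite convention these back the osd data devices (an assumption
+# here, since the device mapping is not set in this fragment)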
--- /dev/null
+meta:
+- desc: |
+    Run ceph on three nodes (mon/mgr/mds/osd),
+    with a separate fourth node holding client.0-3.
+    Use xfs beneath the osds.
+    CephFS tests run on client.2 and client.3.
+roles:
+- - mon.a
+ - mgr.x
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+- - mon.b
+ - osd.4
+ - osd.5
+ - osd.6
+ - osd.7
+- - mon.c
+ - osd.8
+ - osd.9
+ - osd.10
+ - osd.11
+- - client.0
+ - client.1
+ - client.2
+ - client.3
+overrides:
+ ceph:
+ log-whitelist:
+ - scrub mismatch
+ - ScrubResult
+ - wrongly marked
+ - \(POOL_APP_NOT_ENABLED\)
+ - \(SLOW_OPS\)
+ - overall HEALTH_
+ - slow request
+ conf:
+ global:
+ enable experimental unrecoverable data corrupting features: "*"
+ mon:
+ mon warn on osd down out interval zero: false
+ osd:
+ osd class load list: "*"
+ osd class default list: "*"
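+# layout summary: three mon/osd nodes (4 osds each; the first also
+# carries mgr.x and mds.a) plus one client-only node with client.0-3.
+# the log-whitelist entries suppress health output that is expected
+# while daemons restart mid-upgrade; the wide-open "osd class load
+# list"/"osd class default list" let any rados object class load,
+# which the mixed workloads may rely on (a reading of intent, not
+# something this fragment states)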
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ install ceph/octopus latest
+ run workload and upgrade-sequence in parallel
+ upgrade the client node
+tasks:
+- install:
+ exclude_packages:
+ - ceph-mgr-cephadm
+ - cephadm
+ - libcephfs-dev
+ branch: octopus
+- print: "**** done installing octopus"
+- ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(FS_
+ - \(MDS_
+ - \(OSD_
+ - \(MON_DOWN\)
+ - \(CACHE_POOL_
+ - \(POOL_
+ - \(MGR_DOWN\)
+ - \(PG_
+ - \(SMALLER_PGP_NUM\)
+ - Monitor daemon marked osd
+ - Behind on trimming
+ - Manager daemon
+ conf:
+ global:
+ mon warn on pool no app: false
+ bluestore_warn_on_legacy_statfs: false
+ bluestore warn on no per pool omap: false
+ mon pg warn min per osd: 0
+- exec:
+ osd.0:
+ - ceph osd set-require-min-compat-client octopus
+- print: "**** done ceph"
+- install.upgrade:
+ mon.a:
+ mon.b:
+ mon.c:
+- print: "**** done install.upgrade non-client hosts"
+- rgw:
+ - client.1
+- print: "**** done => started rgw client.1"
+- parallel:
+ - workload
+ - upgrade-sequence
+- print: "**** done parallel"
+- install.upgrade:
+ client.0:
+- print: "**** done install.upgrade on client.0"
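+# flow recap: install octopus packages, bring the cluster up, pin
+# min-compat-client at octopus, then install.upgrade replaces the
+# packages on the three mon/osd hosts without restarting daemons
+# (the restarts happen in upgrade-sequence); rgw starts on client.1,
+# workload and upgrade-sequence run concurrently, and client.0's
+# packages are upgraded last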
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd min pg log entries: 1
+ osd max pg log entries: 2
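+# a pg log capped at 1-2 entries forces recovery to use backfill
+# rather than log-based recovery, so the upgrade restarts exercise
+# the backfill path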
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ rgw ragweed prepare
+workload:
+ full_sequential:
+ - sequential:
+ - ragweed:
+ client.1:
+ default-branch: ceph-octopus
+ rgw_server: client.1
+ stages: prepare
+ - print: "**** done rgw ragweed prepare 2-workload"
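+# ragweed's "prepare" stage writes buckets/objects through the
+# pre-upgrade (octopus) rgw; the matching "check" stage in
+# 4-final-workload verifies them after the upgrade completes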
--- /dev/null
+meta:
+- desc: |
+ librbd python api tests
+workload:
+ full_sequential:
+ - workunit:
+ branch: octopus
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+ - print: "**** done rbd/test_librbd_python.sh 2-workload"
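+# runs the octopus-branch librbd python api workunit on client.0;
+# as part of the workload set it executes while the upgrade runs
+# in parallel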
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+ upgrade the ceph cluster
+upgrade-sequence:
+ sequential:
+ - ceph.restart:
+ daemons: [mon.a, mon.b, mon.c, mgr.x]
+ mon-health-to-clog: false
+ wait-for-healthy: false
+ - exec:
+ mon.a:
+ - ceph config set global mon_warn_on_msgr2_not_enabled false
+ - ceph.healthy:
+ - ceph.restart:
+ daemons: [osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, osd.6, osd.7, osd.8, osd.9, osd.10, osd.11]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - ceph.restart:
+ daemons: [mds.a, rgw.*]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+ - print: "**** done ceph.restart all"
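+# restart order: mons and mgr first, then all osds, then mds/rgw.
+# mon_warn_on_msgr2_not_enabled is silenced so ceph.healthy can pass
+# while the restarted mons have no msgr2 bind yet (an inference from
+# the usual reason this flag is set in upgrade suites)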
--- /dev/null
+meta:
+- desc: |
+ upgrade the ceph cluster,
+    upgrade in stages:
+    mgr.x and mon.a first, then mon.b, then mon.c,
+    then osd.0-3, mds.a, osd.4-7, osd.8-11, and rgw.*
+    ceph is expected to reach a healthy state after each stage
+upgrade-sequence:
+ sequential:
+ - ceph.restart:
+ daemons: [mgr.x, mon.a]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [mon.b]
+ wait-for-healthy: true
+ mon-health-to-clog: false
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [mon.c]
+ wait-for-healthy: false
+ mon-health-to-clog: false
+ - ceph.healthy:
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.0, osd.1, osd.2, osd.3]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.4, osd.5, osd.6, osd.7]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [osd.8, osd.9, osd.10, osd.11]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
+ - ceph.restart:
+ daemons: [rgw.*]
+ wait-for-healthy: true
+ - sleep:
+ duration: 60
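+# the 60s sleeps between restart batches give peering and recovery
+# time to settle so each wait-for-healthy reflects a stable state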
--- /dev/null
+.qa/releases/pacific.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+overrides:
+ rgw:
+ frontend: civetweb
+tasks:
+ - sequential:
+ - rgw-final-workload
+ - print: "**** done rgw 4-final-workload"
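+# the frontend override pins rgw to civetweb; the sequential task then
+# runs whatever the composed suite placed under rgw-final-workload
+# (e.g. the ragweed check fragment) after the parallel upgrade phase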
--- /dev/null
+meta:
+- desc: |
+ ragweed check for rgw
+rgw-final-workload:
+ full_sequential:
+ - ragweed:
+ client.1:
+ default-branch: ceph-master
+ rgw_server: client.1
+ stages: check
+ - print: "**** done ragweed check 4-final-workload"
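+# the "check" stage validates objects created during "prepare";
+# ragweed's ceph-master branch is used because the cluster now runs
+# the post-upgrade release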
--- /dev/null
+.qa/distros/supported/ubuntu_latest.yaml
\ No newline at end of file