--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+.qa/distros/podman/centos_8.2_kubic_stable.yaml
\ No newline at end of file
--- /dev/null
+roles:
+# Three osd roles on host.a are required for the cephadm task, which checks
+# that the cluster is healthy. More daemons will be deployed on both hosts in
+# the e2e tests.
+- - host.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - mon.a
+ - mgr.a
+ - client.0
+- - host.b
+ - client.1
+tasks:
+- install:
+- cephadm:
+- workunit:
+ clients:
+ client.1:
+ - cephadm/create_iscsi_disks.sh
+- workunit:
+ clients:
+ client.0:
+ - cephadm/test_dashboard_e2e.sh
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+../smoke/distro
\ No newline at end of file
--- /dev/null
+.qa/clusters/2-node-mgr.yaml
\ No newline at end of file
--- /dev/null
+
+tasks:
+ - install:
+ - ceph:
+      # tests may leave mgrs broken, so don't try to call into them
+      # to invoke e.g. pg dump during teardown.
+ wait-for-scrub: false
+ log-ignorelist:
+ - overall HEALTH_
+ - \(MGR_DOWN\)
+ - \(DEVICE_IDENT_ON\)
+ - \(DEVICE_FAULT_ON\)
+ - \(PG_
+ - replacing it with standby
+ - No standby daemons available
+ - cephfs_test_runner:
+ modules:
+ - tasks.mgr.test_orchestrator_cli
\ No newline at end of file
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+../smoke/distro
\ No newline at end of file
--- /dev/null
+tasks:
+- cephadm:
+ roleless: true
+- cephadm.shell:
+ host.a:
+ - ceph orch status
+ - ceph orch ps
+ - ceph orch ls
+ - ceph orch host ls
+ - ceph orch device ls
+roles:
+- - host.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+ - client.0
+- - host.b
+ - osd.4
+ - osd.5
+ - osd.6
+ - osd.7
+ - client.1
+openstack:
+- volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+overrides:
+ ceph:
+ conf:
+ osd:
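+      # assert at OSD shutdown if any PG references are still held (catches ref leaks)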
+ osd shutdown pgref assert: true
--- /dev/null
+tasks:
+- cephadm.shell:
+ host.a:
+ - ceph orch host label add `hostname` foo
+ - ceph auth get-or-create client.foo mon 'allow r'
+ - ceph orch client-keyring set client.foo label:foo --mode 770 --owner 11111:22222
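+# cephadm should install the keyring (mode 0770, owner 11111:22222) only on hosts labeled "foo";
+# the exec blocks below verify it appears on host.a and not on host.b until host.b gets the label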
+- exec:
+ host.a:
+ - while ! test -e /etc/ceph/ceph.client.foo.keyring ; do sleep 1 ; done
+ - ls -al /etc/ceph/ceph.client.foo.keyring | grep rwxrwx---
+ - ls -al /etc/ceph/ceph.client.foo.keyring | grep 11111
+ - ls -al /etc/ceph/ceph.client.foo.keyring | grep 22222
+ - test -e /etc/ceph/ceph.conf
+- exec:
+ host.b:
+ - test ! -e /etc/ceph/ceph.client.foo.keyring
+- cephadm.shell:
+ host.b:
+ - ceph orch host label add `hostname` foo
+- exec:
+ host.b:
+ - while ! test -e /etc/ceph/ceph.client.foo.keyring ; do sleep 1 ; done
+ - ls -al /etc/ceph/ceph.client.foo.keyring | grep rwxrwx---
+ - ls -al /etc/ceph/ceph.client.foo.keyring | grep 11111
+ - ls -al /etc/ceph/ceph.client.foo.keyring | grep 22222
+- cephadm.shell:
+ host.b:
+ - ceph orch host label rm `hostname` foo
+- exec:
+ host.b:
+ - while test -e /etc/ceph/ceph.client.foo.keyring ; do sleep 1 ; done
+- exec:
+ host.a:
+ - test -e /etc/ceph/ceph.client.foo.keyring
+- cephadm.shell:
+ host.a:
+ - ceph orch client-keyring rm client.foo
+- exec:
+ host.a:
+ - while test -e /etc/ceph/ceph.client.foo.keyring ; do sleep 1 ; done
--- /dev/null
+tasks:
+- cephadm.shell:
+ host.a:
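+  # create an rbd pool "foo" and deploy an iscsi service on it (api user "u", password "p")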
+ - ceph osd pool create foo
+ - rbd pool init foo
+ - ceph orch apply iscsi foo u p
+- cephadm.wait_for_service:
+ service: iscsi.foo
--- /dev/null
+tasks:
+- cephadm.shell:
+ host.a:
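+  # "--placement=*" runs one daemon of each mirror type on every host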
+ - ceph orch apply rbd-mirror "--placement=*"
+ - ceph orch apply cephfs-mirror "--placement=*"
+- cephadm.wait_for_service:
+ service: rbd-mirror
+- cephadm.wait_for_service:
+ service: cephfs-mirror
--- /dev/null
+tasks:
+- vip:
+
+# make sure cephadm notices the new IP
+- cephadm.shell:
+ host.a:
+ - ceph orch device ls --refresh
+
+# deploy rgw + ingress
+- cephadm.apply:
+ specs:
+ - service_type: rgw
+ service_id: foo
+ placement:
+ count: 4
+ host_pattern: "*"
+ spec:
+ rgw_frontend_port: 8000
+ - service_type: ingress
+ service_id: rgw.foo
+ placement:
+ count: 2
+ spec:
+ backend_service: rgw.foo
+ frontend_port: 9000
+ monitor_port: 9001
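+      # {{VIP0}}/{{VIPPREFIXLEN}} are filled in from the virtual IP allocated by the vip task above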
+ virtual_ip: "{{VIP0}}/{{VIPPREFIXLEN}}"
+- cephadm.wait_for_service:
+ service: rgw.foo
+- cephadm.wait_for_service:
+ service: ingress.rgw.foo
+
+# take each component down in turn and ensure things still work
+- cephadm.shell:
+ host.a:
+ - |
+ echo "Check while healthy..."
+ curl http://{{VIP0}}:9000/
+
+ # stop each rgw in turn
+ echo "Check with each rgw stopped in turn..."
+ for rgw in `ceph orch ps | grep ^rgw.foo. | awk '{print $1}'`; do
+ ceph orch daemon stop $rgw
+ while ! ceph orch ps | grep $rgw | grep stopped; do sleep 1 ; done
+ while ! curl http://{{VIP0}}:9000/ ; do sleep 1 ; done
+ ceph orch daemon start $rgw
+ while ! ceph orch ps | grep $rgw | grep running; do sleep 1 ; done
+ done
+
+ # stop each haproxy in turn
+ echo "Check with each haproxy down in turn..."
+ for haproxy in `ceph orch ps | grep ^haproxy.rgw.foo. | awk '{print $1}'`; do
+ ceph orch daemon stop $haproxy
+ while ! ceph orch ps | grep $haproxy | grep stopped; do sleep 1 ; done
+ while ! curl http://{{VIP0}}:9000/ ; do sleep 1 ; done
+ ceph orch daemon start $haproxy
+ while ! ceph orch ps | grep $haproxy | grep running; do sleep 1 ; done
+ done
+
+ while ! curl http://{{VIP0}}:9000/ ; do sleep 1 ; done
--- /dev/null
+tasks:
+- cephadm.apply:
+ specs:
+ - service_type: rgw
+ service_id: foo
+ placement:
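+      # four rgw daemons on every matching host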
+ count_per_host: 4
+ host_pattern: "*"
+ spec:
+ rgw_frontend_port: 8000
+- cephadm.wait_for_service:
+ service: rgw.foo
--- /dev/null
+tasks:
+- cephadm.shell:
+ host.a:
+ - ceph orch status
+ - ceph orch ps
+ - ceph orch ls
+ - ceph orch host ls
+ - ceph orch device ls
--- /dev/null
+../smoke/distro
\ No newline at end of file
--- /dev/null
+tasks:
+- cephadm:
+ roleless: true
+ single_host_defaults: true
+- cephadm.shell:
+ host.a:
+ - ceph orch status
+ - ceph orch ps
+ - ceph orch ls
+ - ceph orch host ls
+ - ceph orch device ls
+roles:
+- - host.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+ - client.0
+openstack:
+- volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd shutdown pgref assert: true
--- /dev/null
+tasks:
+- cephadm.apply:
+ specs:
+ - service_type: rgw
+ service_id: foo
+ placement:
+ count_per_host: 4
+ host_pattern: "*"
+ spec:
+ rgw_frontend_port: 8000
+- cephadm.wait_for_service:
+ service: rgw.foo
--- /dev/null
+tasks:
+- cephadm.shell:
+ host.a:
+ - ceph orch status
+ - ceph orch ps
+ - ceph orch ls
+ - ceph orch host ls
+ - ceph orch device ls
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+.qa/distros/podman/centos_8.2_kubic_stable.yaml
\ No newline at end of file
--- /dev/null
+.qa/distros/podman/rhel_8.3_kubic_stable.yaml
\ No newline at end of file
--- /dev/null
+.qa/distros/all/ubuntu_18.04.yaml
\ No newline at end of file
--- /dev/null
+.qa/distros/all/ubuntu_20.04.yaml
\ No newline at end of file
--- /dev/null
+roles:
+- - mon.a
+ - mon.c
+ - mgr.y
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+ - client.0
+ - ceph.rgw.foo.a
+ - node-exporter.a
+ - alertmanager.a
+- - mon.b
+ - mgr.x
+ - osd.4
+ - osd.5
+ - osd.6
+ - osd.7
+ - client.1
+ - prometheus.a
+ - grafana.a
+ - node-exporter.b
+ - ceph.iscsi.iscsi.a
+openstack:
+- volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd shutdown pgref assert: true
--- /dev/null
+.qa/mon_election
\ No newline at end of file
--- /dev/null
+tasks:
+- cephadm:
+ conf:
+ mgr:
+ debug ms: 1
+ debug mgr: 20
+- cephadm.shell:
+ mon.a:
+ - ceph orch status
+ - ceph orch ps
+ - ceph orch ls
+ - ceph orch host ls
+ - ceph orch device ls
+ - ceph orch ls --format yaml
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/overrides/2-size-2-min-size.yaml
\ No newline at end of file
--- /dev/null
+.qa/overrides/3-size-2-min-size.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ mon_bind_msgr2: false
+ log-ignorelist:
+ - \(MON_DOWN\)
+ conf:
+ global:
+ ms type: async
+ ms bind msgr2: false
+tasks:
+- install:
+ branch: luminous
+ exclude_packages:
+ - librados3
+ - ceph-mgr-dashboard
+ - ceph-mgr-diskprediction-local
+ - ceph-mgr-rook
+ - ceph-mgr-cephadm
+ - cephadm
+ - ceph-immutable-object-cache
+ - ceph-base
+ - python3-rados
+ - python3-rgw
+ - python3-rbd
+ - python3-cephfs
+ - librados-devel
+ extra_packages:
+ - librados2
+ - python-rados
+ - python-rgw
+ - python-rbd
+ - python-cephfs
--- /dev/null
+overrides:
+ ceph:
+ mon_bind_msgr2: false
+ log-ignorelist:
+ - \(MON_DOWN\)
+ conf:
+ global:
+ mon warn on msgr2 not enabled: false
+tasks:
+- install:
+ branch: luminous
+ exclude_packages:
+ - librados3
+ - ceph-mgr-dashboard
+ - ceph-mgr-diskprediction-local
+ - ceph-mgr-rook
+ - ceph-mgr-cephadm
+ - cephadm
+ - ceph-immutable-object-cache
+ - ceph-base
+ - python3-rados
+ - python3-rgw
+ - python3-rbd
+ - python3-cephfs
+ - librados-devel
+ extra_packages:
+ - librados2
+ - python-rados
+ - python-rgw
+ - python-rbd
+ - python-cephfs
--- /dev/null
+overrides:
+ ceph:
+ mon_bind_msgr2: false
+ log-ignorelist:
+ - \(MON_DOWN\)
+ conf:
+ global:
+ ms type: async
+ ms bind msgr2: false
+tasks:
+- install:
+ branch: mimic
+ exclude_packages:
+ - librados3
+ - ceph-mgr-dashboard
+ - ceph-mgr-diskprediction-local
+ - ceph-mgr-rook
+ - ceph-mgr-cephadm
+ - cephadm
+ - ceph-immutable-object-cache
+ - ceph-base
+ - python3-rados
+ - python3-rgw
+ - python3-rbd
+ - python3-cephfs
+ - librados-devel
+ extra_packages:
+ - librados2
+ - python-rados
+ - python-rgw
+ - python-rbd
+ - python-cephfs
--- /dev/null
+overrides:
+ ceph:
+ mon_bind_msgr2: false
+ log-ignorelist:
+ - \(MON_DOWN\)
+ conf:
+ global:
+ ms type: async
+ mon warn on msgr2 not enabled: false
+tasks:
+- install:
+ branch: mimic
+ exclude_packages:
+ - librados3
+ - ceph-mgr-dashboard
+ - ceph-mgr-diskprediction-local
+ - ceph-mgr-rook
+ - ceph-mgr-cephadm
+ - cephadm
+ - ceph-immutable-object-cache
+ - ceph-base
+ - python3-rados
+ - python3-rgw
+ - python3-rbd
+ - python3-cephfs
+ - librados-devel
+ extra_packages:
+ - librados2
+ - python-rados
+ - python-rgw
+ - python-rbd
+ - python-cephfs
--- /dev/null
+overrides:
+ ceph:
+ mon_bind_msgr2: false
+ log-ignorelist:
+ - \(MON_DOWN\)
+ conf:
+ global:
+ ms type: async
+ ms bind msgr2: false
+tasks:
+- install:
+ branch: nautilus
+ exclude_packages:
+ - cephadm
+ - ceph-mgr-cephadm
+ - ceph-immutable-object-cache
+ - python3-rados
+ - python3-rgw
+ - python3-rbd
+ - python3-cephfs
+ extra_packages:
+ - python-rados
+ - python-rgw
+ - python-rbd
+ - python-cephfs
--- /dev/null
+overrides:
+ ceph:
+ log-ignorelist:
+ - \(MON_DOWN\)
+ conf:
+ global:
+ ms type: async
+ ms bind msgr2: true
+ ms bind msgr1: false
+tasks:
+- install:
+ branch: nautilus
+ exclude_packages:
+ - cephadm
+ - ceph-mgr-cephadm
+ - ceph-immutable-object-cache
+ - python3-rados
+ - python3-rgw
+ - python3-rbd
+ - python3-cephfs
+ extra_packages:
+ - python-rados
+ - python-rgw
+ - python-rbd
+ - python-cephfs
--- /dev/null
+overrides:
+ ceph:
+ log-ignorelist:
+ - \(MON_DOWN\)
+tasks:
+- install:
+ branch: nautilus
+ exclude_packages:
+ - cephadm
+ - ceph-mgr-cephadm
+ - ceph-immutable-object-cache
+ - python3-rados
+ - python3-rgw
+ - python3-rbd
+ - python3-cephfs
+ extra_packages:
+ - python-rados
+ - python-rgw
+ - python-rbd
+ - python-cephfs
--- /dev/null
+overrides:
+ ceph:
+ log-ignorelist:
+ - \(MON_DOWN\)
+tasks:
+- install:
+ branch: octopus
+ exclude_packages:
+ - ceph-mgr-dashboard
+ - ceph-mgr-diskprediction-local
+ - ceph-mgr-rook
+ - ceph-mgr-cephadm
+ - ceph-base-debuginfo
+ - ceph-common-debuginfo
+ - ceph-immutable-object-cache-debuginfo
+ - ceph-radosgw-debuginfo
+ - ceph-test-debuginfo
+ - ceph-base-debuginfo
+ - ceph-mgr-debuginfo
+ - ceph-mds-debuginfo
+ - ceph-mon-debuginfo
+ - ceph-osd-debuginfo
+ - ceph-fuse-debuginfo
+ - librados-devel-debuginfo
+ - libcephfs2-debuginfo
+ - librados2-debuginfo
+ - librbd1-debuginfo
+ - python3-cephfs-debuginfo
+ - python3-rados-debuginfo
+ - python3-rbd-debuginfo
+ - python3-rgw-debuginfo
+ - rbd-fuse-debuginfo
+ - rbd-mirror-debuginfo
+ - rbd-nbd-debuginfo
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd backoff on peering: true
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd backoff on peering: true
+ osd backoff on degraded: true
--- /dev/null
+# Don't verify os + flavor + sha1
+verify_ceph_hash: false
+tasks:
+- cephadm:
+ conf:
+ mon:
+ auth allow insecure global id reclaim: true
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 30 # GB
--- /dev/null
+roles:
+- [mon.a, mgr.y, osd.0, osd.1, osd.2, osd.3, client.0]
+- [mon.b, mgr.x, osd.4, osd.5, osd.6, osd.7, client.1]
+- [mon.c, osd.8, osd.9, osd.10, osd.11, client.2]
+openstack:
+- volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd shutdown pgref assert: true
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+tasks:
+- exec:
+ mon.a:
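+  # wait for the balancer module to come up, then enable crush-compat balancing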
+ - while ! ceph balancer status ; do sleep 1 ; done
+ - ceph balancer mode crush-compat
+ - ceph balancer on
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/distros/all/ubuntu_18.04.yaml
\ No newline at end of file
--- /dev/null
+.qa/mon_election
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 2500
+ ms tcp read timeout: 5
+ mon client directed command retry: 5
+ log-ignorelist:
+ - \(OSD_SLOW_PING_TIME
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
+ mon client directed command retry: 5
+ osd:
+ osd heartbeat use min delay socket: true
+ log-ignorelist:
+ - \(OSD_SLOW_PING_TIME
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 2500
+ ms inject delay type: osd
+ ms inject delay probability: .005
+ ms inject delay max: 1
+ ms inject internal delays: .002
+ mon client directed command retry: 5
+ log-ignorelist:
+ - \(OSD_SLOW_PING_TIME
--- /dev/null
+.qa/config/rados.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ log-ignorelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd debug reject backfill probability: .3
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 3
+ osd snap trim sleep: 2
+ mon:
+ mon min osdmap epochs: 50
+ paxos service trim min: 10
+ # prune full osdmaps regularly
+ mon osdmap full prune min: 15
+ mon osdmap full prune interval: 2
+ mon osdmap full prune txsize: 2
+tasks:
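+# thrash OSDs (mark them down/out and revive them) under the workload, occasionally growing pg_num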
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ aggressive_pg_num_changes: false
--- /dev/null
+overrides:
+ ceph:
+ log-ignorelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd debug reject backfill probability: .3
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 3
+ osd snap trim sleep: 2
+ mon:
+ mon min osdmap epochs: 50
+ paxos service trim min: 10
+ # prune full osdmaps regularly
+ mon osdmap full prune min: 15
+ mon osdmap full prune interval: 2
+ mon osdmap full prune txsize: 2
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
--- /dev/null
+overrides:
+ ceph:
+ log-ignorelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ - osd_map_cache_size
+ conf:
+ mon:
+ mon min osdmap epochs: 50
+ paxos service trim min: 10
+ # prune full osdmaps regularly
+ mon osdmap full prune min: 15
+ mon osdmap full prune interval: 2
+ mon osdmap full prune txsize: 2
+ osd:
+ osd map cache size: 1
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd scrub during recovery: false
+ osd max backfills: 6
+tasks:
+- thrashosds:
+ timeout: 1800
+ chance_pgnum_grow: 0.25
+ chance_pgpnum_fix: 0.25
+ chance_test_map_discontinuity: 2
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ journal throttle high multiple: 2
+ journal throttle max multiple: 10
+ filestore queue throttle high multiple: 2
+ filestore queue throttle max multiple: 10
+ osd max backfills: 9
+ log-ignorelist:
+ - but it is still running
+ - objects unfound and apparently lost
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 3
+ chance_pgpnum_fix: 1
+openstack:
+- volumes:
+ size: 50
--- /dev/null
+overrides:
+ ceph:
+ log-ignorelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ filestore odsync write: true
+ osd max backfills: 2
+ osd snap trim sleep: .5
+ mon:
+ mon min osdmap epochs: 50
+ paxos service trim min: 10
+ # prune full osdmaps regularly
+ mon osdmap full prune min: 15
+ mon osdmap full prune interval: 2
+ mon osdmap full prune txsize: 2
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 2
+ chance_pgpnum_fix: 1
--- /dev/null
+.qa/tasks/thrashosds-health.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+overrides:
+ ceph:
+ log-ignorelist:
+ - must scrub before tier agent can activate
+tasks:
+- exec:
+ client.0:
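+  # set up a writeback cache tier ("cache") over the "base" pool before running the workload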
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool set cache min_read_recency_for_promote 2
+- rados:
+ clients: [client.2]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
+ cache_flush: 50
+ cache_try_flush: 50
+ cache_evict: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
--- /dev/null
+overrides:
+ ceph:
+ conf:
+ client.2:
+ debug ms: 1
+ debug objecter: 20
+ debug rados: 20
+tasks:
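+# eight back-to-back 90-second radosbench runs from client.2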
+- full_sequential:
+ - radosbench:
+ clients: [client.2]
+ time: 90
+ - radosbench:
+ clients: [client.2]
+ time: 90
+ - radosbench:
+ clients: [client.2]
+ time: 90
+ - radosbench:
+ clients: [client.2]
+ time: 90
+ - radosbench:
+ clients: [client.2]
+ time: 90
+ - radosbench:
+ clients: [client.2]
+ time: 90
+ - radosbench:
+ clients: [client.2]
+ time: 90
+ - radosbench:
+ clients: [client.2]
+ time: 90
--- /dev/null
+meta:
+- desc: |
+ rbd object class functional tests
+tasks:
+- exec:
+ client.2:
+ - ceph_test_cls_rbd --gtest_filter=-TestClsRbd.get_features:TestClsRbd.parents:TestClsRbd.mirror
--- /dev/null
+tasks:
+- rados:
+ clients: [client.2]
+ ops: 4000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
--- /dev/null
+meta:
+- desc: |
+ librbd C and C++ api tests
+workload:
+- workunit:
+ clients:
+ client.2:
+ - rbd/test_librbd.sh
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+../workunits/0-distro
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+- cephadm:
+ conf:
+ mgr:
+ debug ms: 1
+ debug mgr: 20
--- /dev/null
+overrides:
+ ceph:
+ log-ignorelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd debug reject backfill probability: .3
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 3
+ osd snap trim sleep: 2
+ osd delete sleep: 1
+ mon:
+ mon min osdmap epochs: 50
+ paxos service trim min: 10
+ # prune full osdmaps regularly
+ mon osdmap full prune min: 15
+ mon osdmap full prune interval: 2
+ mon osdmap full prune txsize: 2
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgnum_shrink: 1
+ chance_pgpnum_fix: 1
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/suites/rados/thrash/workloads/rados_api_tests.yaml
\ No newline at end of file
--- /dev/null
+.qa/suites/rados/thrash/workloads/radosbench.yaml
\ No newline at end of file
--- /dev/null
+.qa/suites/rados/thrash/workloads/small-objects.yaml
\ No newline at end of file
--- /dev/null
+.qa/suites/rados/thrash/workloads/snaps-few-objects.yaml
\ No newline at end of file
--- /dev/null
+../smoke/fixed-2.yaml
\ No newline at end of file
--- /dev/null
+.qa/msgr
\ No newline at end of file
--- /dev/null
+overrides:
+ cephadm:
+ cephadm_mode: root
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+os_type: centos
+os_version: "8.3"
+overrides:
+ selinux:
+ whitelist:
+ - scontext=system_u:system_r:logrotate_t:s0
+
+tasks:
+- cephadm:
+ image: quay.ceph.io/ceph-ci/ceph:octopus
+ cephadm_branch: octopus
+ cephadm_git_url: https://github.com/ceph/ceph
+ # avoid --cap-add=PTRACE + --privileged for older cephadm versions
+ allow_ptrace: false
+ # deploy additional mons the "old" (octopus) way
+ add_mons_via_daemon_add: true
+ avoid_pacific_features: true
+
+roles:
+- - mon.a
+ - mon.c
+ - mgr.y
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+ - client.0
+# - ceph.rgw.realm.zone.a # CLI change in v16 pacific
+ - node-exporter.a
+ - alertmanager.a
+- - mon.b
+ - mgr.x
+ - osd.4
+ - osd.5
+ - osd.6
+ - osd.7
+ - client.1
+ - prometheus.a
+ - grafana.a
+ - node-exporter.b
+ - ceph.iscsi.iscsi.a
--- /dev/null
+os_type: ubuntu
+os_version: "20.04"
+
+tasks:
+- cephadm:
+ image: docker.io/ceph/ceph:v15.2.9
+ cephadm_branch: v15.2.9
+ cephadm_git_url: https://github.com/ceph/ceph
+ # avoid --cap-add=PTRACE + --privileged for older cephadm versions
+ allow_ptrace: false
+ # deploy additional mons the "old" (octopus) way
+ add_mons_via_daemon_add: true
+ avoid_pacific_features: true
+
+roles:
+- - mon.a
+ - mon.c
+ - mgr.y
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+ - client.0
+# - ceph.rgw.realm.zone.a # CLI change in v16 pacific
+ - node-exporter.a
+ - alertmanager.a
+- - mon.b
+ - mgr.x
+ - osd.4
+ - osd.5
+ - osd.6
+ - osd.7
+ - client.1
+ - prometheus.a
+ - grafana.a
+ - node-exporter.b
+ - ceph.iscsi.iscsi.a
--- /dev/null
+os_type: ubuntu
+os_version: "20.04"
+
+tasks:
+- cephadm:
+ image: docker.io/ceph/ceph:v15.2.0
+ cephadm_branch: v15.2.0
+ cephadm_git_url: https://github.com/ceph/ceph
+ # avoid --cap-add=PTRACE + --privileged for older cephadm versions
+ allow_ptrace: false
+ # deploy additional mons the "old" (octopus) way
+ add_mons_via_daemon_add: true
+ avoid_pacific_features: true
+
+roles:
+- - mon.a
+ - mon.c
+ - mgr.y
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+ - client.0
+# - ceph.rgw.realm.zone.a # Disabled, needs 15.2.4 as an upgrade start
+ - node-exporter.a
+ - alertmanager.a
+- - mon.b
+ - mgr.x
+ - osd.4
+ - osd.5
+ - osd.6
+ - osd.7
+ - client.1
+ - prometheus.a
+ - grafana.a
+ - node-exporter.b
+# - ceph.iscsi.iscsi.a # needs later start point
--- /dev/null
+tasks:
+- cephadm.shell:
+ mon.a:
+ - ceph config set mgr mgr/cephadm/use_repo_digest false --force
--- /dev/null
+tasks:
+- cephadm.shell:
+ env: [sha1]
+ mon.a:
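+    # create an rgw realm/zone so an rgw service is running during the upgrade, quiet the
+    # insecure global_id warnings raised against the older release, then start the upgrade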
+ - radosgw-admin realm create --rgw-realm=r --default
+ - radosgw-admin zonegroup create --rgw-zonegroup=default --master --default
+ - radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=z --master --default
+ - radosgw-admin period update --rgw-realm=r --commit
+ - ceph orch apply rgw r z --placement=2 --port=8000
+ - sleep 120
+ - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force
+ - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force
+ - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
--- /dev/null
+tasks:
+- cephadm.shell:
+ env: [sha1]
+ mon.a:
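+    # poll until the upgrade finishes, then verify every daemon reports the target sha1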
+ - while ceph orch upgrade status | jq '.in_progress' | grep true ; do ceph orch ps ; ceph versions ; sleep 30 ; done
+ - ceph orch ps
+ - ceph versions
+ - echo "wait for servicemap items w/ changing names to refresh"
+ - sleep 60
+ - ceph orch ps
+ - ceph versions
+ - ceph versions | jq -e '.overall | length == 1'
+ - ceph versions | jq -e '.overall | keys' | grep $sha1
--- /dev/null
+.qa/mon_election
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+../smoke/distro/
\ No newline at end of file
--- /dev/null
+../smoke/fixed-2.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+overrides:
+ cephadm:
+ cephadm_mode: cephadm-package
+ install:
+ extra_packages: [cephadm]
--- /dev/null
+overrides:
+ cephadm:
+ cephadm_mode: root
--- /dev/null
+.qa/mon_election
\ No newline at end of file
--- /dev/null
+.qa/msgr
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+- cephadm:
+ conf:
+ mgr:
+ debug ms: 1
+ debug mgr: 20
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/suites/rados/basic/tasks/rados_api_tests.yaml
\ No newline at end of file
--- /dev/null
+.qa/suites/rados/basic/tasks/rados_python.yaml
\ No newline at end of file
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+.qa/distros/podman/centos_8.2_kubic_stable.yaml
\ No newline at end of file
--- /dev/null
+.qa/mon_election
\ No newline at end of file
--- /dev/null
+roles:
+- [mon.a, mgr.x, osd.0, client.0]
+tasks:
+- install:
+- exec:
+ mon.a:
+ - yum install -y python3 || apt install -y python3
+- workunit:
+ clients:
+ client.0:
+ - cephadm/test_adoption.sh
--- /dev/null
+roles:
+- [mon.a, mgr.x, osd.0, client.0]
+tasks:
+- install:
+- exec:
+ mon.a:
+ - yum install -y python3 || apt install -y python3
+- workunit:
+ clients:
+ client.0:
+ - cephadm/test_cephadm.sh
--- /dev/null
+roles:
+- [mon.a, mgr.x, osd.0, client.0]
+tasks:
+- workunit:
+ no_coverage_and_limits: true
+ clients:
+ client.0:
+ - cephadm/test_repos.sh
--- /dev/null
+roles:
+- - host.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - mon.a
+ - mgr.a
+ - client.0
+tasks:
+- install:
+- cephadm:
+- cephadm.shell:
+ host.a:
+ - ceph orch apply mds a
+- cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_nfs
+ - tasks.cephadm_cases.test_cli
--- /dev/null
+../orch/cephadm
\ No newline at end of file
+++ /dev/null
-../.qa
\ No newline at end of file
+++ /dev/null
-../.qa
\ No newline at end of file
+++ /dev/null
-../.qa
\ No newline at end of file
+++ /dev/null
-.qa/distros/podman/centos_8.2_kubic_stable.yaml
\ No newline at end of file
+++ /dev/null
-roles:
-# 3 osd roles on host.a is required for cephadm task. It checks if the cluster is healthy.
-# More daemons will be deployed on both hosts in e2e tests.
-- - host.a
- - osd.0
- - osd.1
- - osd.2
- - mon.a
- - mgr.a
- - client.0
-- - host.b
- - client.1
-tasks:
-- install:
-- cephadm:
-- workunit:
- clients:
- client.1:
- - cephadm/create_iscsi_disks.sh
-- workunit:
- clients:
- client.0:
- - cephadm/test_dashboard_e2e.sh
+++ /dev/null
-../.qa
\ No newline at end of file
+++ /dev/null
-../smoke/distro
\ No newline at end of file
+++ /dev/null
-.qa/clusters/2-node-mgr.yaml
\ No newline at end of file
+++ /dev/null
-
-tasks:
- - install:
- - ceph:
- # tests may leave mgrs broken, so don't try and call into them
- # to invoke e.g. pg dump during teardown.
- wait-for-scrub: false
- log-ignorelist:
- - overall HEALTH_
- - \(MGR_DOWN\)
- - \(DEVICE_IDENT_ON\)
- - \(DEVICE_FAULT_ON\)
- - \(PG_
- - replacing it with standby
- - No standby daemons available
- - cephfs_test_runner:
- modules:
- - tasks.mgr.test_orchestrator_cli
\ No newline at end of file
+++ /dev/null
-../.qa
\ No newline at end of file
+++ /dev/null
-../smoke/distro
\ No newline at end of file
+++ /dev/null
-tasks:
-- cephadm:
- roleless: true
-- cephadm.shell:
- host.a:
- - ceph orch status
- - ceph orch ps
- - ceph orch ls
- - ceph orch host ls
- - ceph orch device ls
-roles:
-- - host.a
- - osd.0
- - osd.1
- - osd.2
- - osd.3
- - client.0
-- - host.b
- - osd.4
- - osd.5
- - osd.6
- - osd.7
- - client.1
-openstack:
-- volumes: # attached to each instance
- count: 4
- size: 10 # GB
-overrides:
- ceph:
- conf:
- osd:
- osd shutdown pgref assert: true
+++ /dev/null
-tasks:
-- cephadm.shell:
- host.a:
- - ceph orch host label add `hostname` foo
- - ceph auth get-or-create client.foo mon 'allow r'
- - ceph orch client-keyring set client.foo label:foo --mode 770 --owner 11111:22222
-- exec:
- host.a:
- - while ! test -e /etc/ceph/ceph.client.foo.keyring ; do sleep 1 ; done
- - ls -al /etc/ceph/ceph.client.foo.keyring | grep rwxrwx---
- - ls -al /etc/ceph/ceph.client.foo.keyring | grep 11111
- - ls -al /etc/ceph/ceph.client.foo.keyring | grep 22222
- - test -e /etc/ceph/ceph.conf
-- exec:
- host.b:
- - test ! -e /etc/ceph/ceph.client.foo.keyring
-- cephadm.shell:
- host.b:
- - ceph orch host label add `hostname` foo
-- exec:
- host.b:
- - while ! test -e /etc/ceph/ceph.client.foo.keyring ; do sleep 1 ; done
- - ls -al /etc/ceph/ceph.client.foo.keyring | grep rwxrwx---
- - ls -al /etc/ceph/ceph.client.foo.keyring | grep 11111
- - ls -al /etc/ceph/ceph.client.foo.keyring | grep 22222
-- cephadm.shell:
- host.b:
- - ceph orch host label rm `hostname` foo
-- exec:
- host.b:
- - while test -e /etc/ceph/ceph.client.foo.keyring ; do sleep 1 ; done
-- exec:
- host.a:
- - test -e /etc/ceph/ceph.client.foo.keyring
-- cephadm.shell:
- host.a:
- - ceph orch client-keyring rm client.foo
-- exec:
- host.a:
- - while test -e /etc/ceph/ceph.client.foo.keyring ; do sleep 1 ; done
+++ /dev/null
-tasks:
-- cephadm.shell:
- host.a:
- - ceph osd pool create foo
- - rbd pool init foo
- - ceph orch apply iscsi foo u p
-- cephadm.wait_for_service:
- service: iscsi.foo
+++ /dev/null
-tasks:
-- cephadm.shell:
- host.a:
- - ceph orch apply rbd-mirror "--placement=*"
- - ceph orch apply cephfs-mirror "--placement=*"
-- cephadm.wait_for_service:
- service: rbd-mirror
-- cephadm.wait_for_service:
- service: cephfs-mirror
+++ /dev/null
-tasks:
-- vip:
-
-# make sure cephadm notices the new IP
-- cephadm.shell:
- host.a:
- - ceph orch device ls --refresh
-
-# deploy rgw + ingress
-- cephadm.apply:
- specs:
- - service_type: rgw
- service_id: foo
- placement:
- count: 4
- host_pattern: "*"
- spec:
- rgw_frontend_port: 8000
- - service_type: ingress
- service_id: rgw.foo
- placement:
- count: 2
- spec:
- backend_service: rgw.foo
- frontend_port: 9000
- monitor_port: 9001
- virtual_ip: "{{VIP0}}/{{VIPPREFIXLEN}}"
-- cephadm.wait_for_service:
- service: rgw.foo
-- cephadm.wait_for_service:
- service: ingress.rgw.foo
-
-# take each component down in turn and ensure things still work
-- cephadm.shell:
- host.a:
- - |
- echo "Check while healthy..."
- curl http://{{VIP0}}:9000/
-
- # stop each rgw in turn
- echo "Check with each rgw stopped in turn..."
- for rgw in `ceph orch ps | grep ^rgw.foo. | awk '{print $1}'`; do
- ceph orch daemon stop $rgw
- while ! ceph orch ps | grep $rgw | grep stopped; do sleep 1 ; done
- while ! curl http://{{VIP0}}:9000/ ; do sleep 1 ; done
- ceph orch daemon start $rgw
- while ! ceph orch ps | grep $rgw | grep running; do sleep 1 ; done
- done
-
- # stop each haproxy in turn
- echo "Check with each haproxy down in turn..."
- for haproxy in `ceph orch ps | grep ^haproxy.rgw.foo. | awk '{print $1}'`; do
- ceph orch daemon stop $haproxy
- while ! ceph orch ps | grep $haproxy | grep stopped; do sleep 1 ; done
- while ! curl http://{{VIP0}}:9000/ ; do sleep 1 ; done
- ceph orch daemon start $haproxy
- while ! ceph orch ps | grep $haproxy | grep running; do sleep 1 ; done
- done
-
- while ! curl http://{{VIP0}}:9000/ ; do sleep 1 ; done
+++ /dev/null
-tasks:
-- cephadm.apply:
- specs:
- - service_type: rgw
- service_id: foo
- placement:
- count_per_host: 4
- host_pattern: "*"
- spec:
- rgw_frontend_port: 8000
-- cephadm.wait_for_service:
- service: rgw.foo
+++ /dev/null
-tasks:
-- cephadm.shell:
- host.a:
- - ceph orch status
- - ceph orch ps
- - ceph orch ls
- - ceph orch host ls
- - ceph orch device ls
+++ /dev/null
-../smoke/distro
\ No newline at end of file
+++ /dev/null
-tasks:
-- cephadm:
- roleless: true
- single_host_defaults: true
-- cephadm.shell:
- host.a:
- - ceph orch status
- - ceph orch ps
- - ceph orch ls
- - ceph orch host ls
- - ceph orch device ls
-roles:
-- - host.a
- - osd.0
- - osd.1
- - osd.2
- - osd.3
- - client.0
-openstack:
-- volumes: # attached to each instance
- count: 4
- size: 10 # GB
-overrides:
- ceph:
- conf:
- osd:
- osd shutdown pgref assert: true
+++ /dev/null
-tasks:
-- cephadm.apply:
- specs:
- - service_type: rgw
- service_id: foo
- placement:
- count_per_host: 4
- host_pattern: "*"
- spec:
- rgw_frontend_port: 8000
-- cephadm.wait_for_service:
- service: rgw.foo
+++ /dev/null
-tasks:
-- cephadm.shell:
- host.a:
- - ceph orch status
- - ceph orch ps
- - ceph orch ls
- - ceph orch host ls
- - ceph orch device ls
+++ /dev/null
-../.qa
\ No newline at end of file
+++ /dev/null
-../.qa
\ No newline at end of file
+++ /dev/null
-.qa/distros/podman/centos_8.2_kubic_stable.yaml
\ No newline at end of file
+++ /dev/null
-.qa/distros/podman/rhel_8.3_kubic_stable.yaml
\ No newline at end of file
+++ /dev/null
-.qa/distros/all/ubuntu_18.04.yaml
\ No newline at end of file
+++ /dev/null
-.qa/distros/all/ubuntu_20.04.yaml
\ No newline at end of file
+++ /dev/null
-roles:
-- - mon.a
- - mon.c
- - mgr.y
- - osd.0
- - osd.1
- - osd.2
- - osd.3
- - client.0
- - ceph.rgw.foo.a
- - node-exporter.a
- - alertmanager.a
-- - mon.b
- - mgr.x
- - osd.4
- - osd.5
- - osd.6
- - osd.7
- - client.1
- - prometheus.a
- - grafana.a
- - node-exporter.b
- - ceph.iscsi.iscsi.a
-openstack:
-- volumes: # attached to each instance
- count: 4
- size: 10 # GB
-overrides:
- ceph:
- conf:
- osd:
- osd shutdown pgref assert: true
+++ /dev/null
-.qa/mon_election
\ No newline at end of file
+++ /dev/null
-tasks:
-- cephadm:
- conf:
- mgr:
- debug ms: 1
- debug mgr: 20
-- cephadm.shell:
- mon.a:
- - ceph orch status
- - ceph orch ps
- - ceph orch ls
- - ceph orch host ls
- - ceph orch device ls
- - ceph orch ls --format yaml
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-.qa/overrides/2-size-2-min-size.yaml
\ No newline at end of file
+++ /dev/null
-.qa/overrides/3-size-2-min-size.yaml
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- mon_bind_msgr2: false
- log-ignorelist:
- - \(MON_DOWN\)
- conf:
- global:
- ms type: async
- ms bind msgr2: false
-tasks:
-- install:
- branch: luminous
- exclude_packages:
- - librados3
- - ceph-mgr-dashboard
- - ceph-mgr-diskprediction-local
- - ceph-mgr-rook
- - ceph-mgr-cephadm
- - cephadm
- - ceph-immutable-object-cache
- - ceph-base
- - python3-rados
- - python3-rgw
- - python3-rbd
- - python3-cephfs
- - librados-devel
- extra_packages:
- - librados2
- - python-rados
- - python-rgw
- - python-rbd
- - python-cephfs
+++ /dev/null
-overrides:
- ceph:
- mon_bind_msgr2: false
- log-ignorelist:
- - \(MON_DOWN\)
- conf:
- global:
- mon warn on msgr2 not enabled: false
-tasks:
-- install:
- branch: luminous
- exclude_packages:
- - librados3
- - ceph-mgr-dashboard
- - ceph-mgr-diskprediction-local
- - ceph-mgr-rook
- - ceph-mgr-cephadm
- - cephadm
- - ceph-immutable-object-cache
- - ceph-base
- - python3-rados
- - python3-rgw
- - python3-rbd
- - python3-cephfs
- - librados-devel
- extra_packages:
- - librados2
- - python-rados
- - python-rgw
- - python-rbd
- - python-cephfs
+++ /dev/null
-overrides:
- ceph:
- mon_bind_msgr2: false
- log-ignorelist:
- - \(MON_DOWN\)
- conf:
- global:
- ms type: async
- ms bind msgr2: false
-tasks:
-- install:
- branch: mimic
- exclude_packages:
- - librados3
- - ceph-mgr-dashboard
- - ceph-mgr-diskprediction-local
- - ceph-mgr-rook
- - ceph-mgr-cephadm
- - cephadm
- - ceph-immutable-object-cache
- - ceph-base
- - python3-rados
- - python3-rgw
- - python3-rbd
- - python3-cephfs
- - librados-devel
- extra_packages:
- - librados2
- - python-rados
- - python-rgw
- - python-rbd
- - python-cephfs
+++ /dev/null
-overrides:
- ceph:
- mon_bind_msgr2: false
- log-ignorelist:
- - \(MON_DOWN\)
- conf:
- global:
- ms type: async
- mon warn on msgr2 not enabled: false
-tasks:
-- install:
- branch: mimic
- exclude_packages:
- - librados3
- - ceph-mgr-dashboard
- - ceph-mgr-diskprediction-local
- - ceph-mgr-rook
- - ceph-mgr-cephadm
- - cephadm
- - ceph-immutable-object-cache
- - ceph-base
- - python3-rados
- - python3-rgw
- - python3-rbd
- - python3-cephfs
- - librados-devel
- extra_packages:
- - librados2
- - python-rados
- - python-rgw
- - python-rbd
- - python-cephfs
+++ /dev/null
-overrides:
- ceph:
- mon_bind_msgr2: false
- log-ignorelist:
- - \(MON_DOWN\)
- conf:
- global:
- ms type: async
- ms bind msgr2: false
-tasks:
-- install:
- branch: nautilus
- exclude_packages:
- - cephadm
- - ceph-mgr-cephadm
- - ceph-immutable-object-cache
- - python3-rados
- - python3-rgw
- - python3-rbd
- - python3-cephfs
- extra_packages:
- - python-rados
- - python-rgw
- - python-rbd
- - python-cephfs
+++ /dev/null
-overrides:
- ceph:
- log-ignorelist:
- - \(MON_DOWN\)
- conf:
- global:
- ms type: async
- ms bind msgr2: true
- ms bind msgr1: false
-tasks:
-- install:
- branch: nautilus
- exclude_packages:
- - cephadm
- - ceph-mgr-cephadm
- - ceph-immutable-object-cache
- - python3-rados
- - python3-rgw
- - python3-rbd
- - python3-cephfs
- extra_packages:
- - python-rados
- - python-rgw
- - python-rbd
- - python-cephfs
+++ /dev/null
-overrides:
- ceph:
- log-ignorelist:
- - \(MON_DOWN\)
-tasks:
-- install:
- branch: nautilus
- exclude_packages:
- - cephadm
- - ceph-mgr-cephadm
- - ceph-immutable-object-cache
- - python3-rados
- - python3-rgw
- - python3-rbd
- - python3-cephfs
- extra_packages:
- - python-rados
- - python-rgw
- - python-rbd
- - python-cephfs
+++ /dev/null
-overrides:
- ceph:
- log-ignorelist:
- - \(MON_DOWN\)
-tasks:
-- install:
- branch: octopus
- exclude_packages:
- - ceph-mgr-dashboard
- - ceph-mgr-diskprediction-local
- - ceph-mgr-rook
- - ceph-mgr-cephadm
- - ceph-base-debuginfo
- - ceph-common-debuginfo
- - ceph-immutable-object-cache-debuginfo
- - ceph-radosgw-debuginfo
- - ceph-test-debuginfo
- - ceph-base-debuginfo
- - ceph-mgr-debuginfo
- - ceph-mds-debuginfo
- - ceph-mon-debuginfo
- - ceph-osd-debuginfo
- - ceph-fuse-debuginfo
- - librados-devel-debuginfo
- - libcephfs2-debuginfo
- - librados2-debuginfo
- - librbd1-debuginfo
- - python3-cephfs-debuginfo
- - python3-rados-debuginfo
- - python3-rbd-debuginfo
- - python3-rgw-debuginfo
- - rbd-fuse-debuginfo
- - rbd-mirror-debuginfo
- - rbd-nbd-debuginfo
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- osd:
- osd backoff on peering: true
+++ /dev/null
-overrides:
- ceph:
- conf:
- osd:
- osd backoff on peering: true
- osd backoff on degraded: true
+++ /dev/null
-# Don't verify os + flavor + sha1
-verify_ceph_hash: false
-tasks:
-- cephadm:
- conf:
- mon:
- auth allow insecure global id reclaim: true
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-openstack:
- - volumes: # attached to each instance
- count: 4
- size: 30 # GB
+++ /dev/null
-roles:
-- [mon.a, mgr.y, osd.0, osd.1, osd.2, osd.3, client.0]
-- [mon.b, mgr.x, osd.4, osd.5, osd.6, osd.7, client.1]
-- [mon.c, osd.8, osd.9, osd.10, osd.11, client.2]
-openstack:
-- volumes: # attached to each instance
- count: 4
- size: 10 # GB
-overrides:
- ceph:
- conf:
- osd:
- osd shutdown pgref assert: true
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-tasks:
-- exec:
- mon.a:
- - while ! ceph balancer status ; do sleep 1 ; done
- - ceph balancer mode crush-compat
- - ceph balancer on
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-.qa/distros/all/ubuntu_18.04.yaml
\ No newline at end of file
+++ /dev/null
-.qa/mon_election
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 2500
- ms tcp read timeout: 5
- mon client directed command retry: 5
- log-ignorelist:
- - \(OSD_SLOW_PING_TIME
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 5000
- mon client directed command retry: 5
- osd:
- osd heartbeat use min delay socket: true
- log-ignorelist:
- - \(OSD_SLOW_PING_TIME
+++ /dev/null
-overrides:
- ceph:
- conf:
- global:
- ms inject socket failures: 2500
- ms inject delay type: osd
- ms inject delay probability: .005
- ms inject delay max: 1
- ms inject internal delays: .002
- mon client directed command retry: 5
- log-ignorelist:
- - \(OSD_SLOW_PING_TIME
+++ /dev/null
-.qa/config/rados.yaml
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- log-ignorelist:
- - but it is still running
- - objects unfound and apparently lost
- conf:
- osd:
- osd debug reject backfill probability: .3
- osd scrub min interval: 60
- osd scrub max interval: 120
- osd max backfills: 3
- osd snap trim sleep: 2
- mon:
- mon min osdmap epochs: 50
- paxos service trim min: 10
- # prune full osdmaps regularly
- mon osdmap full prune min: 15
- mon osdmap full prune interval: 2
- mon osdmap full prune txsize: 2
-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
- aggressive_pg_num_changes: false
+++ /dev/null
-overrides:
- ceph:
- log-ignorelist:
- - but it is still running
- - objects unfound and apparently lost
- conf:
- osd:
- osd debug reject backfill probability: .3
- osd scrub min interval: 60
- osd scrub max interval: 120
- osd max backfills: 3
- osd snap trim sleep: 2
- mon:
- mon min osdmap epochs: 50
- paxos service trim min: 10
- # prune full osdmaps regularly
- mon osdmap full prune min: 15
- mon osdmap full prune interval: 2
- mon osdmap full prune txsize: 2
-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgpnum_fix: 1
+++ /dev/null
-overrides:
- ceph:
- log-ignorelist:
- - but it is still running
- - objects unfound and apparently lost
- - osd_map_cache_size
- conf:
- mon:
- mon min osdmap epochs: 50
- paxos service trim min: 10
- # prune full osdmaps regularly
- mon osdmap full prune min: 15
- mon osdmap full prune interval: 2
- mon osdmap full prune txsize: 2
- osd:
- osd map cache size: 1
- osd scrub min interval: 60
- osd scrub max interval: 120
- osd scrub during recovery: false
- osd max backfills: 6
-tasks:
-- thrashosds:
- timeout: 1800
- chance_pgnum_grow: 0.25
- chance_pgpnum_fix: 0.25
- chance_test_map_discontinuity: 2
+++ /dev/null
-overrides:
- ceph:
- conf:
- osd:
- osd scrub min interval: 60
- osd scrub max interval: 120
- journal throttle high multiple: 2
- journal throttle max multiple: 10
- filestore queue throttle high multiple: 2
- filestore queue throttle max multiple: 10
- osd max backfills: 9
- log-ignorelist:
- - but it is still running
- - objects unfound and apparently lost
-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 3
- chance_pgpnum_fix: 1
-openstack:
-- volumes:
- size: 50
+++ /dev/null
-overrides:
- ceph:
- log-ignorelist:
- - but it is still running
- - objects unfound and apparently lost
- conf:
- osd:
- osd scrub min interval: 60
- osd scrub max interval: 120
- filestore odsync write: true
- osd max backfills: 2
- osd snap trim sleep: .5
- mon:
- mon min osdmap epochs: 50
- paxos service trim min: 10
- # prune full osdmaps regularly
- mon osdmap full prune min: 15
- mon osdmap full prune interval: 2
- mon osdmap full prune txsize: 2
-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 2
- chance_pgpnum_fix: 1
+++ /dev/null
-.qa/tasks/thrashosds-health.yaml
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-overrides:
- ceph:
- log-ignorelist:
- - must scrub before tier agent can activate
-tasks:
-- exec:
- client.0:
- - sudo ceph osd pool create base 4
- - sudo ceph osd pool application enable base rados
- - sudo ceph osd pool create cache 4
- - sudo ceph osd tier add base cache
- - sudo ceph osd tier cache-mode cache writeback
- - sudo ceph osd tier set-overlay base cache
- - sudo ceph osd pool set cache hit_set_type bloom
- - sudo ceph osd pool set cache hit_set_count 8
- - sudo ceph osd pool set cache hit_set_period 3600
- - sudo ceph osd pool set cache target_max_objects 250
- - sudo ceph osd pool set cache min_read_recency_for_promote 2
-- rados:
- clients: [client.2]
- pools: [base]
- ops: 4000
- objects: 500
- op_weights:
- read: 100
- write: 100
- delete: 50
- copy_from: 50
- cache_flush: 50
- cache_try_flush: 50
- cache_evict: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
+++ /dev/null
-overrides:
- ceph:
- conf:
- client.2:
- debug ms: 1
- debug objecter: 20
- debug rados: 20
-tasks:
-- full_sequential:
- - radosbench:
- clients: [client.2]
- time: 90
- - radosbench:
- clients: [client.2]
- time: 90
- - radosbench:
- clients: [client.2]
- time: 90
- - radosbench:
- clients: [client.2]
- time: 90
- - radosbench:
- clients: [client.2]
- time: 90
- - radosbench:
- clients: [client.2]
- time: 90
- - radosbench:
- clients: [client.2]
- time: 90
- - radosbench:
- clients: [client.2]
- time: 90
+++ /dev/null
-meta:
-- desc: |
- rbd object class functional tests
-tasks:
-- exec:
- client.2:
- - ceph_test_cls_rbd --gtest_filter=-TestClsRbd.get_features:TestClsRbd.parents:TestClsRbd.mirror
+++ /dev/null
-tasks:
-- rados:
- clients: [client.2]
- ops: 4000
- objects: 50
- op_weights:
- read: 100
- write: 100
- delete: 50
- snap_create: 50
- snap_remove: 50
- rollback: 50
- copy_from: 50
+++ /dev/null
-meta:
-- desc: |
- librbd C and C++ api tests
-workload:
-- workunit:
- clients:
- client.2:
- - rbd/test_librbd.sh
+++ /dev/null
-../.qa
\ No newline at end of file
+++ /dev/null
-../workunits/0-distro
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
-- cephadm:
- conf:
- mgr:
- debug ms: 1
- debug mgr: 20
+++ /dev/null
-overrides:
- ceph:
- log-ignorelist:
- - but it is still running
- - objects unfound and apparently lost
- conf:
- osd:
- osd debug reject backfill probability: .3
- osd scrub min interval: 60
- osd scrub max interval: 120
- osd max backfills: 3
- osd snap trim sleep: 2
- osd delete sleep: 1
- mon:
- mon min osdmap epochs: 50
- paxos service trim min: 10
- # prune full osdmaps regularly
- mon osdmap full prune min: 15
- mon osdmap full prune interval: 2
- mon osdmap full prune txsize: 2
-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgnum_shrink: 1
- chance_pgpnum_fix: 1
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../../../thrash/workloads/rados_api_tests.yaml
\ No newline at end of file
+++ /dev/null
-../../../thrash/workloads/radosbench.yaml
\ No newline at end of file
+++ /dev/null
-../../../thrash/workloads/small-objects.yaml
\ No newline at end of file
+++ /dev/null
-../../../thrash/workloads/snaps-few-objects.yaml
\ No newline at end of file
+++ /dev/null
-../smoke/fixed-2.yaml
\ No newline at end of file
+++ /dev/null
-.qa/msgr
\ No newline at end of file
+++ /dev/null
-overrides:
- cephadm:
- cephadm_mode: root
+++ /dev/null
-../.qa
\ No newline at end of file
+++ /dev/null
-os_type: centos
-os_version: "8.3"
-overrides:
- selinux:
- whitelist:
- - scontext=system_u:system_r:logrotate_t:s0
-
-tasks:
-- cephadm:
- image: quay.ceph.io/ceph-ci/ceph:octopus
- cephadm_branch: octopus
- cephadm_git_url: https://github.com/ceph/ceph
- # avoid --cap-add=PTRACE + --privileged for older cephadm versions
- allow_ptrace: false
- # deploy additional mons the "old" (octopus) way
- add_mons_via_daemon_add: true
- avoid_pacific_features: true
-
-
-roles:
-- - mon.a
- - mon.c
- - mgr.y
- - osd.0
- - osd.1
- - osd.2
- - osd.3
- - client.0
-# - ceph.rgw.realm.zone.a # CLI change in v16 pacific
- - node-exporter.a
- - alertmanager.a
-- - mon.b
- - mgr.x
- - osd.4
- - osd.5
- - osd.6
- - osd.7
- - client.1
- - prometheus.a
- - grafana.a
- - node-exporter.b
- - ceph.iscsi.iscsi.a
+++ /dev/null
-os_type: ubuntu
-os_version: "20.04"
-
-tasks:
-- cephadm:
- image: docker.io/ceph/ceph:v15.2.9
- cephadm_branch: v15.2.9
- cephadm_git_url: https://github.com/ceph/ceph
- # avoid --cap-add=PTRACE + --privileged for older cephadm versions
- allow_ptrace: false
- # deploy additional mons the "old" (octopus) way
- add_mons_via_daemon_add: true
- avoid_pacific_features: true
-
-roles:
-- - mon.a
- - mon.c
- - mgr.y
- - osd.0
- - osd.1
- - osd.2
- - osd.3
- - client.0
-# - ceph.rgw.realm.zone.a # CLI change in v16 pacific
- - node-exporter.a
- - alertmanager.a
-- - mon.b
- - mgr.x
- - osd.4
- - osd.5
- - osd.6
- - osd.7
- - client.1
- - prometheus.a
- - grafana.a
- - node-exporter.b
- - ceph.iscsi.iscsi.a
+++ /dev/null
-os_type: ubuntu
-os_version: "20.04"
-
-tasks:
-- cephadm:
- image: docker.io/ceph/ceph:v15.2.0
- cephadm_branch: v15.2.0
- cephadm_git_url: https://github.com/ceph/ceph
- # avoid --cap-add=PTRACE + --privileged for older cephadm versions
- allow_ptrace: false
- # deploy additional mons the "old" (octopus) way
- add_mons_via_daemon_add: true
- avoid_pacific_features: true
-
-roles:
-- - mon.a
- - mon.c
- - mgr.y
- - osd.0
- - osd.1
- - osd.2
- - osd.3
- - client.0
-# - ceph.rgw.realm.zone.a # Disabled, needs 15.2.4 as an upgrade start
- - node-exporter.a
- - alertmanager.a
-- - mon.b
- - mgr.x
- - osd.4
- - osd.5
- - osd.6
- - osd.7
- - client.1
- - prometheus.a
- - grafana.a
- - node-exporter.b
-# - ceph.iscsi.iscsi.a # needs later start point
+++ /dev/null
-tasks:
-- cephadm.shell:
- mon.a:
- - ceph config set mgr mgr/cephadm/use_repo_digest false --force
+++ /dev/null
-tasks:
-- cephadm.shell:
- env: [sha1]
- mon.a:
- - radosgw-admin realm create --rgw-realm=r --default
- - radosgw-admin zonegroup create --rgw-zonegroup=default --master --default
- - radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=z --master --default
- - radosgw-admin period update --rgw-realm=r --commit
- - ceph orch apply rgw r z --placement=2 --port=8000
- - sleep 120
- - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force
- - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force
- - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
+++ /dev/null
-tasks:
-- cephadm.shell:
- env: [sha1]
- mon.a:
- - while ceph orch upgrade status | jq '.in_progress' | grep true ; do ceph orch ps ; ceph versions ; sleep 30 ; done
- - ceph orch ps
- - ceph versions
- - echo "wait for servicemap items w/ changing names to refresh"
- - sleep 60
- - ceph orch ps
- - ceph versions
- - ceph versions | jq -e '.overall | length == 1'
- - ceph versions | jq -e '.overall | keys' | grep $sha1
+++ /dev/null
-.qa/mon_election
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../smoke/distro/
\ No newline at end of file
+++ /dev/null
-../smoke/fixed-2.yaml
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-overrides:
- cephadm:
- cephadm_mode: cephadm-package
- install:
- extra_packages: [cephadm]
+++ /dev/null
-overrides:
- cephadm:
- cephadm_mode: root
+++ /dev/null
-.qa/mon_election
\ No newline at end of file
+++ /dev/null
-.qa/msgr
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
-- cephadm:
- conf:
- mgr:
- debug ms: 1
- debug mgr: 20
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../../../basic/tasks/rados_api_tests.yaml
\ No newline at end of file
+++ /dev/null
-../../../basic/tasks/rados_python.yaml
\ No newline at end of file
+++ /dev/null
-../.qa
\ No newline at end of file
+++ /dev/null
-../.qa
\ No newline at end of file
+++ /dev/null
-.qa/distros/podman/centos_8.2_kubic_stable.yaml
\ No newline at end of file
+++ /dev/null
-.qa/mon_election
\ No newline at end of file
+++ /dev/null
-roles:
-- [mon.a, mgr.x, osd.0, client.0]
-tasks:
-- install:
-- exec:
- mon.a:
- - yum install -y python3 || apt install -y python3
-- workunit:
- clients:
- client.0:
- - cephadm/test_adoption.sh
+++ /dev/null
-roles:
-- [mon.a, mgr.x, osd.0, client.0]
-tasks:
-- install:
-- exec:
- mon.a:
- - yum install -y python3 || apt install -y python3
-- workunit:
- clients:
- client.0:
- - cephadm/test_cephadm.sh
+++ /dev/null
-roles:
-- [mon.a, mgr.x, osd.0, client.0]
-tasks:
-- workunit:
- no_coverage_and_limits: true
- clients:
- client.0:
- - cephadm/test_repos.sh
+++ /dev/null
-roles:
-- - host.a
- - osd.0
- - osd.1
- - osd.2
- - mon.a
- - mgr.a
- - client.0
-tasks:
-- install:
-- cephadm:
-- cephadm.shell:
- host.a:
- - ceph orch apply mds a
-- cephfs_test_runner:
- modules:
- - tasks.cephfs.test_nfs
- - tasks.cephadm_cases.test_cli