--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+.qa/distros/podman/centos_8.2_kubic_stable.yaml
\ No newline at end of file
--- /dev/null
+roles:
+# 3 osd roles on host.a are required for the cephadm task, which checks that the cluster is healthy.
+# More daemons will be deployed on both hosts in e2e tests.
+- - host.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - mon.a
+ - mgr.a
+ - client.0
+- - host.b
+ - client.1
+tasks:
+- install:
+- cephadm:
+- workunit:
+ clients:
+ client.1:
+ - cephadm/create_iscsi_disks.sh
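+    # create_iscsi_disks.sh prepares loopback disks for the dashboard e2e run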
+- workunit:
+ clients:
+ client.0:
+ - cephadm/test_dashboard_e2e.sh
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+../smoke/distro
\ No newline at end of file
--- /dev/null
+.qa/clusters/2-node-mgr.yaml
\ No newline at end of file
--- /dev/null
+tasks:
+ - install:
+ - ceph:
+      # tests may leave mgrs broken, so don't try to call into them
+ # to invoke e.g. pg dump during teardown.
+ wait-for-scrub: false
+ log-ignorelist:
+ - overall HEALTH_
+ - \(MGR_DOWN\)
+ - \(DEVICE_IDENT_ON\)
+ - \(DEVICE_FAULT_ON\)
+ - \(PG_
+ - replacing it with standby
+ - No standby daemons available
+ - cephfs_test_runner:
+ modules:
+ - tasks.mgr.test_orchestrator_cli
\ No newline at end of file
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+../smoke/distro
\ No newline at end of file
--- /dev/null
+tasks:
+- cephadm:
+ roleless: true
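+    # roleless: bootstrap only; daemons come from orchestrator service specs
+    # rather than the teuthology role list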
+- cephadm.shell:
+ host.a:
+ - ceph orch status
+ - ceph orch ps
+ - ceph orch ls
+ - ceph orch host ls
+ - ceph orch device ls
+roles:
+- - host.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+ - client.0
+- - host.b
+ - osd.4
+ - osd.5
+ - osd.6
+ - osd.7
+ - client.1
+openstack:
+- volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+overrides:
+ ceph:
+ conf:
+ osd:
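+        # make OSDs assert if PG references are still held at shutdown (catches ref-count leaks)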
+ osd shutdown pgref assert: true
--- /dev/null
+tasks:
+- cephadm.shell:
+ host.a:
+ - ceph orch host label add `hostname` foo
+ - ceph auth get-or-create client.foo mon 'allow r'
+ - ceph orch client-keyring set client.foo label:foo --mode 770 --owner 11111:22222
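+# cephadm should now distribute the keyring (mode 0770, owner 11111:22222) to
+# every host carrying the "foo" label: host.a right away, host.b only once labeled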
+- exec:
+ host.a:
+ - while ! test -e /etc/ceph/ceph.client.foo.keyring ; do sleep 1 ; done
+ - ls -al /etc/ceph/ceph.client.foo.keyring | grep rwxrwx---
+ - ls -al /etc/ceph/ceph.client.foo.keyring | grep 11111
+ - ls -al /etc/ceph/ceph.client.foo.keyring | grep 22222
+ - test -e /etc/ceph/ceph.conf
+- exec:
+ host.b:
+ - test ! -e /etc/ceph/ceph.client.foo.keyring
+- cephadm.shell:
+ host.b:
+ - ceph orch host label add `hostname` foo
+- exec:
+ host.b:
+ - while ! test -e /etc/ceph/ceph.client.foo.keyring ; do sleep 1 ; done
+ - ls -al /etc/ceph/ceph.client.foo.keyring | grep rwxrwx---
+ - ls -al /etc/ceph/ceph.client.foo.keyring | grep 11111
+ - ls -al /etc/ceph/ceph.client.foo.keyring | grep 22222
+- cephadm.shell:
+ host.b:
+ - ceph orch host label rm `hostname` foo
+- exec:
+ host.b:
+ - while test -e /etc/ceph/ceph.client.foo.keyring ; do sleep 1 ; done
+- exec:
+ host.a:
+ - test -e /etc/ceph/ceph.client.foo.keyring
+- cephadm.shell:
+ host.a:
+ - ceph orch client-keyring rm client.foo
+- exec:
+ host.a:
+ - while test -e /etc/ceph/ceph.client.foo.keyring ; do sleep 1 ; done
--- /dev/null
+tasks:
+- cephadm.shell:
+ host.a:
+ - ceph osd pool create foo
+ - rbd pool init foo
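+    # "u" and "p" are placeholder api_user / api_password arguments for the iscsi service spec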
+ - ceph orch apply iscsi foo u p
+- cephadm.wait_for_service:
+ service: iscsi.foo
--- /dev/null
+tasks:
+- cephadm.shell:
+ host.a:
+ - ceph orch apply rbd-mirror "--placement=*"
+ - ceph orch apply cephfs-mirror "--placement=*"
+- cephadm.wait_for_service:
+ service: rbd-mirror
+- cephadm.wait_for_service:
+ service: cephfs-mirror
--- /dev/null
+tasks:
+- vip:
+
+# make sure cephadm notices the new IP
+- cephadm.shell:
+ host.a:
+ - ceph orch device ls --refresh
+
+# deploy rgw + ingress
+- cephadm.apply:
+ specs:
+ - service_type: rgw
+ service_id: foo
+ placement:
+ count: 4
+ host_pattern: "*"
+ spec:
+ rgw_frontend_port: 8000
+ - service_type: ingress
+ service_id: rgw.foo
+ placement:
+ count: 2
+ spec:
+ backend_service: rgw.foo
+ frontend_port: 9000
+ monitor_port: 9001
+ virtual_ip: "{{VIP0}}/{{VIPPREFIXLEN}}"
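+      # {{VIP0}}/{{VIPPREFIXLEN}} are substituted by the vip task above; the
+      # ingress service pairs haproxy (frontend_port) with keepalived holding the VIP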
+- cephadm.wait_for_service:
+ service: rgw.foo
+- cephadm.wait_for_service:
+ service: ingress.rgw.foo
+
+# take each component down in turn and ensure things still work
+- cephadm.shell:
+ host.a:
+ - |
+ echo "Check while healthy..."
+ curl http://{{VIP0}}:9000/
+
+ # stop each rgw in turn
+ echo "Check with each rgw stopped in turn..."
+ for rgw in `ceph orch ps | grep ^rgw.foo. | awk '{print $1}'`; do
+ ceph orch daemon stop $rgw
+ while ! ceph orch ps | grep $rgw | grep stopped; do sleep 1 ; done
+ while ! curl http://{{VIP0}}:9000/ ; do sleep 1 ; done
+ ceph orch daemon start $rgw
+ while ! ceph orch ps | grep $rgw | grep running; do sleep 1 ; done
+ done
+
+ # stop each haproxy in turn
+ echo "Check with each haproxy down in turn..."
+ for haproxy in `ceph orch ps | grep ^haproxy.rgw.foo. | awk '{print $1}'`; do
+ ceph orch daemon stop $haproxy
+ while ! ceph orch ps | grep $haproxy | grep stopped; do sleep 1 ; done
+ while ! curl http://{{VIP0}}:9000/ ; do sleep 1 ; done
+ ceph orch daemon start $haproxy
+ while ! ceph orch ps | grep $haproxy | grep running; do sleep 1 ; done
+ done
+
+ while ! curl http://{{VIP0}}:9000/ ; do sleep 1 ; done
--- /dev/null
+tasks:
+- cephadm.apply:
+ specs:
+ - service_type: rgw
+ service_id: foo
+ placement:
+ count_per_host: 4
+ host_pattern: "*"
+ spec:
+ rgw_frontend_port: 8000
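+      # count_per_host: 4 places four rgw daemons on each matching host; with a
+      # fixed rgw_frontend_port, ports are assigned consecutively from 8000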
+- cephadm.wait_for_service:
+ service: rgw.foo
--- /dev/null
+tasks:
+- cephadm.shell:
+ host.a:
+ - ceph orch status
+ - ceph orch ps
+ - ceph orch ls
+ - ceph orch host ls
+ - ceph orch device ls
--- /dev/null
+../smoke/distro
\ No newline at end of file
--- /dev/null
+tasks:
+- cephadm:
+ roleless: true
+ single_host_defaults: true
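+    # maps to bootstrap's --single-host-defaults so a one-host cluster can reach a healthy state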
+- cephadm.shell:
+ host.a:
+ - ceph orch status
+ - ceph orch ps
+ - ceph orch ls
+ - ceph orch host ls
+ - ceph orch device ls
+roles:
+- - host.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+ - client.0
+openstack:
+- volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd shutdown pgref assert: true
--- /dev/null
+tasks:
+- cephadm.apply:
+ specs:
+ - service_type: rgw
+ service_id: foo
+ placement:
+ count_per_host: 4
+ host_pattern: "*"
+ spec:
+ rgw_frontend_port: 8000
+- cephadm.wait_for_service:
+ service: rgw.foo
--- /dev/null
+tasks:
+- cephadm.shell:
+ host.a:
+ - ceph orch status
+ - ceph orch ps
+ - ceph orch ls
+ - ceph orch host ls
+ - ceph orch device ls
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+.qa/distros/podman/centos_8.2_kubic_stable.yaml
\ No newline at end of file
--- /dev/null
+.qa/distros/podman/rhel_8.3_kubic_stable.yaml
\ No newline at end of file
--- /dev/null
+.qa/distros/all/ubuntu_18.04.yaml
\ No newline at end of file
--- /dev/null
+.qa/distros/all/ubuntu_20.04.yaml
\ No newline at end of file
--- /dev/null
+roles:
+- - mon.a
+ - mon.c
+ - mgr.y
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+ - client.0
+ - ceph.rgw.foo.a
+ - node-exporter.a
+ - alertmanager.a
+- - mon.b
+ - mgr.x
+ - osd.4
+ - osd.5
+ - osd.6
+ - osd.7
+ - client.1
+ - prometheus.a
+ - grafana.a
+ - node-exporter.b
+ - ceph.iscsi.iscsi.a
+openstack:
+- volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd shutdown pgref assert: true
--- /dev/null
+.qa/mon_election
\ No newline at end of file
--- /dev/null
+tasks:
+- cephadm:
+ conf:
+ mgr:
+ debug ms: 1
+ debug mgr: 20
+- cephadm.shell:
+ mon.a:
+ - ceph orch status
+ - ceph orch ps
+ - ceph orch ls
+ - ceph orch host ls
+ - ceph orch device ls
+ - ceph orch ls --format yaml
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+../workunits/0-distro
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+- cephadm:
+ conf:
+ mgr:
+ debug ms: 1
+ debug mgr: 20
--- /dev/null
+overrides:
+ ceph:
+ log-ignorelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd debug reject backfill probability: .3
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 3
+ osd snap trim sleep: 2
+ osd delete sleep: 1
+ mon:
+ mon min osdmap epochs: 50
+ paxos service trim min: 10
+ # prune full osdmaps regularly
+ mon osdmap full prune min: 15
+ mon osdmap full prune interval: 2
+ mon osdmap full prune txsize: 2
+tasks:
+- thrashosds:
+ timeout: 1200
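+    # chance_* values are selection weights for the optional thrash actions (0 disables)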
+ chance_pgnum_grow: 1
+ chance_pgnum_shrink: 1
+ chance_pgpnum_fix: 1
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/suites/rados/thrash/workloads/rados_api_tests.yaml
\ No newline at end of file
--- /dev/null
+.qa/suites/rados/thrash/workloads/radosbench.yaml
\ No newline at end of file
--- /dev/null
+.qa/suites/rados/thrash/workloads/small-objects.yaml
\ No newline at end of file
--- /dev/null
+.qa/suites/rados/thrash/workloads/snaps-few-objects.yaml
\ No newline at end of file
--- /dev/null
+../smoke/fixed-2.yaml
\ No newline at end of file
--- /dev/null
+.qa/msgr
\ No newline at end of file
--- /dev/null
+overrides:
+ cephadm:
+ cephadm_mode: root
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+os_type: centos
+os_version: "8.3"
+overrides:
+ selinux:
+ whitelist:
+ - scontext=system_u:system_r:logrotate_t:s0
+
+tasks:
+- cephadm:
+ image: quay.ceph.io/ceph-ci/ceph:octopus
+ cephadm_branch: octopus
+ cephadm_git_url: https://github.com/ceph/ceph
+ # avoid --cap-add=PTRACE + --privileged for older cephadm versions
+ allow_ptrace: false
+ # deploy additional mons the "old" (octopus) way
+ add_mons_via_daemon_add: true
+ avoid_pacific_features: true
+
+roles:
+- - mon.a
+ - mon.c
+ - mgr.y
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+ - client.0
+# - ceph.rgw.realm.zone.a # CLI change in v16 pacific
+ - node-exporter.a
+ - alertmanager.a
+- - mon.b
+ - mgr.x
+ - osd.4
+ - osd.5
+ - osd.6
+ - osd.7
+ - client.1
+ - prometheus.a
+ - grafana.a
+ - node-exporter.b
+ - ceph.iscsi.iscsi.a
--- /dev/null
+os_type: ubuntu
+os_version: "20.04"
+
+tasks:
+- cephadm:
+ image: docker.io/ceph/ceph:v15.2.9
+ cephadm_branch: v15.2.9
+ cephadm_git_url: https://github.com/ceph/ceph
+ # avoid --cap-add=PTRACE + --privileged for older cephadm versions
+ allow_ptrace: false
+ # deploy additional mons the "old" (octopus) way
+ add_mons_via_daemon_add: true
+ avoid_pacific_features: true
+
+roles:
+- - mon.a
+ - mon.c
+ - mgr.y
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+ - client.0
+# - ceph.rgw.realm.zone.a # CLI change in v16 pacific
+ - node-exporter.a
+ - alertmanager.a
+- - mon.b
+ - mgr.x
+ - osd.4
+ - osd.5
+ - osd.6
+ - osd.7
+ - client.1
+ - prometheus.a
+ - grafana.a
+ - node-exporter.b
+ - ceph.iscsi.iscsi.a
--- /dev/null
+os_type: ubuntu
+os_version: "20.04"
+
+tasks:
+- cephadm:
+ image: docker.io/ceph/ceph:v15.2.0
+ cephadm_branch: v15.2.0
+ cephadm_git_url: https://github.com/ceph/ceph
+ # avoid --cap-add=PTRACE + --privileged for older cephadm versions
+ allow_ptrace: false
+ # deploy additional mons the "old" (octopus) way
+ add_mons_via_daemon_add: true
+ avoid_pacific_features: true
+
+roles:
+- - mon.a
+ - mon.c
+ - mgr.y
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+ - client.0
+# - ceph.rgw.realm.zone.a # Disabled, needs 15.2.4 as an upgrade start
+ - node-exporter.a
+ - alertmanager.a
+- - mon.b
+ - mgr.x
+ - osd.4
+ - osd.5
+ - osd.6
+ - osd.7
+ - client.1
+ - prometheus.a
+ - grafana.a
+ - node-exporter.b
+# - ceph.iscsi.iscsi.a # needs later start point
--- /dev/null
+tasks:
+- cephadm.shell:
+ mon.a:
+ - ceph config set mgr mgr/cephadm/use_repo_digest false --force
--- /dev/null
+tasks:
+- cephadm.shell:
+ env: [sha1]
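+    # expose the target build's sha1 env var to the shell commands below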
+ mon.a:
+ - radosgw-admin realm create --rgw-realm=r --default
+ - radosgw-admin zonegroup create --rgw-zonegroup=default --master --default
+ - radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=z --master --default
+ - radosgw-admin period update --rgw-realm=r --commit
+ - ceph orch apply rgw r z --placement=2 --port=8000
+ - sleep 120
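+    # daemons still running the old release reclaim global_ids insecurely during
+    # the upgrade window; mute the resulting health warnings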
+ - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force
+ - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force
+ - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
--- /dev/null
+tasks:
+- cephadm.shell:
+ env: [sha1]
+ mon.a:
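+    # poll until the orchestrator reports the upgrade is no longer in progress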
+ - while ceph orch upgrade status | jq '.in_progress' | grep true ; do ceph orch ps ; ceph versions ; sleep 30 ; done
+ - ceph orch ps
+ - ceph versions
+ - echo "wait for servicemap items w/ changing names to refresh"
+ - sleep 60
+ - ceph orch ps
+ - ceph versions
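+    # success criteria: exactly one version reported cluster-wide, matching the target sha1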
+ - ceph versions | jq -e '.overall | length == 1'
+ - ceph versions | jq -e '.overall | keys' | grep $sha1
--- /dev/null
+.qa/mon_election
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+../smoke/distro/
\ No newline at end of file
--- /dev/null
+../smoke/fixed-2.yaml
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+overrides:
+ cephadm:
+ cephadm_mode: cephadm-package
+ install:
+ extra_packages: [cephadm]
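+# cephadm-package mode runs the cephadm binary installed from the distro package
+# (pulled in via extra_packages) rather than a copy fetched from a git branch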
--- /dev/null
+overrides:
+ cephadm:
+ cephadm_mode: root
--- /dev/null
+.qa/mon_election
\ No newline at end of file
--- /dev/null
+.qa/msgr
\ No newline at end of file
--- /dev/null
+tasks:
+- install:
+- cephadm:
+ conf:
+ mgr:
+ debug ms: 1
+ debug mgr: 20
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/suites/rados/basic/tasks/rados_api_tests.yaml
\ No newline at end of file
--- /dev/null
+.qa/suites/rados/basic/tasks/rados_python.yaml
\ No newline at end of file
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+../.qa
\ No newline at end of file
--- /dev/null
+.qa/distros/podman/centos_8.2_kubic_stable.yaml
\ No newline at end of file
--- /dev/null
+.qa/mon_election
\ No newline at end of file
--- /dev/null
+roles:
+- [mon.a, mgr.x, osd.0, client.0]
+tasks:
+- install:
+- exec:
+ mon.a:
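+    # portable across rpm- and deb-based distros: whichever package manager exists installs python3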
+ - yum install -y python3 || apt install -y python3
+- workunit:
+ clients:
+ client.0:
+ - cephadm/test_adoption.sh
--- /dev/null
+roles:
+- [mon.a, mgr.x, osd.0, client.0]
+tasks:
+- install:
+- exec:
+ mon.a:
+ - yum install -y python3 || apt install -y python3
+- workunit:
+ clients:
+ client.0:
+ - cephadm/test_cephadm.sh
--- /dev/null
+roles:
+- [mon.a, mgr.x, osd.0, client.0]
+tasks:
+- workunit:
+ no_coverage_and_limits: true
+ clients:
+ client.0:
+ - cephadm/test_repos.sh
--- /dev/null
+roles:
+- - host.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - mon.a
+ - mgr.a
+ - client.0
+tasks:
+- install:
+- cephadm:
+- cephadm.shell:
+ host.a:
+ - ceph orch apply mds a
+- cephfs_test_runner:
+ modules:
+ - tasks.cephfs.test_nfs
+ - tasks.cephadm_cases.test_cli
--- /dev/null
+../orch/cephadm
\ No newline at end of file
+++ /dev/null
-../.qa
\ No newline at end of file
+++ /dev/null
-../.qa
\ No newline at end of file
+++ /dev/null
-../.qa
\ No newline at end of file
+++ /dev/null
-.qa/distros/podman/centos_8.2_kubic_stable.yaml
\ No newline at end of file
+++ /dev/null
-roles:
-# 3 osd roles on host.a is required for cephadm task. It checks if the cluster is healthy.
-# More daemons will be deployed on both hosts in e2e tests.
-- - host.a
- - osd.0
- - osd.1
- - osd.2
- - mon.a
- - mgr.a
- - client.0
-- - host.b
- - client.1
-tasks:
-- install:
-- cephadm:
-- workunit:
- clients:
- client.1:
- - cephadm/create_iscsi_disks.sh
-- workunit:
- clients:
- client.0:
- - cephadm/test_dashboard_e2e.sh
+++ /dev/null
-../.qa
\ No newline at end of file
+++ /dev/null
-../smoke/distro
\ No newline at end of file
+++ /dev/null
-.qa/clusters/2-node-mgr.yaml
\ No newline at end of file
+++ /dev/null
-
-tasks:
- - install:
- - ceph:
- # tests may leave mgrs broken, so don't try and call into them
- # to invoke e.g. pg dump during teardown.
- wait-for-scrub: false
- log-ignorelist:
- - overall HEALTH_
- - \(MGR_DOWN\)
- - \(DEVICE_IDENT_ON\)
- - \(DEVICE_FAULT_ON\)
- - \(PG_
- - replacing it with standby
- - No standby daemons available
- - cephfs_test_runner:
- modules:
- - tasks.mgr.test_orchestrator_cli
\ No newline at end of file
+++ /dev/null
-../.qa
\ No newline at end of file
+++ /dev/null
-../smoke/distro
\ No newline at end of file
+++ /dev/null
-tasks:
-- cephadm:
- roleless: true
-- cephadm.shell:
- host.a:
- - ceph orch status
- - ceph orch ps
- - ceph orch ls
- - ceph orch host ls
- - ceph orch device ls
-roles:
-- - host.a
- - osd.0
- - osd.1
- - osd.2
- - osd.3
- - client.0
-- - host.b
- - osd.4
- - osd.5
- - osd.6
- - osd.7
- - client.1
-openstack:
-- volumes: # attached to each instance
- count: 4
- size: 10 # GB
-overrides:
- ceph:
- conf:
- osd:
- osd shutdown pgref assert: true
+++ /dev/null
-tasks:
-- cephadm.shell:
- host.a:
- - ceph orch host label add `hostname` foo
- - ceph auth get-or-create client.foo mon 'allow r'
- - ceph orch client-keyring set client.foo label:foo --mode 770 --owner 11111:22222
-- exec:
- host.a:
- - while ! test -e /etc/ceph/ceph.client.foo.keyring ; do sleep 1 ; done
- - ls -al /etc/ceph/ceph.client.foo.keyring | grep rwxrwx---
- - ls -al /etc/ceph/ceph.client.foo.keyring | grep 11111
- - ls -al /etc/ceph/ceph.client.foo.keyring | grep 22222
- - test -e /etc/ceph/ceph.conf
-- exec:
- host.b:
- - test ! -e /etc/ceph/ceph.client.foo.keyring
-- cephadm.shell:
- host.b:
- - ceph orch host label add `hostname` foo
-- exec:
- host.b:
- - while ! test -e /etc/ceph/ceph.client.foo.keyring ; do sleep 1 ; done
- - ls -al /etc/ceph/ceph.client.foo.keyring | grep rwxrwx---
- - ls -al /etc/ceph/ceph.client.foo.keyring | grep 11111
- - ls -al /etc/ceph/ceph.client.foo.keyring | grep 22222
-- cephadm.shell:
- host.b:
- - ceph orch host label rm `hostname` foo
-- exec:
- host.b:
- - while test -e /etc/ceph/ceph.client.foo.keyring ; do sleep 1 ; done
-- exec:
- host.a:
- - test -e /etc/ceph/ceph.client.foo.keyring
-- cephadm.shell:
- host.a:
- - ceph orch client-keyring rm client.foo
-- exec:
- host.a:
- - while test -e /etc/ceph/ceph.client.foo.keyring ; do sleep 1 ; done
+++ /dev/null
-tasks:
-- cephadm.shell:
- host.a:
- - ceph osd pool create foo
- - rbd pool init foo
- - ceph orch apply iscsi foo u p
-- cephadm.wait_for_service:
- service: iscsi.foo
+++ /dev/null
-tasks:
-- cephadm.shell:
- host.a:
- - ceph orch apply rbd-mirror "--placement=*"
- - ceph orch apply cephfs-mirror "--placement=*"
-- cephadm.wait_for_service:
- service: rbd-mirror
-- cephadm.wait_for_service:
- service: cephfs-mirror
+++ /dev/null
-tasks:
-- vip:
-
-# make sure cephadm notices the new IP
-- cephadm.shell:
- host.a:
- - ceph orch device ls --refresh
-
-# deploy rgw + ingress
-- cephadm.apply:
- specs:
- - service_type: rgw
- service_id: foo
- placement:
- count: 4
- host_pattern: "*"
- spec:
- rgw_frontend_port: 8000
- - service_type: ingress
- service_id: rgw.foo
- placement:
- count: 2
- spec:
- backend_service: rgw.foo
- frontend_port: 9000
- monitor_port: 9001
- virtual_ip: "{{VIP0}}/{{VIPPREFIXLEN}}"
-- cephadm.wait_for_service:
- service: rgw.foo
-- cephadm.wait_for_service:
- service: ingress.rgw.foo
-
-# take each component down in turn and ensure things still work
-- cephadm.shell:
- host.a:
- - |
- echo "Check while healthy..."
- curl http://{{VIP0}}:9000/
-
- # stop each rgw in turn
- echo "Check with each rgw stopped in turn..."
- for rgw in `ceph orch ps | grep ^rgw.foo. | awk '{print $1}'`; do
- ceph orch daemon stop $rgw
- while ! ceph orch ps | grep $rgw | grep stopped; do sleep 1 ; done
- while ! curl http://{{VIP0}}:9000/ ; do sleep 1 ; done
- ceph orch daemon start $rgw
- while ! ceph orch ps | grep $rgw | grep running; do sleep 1 ; done
- done
-
- # stop each haproxy in turn
- echo "Check with each haproxy down in turn..."
- for haproxy in `ceph orch ps | grep ^haproxy.rgw.foo. | awk '{print $1}'`; do
- ceph orch daemon stop $haproxy
- while ! ceph orch ps | grep $haproxy | grep stopped; do sleep 1 ; done
- while ! curl http://{{VIP0}}:9000/ ; do sleep 1 ; done
- ceph orch daemon start $haproxy
- while ! ceph orch ps | grep $haproxy | grep running; do sleep 1 ; done
- done
-
- while ! curl http://{{VIP0}}:9000/ ; do sleep 1 ; done
+++ /dev/null
-tasks:
-- cephadm.apply:
- specs:
- - service_type: rgw
- service_id: foo
- placement:
- count_per_host: 4
- host_pattern: "*"
- spec:
- rgw_frontend_port: 8000
-- cephadm.wait_for_service:
- service: rgw.foo
+++ /dev/null
-tasks:
-- cephadm.shell:
- host.a:
- - ceph orch status
- - ceph orch ps
- - ceph orch ls
- - ceph orch host ls
- - ceph orch device ls
+++ /dev/null
-../smoke/distro
\ No newline at end of file
+++ /dev/null
-tasks:
-- cephadm:
- roleless: true
- single_host_defaults: true
-- cephadm.shell:
- host.a:
- - ceph orch status
- - ceph orch ps
- - ceph orch ls
- - ceph orch host ls
- - ceph orch device ls
-roles:
-- - host.a
- - osd.0
- - osd.1
- - osd.2
- - osd.3
- - client.0
-openstack:
-- volumes: # attached to each instance
- count: 4
- size: 10 # GB
-overrides:
- ceph:
- conf:
- osd:
- osd shutdown pgref assert: true
+++ /dev/null
-tasks:
-- cephadm.apply:
- specs:
- - service_type: rgw
- service_id: foo
- placement:
- count_per_host: 4
- host_pattern: "*"
- spec:
- rgw_frontend_port: 8000
-- cephadm.wait_for_service:
- service: rgw.foo
+++ /dev/null
-tasks:
-- cephadm.shell:
- host.a:
- - ceph orch status
- - ceph orch ps
- - ceph orch ls
- - ceph orch host ls
- - ceph orch device ls
+++ /dev/null
-../.qa
\ No newline at end of file
+++ /dev/null
-../.qa
\ No newline at end of file
+++ /dev/null
-.qa/distros/podman/centos_8.2_kubic_stable.yaml
\ No newline at end of file
+++ /dev/null
-.qa/distros/podman/rhel_8.3_kubic_stable.yaml
\ No newline at end of file
+++ /dev/null
-.qa/distros/all/ubuntu_18.04.yaml
\ No newline at end of file
+++ /dev/null
-.qa/distros/all/ubuntu_20.04.yaml
\ No newline at end of file
+++ /dev/null
-roles:
-- - mon.a
- - mon.c
- - mgr.y
- - osd.0
- - osd.1
- - osd.2
- - osd.3
- - client.0
- - ceph.rgw.foo.a
- - node-exporter.a
- - alertmanager.a
-- - mon.b
- - mgr.x
- - osd.4
- - osd.5
- - osd.6
- - osd.7
- - client.1
- - prometheus.a
- - grafana.a
- - node-exporter.b
- - ceph.iscsi.iscsi.a
-openstack:
-- volumes: # attached to each instance
- count: 4
- size: 10 # GB
-overrides:
- ceph:
- conf:
- osd:
- osd shutdown pgref assert: true
+++ /dev/null
-.qa/mon_election
\ No newline at end of file
+++ /dev/null
-tasks:
-- cephadm:
- conf:
- mgr:
- debug ms: 1
- debug mgr: 20
-- cephadm.shell:
- mon.a:
- - ceph orch status
- - ceph orch ps
- - ceph orch ls
- - ceph orch host ls
- - ceph orch device ls
- - ceph orch ls --format yaml
+++ /dev/null
-../.qa
\ No newline at end of file
+++ /dev/null
-../workunits/0-distro
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
-- cephadm:
- conf:
- mgr:
- debug ms: 1
- debug mgr: 20
+++ /dev/null
-overrides:
- ceph:
- log-ignorelist:
- - but it is still running
- - objects unfound and apparently lost
- conf:
- osd:
- osd debug reject backfill probability: .3
- osd scrub min interval: 60
- osd scrub max interval: 120
- osd max backfills: 3
- osd snap trim sleep: 2
- osd delete sleep: 1
- mon:
- mon min osdmap epochs: 50
- paxos service trim min: 10
- # prune full osdmaps regularly
- mon osdmap full prune min: 15
- mon osdmap full prune interval: 2
- mon osdmap full prune txsize: 2
-tasks:
-- thrashosds:
- timeout: 1200
- chance_pgnum_grow: 1
- chance_pgnum_shrink: 1
- chance_pgpnum_fix: 1
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../../../thrash/workloads/rados_api_tests.yaml
\ No newline at end of file
+++ /dev/null
-../../../thrash/workloads/radosbench.yaml
\ No newline at end of file
+++ /dev/null
-../../../thrash/workloads/small-objects.yaml
\ No newline at end of file
+++ /dev/null
-../../../thrash/workloads/snaps-few-objects.yaml
\ No newline at end of file
+++ /dev/null
-../smoke/fixed-2.yaml
\ No newline at end of file
+++ /dev/null
-.qa/msgr
\ No newline at end of file
+++ /dev/null
-overrides:
- cephadm:
- cephadm_mode: root
+++ /dev/null
-../.qa
\ No newline at end of file
+++ /dev/null
-os_type: centos
-os_version: "8.3"
-overrides:
- selinux:
- whitelist:
- - scontext=system_u:system_r:logrotate_t:s0
-
-tasks:
-- cephadm:
- image: quay.ceph.io/ceph-ci/ceph:octopus
- cephadm_branch: octopus
- cephadm_git_url: https://github.com/ceph/ceph
- # avoid --cap-add=PTRACE + --privileged for older cephadm versions
- allow_ptrace: false
- # deploy additional mons the "old" (octopus) way
- add_mons_via_daemon_add: true
- avoid_pacific_features: true
-
-
-roles:
-- - mon.a
- - mon.c
- - mgr.y
- - osd.0
- - osd.1
- - osd.2
- - osd.3
- - client.0
-# - ceph.rgw.realm.zone.a # CLI change in v16 pacific
- - node-exporter.a
- - alertmanager.a
-- - mon.b
- - mgr.x
- - osd.4
- - osd.5
- - osd.6
- - osd.7
- - client.1
- - prometheus.a
- - grafana.a
- - node-exporter.b
- - ceph.iscsi.iscsi.a
+++ /dev/null
-os_type: ubuntu
-os_version: "20.04"
-
-tasks:
-- cephadm:
- image: docker.io/ceph/ceph:v15.2.9
- cephadm_branch: v15.2.9
- cephadm_git_url: https://github.com/ceph/ceph
- # avoid --cap-add=PTRACE + --privileged for older cephadm versions
- allow_ptrace: false
- # deploy additional mons the "old" (octopus) way
- add_mons_via_daemon_add: true
- avoid_pacific_features: true
-
-roles:
-- - mon.a
- - mon.c
- - mgr.y
- - osd.0
- - osd.1
- - osd.2
- - osd.3
- - client.0
-# - ceph.rgw.realm.zone.a # CLI change in v16 pacific
- - node-exporter.a
- - alertmanager.a
-- - mon.b
- - mgr.x
- - osd.4
- - osd.5
- - osd.6
- - osd.7
- - client.1
- - prometheus.a
- - grafana.a
- - node-exporter.b
- - ceph.iscsi.iscsi.a
+++ /dev/null
-os_type: ubuntu
-os_version: "20.04"
-
-tasks:
-- cephadm:
- image: docker.io/ceph/ceph:v15.2.0
- cephadm_branch: v15.2.0
- cephadm_git_url: https://github.com/ceph/ceph
- # avoid --cap-add=PTRACE + --privileged for older cephadm versions
- allow_ptrace: false
- # deploy additional mons the "old" (octopus) way
- add_mons_via_daemon_add: true
- avoid_pacific_features: true
-
-roles:
-- - mon.a
- - mon.c
- - mgr.y
- - osd.0
- - osd.1
- - osd.2
- - osd.3
- - client.0
-# - ceph.rgw.realm.zone.a # Disabled, needs 15.2.4 as an upgrade start
- - node-exporter.a
- - alertmanager.a
-- - mon.b
- - mgr.x
- - osd.4
- - osd.5
- - osd.6
- - osd.7
- - client.1
- - prometheus.a
- - grafana.a
- - node-exporter.b
-# - ceph.iscsi.iscsi.a # needs later start point
+++ /dev/null
-tasks:
-- cephadm.shell:
- mon.a:
- - ceph config set mgr mgr/cephadm/use_repo_digest false --force
+++ /dev/null
-tasks:
-- cephadm.shell:
- env: [sha1]
- mon.a:
- - radosgw-admin realm create --rgw-realm=r --default
- - radosgw-admin zonegroup create --rgw-zonegroup=default --master --default
- - radosgw-admin zone create --rgw-zonegroup=default --rgw-zone=z --master --default
- - radosgw-admin period update --rgw-realm=r --commit
- - ceph orch apply rgw r z --placement=2 --port=8000
- - sleep 120
- - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force
- - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force
- - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
+++ /dev/null
-tasks:
-- cephadm.shell:
- env: [sha1]
- mon.a:
- - while ceph orch upgrade status | jq '.in_progress' | grep true ; do ceph orch ps ; ceph versions ; sleep 30 ; done
- - ceph orch ps
- - ceph versions
- - echo "wait for servicemap items w/ changing names to refresh"
- - sleep 60
- - ceph orch ps
- - ceph versions
- - ceph versions | jq -e '.overall | length == 1'
- - ceph versions | jq -e '.overall | keys' | grep $sha1
+++ /dev/null
-.qa/mon_election
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../smoke/distro/
\ No newline at end of file
+++ /dev/null
-../smoke/fixed-2.yaml
\ No newline at end of file
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-overrides:
- cephadm:
- cephadm_mode: cephadm-package
- install:
- extra_packages: [cephadm]
+++ /dev/null
-overrides:
- cephadm:
- cephadm_mode: root
+++ /dev/null
-.qa/mon_election
\ No newline at end of file
+++ /dev/null
-.qa/msgr
\ No newline at end of file
+++ /dev/null
-tasks:
-- install:
-- cephadm:
- conf:
- mgr:
- debug ms: 1
- debug mgr: 20
+++ /dev/null
-../.qa/
\ No newline at end of file
+++ /dev/null
-../../../basic/tasks/rados_api_tests.yaml
\ No newline at end of file
+++ /dev/null
-../../../basic/tasks/rados_python.yaml
\ No newline at end of file
+++ /dev/null
-../.qa
\ No newline at end of file
+++ /dev/null
-../.qa
\ No newline at end of file
+++ /dev/null
-.qa/distros/podman/centos_8.2_kubic_stable.yaml
\ No newline at end of file
+++ /dev/null
-.qa/mon_election
\ No newline at end of file
+++ /dev/null
-roles:
-- [mon.a, mgr.x, osd.0, client.0]
-tasks:
-- install:
-- exec:
- mon.a:
- - yum install -y python3 || apt install -y python3
-- workunit:
- clients:
- client.0:
- - cephadm/test_adoption.sh
+++ /dev/null
-roles:
-- [mon.a, mgr.x, osd.0, client.0]
-tasks:
-- install:
-- exec:
- mon.a:
- - yum install -y python3 || apt install -y python3
-- workunit:
- clients:
- client.0:
- - cephadm/test_cephadm.sh
+++ /dev/null
-roles:
-- [mon.a, mgr.x, osd.0, client.0]
-tasks:
-- workunit:
- no_coverage_and_limits: true
- clients:
- client.0:
- - cephadm/test_repos.sh
+++ /dev/null
-roles:
-- - host.a
- - osd.0
- - osd.1
- - osd.2
- - mon.a
- - mgr.a
- - client.0
-tasks:
-- install:
-- cephadm:
-- cephadm.shell:
- host.a:
- - ceph orch apply mds a
-- cephfs_test_runner:
- modules:
- - tasks.cephfs.test_nfs
- - tasks.cephadm_cases.test_cli