--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+.qa/cephfs/objectstore-ec/bluestore-bitmap.yaml
\ No newline at end of file
--- /dev/null
+.qa/distros/podman/centos_8.3_container_tools_3.0.yaml
\ No newline at end of file
--- /dev/null
+.qa/cephfs/conf/
\ No newline at end of file
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
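+# Disable the "too few PGs per OSD" health warning for this small test cluster.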
+overrides:
+ ceph:
+ conf:
+ global:
+ mon pg warn min per osd: 0
--- /dev/null
+.qa/cephfs/overrides/whitelist_health.yaml
\ No newline at end of file
--- /dev/null
+.qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml
\ No newline at end of file
--- /dev/null
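+# Two hosts, each with a client and three OSDs; mon/mgr/mds placement is left to
+# cephadm since the deployment below is roleless.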
+roles:
+- - host.a
+ - client.0
+ - osd.0
+ - osd.1
+ - osd.2
+- - host.b
+ - client.1
+ - osd.3
+ - osd.4
+ - osd.5
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
+meta:
+- desc: |
+  install and set up ceph/pacific v16.2.4 via cephadm
+
+tasks:
+- install:
+ tag: v16.2.4
+ exclude_packages:
+ - ceph-volume
+- cephadm:
+ roleless: true
+ image: docker.io/ceph/ceph:v16.2.4
+ cephadm_branch: v16.2.4
+ cephadm_git_url: https://github.com/ceph/ceph
+ # needed for v16.2.4 due to --skip-admin-label
+ avoid_pacific_features: true
+- print: "**** done starting v16.2.4"
+- cephadm.shell:
+ host.a:
+ - ceph orch status
+ - ceph orch ps
+ - ceph orch ls
+ - ceph orch host ls
+ - ceph orch device ls
+
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
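+# Create the filesystem; --placement=4 has cephadm deploy four MDS daemons for it.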
+tasks:
+- cephadm.shell:
+ host.a:
+ - ceph fs volume create cephfs --placement=4
+ - ceph fs dump
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
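+# Suite variant: one active MDS rank.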
+tasks:
+- cephadm.shell:
+ host.a:
+ - ceph fs set cephfs max_mds 1
--- /dev/null
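+# Suite variant: two active MDS ranks.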
+tasks:
+- cephadm.shell:
+ host.a:
+ - ceph fs set cephfs max_mds 2
--- /dev/null
+../.qa/
\ No newline at end of file
--- /dev/null
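+# Suite variant: standby-replay disabled.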
+tasks:
+- cephadm.shell:
+ host.a:
+ - ceph fs set cephfs allow_standby_replay false
--- /dev/null
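+# Suite variant: standby-replay enabled.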
+tasks:
+- cephadm.shell:
+ host.a:
+ - ceph fs set cephfs allow_standby_replay true
--- /dev/null
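+# Pre-upgrade verification: exactly one filesystem and four MDS daemons reporting a
+# version; fs.pre_upgrade_save is assumed to record this state for later comparison.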
+tasks:
+- cephadm.shell:
+ host.a:
+ - ceph fs dump
+ - ceph --format=json fs dump | jq -e ".filesystems | length == 1"
+ - ceph --format=json mds versions | jq -e ". | add == 4"
+- fs.pre_upgrade_save:
--- /dev/null
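+# Mount the filesystem on the clients using the kernel client.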
+tasks:
+- kclient:
+- print: "**** done kclient client"
--- /dev/null
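+# Upgrade the cluster while a client workload runs against the mounted filesystem.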
+tasks:
+- parallel:
+ - upgrade-tasks
+ - workload-tasks
+
+upgrade-tasks:
+ sequential:
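+  # Disable the insecure global_id reclaim warnings and journald logging before
+  # starting the orchestrated upgrade to the build under test ($sha1).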
+ - cephadm.shell:
+ env: [sha1]
+ host.a:
+ - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force
+ - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force
+ - ceph config set global log_to_journald false --force
+ - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
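+  # Wait for the upgrade to complete, then verify all daemons run the target version.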
+ - cephadm.shell:
+ env: [sha1]
+ host.a:
+ - while ceph orch upgrade status | jq '.in_progress' | grep true ; do ceph orch ps ; ceph versions ; ceph fs dump; sleep 30 ; done
+ - ceph orch ps
+ - ceph versions
+ - echo "wait for servicemap items w/ changing names to refresh"
+ - sleep 60
+ - ceph orch ps
+ - ceph versions
+ - ceph versions | jq -e '.overall | length == 1'
+ - ceph versions | jq -e '.overall | keys' | grep $sha1
+
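+# Client workload run concurrently with the upgrade above.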
+workload-tasks:
+ sequential:
+ - workunit:
+ clients:
+ all:
+ - suites/fsstress.sh
--- /dev/null
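+# Post-upgrade verification; fs.post_upgrade_checks is assumed to validate the
+# filesystem state against what fs.pre_upgrade_save recorded before the upgrade.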
+tasks:
+- cephadm.shell:
+ host.a:
+ - ceph fs dump
+- fs.post_upgrade_checks:
--- /dev/null
+.qa/suites/fs/upgrade/mds_upgrade_sequence/
\ No newline at end of file