--- /dev/null
+#
+# Test the expected behavior of the
+#
+#    CEPH_FEATURE_HAMMER_0_94_4
+#
+# feature, which forbids running a cluster with a mix of
+# OSDs < v0.94.4 and OSDs >= v0.94.4
+#
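+# The roles below place mon.a, osd.0 and osd.1 on one target and osd.2
+# on a second target, so that the two targets can be upgraded
+# independently of each other.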
+roles:
+- - mon.a
+  - osd.0
+  - osd.1
+- - osd.2
+tasks:
+- print: "**** Install version lower than v0.94.4"
+- install:
+ tag: v0.94.3
+- ceph:
+ fs: xfs
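+# Every daemon in the cluster now runs v0.94.3.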
+
+- print: "*** Upgrade the target that runs osd.0 and osd.1 to -x while the target that runs osd.2 stays v0.94.3"
+- install.upgrade:
+ osd.0:
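+# Note: this only upgrades the packages on the target that runs osd.0
+# (which also hosts mon.a and osd.1); the daemons keep running the old
+# binaries until they are restarted below.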
+
+- print: "*** Restart the mon.a so that it is post-hammer v0.94.4 and implements the CEPH_FEATURE_HAMMER_0_94_4 feature"
+- ceph.restart:
+ daemons: [mon.a]
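+# mon.a now runs the upgraded code and applies the
+# CEPH_FEATURE_HAMMER_0_94_4 checks when OSDs try to boot.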
+
+- print: "*** Verify that osd.0 cannot restart because osd.1 and osd.2 are still < v0.94.4"
+- ceph.restart:
+ daemons: [osd.0]
+ wait-for-healthy: false
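+# wait-for-healthy is false because osd.0 is expected to be refused;
+# the exec below waits for the refusal to show up in the osd.0 log.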
+- exec:
+    osd.0:
+    - |-
+      set -x
+      success=false
+      # Poll with exponential backoff (up to 2047 seconds in total):
+      # flush the in-memory log of osd.0 and look for the message
+      # explaining that pre-v0.94.4 OSDs prevent it from booting.
+      for delay in 1 2 4 8 16 32 64 128 256 512 1024 ; do
+        if ceph daemon osd.0 log flush ; then
+          if grep "one or more pre-v0.94.4 hammer" /var/log/ceph/ceph-osd.0.log ; then
+            success=true
+            break
+          fi
+        fi
+        sleep $delay
+      done
+      $success || exit 1
+
+- print: "*** Stop all OSDs and restart osd.0 and osd.1 which are >= v0.94.4"
+- ceph.stop:
+ daemons: [osd.0, osd.1, osd.2]
+- exec:
+ mon.a:
+ - |-
+ set -x
+ ceph osd down osd.0
+ ceph osd down osd.1
+ ceph osd down osd.2
+- ceph.restart:
+    daemons: [osd.0, osd.1]
+    wait-for-healthy: false
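+# wait-for-healthy is false because osd.2 stays down, so the cluster
+# cannot become fully healthy; the exec below only waits for osd.0 and
+# osd.1 to be marked up and checks that osd.2 is still marked down.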
+- exec:
+    mon.a:
+    - |-
+      set -x
+      success=false
+      # Wait for the monitor to mark both upgraded OSDs up; osd.2 must
+      # remain down because it was not restarted.
+      for delay in 1 2 4 8 16 32 64 128 256 512 1024 ; do
+        if ceph osd dump | grep 'osd.1 up' && ceph osd dump | grep 'osd.0 up' ; then
+          success=true
+          break
+        fi
+        ceph osd dump
+        sleep $delay
+      done
+      $success || exit 1
+      ceph osd dump | grep 'osd.2 down' || exit 1
+
+- print: "*** Verify that osd.2 cannot restart because it is < v0.94.4 and all other OSDs are >= v0.94.4"
+- ceph.restart:
+ daemons: [osd.2]
+ wait-for-healthy: false
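+# wait-for-healthy is false because osd.2 is expected to be refused;
+# the exec below waits for the refusal to show up in the mon log.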
+- exec:
+    mon.a:
+    - |-
+      set -x
+      success=false
+      # Flush the in-memory log of mon.a and look for the message
+      # showing that the boot of the pre-v0.94.4 osd.2 was disallowed.
+      for delay in 1 2 4 8 16 32 64 128 256 512 1024 ; do
+        ceph daemon mon.a log flush
+        if grep "disallowing boot of pre-hammer v0.94.4 OSD" /var/log/ceph/*.log ; then
+          success=true
+          break
+        fi
+        sleep $delay
+        ceph osd dump
+      done
+      $success || exit 1
+
+- print: "*** Upgrade the target that runs osd.2 to -x and verify the cluster is back to being healthy"
+- install.upgrade:
+ osd.2:
+- ceph.restart:
+ daemons: [osd.2]
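+# ceph.restart waits for the cluster to become healthy by default
+# (wait-for-healthy is not set to false here), which verifies that
+# osd.2 can boot now that all OSDs run >= v0.94.4.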