From: Samuel Just Date: Sun, 21 Apr 2019 01:51:08 +0000 (-0700) Subject: test-erasure-eio: first eio may be fixed during recovery X-Git-Tag: v15.1.0~2774^2~9 X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=5ea5c47152c798f5282f0203b5df74bf5de242e2;p=ceph-ci.git test-erasure-eio: first eio may be fixed during recovery The changes to the way EC/ReplicatedBackend communicate read errors had a side effect of making first eio on the object in TEST_rados_get_subread_eio_shard_[01] repair itself depending on the timing of the killed osd recovering. The test should be improved to actually test that behavior at some point. Signed-off-by: Samuel Just --- diff --git a/qa/standalone/erasure-code/test-erasure-eio.sh b/qa/standalone/erasure-code/test-erasure-eio.sh index ce234d4adbd..7a6bb36b69b 100755 --- a/qa/standalone/erasure-code/test-erasure-eio.sh +++ b/qa/standalone/erasure-code/test-erasure-eio.sh @@ -175,13 +175,14 @@ function rados_put_get_data() { ceph osd in ${last_osd} || return 1 run_osd $dir ${last_osd} || return 1 wait_for_clean || return 1 + # Won't check for eio on get here -- recovery above might have fixed it + else + shard_id=$(expr $shard_id + 1) + inject_$inject ec data $poolname $objname $dir $shard_id || return 1 + rados_get $dir $poolname $objname fail || return 1 + rm $dir/ORIGINAL fi - shard_id=$(expr $shard_id + 1) - inject_$inject ec data $poolname $objname $dir $shard_id || return 1 - # Now 2 out of 3 shards get an error, so should fail - rados_get $dir $poolname $objname fail || return 1 - rm $dir/ORIGINAL } # Change the size of speificied shard