git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
test-erasure-eio: first eio may be fixed during recovery
author     Samuel Just <sjust@redhat.com>
           Sun, 21 Apr 2019 01:51:08 +0000 (18:51 -0700)
committer  sjust@redhat.com <sjust@redhat.com>
           Wed, 1 May 2019 18:22:28 +0000 (11:22 -0700)
The changes to the way EC/ReplicatedBackend communicate read
errors had a side effect of making the first eio on the object in
TEST_rados_get_subread_eio_shard_[01] repair itself depending
on the timing of the killed osd recovering.  The test should
be improved to actually test that behavior at some point.

Signed-off-by: Samuel Just <sjust@redhat.com>
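
As a rough illustration of the follow-up the message alludes to, here is a minimal hypothetical sketch of what explicitly asserting "the first eio gets repaired during recovery" could look like. It assumes the existing standalone-test helpers (inject_eio, rados_get, run_osd, wait_for_clean) and that rados_get without the "fail" argument expects a successful read; none of this is part of the commit itself.

    # Hypothetical follow-up check, not part of this commit: after injecting
    # eio on one shard and letting the killed OSD rejoin and recover, the
    # object should be readable again, i.e. recovery repaired the bad shard.
    inject_eio ec data $poolname $objname $dir $shard_id || return 1
    ceph osd in ${last_osd} || return 1
    run_osd $dir ${last_osd} || return 1
    wait_for_clean || return 1
    rados_get $dir $poolname $objname || return 1   # expect success: eio fixed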
qa/standalone/erasure-code/test-erasure-eio.sh

index ce234d4adbd4f3c131c3e0df080c22e1a3bcb305..7a6bb36b69bf7d3b4cebbbc61f5416afaa288c7e 100755 (executable)
@@ -175,13 +175,14 @@ function rados_put_get_data() {
         ceph osd in ${last_osd} || return 1
         run_osd $dir ${last_osd} || return 1
         wait_for_clean || return 1
+        # Won't check for eio on get here -- recovery above might have fixed it
+    else
+        shard_id=$(expr $shard_id + 1)
+        inject_$inject ec data $poolname $objname $dir $shard_id || return 1
+        rados_get $dir $poolname $objname fail || return 1
+        rm $dir/ORIGINAL
     fi
 
-    shard_id=$(expr $shard_id + 1)
-    inject_$inject ec data $poolname $objname $dir $shard_id || return 1
-    # Now 2 out of 3 shards get an error, so should fail
-    rados_get $dir $poolname $objname fail || return 1
-    rm $dir/ORIGINAL
 }
 
 # Change the size of speificied shard
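
For reference, a minimal sketch of how the tail of rados_put_get_data() reads once the hunk above is applied. Only the +/- lines come from the diff; the name of the branch condition ($arg) and the elided recovery steps are assumptions.

    if [ "$arg" = "recovery" ]; then    # assumed condition name
        ...                             # kill/out/recover steps elided
        ceph osd in ${last_osd} || return 1
        run_osd $dir ${last_osd} || return 1
        wait_for_clean || return 1
        # Won't check for eio on get here -- recovery above might have fixed it
    else
        # No recovery happened: inject a second shard error so 2 of 3 shards
        # are bad and the read is expected to fail.
        shard_id=$(expr $shard_id + 1)
        inject_$inject ec data $poolname $objname $dir $shard_id || return 1
        rados_get $dir $poolname $objname fail || return 1
        rm $dir/ORIGINAL
    fi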