# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2018, Jeff Layton <jlayton@redhat.com>
#
# Open a file and write to it and fsync. Then flip the data device to throw
# errors, write to it again and call sync. Close the file, reopen it and
# then call fsync on it. Is the error reported?
12 seqres=$RESULT_DIR/$seq
13 echo "QA output created by $seq"
17 status=1 # failure is the default!
18 trap "_cleanup; exit \$status" 0 1 2 3 15
27 # get standard environment, filters and checks
32 # real QA test starts here
34 _require_scratch_dev_pool
36 _require_dm_target error
40 # bring up dmerror device
43 # Replace first device with error-test device
44 old_SCRATCH_DEV=$SCRATCH_DEV
45 SCRATCH_DEV_POOL=`echo $SCRATCH_DEV_POOL | perl -pe "s#$SCRATCH_DEV#$DMERROR_DEV#"`
46 SCRATCH_DEV=$DMERROR_DEV
48 echo "Format and mount"
49 _scratch_pool_mkfs "-d raid0 -m raid1" > $seqres.full 2>&1
52 # How much do we need to write? We need to hit all of the stripes. btrfs uses a
53 # fixed 64k stripesize, so write enough to hit each one. In the case of
54 # compression, each 128K input data chunk will be compressed to 4K (because of
55 # the characters written are duplicate). Therefore we have to write
56 # (128K * 16) = 2048K to make sure every stripe can be hit.
57 number_of_devices=`echo $SCRATCH_DEV_POOL | wc -w`
58 write_kb=$(($number_of_devices * 2048))
59 _require_fs_space $SCRATCH_MNT $write_kb
60 datalen=$((($write_kb * 1024)-1))
62 # use fd 5 to hold file open
63 testfile=$SCRATCH_MNT/fsync-open-after-err
66 # write some data to file and fsync it out
67 $XFS_IO_PROG -c "pwrite -q 0 $datalen" -c fsync $testfile
69 # flip device to non-working mode
70 _dmerror_load_error_table
72 # rewrite the data, call sync to ensure it's written back w/o scraping error
73 $XFS_IO_PROG -c "pwrite -q 0 $datalen" -c sync $testfile
75 # heal the device error
76 _dmerror_load_working_table
78 # open again and call fsync
79 echo "The following fsync should fail with EIO:"
80 $XFS_IO_PROG -c fsync $testfile