2 # SPDX-License-Identifier: GPL-2.0
3 # Copyright (C) 2013 STRATO. All rights reserved.
5 # FSQA Test No. btrfs/011
7 # Test of the btrfs replace operation.
9 # The amount of tests done depends on the number of devices in the
10 # SCRATCH_DEV_POOL. For full test coverage, at least 5 devices should
11 # be available (e.g. 5 partitions).
13 # The source and target devices for the replace operation are
14 # arbitrarily chosen out of SCRATCH_DEV_POOL. Since the target device
15 # mustn't be smaller than the source device, the requirement for this
16 # test is that all devices have _exactly_ the same size. If this is
17 # not the case, this test is not run.
19 # To check the filesystems after replacing a device, a scrub run is
20 # performed, a btrfsck run, and finally the filesystem is remounted.
# Standard fstests preamble: name the per-test result file and emit the
# canonical "QA output created by" line that the harness compares
# against the golden output.
23 seqres=$RESULT_DIR/$seq
24 echo "QA output created by $seq"
# NOTE(review): fragment of the test's cleanup path -- the enclosing
# _cleanup() definition and parts of its body are not visible in this
# excerpt.
# If the background noise generator (started later in
# btrfs_replace_test) is still alive, presumably it is killed inside
# this 'if'; the body lines are not shown here -- confirm in the full
# file.
33 if [ $noise_pid -ne 0 ] && ps -p $noise_pid | grep -q $noise_pid; then
38 # we need this umount and couldn't rely on _require_scratch to umount
39 # it from next test, because we would replace SCRATCH_DEV, which is
40 # needed by _require_scratch, and make it umounted.
41 _scratch_unmount > /dev/null 2>&1
# Run _cleanup and propagate the recorded exit status on normal exit (0)
# and on HUP/INT/QUIT/TERM (1 2 3 15).
43 trap "_cleanup; exit \$status" 0 1 2 3 15
45 # get standard environment, filters and checks
49 # real QA test starts here
# Prerequisites:
# - a scratch device, without the post-test fsck (the replace operation
#   swaps SCRATCH_DEV out from under the harness, see the note above
#   _scratch_unmount in _cleanup)
# - at least 5 devices in SCRATCH_DEV_POOL, all of exactly equal size,
#   so any pool member can serve as a replace target
# - the wipefs utility, used to clear stale signatures between runs
51 _require_scratch_nocheck
52 _require_scratch_dev_pool 5
53 _require_scratch_dev_pool_equal_size
54 _require_command "$WIPEFS_PROG" wipefs
59 echo "*** test btrfs replace"
# NOTE(review): the header of the enclosing fill function is not visible
# in this excerpt, and the 'done' terminators of the first two loops
# appear to be elided as well.
65 # Fill inline extents.
# 3800-byte files are small enough to be stored inline in the metadata
# tree, exercising replace of metadata-held data.
66 for i in `seq 1 500`; do
67 _ddt of=$SCRATCH_MNT/s$i bs=3800 count=1
# Files just over 16K (16385 bytes) force regular, non-inline extents.
71 for i in `seq 1 500`; do
72 _ddt of=$SCRATCH_MNT/l$i bs=16385 count=1
# Bulk-fill: create one 1M template file, then copy it $fssize times so
# the amount of data scales with the per-workout fssize argument.
74 _ddt of=$SCRATCH_MNT/t0 bs=1M count=1 > /dev/null 2>&1
75 for i in `seq $fssize`; do
76 cp $SCRATCH_MNT/t0 $SCRATCH_MNT/t$i || _fail "cp failed"
77 done > /dev/null 2>> $seqres.full
# NOTE(review): body of workout() -- the function header and several
# closing 'fi'/intermediate lines are not visible in this excerpt.
#
# Arguments (as used below):
#   $1 - mkfs options (profile selection, e.g. "-m raid1 -d raid1")
#   $2 - number of pool devices to use for the raid
#   $3 - "cancel" to test replace cancellation, "no" otherwise
#   $4 - presumably fssize (MB of bulk data); referenced via $fssize --
#        TODO confirm the assignment line, it is not shown here
83 local mkfs_options="$1"
84 local num_devs4raid="$2"
85 local with_cancel="$3"
# The first pool device is the replace source.
87 local source_dev="`echo ${SCRATCH_DEV_POOL} | awk '{print $1}'`"
# Anything other than the minimal 64MB fill runs the "thorough" variant
# (which also forces a sync mid-replace, see btrfs_replace_test).
90 [[ $fssize != 64 ]] && quick="thorough"
92 echo -e "\\n---------workout \"$1\" $2 $3 $4-----------" >> $seqres.full
# Clear stale fs signatures from all pool devices before mkfs.
94 $WIPEFS_PROG -a $SCRATCH_DEV_POOL > /dev/null 2>&1
95 _scratch_dev_pool_get $num_devs4raid
98 _scratch_pool_mkfs $mkfs_options >> $seqres.full 2>&1 ||\
# Skip (rather than fail) if the fs cannot hold the workload.
102 _require_fs_space $SCRATCH_MNT $((2 * 512 * 1024)) #2.5G
105 _run_btrfs_util_prog filesystem show -m $SCRATCH_MNT
# First pass: plain replace of source_dev by the spare device.
107 echo -e "Replace from $source_dev to $SPARE_DEV\\n" >> $seqres.full
108 btrfs_replace_test $source_dev $SPARE_DEV "" $with_cancel $quick
110 _run_btrfs_util_prog filesystem show -m $SCRATCH_MNT
112 # Skip -r test for configs without mirror OR replace cancel
113 if echo $mkfs_options | egrep -qv "raid1|raid5|raid6|raid10" || \
114 [ "${with_cancel}Q" = "cancelQ" ]; then
115 _scratch_unmount > /dev/null 2>&1
116 _scratch_dev_pool_put
121 # Due to above replace, now SPARE_DEV is part of the FS, check that.
122 $BTRFS_UTIL_PROG filesystem show -m $SCRATCH_MNT |\
123 grep -qs $SPARE_DEV$ ||\
124 _fail "$SPARE_DEV is not part of SCRATCH_FS"
# Second pass: replace back onto the original device with '-r' (only
# read from the source device if no other zero-defect mirror exists).
126 btrfs_replace_test $SPARE_DEV $source_dev "-r" $with_cancel $quick
128 _scratch_unmount > /dev/null 2>&1
129 _scratch_dev_pool_put
# NOTE(review): body of btrfs_replace_test() -- the function header and
# several closing 'fi' lines are not visible in this excerpt.
#
# Arguments (as used below):
#   $1 - device to replace
#   $2 - replacement (target) device
#   $3 - extra options for 'btrfs replace start' (e.g. "-r")
#   $4 - "cancel" to start the replace in the background and cancel it
#   quick - presumably passed as $5 by workout; the assignment line is
#           not shown here -- TODO confirm
135 local source_dev="$1"
136 local target_dev="$2"
137 local replace_options="$3"
138 local with_cancel="$4"
141 # generate some (slow) background traffic in parallel to the
142 # replace operation. It is not a problem if cat fails early
144 cat /dev/urandom | od > $SCRATCH_MNT/noise 2>> $seqres.full &
147 if [ "${with_cancel}Q" = "cancelQ" ]; then
148 # background the replace operation (no '-B' option given)
149 _run_btrfs_util_prog replace start -f $replace_options $source_dev $target_dev $SCRATCH_MNT
151 _run_btrfs_util_prog replace cancel $SCRATCH_MNT
153 # 'replace status' waits for the replace operation to finish
154 # before the status is printed
155 $BTRFS_UTIL_PROG replace status $SCRATCH_MNT > $tmp.tmp 2>&1
156 cat $tmp.tmp >> $seqres.full
157 grep -q canceled $tmp.tmp || _fail "btrfs replace status (canceled) failed"
159 if [ "${quick}Q" = "thoroughQ" ]; then
160 # On current hardware, the thorough test runs
161 # more than a second. This is a chance to force
162 # a sync in the middle of the replace operation.
163 (sleep 1; sync) > /dev/null 2>&1 &
# Foreground ('-B') replace: the command only returns once the copy has
# finished, so the status check below expects "finished".
165 _run_btrfs_util_prog replace start -Bf $replace_options $source_dev $target_dev $SCRATCH_MNT
167 $BTRFS_UTIL_PROG replace status $SCRATCH_MNT > $tmp.tmp 2>&1
168 cat $tmp.tmp >> $seqres.full
169 grep -q finished $tmp.tmp || _fail "btrfs replace status (finished) failed"
# Stop the background noise writer; TERM is sufficient, errors ignored
# in case it already exited.
172 if ps -p $noise_pid | grep -q $noise_pid; then
173 kill -TERM $noise_pid 2> /dev/null
178 # scrub tests on-disk data, that's the reason for the sync.
179 # With the '-B' option (don't background), any type of error causes
180 # exit values != 0, including detected correctable and uncorrectable
181 # errors on the device.
183 _run_btrfs_util_prog scrub start -B $SCRATCH_MNT
185 # Two tests are performed, the 1st is to btrfsck the filesystem,
186 # and the 2nd test is to mount the filesystem.
187 # Usually _check_btrfs_filesystem would perform the mount test,
188 # but it gets confused by the mount output that shows SCRATCH_MNT
189 # mounted but not being mounted to SCRATCH_DEV. This happens
190 # because in /proc/mounts the 2nd device of the filesystem is
191 # shown after the replace operation. Let's just do the mount
192 # test manually after _check_btrfs_filesystem is finished.
193 _scratch_unmount > /dev/null 2>&1
194 if [ "${with_cancel}Q" != "cancelQ" ]; then
195 # after the replace operation, use the target_dev for everything
196 echo "_check_btrfs_filesystem $target_dev" >> $seqres.full
197 _check_btrfs_filesystem $target_dev
# Remount by substituting target_dev for SCRATCH_DEV in the standard
# scratch mount options ('&' is used as the sed delimiter because
# device paths contain '/').
198 _mount -t $FSTYP `_scratch_mount_options | sed "s&${SCRATCH_DEV}&${target_dev}&"`
# A canceled replace leaves the source device in place, so check that.
200 _check_btrfs_filesystem $source_dev
# Test matrix: workout "<mkfs profile options>" <num devices> <cancel?> <fssize MB>
# Covers single/dup/raid0/raid1/raid5/raid6/raid10 profiles, mixed
# block groups (-M), a cancel run, and one larger (2048MB) thorough run.
205 workout "-m single -d single" 1 no 64
206 workout "-m single -d single -M" 1 no 64
207 workout "-m dup -d single" 1 no 64
208 workout "-m dup -d single" 1 cancel 1024
209 workout "-m dup -d dup -M" 1 no 64
210 workout "-m raid0 -d raid0" 2 no 64
211 workout "-m raid1 -d raid1" 2 no 2048
212 workout "-m raid5 -d raid5" 2 no 64
213 workout "-m raid6 -d raid6" 3 no 64
214 workout "-m raid10 -d raid10" 4 no 64