_scratch_unmount
}
+# this test requires duperemove to work on the scratch file system
+_require_scratch_duperemove()
+{
+ _require_scratch
+ _require_command "$DUPEREMOVE_PROG" duperemove
+
+ _scratch_mkfs > /dev/null
+ _scratch_mount
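+	# write two small identical files and attempt a real dedupe on them to
+	# probe whether duperemove supports this file system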
+ dd if=/dev/zero of="$SCRATCH_MNT/file1" bs=128k count=1 >& /dev/null
+ dd if=/dev/zero of="$SCRATCH_MNT/file2" bs=128k count=1 >& /dev/null
+ if ! "$DUPEREMOVE_PROG" -d "$SCRATCH_MNT/file1" \
+ "$SCRATCH_MNT/file2" >& /dev/null ; then
+ _scratch_unmount
+ _notrun "duperemove does not support file system type: $FSTYP"
+ fi
+ _scratch_unmount
+}
+
# this test requires scratch fs to report explicit SHARED flag
# e.g.
# 0 4K 8K
--- /dev/null
+#! /bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2018 Red Hat Inc. All Rights Reserved.
+#
+# FS QA Test generic/559
+#
+# Dedupe a single big file and verify integrity
+#
+seq=`basename $0`
+seqres=$RESULT_DIR/$seq
+echo "QA output created by $seq"
+
+here=`pwd`
+tmp=/tmp/$$
+status=1 # failure is the default!
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+_cleanup()
+{
+ cd /
+ rm -f $tmp.*
+}
+
+# get standard environment, filters and checks
+. ./common/rc
+. ./common/filter
+. ./common/reflink
+
+# remove previous $seqres.full before test
+rm -f $seqres.full
+
+# real QA test starts here
+_supported_fs generic
+_supported_os Linux
+_require_scratch_duperemove
+
+fssize=$((2 * 1024 * 1024 * 1024))
+_scratch_mkfs_sized $fssize > $seqres.full 2>&1
+_scratch_mount >> $seqres.full 2>&1
+
+# fill the fs with one big file full of identical content
+$XFS_IO_PROG -f -c "pwrite -S 0x55 0 $fssize" $SCRATCH_MNT/${seq}.file \
+ >> $seqres.full 2>&1
+md5sum $SCRATCH_MNT/${seq}.file > ${tmp}.md5sum
+
+echo "= before cycle mount ="
+# Dedupe with 1M blocksize
+$DUPEREMOVE_PROG -dr --dedupe-options=same -b 1048576 $SCRATCH_MNT/ >>$seqres.full 2>&1
+# Verify integrity
+md5sum -c --quiet ${tmp}.md5sum
+# Dedupe with 64k blocksize
+$DUPEREMOVE_PROG -dr --dedupe-options=same -b 65536 $SCRATCH_MNT/ >>$seqres.full 2>&1
+# Verify integrity again
+md5sum -c --quiet ${tmp}.md5sum
+
+# umount and mount again, verify that a fresh read from disk shows no mutations
+_scratch_cycle_mount
+echo "= after cycle mount ="
+md5sum -c --quiet ${tmp}.md5sum
+
+status=0
+exit
--- /dev/null
+QA output created by 559
+= before cycle mount =
+= after cycle mount =
--- /dev/null
+#! /bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2018 Red Hat Inc. All Rights Reserved.
+#
+# FS QA Test generic/560
+#
+# Iterated dedupe integrity test. Copy the original data0 several
+# times (d0 -> d1, d1 -> d2, ... dn-1 -> dn), deduping dataN each time
+# before the next copy. Finally, verify that dataN matches data0.
+#
+seq=`basename $0`
+seqres=$RESULT_DIR/$seq
+echo "QA output created by $seq"
+
+here=`pwd`
+tmp=/tmp/$$
+status=1 # failure is the default!
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+_cleanup()
+{
+ cd /
+ rm -f $tmp.*
+}
+
+# get standard environment, filters and checks
+. ./common/rc
+. ./common/filter
+. ./common/reflink
+
+# remove previous $seqres.full before test
+rm -f $seqres.full
+
+# real QA test starts here
+_supported_fs generic
+_supported_os Linux
+_require_scratch_duperemove
+
+_scratch_mkfs > $seqres.full 2>&1
+_scratch_mount >> $seqres.full 2>&1
+
+function iterate_dedup_verify()
+{
+ local src=$srcdir
+ local dest=$dupdir/1
+
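+	# Each pass copies the previous directory, records its checksums,
+	# stirs in some fsstress noise, dedupes the whole tree and re-verifies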
+ for ((index = 1; index <= times; index++)); do
+ cp -a $src $dest
+ find $dest -type f -exec md5sum {} \; \
+ > $md5file$index
+ # Make some noise
+ $FSSTRESS_PROG $fsstress_opts -d $noisedir \
+ -n 200 -p $((5 * LOAD_FACTOR)) >/dev/null 2>&1
+		# Too much output, so only save the error output
+ $DUPEREMOVE_PROG -dr --dedupe-options=same $dupdir \
+			>/dev/null 2>>$seqres.full
+ md5sum -c --quiet $md5file$index
+ src=$dest
+ dest=$dupdir/$((index + 1))
+ done
+}
+
+srcdir=$SCRATCH_MNT/src
+dupdir=$SCRATCH_MNT/dup
+noisedir=$dupdir/noise
+mkdir $srcdir $dupdir
+mkdir $noisedir
+
+md5file=${tmp}.md5sum
+
+fsstress_opts="-w -r"
+# Create some files as the original data
+$FSSTRESS_PROG $fsstress_opts -d $srcdir \
+ -n 500 -p $((5 * LOAD_FACTOR)) >/dev/null 2>&1
+
+# Calculate how many test cycles will be run
+src_size=`du -ks $srcdir | awk '{print $1}'`
+free_size=`df -kP $SCRATCH_MNT | grep -v Filesystem | awk '{print $4}'`
+times=$((free_size / src_size))
+if [ $times -gt $((4 * TIME_FACTOR)) ]; then
+ times=$((4 * TIME_FACTOR))
+fi
+
+echo "= Do dedup and verify ="
+iterate_dedup_verify
+
+# Use the last checksum file to verify the original data
+sed -e s#dup/$times#src#g $md5file$times > $md5file
+echo "= Backwords verify ="
+md5sum -c --quiet $md5file
+
+# Cycle mount and verify a fresh read from the disk doesn't show mutations
+_scratch_cycle_mount
+echo "= Verify after cycle mount ="
+for ((index = 1; index <= times; index++)); do
+ md5sum -c --quiet $md5file$index
+done
+
+status=0
+exit
--- /dev/null
+QA output created by 560
+= Do dedup and verify =
+= Backwards verify =
+= Verify after cycle mount =
--- /dev/null
+#! /bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2018 Red Hat Inc. All Rights Reserved.
+#
+# FS QA Test generic/561
+#
+# Dedup & random I/O race test, do multi-threads fsstress and dedupe on
+# same directory/files
+#
+seq=`basename $0`
+seqres=$RESULT_DIR/$seq
+echo "QA output created by $seq"
+
+here=`pwd`
+tmp=/tmp/$$
+status=1 # failure is the default!
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+_cleanup()
+{
+ cd /
+ rm -f $tmp.*
+ end_test
+}
+
+# get standard environment, filters and checks
+. ./common/rc
+. ./common/filter
+. ./common/reflink
+
+# remove previous $seqres.full before test
+rm -f $seqres.full
+
+# real QA test starts here
+_supported_fs generic
+_supported_os Linux
+_require_scratch_duperemove
+_require_command "$KILLALL_PROG" killall
+
+_scratch_mkfs > $seqres.full 2>&1
+_scratch_mount >> $seqres.full 2>&1
+
+function end_test()
+{
+ local f=1
+
+	# stop the duperemove loops; removing the flag file lets each loop
+	# exit after its current pass
+ if [ -e $dupe_run ]; then
+ rm -f $dupe_run
+ wait $dedup_pids
+ fi
+
+	# Make sure all fsstress processes are killed
+ while [ $f -ne 0 ]; do
+ $KILLALL_PROG -q $FSSTRESS_PROG > /dev/null 2>&1
+ sleep 1
+ f=`ps -eLf | grep $FSSTRESS_PROG | grep -v "grep" | wc -l`
+ done
+}
+
+sleep_time=$((50 * TIME_FACTOR))
+
+# Start fsstress
+testdir="$SCRATCH_MNT/dir"
+mkdir $testdir
+fsstress_opts="-r -n 1000 -p $((5 * LOAD_FACTOR))"
+$FSSTRESS_PROG $fsstress_opts -d $testdir -l 0 >> $seqres.full 2>&1 &
+dedup_pids=""
+dupe_run=$TEST_DIR/${seq}-running
+# Start several dedupe processes on same directory
+touch $dupe_run
+for ((i = 0; i < $((2 * LOAD_FACTOR)); i++)); do
+ while [ -e $dupe_run ]; do
+ $DUPEREMOVE_PROG -dr --dedupe-options=same $testdir \
+ >>$seqres.full 2>&1
+ done &
+ dedup_pids="$! $dedup_pids"
+done
+
+# End the test after $sleep_time seconds
+sleep $sleep_time
+end_test
+
+# umount and mount again, verify pagecache contents don't mutate and a fresh
+# read from the disk also doesn't show mutations.
+find $testdir -type f -exec md5sum {} \; > ${tmp}.md5sum
+_scratch_cycle_mount
+md5sum -c --quiet ${tmp}.md5sum
+
+echo "Silence is golden"
+status=0
+exit
--- /dev/null
+QA output created by 561
+Silence is golden
556 auto quick casefold
557 auto quick log
558 auto enospc
+559 auto stress dedupe
+560 auto stress dedupe
+561 auto stress dedupe
+++ /dev/null
-#! /bin/bash
-# SPDX-License-Identifier: GPL-2.0
-# Copyright (c) 2018 Red Hat Inc. All Rights Reserved.
-#
-# FS QA Test 008
-#
-# Dedupe a single big file and verify integrity
-#
-seq=`basename $0`
-seqres=$RESULT_DIR/$seq
-echo "QA output created by $seq"
-
-here=`pwd`
-tmp=/tmp/$$
-status=1 # failure is the default!
-trap "_cleanup; exit \$status" 0 1 2 3 15
-
-_cleanup()
-{
- cd /
- rm -f $tmp.*
-}
-
-# get standard environment, filters and checks
-. ./common/rc
-. ./common/filter
-. ./common/reflink
-
-# remove previous $seqres.full before test
-rm -f $seqres.full
-
-# duperemove only supports btrfs and xfs (with reflink feature).
-# Add other filesystems if it supports more later.
-_supported_fs xfs btrfs
-_supported_os Linux
-_require_scratch_dedupe
-_require_command "$DUPEREMOVE_PROG" duperemove
-
-fssize=$((2 * 1024 * 1024 * 1024))
-_scratch_mkfs_sized $fssize > $seqres.full 2>&1
-_scratch_mount >> $seqres.full 2>&1
-
-# fill the fs with a big file has same contents
-$XFS_IO_PROG -f -c "pwrite -S 0x55 0 $fssize" $SCRATCH_MNT/${seq}.file \
- >> $seqres.full 2>&1
-md5sum $SCRATCH_MNT/${seq}.file > ${tmp}.md5sum
-
-echo "= before cycle mount ="
-# Dedupe with 1M blocksize
-$DUPEREMOVE_PROG -dr --dedupe-options=same -b 1048576 $SCRATCH_MNT/ >>$seqres.full 2>&1
-# Verify integrity
-md5sum -c --quiet ${tmp}.md5sum
-# Dedupe with 64k blocksize
-$DUPEREMOVE_PROG -dr --dedupe-options=same -b 65536 $SCRATCH_MNT/ >>$seqres.full 2>&1
-# Verify integrity again
-md5sum -c --quiet ${tmp}.md5sum
-
-# umount and mount again, verify pagecache contents don't mutate
-_scratch_cycle_mount
-echo "= after cycle mount ="
-md5sum -c --quiet ${tmp}.md5sum
-
-status=0
-exit
+++ /dev/null
-QA output created by 008
-= before cycle mount =
-= after cycle mount =
+++ /dev/null
-#! /bin/bash
-# SPDX-License-Identifier: GPL-2.0
-# Copyright (c) 2018 Red Hat Inc. All Rights Reserved.
-#
-# FS QA Test 009
-#
-# Iterate dedupe integrity test. Copy an original data0 several
-# times (d0 -> d1, d1 -> d2, ... dn-1 -> dn), dedupe dataN everytime
-# before copy. At last, verify dataN same with data0.
-#
-seq=`basename $0`
-seqres=$RESULT_DIR/$seq
-echo "QA output created by $seq"
-
-here=`pwd`
-tmp=/tmp/$$
-status=1 # failure is the default!
-trap "_cleanup; exit \$status" 0 1 2 3 15
-
-_cleanup()
-{
- cd /
- rm -f $tmp.*
-}
-
-# get standard environment, filters and checks
-. ./common/rc
-. ./common/filter
-. ./common/reflink
-
-# remove previous $seqres.full before test
-rm -f $seqres.full
-
-# real QA test starts here
-
-# duperemove only supports btrfs and xfs (with reflink feature).
-# Add other filesystems if it supports more later.
-_supported_fs xfs btrfs
-_supported_os Linux
-_require_scratch_dedupe
-_require_command "$DUPEREMOVE_PROG" duperemove
-
-_scratch_mkfs > $seqres.full 2>&1
-_scratch_mount >> $seqres.full 2>&1
-
-function iterate_dedup_verify()
-{
- local src=$srcdir
- local dest=$dupdir/1
-
- for ((index = 1; index <= times; index++)); do
- cp -a $src $dest
- find $dest -type f -exec md5sum {} \; \
- > $md5file$index
- # Make some noise
- $FSSTRESS_PROG $fsstress_opts -d $noisedir \
- -n 200 -p $((5 * LOAD_FACTOR)) >/dev/null 2>&1
- # Too many output, so only save error output
- $DUPEREMOVE_PROG -dr --dedupe-options=same $dupdir \
- >/dev/null 2>$seqres.full
- md5sum -c --quiet $md5file$index
- src=$dest
- dest=$dupdir/$((index + 1))
- done
-}
-
-srcdir=$SCRATCH_MNT/src
-dupdir=$SCRATCH_MNT/dup
-noisedir=$dupdir/noise
-mkdir $srcdir $dupdir
-mkdir $dupdir/noise
-
-md5file=${tmp}.md5sum
-
-fsstress_opts="-w -r"
-# Create some files to be original data
-$FSSTRESS_PROG $fsstress_opts -d $srcdir \
- -n 500 -p $((5 * LOAD_FACTOR)) >/dev/null 2>&1
-
-# Calculate how many test cycles will be run
-src_size=`du -ks $srcdir | awk '{print $1}'`
-free_size=`df -kP $SCRATCH_MNT | grep -v Filesystem | awk '{print $4}'`
-times=$((free_size / src_size))
-if [ $times -gt $((4 * TIME_FACTOR)) ]; then
- times=$((4 * TIME_FACTOR))
-fi
-
-echo "= Do dedup and verify ="
-iterate_dedup_verify
-
-# Use the last checksum file to verify the original data
-sed -e s#dup/$times#src#g $md5file$times > $md5file
-echo "= Backwords verify ="
-md5sum -c --quiet $md5file
-
-# read from the disk also doesn't show mutations.
-_scratch_cycle_mount
-echo "= Verify after cycle mount ="
-for ((index = 1; index <= times; index++)); do
- md5sum -c --quiet $md5file$index
-done
-
-status=0
-exit
+++ /dev/null
-QA output created by 009
-= Do dedup and verify =
-= Backwords verify =
-= Verify after cycle mount =
+++ /dev/null
-#! /bin/bash
-# SPDX-License-Identifier: GPL-2.0
-# Copyright (c) 2018 Red Hat Inc. All Rights Reserved.
-#
-# FS QA Test 010
-#
-# Dedup & random I/O race test, do multi-threads fsstress and dedupe on
-# same directory/files
-#
-seq=`basename $0`
-seqres=$RESULT_DIR/$seq
-echo "QA output created by $seq"
-
-here=`pwd`
-tmp=/tmp/$$
-status=1 # failure is the default!
-trap "_cleanup; exit \$status" 0 1 2 3 15
-
-_cleanup()
-{
- cd /
- rm -f $tmp.*
- end_test
-}
-
-# get standard environment, filters and checks
-. ./common/rc
-. ./common/filter
-. ./common/reflink
-
-# remove previous $seqres.full before test
-rm -f $seqres.full
-
-# real QA test starts here
-
-# duperemove only supports btrfs and xfs (with reflink feature).
-# Add other filesystems if it supports more later.
-_supported_fs xfs btrfs
-_supported_os Linux
-_require_scratch_dedupe
-_require_command "$DUPEREMOVE_PROG" duperemove
-_require_command "$KILLALL_PROG" killall
-
-_scratch_mkfs > $seqres.full 2>&1
-_scratch_mount >> $seqres.full 2>&1
-
-function end_test()
-{
- local f=1
-
- # stop duperemove running
- if [ -e $dupe_run ]; then
- rm -f $dupe_run
- wait $dedup_pids
- fi
-
- # Make sure all fsstress get killed
- while [ $f -ne 0 ]; do
- $KILLALL_PROG -q $FSSTRESS_PROG > /dev/null 2>&1
- sleep 1
- f=`ps -eLf | grep $FSSTRESS_PROG | grep -v "grep" | wc -l`
- done
-}
-
-sleep_time=$((50 * TIME_FACTOR))
-
-# Start fsstress
-testdir="$SCRATCH_MNT/dir"
-mkdir $testdir
-fsstress_opts="-r -n 1000 -p $((5 * LOAD_FACTOR))"
-$FSSTRESS_PROG $fsstress_opts -d $testdir -l 0 >> $seqres.full 2>&1 &
-dedup_pids=""
-dupe_run=$TEST_DIR/${seq}-running
-# Start several dedupe processes on same directory
-touch $dupe_run
-for ((i = 0; i < $((2 * LOAD_FACTOR)); i++)); do
- while [ -e $dupe_run ]; do
- $DUPEREMOVE_PROG -dr --dedupe-options=same $testdir \
- >>$seqres.full 2>&1
- done &
- dedup_pids="$! $dedup_pids"
-done
-
-# End the test after $sleep_time seconds
-sleep $sleep_time
-end_test
-
-# umount and mount again, verify pagecache contents don't mutate and a fresh
-# read from the disk also doesn't show mutations.
-find $testdir -type f -exec md5sum {} \; > ${tmp}.md5sum
-_scratch_cycle_mount
-md5sum -c --quiet ${tmp}.md5sum
-
-echo "Silence is golden"
-status=0
-exit
+++ /dev/null
-QA output created by 010
-Silence is golden
# - comment line before each group is "new" description
#
002 auto metadata quick log
-008 auto stress dedupe
-009 auto stress dedupe
-010 auto stress dedupe
011 auto quick
032 mkfs auto quick
298 auto trim