shared/010: avoid dedupe testing blocked on large fs
author    Zorro Lang <zlang@redhat.com>
Wed, 12 Sep 2018 10:15:47 +0000 (18:15 +0800)
committer Eryu Guan <guaneryu@gmail.com>
Sun, 16 Sep 2018 11:49:34 +0000 (19:49 +0800)
When testing on a large fs (--large-fs), xfstests preallocates a large
file in SCRATCH_MNT/ at first. Duperemove takes far too long to deal
with that large file (many days on a 500T XFS). So move the working
directory to a sub-directory under $SCRATCH_MNT/.
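As a rough sketch of the idea (the preallocated file name below is an
assumption, not the actual name used by the --large-fs setup): all test
I/O is confined to a sub-directory, so duperemove never walks the huge
file sitting at the top of the scratch mount and its runtime no longer
depends on that file's size.

    # Layout on the scratch fs when --large-fs is in effect (names assumed):
    #   $SCRATCH_MNT/<large-preallocated-file>   huge fallocated file, must be skipped
    #   $SCRATCH_MNT/dir/                        all test I/O goes here
    testdir="$SCRATCH_MNT/dir"
    mkdir -p "$testdir"
    # duperemove only scans $testdir, not the whole scratch mount
    $DUPEREMOVE_PROG -dr --dedupe-options=same "$testdir" >> "$seqres.full" 2>&1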

Signed-off-by: Zorro Lang <zlang@redhat.com>
Reviewed-by: Eric Sandeen <sandeen@redhat.com>
Signed-off-by: Eryu Guan <guaneryu@gmail.com>
tests/shared/010

index 1817081b33354f61eb76be3ee5f161a44073d520..04f5589022aedaf449299da41fae2ba0e15ed9d3 100755
@@ -65,15 +65,17 @@ function end_test()
 sleep_time=$((50 * TIME_FACTOR))
 
 # Start fsstress
+testdir="$SCRATCH_MNT/dir"
+mkdir $testdir
 fsstress_opts="-r -n 1000 -p $((5 * LOAD_FACTOR))"
-$FSSTRESS_PROG $fsstress_opts -d $SCRATCH_MNT -l 0 >> $seqres.full 2>&1 &
+$FSSTRESS_PROG $fsstress_opts -d $testdir -l 0 >> $seqres.full 2>&1 &
 dedup_pids=""
 dupe_run=$TEST_DIR/${seq}-running
 # Start several dedupe processes on same directory
 touch $dupe_run
 for ((i = 0; i < $((2 * LOAD_FACTOR)); i++)); do
        while [ -e $dupe_run ]; do
-               $DUPEREMOVE_PROG -dr --dedupe-options=same $SCRATCH_MNT/ \
+               $DUPEREMOVE_PROG -dr --dedupe-options=same $testdir \
                        >>$seqres.full 2>&1
        done &
        dedup_pids="$! $dedup_pids"