showme=false
have_test_arg=false
randomize=false
+exact_order=false
export here=`pwd`
xfile=""
+subdir_xfile=""
brief_test_summary=false
do_report=false
DUMP_OUTPUT=false
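+# Iteration state for the new -i/-I options (a descriptive note, assuming the
+# semantics shown in the loop at the bottom of this patch): run the full test
+# list $iterations times; istop stops iterating after any test failure.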
+iterations=1
+istop=false
# This is a global variable used to pass test failure text to reporting gunk
_err_msg=""
echo "Usage: $0 [options] [testlist]"'
check options
- -nfs test NFS
- -glusterfs test GlusterFS
- -cifs test CIFS
+ -nfs test NFS
+ -glusterfs test GlusterFS
+ -cifs test CIFS
-9p test 9p
+ -virtiofs test virtiofs
-overlay test overlay
- -pvfs2 test PVFS2
- -tmpfs test TMPFS
- -ubifs test ubifs
+ -pvfs2 test PVFS2
+ -tmpfs test TMPFS
+ -ubifs test ubifs
-l line mode diff
-udiff show unified diff (default)
-n show me, do not run tests
-T output timestamps
-r randomize test order
+ --exact-order run tests in the exact order specified
+ -i <n> iterate the test list <n> times
+ -I <n> iterate the test list <n> times, but stop iterating on the first test failure
-d dump test output to stdout
-b brief test summary
-R fmt[,fmt...] generate report in the specified formats. Supported formats: xunit
-g group[,group...] include tests from these groups
-x group[,group...] exclude tests from these groups
-X exclude_file exclude individual tests
+ -e testlist exclude a specific list of tests
-E external_file exclude individual tests
[testlist] include tests matching names in testlist
local grp=$1
local grpl=""
local sub=$(dirname $grp)
+ local fsgroup="$FSTYP"
if [ -n "$sub" -a "$sub" != "." -a -d "$SRC_DIR/$sub" ]; then
# group is given as <subdir>/<group> (e.g. xfs/quick)
return
fi
- for d in $SRC_GROUPS $FSTYP; do
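+	# ext2 and ext3 have no test directories of their own; their
+	# fs-specific tests live under tests/ext4, so map the fstype there.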
+ if [ "$FSTYP" = ext2 -o "$FSTYP" = ext3 ]; then
+ fsgroup=ext4
+ fi
+ for d in $SRC_GROUPS $fsgroup; do
if ! test -d "$SRC_DIR/$d" ; then
continue
fi
list=$(get_group_list $xgroup)
if [ -z "$list" ]; then
echo "Group \"$xgroup\" is empty or not defined?"
- exit 1
+ continue
fi
trim_test_list $list
done
- # sort the list of tests into numeric order
- list=`sort -n $tmp.list | uniq`
- rm -f $tmp.list
-
- if $randomize
- then
- list=`echo $list | awk -f randomize.awk`
+ # sort the list of tests into numeric order unless we're running tests
+ # in the exact order specified
+ if ! $exact_order; then
+ if $randomize; then
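+			# Prefer shuf(1) when it's available; otherwise fall
+			# back to the awk randomizer shipped with the suite.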
+ if type shuf >& /dev/null; then
+ sorter="shuf"
+ else
+ sorter="awk -v seed=$RANDOM -f randomize.awk"
+ fi
+ else
+ sorter="cat"
+ fi
+ list=`sort -n $tmp.list | uniq | $sorter`
+ else
+ list=`cat $tmp.list`
fi
+ rm -f $tmp.list
}
# Process command arguments first.
-glusterfs) FSTYP=glusterfs ;;
-cifs) FSTYP=cifs ;;
-9p) FSTYP=9p ;;
+ -virtiofs) FSTYP=virtiofs ;;
-overlay) FSTYP=overlay; export OVERLAY=true ;;
-pvfs2) FSTYP=pvfs2 ;;
-tmpfs) FSTYP=tmpfs ;;
XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
;;
- -X) xfile=$2; shift ;
- for d in $SRC_GROUPS $FSTYP; do
- [ -f $SRC_DIR/$d/$xfile ] || continue
- for f in `sed "s/#.*$//" $SRC_DIR/$d/$xfile`; do
- echo $d/$f >> $tmp.xlist
- done
- done
+ -X) subdir_xfile=$2; shift ;
;;
+ -e)
+ xfile=$2; shift ;
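+		# Turn the comma/space separated list into one test per line.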
+ echo "$xfile" | tr ', ' '\n\n' >> $tmp.xlist
+ ;;
+
-E) xfile=$2; shift ;
if [ -f $xfile ]; then
sed "s/#.*$//" "$xfile" >> $tmp.xlist
-udiff) diff="$diff -u" ;;
-n) showme=true ;;
- -r) randomize=true ;;
-
+ -r)
+ if $exact_order; then
+ echo "Cannot specify -r and --exact-order."
+ exit 1
+ fi
+ randomize=true
+ ;;
+ --exact-order)
+ if $randomize; then
+ echo "Cannnot specify --exact-order and -r."
+ exit 1
+ fi
+ exact_order=true
+ ;;
+ -i) iterations=$2; shift ;;
+ -I) iterations=$2; istop=true; shift ;;
-T) timestamp=true ;;
-d) DUMP_OUTPUT=true ;;
-b) brief_test_summary=true;;
exit 1
fi
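+# -X names a per-directory exclude file: for each test subdir, read the file
+# of that name (if present), strip comments, and add its tests to the
+# exclude list.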
+if [ -n "$subdir_xfile" ]; then
+ for d in $SRC_GROUPS $FSTYP; do
+ [ -f $SRC_DIR/$d/$subdir_xfile ] || continue
+ for f in `sed "s/#.*$//" $SRC_DIR/$d/$subdir_xfile`; do
+ echo $d/$f >> $tmp.xlist
+ done
+ done
+fi
+
# Process tests from command line now.
if $have_test_arg; then
while [ $# -gt 0 ]; do
unset try notrun bad
}
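+# Log a line to $check.log and, when config sections are in use, to the
+# per-section copy under $REPORT_DIR as well.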
+_global_log() {
+ echo "$1" >> $check.log
+ if $OPTIONS_HAVE_SECTIONS; then
+ echo "$1" >> ${REPORT_DIR}/check.log
+ fi
+}
+
_wrapup()
{
seq="check"
}' \
| sort -n >$tmp.out
mv $tmp.out $check.time
+ if $OPTIONS_HAVE_SECTIONS; then
+ cp $check.time ${REPORT_DIR}/check.time
+ fi
fi
- echo "" >>$check.log
- date >>$check.log
+ _global_log ""
+ _global_log "$(date)"
echo "SECTION -- $section" >>$tmp.summary
echo "=========================" >>$tmp.summary
echo "Ran:$try"
echo "Ran:$try" >>$tmp.summary
fi
- echo "Ran:$try" >>$check.log
+ _global_log "Ran:$try"
fi
$interrupt && echo "Interrupted!" | tee -a $check.log
+ if $OPTIONS_HAVE_SECTIONS; then
+ $interrupt && echo "Interrupted!" | tee -a \
+ ${REPORT_DIR}/check.log
+ fi
if [ ! -z "$notrun" ]; then
if [ $brief_test_summary == "false" ]; then
echo "Not run:$notrun"
echo "Not run:$notrun" >>$tmp.summary
fi
- echo "Not run:$notrun" >>$check.log
+ _global_log "Not run:$notrun"
fi
if [ ! -z "$n_bad" -a $n_bad != 0 ]; then
echo "Failures:$bad"
echo "Failed $n_bad of $n_try tests"
- echo "Failures:$bad" >>$check.log
- echo "Failed $n_bad of $n_try tests" >>$check.log
+ _global_log "Failures:$bad"
+ _global_log "Failed $n_bad of $n_try tests"
echo "Failures:$bad" >>$tmp.summary
echo "Failed $n_bad of $n_try tests" >>$tmp.summary
else
echo "Passed all $n_try tests"
- echo "Passed all $n_try tests" >>$check.log
+ _global_log "Passed all $n_try tests"
echo "Passed all $n_try tests" >>$tmp.summary
fi
echo "" >>$tmp.summary
if [ -f ${RESULT_DIR}/require_scratch ]; then
_check_scratch_fs || err=true
rm -f ${RESULT_DIR}/require_scratch*
- else
- _scratch_unmount 2> /dev/null
fi
+ _scratch_unmount 2> /dev/null
}
_expunge_test()
return 0
}
-_init_kmemleak
+# Can we run systemd scopes?
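+# Probe by running a command with a distinctive exit status inside a scope;
+# if that status propagates back to us, scopes are usable.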
+HAVE_SYSTEMD_SCOPES=
+systemctl reset-failed "fstests-check" &>/dev/null
+systemd-run --quiet --unit "fstests-check" --scope bash -c "exit 77" &> /dev/null
+test $? -eq 77 && HAVE_SYSTEMD_SCOPES=yes
+
+# Make the check script unattractive to the OOM killer...
+OOM_SCORE_ADJ="/proc/self/oom_score_adj"
+test -w ${OOM_SCORE_ADJ} && echo -1000 > ${OOM_SCORE_ADJ}
+
+# ...and make the tests themselves somewhat more attractive to it, so that if
+# the system runs out of memory it'll be the test that gets killed and not the
+# test framework.
+#
+# If systemd is available, run the entire test script in a scope so that we can
+# kill all subprocesses of the test if it fails to clean up after itself. This
+# is essential for ensuring that the post-test unmount succeeds. Note that
+# systemd doesn't automatically remove transient scopes that fail to terminate
+# when systemd tells them to terminate (e.g. programs stuck in D state when
+# systemd sends SIGKILL), so we use reset-failed to tear down the scope.
+_run_seq() {
+ local cmd=(bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq")
+
+ if [ -n "${HAVE_SYSTEMD_SCOPES}" ]; then
+ local unit="$(systemd-escape "fs$seq").scope"
+ systemctl reset-failed "${unit}" &> /dev/null
+ systemd-run --quiet --unit "${unit}" --scope "${cmd[@]}"
+ res=$?
+ systemctl stop "${unit}" &> /dev/null
+ return "${res}"
+ else
+ "${cmd[@]}"
+ fi
+}
+
+_detect_kmemleak
_prepare_test_list
if $OPTIONS_HAVE_SECTIONS; then
trap "_wrapup; exit \$status" 0 1 2 3 15
fi
-for section in $HOST_OPTIONS_SECTIONS; do
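+# Run all selected tests against one config section.  This was previously the
+# body of the main per-section loop; making it a function lets the -i/-I
+# iteration loop at the bottom invoke it repeatedly.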
+function run_section()
+{
+ local section=$1
+
OLD_FSTYP=$FSTYP
OLD_TEST_FS_MOUNT_OPTS=$TEST_FS_MOUNT_OPTS
get_next_config $section
fi
done
if $skip; then
- continue
+ return
fi
fi
fi
done
if $skip; then
- continue
+ return
fi
fi
status=1
exit
fi
+ # TEST_DEV has been recreated, so the FSTYP previously derived
+ # from TEST_DEV may have changed.  Source common/rc again with
+ # the correct FSTYP to pick up FSTYP-specific configs, e.g.
+ # common/xfs.
+ . common/rc
_prepare_test_list
elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
_test_unmount 2> /dev/null
# call the overridden mount - make sure the FS mounts with
# the same options that we'll mount with later.
- if ! _scratch_mount >$tmp.err 2>&1
+ if ! _try_scratch_mount >$tmp.err 2>&1
then
echo "our local mount routine ..."
cat $tmp.err
echo "check: failed to mount \$SCRATCH_DEV using specified options"
status=1
exit
+ else
+ _scratch_unmount
fi
fi
seqres="$REPORT_DIR/$seqnum"
mkdir -p $RESULT_DIR
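+	# Clear stale require_* markers from a previous run so the post-test
+	# fs checks only fire if this test actually used the devices.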
+ rm -f ${RESULT_DIR}/require_scratch*
+ rm -f ${RESULT_DIR}/require_test*
echo -n "$seqnum"
if $showme; then
# _check_dmesg depends on this log in dmesg
touch ${RESULT_DIR}/check_dmesg
fi
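+	# Best-effort wipe of the scratch device(s) so each test starts
+	# from a known state.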
+ _try_wipe_scratch_devs > /dev/null 2>&1
+
+ # clear the WARN_ONCE state to allow a potential problem
+ # to be reported for each test
+ (echo 1 > $DEBUGFS_MNT/clear_warn_once) > /dev/null 2>&1
+
if [ "$DUMP_OUTPUT" = true ]; then
- ./$seq 2>&1 | tee $tmp.out
+ _run_seq 2>&1 | tee $tmp.out
# Because $? would get tee's return code
sts=${PIPESTATUS[0]}
else
- ./$seq >$tmp.out 2>&1
+ _run_seq >$tmp.out 2>&1
sts=$?
fi
_dump_err_cont "[failed, exit status $sts]"
_test_unmount 2> /dev/null
_scratch_unmount 2> /dev/null
+ rm -f ${RESULT_DIR}/require_test*
+ rm -f ${RESULT_DIR}/require_scratch*
err=true
else
# the test apparently passed, so check for corruption
# and log messages that shouldn't be there.
_check_filesystems
_check_dmesg || err=true
- _check_kmemleak || err=true
fi
+ # Reload the module after each test to check for leaks or
+ # other problems.
+ if [ -n "${TEST_FS_MODULE_RELOAD}" ]; then
+ _test_unmount 2> /dev/null
+ _scratch_unmount 2> /dev/null
+ modprobe -r fs-$FSTYP
+ modprobe fs-$FSTYP
+ fi
+
+ # Scan for memory leaks after every test so that associating
+ # a leak to a particular test will be as accurate as possible.
+ _check_kmemleak || err=true
+
# test ends after all checks are done.
$timestamp && _timestamp
stop=`_wallclock`
else
head -n "$DIFF_LENGTH"
echo "..."
- echo "(Run '$diff $seq.out $seqres.out.bad'" \
+ echo "(Run '$diff $here/$seq.out $seqres.out.bad'" \
" to see the entire diff)"
fi; } | sed -e 's/^\(.\)/ \1/'
err=true
_test_unmount 2> /dev/null
_scratch_unmount 2> /dev/null
+}
+
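+# Main loop: sweep all configured sections, $iterations times in total; with
+# -I, stop as soon as a sweep ends with failures recorded.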
+for ((iters = 0; iters < $iterations; iters++)); do
+ for section in $HOST_OPTIONS_SECTIONS; do
+ run_section $section
+ if [ "$sum_bad" != 0 ] && [ "$istop" = true ]; then
+ interrupt=false
+ status=`expr $sum_bad != 0`
+ exit
+ fi
+ done
done
interrupt=false