showme=false
have_test_arg=false
randomize=false
+exact_order=false
export here=`pwd`
xfile=""
subdir_xfile=""
do_report=false
DUMP_OUTPUT=false
iterations=1
+istop=false
# This is a global variable used to pass test failure text to reporting gunk
_err_msg=""
-n show me, do not run tests
-T output timestamps
-r randomize test order
+ --exact-order run tests in the exact order specified
-i <n> iterate the test list <n> times
+ -I <n>	iterate the test list <n> times, but stop iterating if any test fails
-d dump test output to stdout
-b brief test summary
-R fmt[,fmt] generate report in formats specified. Supported format: [xunit]
-g group[,group...] include tests from these groups
-x group[,group...] exclude tests from these groups
-X exclude_file exclude individual tests
+ -e testlist exclude a specific list of tests
-E external_file exclude individual tests
[testlist] include tests matching names in testlist
list=$(get_group_list $xgroup)
if [ -z "$list" ]; then
echo "Group \"$xgroup\" is empty or not defined?"
- exit 1
+ continue
fi
trim_test_list $list
done
- # sort the list of tests into numeric order
- if $randomize; then
- if type shuf >& /dev/null; then
- sorter="shuf"
+ # sort the list of tests into numeric order unless we're running tests
+ # in the exact order specified
+ if ! $exact_order; then
+ if $randomize; then
+ if type shuf >& /dev/null; then
+ sorter="shuf"
+ else
+ sorter="awk -v seed=$RANDOM -f randomize.awk"
+ fi
else
- sorter="awk -v seed=$RANDOM -f randomize.awk"
+ sorter="cat"
fi
+ list=`sort -n $tmp.list | uniq | $sorter`
else
- sorter="cat"
+ list=`cat $tmp.list`
fi
- list=`sort -n $tmp.list | uniq | $sorter`
rm -f $tmp.list
}
-X) subdir_xfile=$2; shift ;
;;
+ -e)
+ xfile=$2; shift ;
+ echo "$xfile" | tr ', ' '\n\n' >> $tmp.xlist
+ ;;
+
-E) xfile=$2; shift ;
if [ -f $xfile ]; then
sed "s/#.*$//" "$xfile" >> $tmp.xlist
-udiff) diff="$diff -u" ;;
-n) showme=true ;;
- -r) randomize=true ;;
+ -r)
+ if $exact_order; then
+ echo "Cannot specify -r and --exact-order."
+ exit 1
+ fi
+ randomize=true
+ ;;
+	--exact-order)
+		if $randomize; then
+			echo "Cannot specify --exact-order and -r."
+			exit 1
+		fi
+		exact_order=true
+		;;
-i) iterations=$2; shift ;;
+ -I) iterations=$2; istop=true; shift ;;
-T) timestamp=true ;;
-d) DUMP_OUTPUT=true ;;
-b) brief_test_summary=true;;
unset try notrun bad
}
+_global_log() {
+ echo "$1" >> $check.log
+ if $OPTIONS_HAVE_SECTIONS; then
+ echo "$1" >> ${REPORT_DIR}/check.log
+ fi
+}
+
_wrapup()
{
seq="check"
}' \
| sort -n >$tmp.out
mv $tmp.out $check.time
+ if $OPTIONS_HAVE_SECTIONS; then
+ cp $check.time ${REPORT_DIR}/check.time
+ fi
fi
- echo "" >>$check.log
- date >>$check.log
+ _global_log ""
+ _global_log "$(date)"
echo "SECTION -- $section" >>$tmp.summary
echo "=========================" >>$tmp.summary
echo "Ran:$try"
echo "Ran:$try" >>$tmp.summary
fi
- echo "Ran:$try" >>$check.log
+ _global_log "Ran:$try"
fi
$interrupt && echo "Interrupted!" | tee -a $check.log
+ if $OPTIONS_HAVE_SECTIONS; then
+ $interrupt && echo "Interrupted!" | tee -a \
+ ${REPORT_DIR}/check.log
+ fi
if [ ! -z "$notrun" ]; then
if [ $brief_test_summary == "false" ]; then
echo "Not run:$notrun"
echo "Not run:$notrun" >>$tmp.summary
fi
- echo "Not run:$notrun" >>$check.log
+ _global_log "Not run:$notrun"
fi
if [ ! -z "$n_bad" -a $n_bad != 0 ]; then
echo "Failures:$bad"
echo "Failed $n_bad of $n_try tests"
- echo "Failures:$bad" >>$check.log
- echo "Failed $n_bad of $n_try tests" >>$check.log
+ _global_log "Failures:$bad"
+ _global_log "Failed $n_bad of $n_try tests"
echo "Failures:$bad" >>$tmp.summary
echo "Failed $n_bad of $n_try tests" >>$tmp.summary
else
echo "Passed all $n_try tests"
- echo "Passed all $n_try tests" >>$check.log
+ _global_log "Passed all $n_try tests"
echo "Passed all $n_try tests" >>$tmp.summary
fi
echo "" >>$tmp.summary
return 0
}
+# Can we run systemd scopes?
+HAVE_SYSTEMD_SCOPES=
+systemctl reset-failed "fstests-check" &>/dev/null
+systemd-run --quiet --unit "fstests-check" --scope bash -c "exit 77" &> /dev/null
+test $? -eq 77 && HAVE_SYSTEMD_SCOPES=yes
+
# Make the check script unattractive to the OOM killer...
OOM_SCORE_ADJ="/proc/self/oom_score_adj"
test -w ${OOM_SCORE_ADJ} && echo -1000 > ${OOM_SCORE_ADJ}
# ...and make the tests themselves somewhat more attractive to it, so that if
# the system runs out of memory it'll be the test that gets killed and not the
# test framework.
+#
+# If systemd is available, run the entire test script in a scope so that we can
+# kill all subprocesses of the test if it fails to clean up after itself. This
+# is essential for ensuring that the post-test unmount succeeds. Note that
+# systemd doesn't automatically remove transient scopes that fail to terminate
+# when systemd tells them to terminate (e.g. programs stuck in D state when
+# systemd sends SIGKILL), so we use reset-failed to tear down the scope.
_run_seq() {
- bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq"
+ local cmd=(bash -c "test -w ${OOM_SCORE_ADJ} && echo 250 > ${OOM_SCORE_ADJ}; exec ./$seq")
+
+ if [ -n "${HAVE_SYSTEMD_SCOPES}" ]; then
+ local unit="$(systemd-escape "fs$seq").scope"
+ systemctl reset-failed "${unit}" &> /dev/null
+ systemd-run --quiet --unit "${unit}" --scope "${cmd[@]}"
+ res=$?
+ systemctl stop "${unit}" &> /dev/null
+ return "${res}"
+ else
+ "${cmd[@]}"
+ fi
}
_detect_kmemleak
status=1
exit
fi
+ # TEST_DEV has been recreated, previous FSTYP derived from
+ # TEST_DEV could be changed, source common/rc again with
+ # correct FSTYP to get FSTYP specific configs, e.g. common/xfs
+ . common/rc
_prepare_test_list
elif [ "$OLD_TEST_FS_MOUNT_OPTS" != "$TEST_FS_MOUNT_OPTS" ]; then
_test_unmount 2> /dev/null
touch ${RESULT_DIR}/check_dmesg
fi
_try_wipe_scratch_devs > /dev/null 2>&1
+
+ # clear the WARN_ONCE state to allow a potential problem
+ # to be reported for each test
+ (echo 1 > $DEBUGFS_MNT/clear_warn_once) > /dev/null 2>&1
+
if [ "$DUMP_OUTPUT" = true ]; then
_run_seq 2>&1 | tee $tmp.out
# Because $? would get tee's return code
_check_dmesg || err=true
fi
+ # Reload the module after each test to check for leaks or
+ # other problems.
+ if [ -n "${TEST_FS_MODULE_RELOAD}" ]; then
+ _test_unmount 2> /dev/null
+ _scratch_unmount 2> /dev/null
+ modprobe -r fs-$FSTYP
+ modprobe fs-$FSTYP
+ fi
+
# Scan for memory leaks after every test so that associating
# a leak to a particular test will be as accurate as possible.
_check_kmemleak || err=true
for ((iters = 0; iters < $iterations; iters++)) do
for section in $HOST_OPTIONS_SECTIONS; do
run_section $section
+ if [ "$sum_bad" != 0 ] && [ "$istop" = true ]; then
+ interrupt=false
+ status=`expr $sum_bad != 0`
+ exit
+ fi
done
done