3 # Control script for QA
5 # Copyright (c) 2000-2002,2006 Silicon Graphics, Inc. All Rights Reserved.
7 # This program is free software; you can redistribute it and/or
8 # modify it under the terms of the GNU General Public License as
9 # published by the Free Software Foundation.
11 # This program is distributed in the hope that it would be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write the Free Software Foundation,
18 # Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
39 brief_test_summary=false
43 # start the initialisation work now
46 export MSGVERB="text:action"
47 export QA_CHECK_FS=${QA_CHECK_FS:=true}
49 # number of diff lines from a failed test, 0 for whole output
50 export DIFF_LENGTH=${DIFF_LENGTH:=10}
52 # by default don't output timestamps
53 timestamp=${TIMESTAMP:=false}
55 rm -f $tmp.list $tmp.tmp $tmp.grep $here/$iam.out $tmp.xlist
57 SRC_GROUPS="generic shared"
58 export SRC_DIR="tests"
# Print command-line usage.  The trailing single quote on the echo line
# opens one long literal string (closed later in the file), so no comment
# lines may be inserted among the option descriptions below.
echo "Usage: $0 [options] [testlist]"'
-udiff show unified diff (default)
-n show me, do not run tests
-r randomize test order
-d dump test output to stdout
--large-fs optimise scratch device for large filesystems
-s section run only specified section from config file
-S section exclude the specified section from the config file
-g group[,group...] include tests from these groups
-x group[,group...] exclude tests from these groups
-X file exclude individual tests
-E external_file exclude individual tests
[testlist] include tests matching names in testlist
# Build the list of tests belonging to group $grp: scan the "group" index
# file in each test directory and print matching "tests/<dir>/<name>" paths.
for d in $SRC_GROUPS $FSTYP; do
if ! test -d "$SRC_DIR/$d" ; then
# Select group-file lines that name $grp and rewrite the test id into
# a path; the sed command continues on the next line.
l=$(sed -n < $SRC_DIR/$d/group \
-e "s;^\($VALID_TEST_NAME\).* $grp .*;$SRC_DIR/$d/\1;p")
# Find all tests, excluding files that are test metadata such as group files.
# It matches test names against $VALID_TEST_NAME defined in common/rc
for d in $SRC_GROUPS $FSTYP; do
if ! test -d "$SRC_DIR/$d" ; then
# Keep only entries that look like valid test names, then drop metadata
# (group index, Makefile) before appending to the work list.
grep "^$SRC_DIR/$d/$VALID_TEST_NAME"| \
grep -v "group\|Makefile" >> $tmp.list 2>/dev/null
# takes the list of tests to run in $tmp.list, and removes the tests passed to
# the function from that list.
# Flush accumulated patterns in batches so the grep -f pattern file stays
# small; 100 patterns per batch.
if [ $numsed -gt 100 ]; then
grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
mv $tmp.tmp $tmp.list
# Anchor each test name so only exact list entries are removed.
echo "^$t\$" >>$tmp.grep
numsed=`expr $numsed + 1`
# Apply whatever patterns remain after the final partial batch.
grep -v -f $tmp.grep <$tmp.list >$tmp.tmp
mv $tmp.tmp $tmp.list
# Tests specified on the command line
if [ -s $tmp.arglist ]; then
cat $tmp.arglist > $tmp.list

# Specified groups to include
for group in $GROUP_LIST; do
list=$(get_group_list $group)
if [ -z "$list" ]; then
echo "Group \"$group\" is empty or not defined?"
# Append each group member unless it is already in the list.
grep -s "^$t\$" $tmp.list >/dev/null || \
echo "$t" >>$tmp.list

# Neither tests nor groups were named explicitly.
if ! $have_test_arg && [ -z "$GROUP_LIST" ]; then
# no test numbers, do everything

# Specified groups to exclude
for xgroup in $XGROUP_LIST; do
list=$(get_group_list $xgroup)
if [ -z "$list" ]; then
echo "Group \"$xgroup\" is empty or not defined?"

# sort the list of tests into numeric order
list=`sort -n $tmp.list | uniq`
rm -f $tmp.list $tmp.tmp $tmp.grep

# Optionally shuffle the run order (-r).
list=`echo $list | awk -f randomize.awk`
# Process command arguments first.
while [ $# -gt 0 ]; do
-\? | -h | --help) usage ;;
# Pseudo-filesystem types selected directly by flag.
-overlay) FSTYP=overlay ;;
-tmpfs) FSTYP=tmpfs ;;
# -g/-x take a comma-separated group list; split commas into spaces.
-g) group=$2 ; shift ;
GROUP_LIST="$GROUP_LIST ${group//,/ }"
-x) xgroup=$2 ; shift ;
XGROUP_LIST="$XGROUP_LIST ${xgroup//,/ }"
# -X names a per-test-directory exclude file; entries are recorded as
# "<dir>/<test>" in $tmp.xlist.
-X) xfile=$2; shift ;
for d in $SRC_GROUPS $FSTYP; do
[ -f $SRC_DIR/$d/$xfile ] || continue
# Strip trailing "#" comments from each entry before recording it.
for f in `sed "s/#.*$//" $SRC_DIR/$d/$xfile`; do
echo $d/$f >> $tmp.xlist
# -E names an external exclude file given by full path.
-E) xfile=$2; shift ;
if [ -f $xfile ]; then
sed "s/#.*$//" "$xfile" >> $tmp.xlist
-s) RUN_SECTION="$RUN_SECTION $2"; shift ;;
-S) EXCLUDE_SECTION="$EXCLUDE_SECTION $2"; shift ;;
-udiff) diff="$diff -u" ;;
-r) randomize=true ;;
-T) timestamp=true ;;
-d) DUMP_OUTPUT=true ;;
-b) brief_test_summary=true;;
--large-fs) export LARGE_SCRATCH_DEV=yes ;;
# The option value follows the "=" inside the argument itself ($1), e.g.
# "--extra-space=4g"; strip everything up to and including the first "=".
# (Was ${r#*=}: no variable "r" is ever set, so the old code always
# exported an empty SCRATCH_DEV_EMPTY_SPACE.)
--extra-space=*) export SCRATCH_DEV_EMPTY_SPACE=${1#*=} ;;
*) # not an argument, we've got tests now.
have_test_arg=true ;;
# if we've found a test specification, then break out of the processing
# loop before we shift the arguments so that this is the first argument
# that we process in the test arg loop below.
if $have_test_arg; then

# we need common/config, source it after processing args, overlay needs FSTYP
# set before sourcing common/config
if ! . ./common/config; then
echo "$iam: failed to source common/config"

# Process tests from command line now.
if $have_test_arg; then
while [ $# -gt 0 ]; do
# Options may not follow test names.
-*) echo "Arguments before tests, please!"
# Split the argument into directory and test-name parts.
*) test_dir=`dirname $1`
test_dir=${test_dir#$SRC_DIR/*}
test_name=`basename $1`
group_file=$SRC_DIR/$test_dir/group
# Only accept tests listed in the directory's group index file.
if egrep "^$test_name" $group_file >/dev/null ; then
# in group file ... OK
echo $SRC_DIR/$test_dir/$test_name >>$tmp.arglist
echo "$1 - unknown test, ignored"
echo "check: failed to source common/rc"
echo "check: QA must be run as root"

# Per-run bookkeeping files live under $RESULT_BASE.
check="$RESULT_BASE/check"
# Merge historical per-test runtimes with this run's times; the awk body
# that deduplicates entries is continued from the cat pipeline below.
if [ -f $check.time -a -f $tmp.time ]; then
cat $check.time $tmp.time \
for (i in t) print i " " t[i]
mv $tmp.out $check.time

# Emit the per-section summary header.
echo "SECTION -- $section" >>$tmp.summary
echo "=========================" >>$tmp.summary
# Report what ran; the "Ran:" list goes to stdout only for non-brief runs.
if [ ! -z "$n_try" -a $n_try != 0 ]; then
if [ $brief_test_summary == "false" ]; then
echo "Ran:$try" >>$tmp.summary
echo "Ran:$try" >>$check.log
$interrupt && echo "Interrupted!" >>$check.log
# Report tests that were skipped (notrun).
if [ ! -z "$notrun" ]; then
if [ $brief_test_summary == "false" ]; then
echo "Not run:$notrun"
echo "Not run:$notrun" >>$tmp.summary
echo "Not run:$notrun" >>$check.log
# Report failures, or a pass banner when everything succeeded.
if [ ! -z "$n_bad" -a $n_bad != 0 ]; then
echo "Failed $n_bad of $n_try tests"
echo "Failures:$bad" >>$check.log
echo "Failed $n_bad of $n_try tests" >>$check.log
echo "Failures:$bad" >>$tmp.summary
echo "Failed $n_bad of $n_try tests" >>$tmp.summary
echo "Passed all $n_try tests"
echo "Passed all $n_try tests" >>$check.log
echo "Passed all $n_try tests" >>$tmp.summary
echo "" >>$tmp.summary

# Accumulate the failure count across sections.
sum_bad=`expr $sum_bad + $n_bad`
# Clean up per-test scratch output.
rm -f /tmp/*.rawout /tmp/*.out /tmp/*.err /tmp/*.time
if ! $OPTIONS_HAVE_SECTIONS; then
# Width of the longest summary line (GNU "wc -L"), used for formatting.
count=`wc -L $tmp.summary | cut -f1 -d" "`

# Re-check the test/scratch filesystems if a test flagged that it used them.
if [ -f ${RESULT_DIR}/require_test ]; then
_check_test_fs || err=true
rm -f ${RESULT_DIR}/require_test
if [ -f ${RESULT_DIR}/require_scratch ]; then
_check_scratch_fs || err=true
rm -f ${RESULT_DIR}/require_scratch
# Install the exit/signal handler: per-section summary when multiple config
# sections are in use, otherwise the single-run wrapup.
if $OPTIONS_HAVE_SECTIONS; then
trap "_summary; exit \$status" 0 1 2 3 15
trap "_wrapup; exit \$status" 0 1 2 3 15

# Main loop: run the test list once for every configured section.
for section in $HOST_OPTIONS_SECTIONS; do
OLD_MOUNT_OPTIONS=$MOUNT_OPTIONS
get_next_config $section

# Do we need to run only some sections ?
if [ ! -z "$RUN_SECTION" ]; then
for s in $RUN_SECTION; do
if [ $section == $s ]; then

# Did this section get excluded?
if [ ! -z "$EXCLUDE_SECTION" ]; then
for s in $EXCLUDE_SECTION; do
if [ $section == $s ]; then

mkdir -p $RESULT_BASE
if [ ! -d $RESULT_BASE ]; then
echo "failed to create results directory $RESULT_BASE"

if $OPTIONS_HAVE_SECTIONS; then
echo "SECTION -- $section"

# Re-make the test device if requested or if the fs type changed.
if $RECREATE_TEST_DEV || [ "$OLD_FSTYP" != "$FSTYP" ]; then
echo "RECREATING -- $FSTYP on $TEST_DEV"
_test_unmount 2> /dev/null
if ! _test_mkfs >$tmp.err 2>&1
echo "our local _test_mkfs routine ..."
echo "check: failed to mkfs \$TEST_DEV using specified options"
# NOTE(review): the "-ne 1" test implies _mount_or_remount_rw returns 1
# on success — confirm against the common helper's definition.
out=`_mount_or_remount_rw "$MOUNT_OPTIONS" $TEST_DEV $TEST_DIR`
if [ $? -ne 1 ]; then
# Mount options changed between sections: remount the test device.
elif [ "$OLD_MOUNT_OPTIONS" != "$MOUNT_OPTIONS" ]; then
_test_unmount 2> /dev/null
out=`_mount_or_remount_rw "$MOUNT_OPTIONS" $TEST_DEV $TEST_DIR`
if [ $? -ne 1 ]; then

check="$RESULT_BASE/check"

# don't leave old full output behind on a clean run
# Make sure the runtime history file exists for later lookups.
[ -f $check.time ] || touch $check.time
# print out our test configuration
echo "FSTYP -- `_full_fstyp_details`"
echo "PLATFORM -- `_full_platform_details`"
if [ ! -z "$SCRATCH_DEV" ]; then
echo "MKFS_OPTIONS -- `_scratch_mkfs_options`"
echo "MOUNT_OPTIONS -- `_scratch_mount_options`"

# Sanity-build the scratch device up front so later per-test mkfs/mounts
# behave the same way.
if [ ! -z "$SCRATCH_DEV" ]; then
_scratch_unmount 2> /dev/null
# call the overridden mkfs - make sure the FS is built
# the same as we'll create it later.
if ! _scratch_mkfs >$tmp.err 2>&1
echo "our local _scratch_mkfs routine ..."
echo "check: failed to mkfs \$SCRATCH_DEV using specified options"
# call the overridden mount - make sure the FS mounts with
# the same options that we'll mount with later.
if ! _scratch_mount >$tmp.err 2>&1
echo "our local mount routine ..."
echo "check: failed to mount \$SCRATCH_DEV using specified options"

if [ ! -f $seq ]; then
# Try to get full name in case the user supplied only seq id
# and the test has a name. A bit of hassle to find really
# the test and not its sample output or helping files.
bname=$(basename $seq)
full_seq=$(find $(dirname $seq) -name $bname* -executable |
awk '(NR == 1 || length < length(shortest)) { shortest = $0 }\
END { print shortest }')
if [ -f $full_seq ] \
&& [ x$(echo $bname | grep -o "^$VALID_TEST_ID") != x ]; then

# the filename for the test and the name output are different.
# we don't include the tests/ directory in the name output.
export seqnum=`echo $seq | sed -e "s;$SRC_DIR/;;"`

# Similarly, the result directory needs to replace the tests/
# part of the test location.
if $OPTIONS_HAVE_SECTIONS; then
export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;${RESULT_BASE}/$section;"`
seqres="$RESULT_BASE/$section/$seqnum"
export RESULT_DIR=`echo $group | sed -e "s;$SRC_DIR;$RESULT_BASE;"`
seqres="$RESULT_BASE/$seqnum"
# Bail out early if the resolved test file still doesn't exist.
if [ ! -f $seq ]; then
echo " - no such test?"
# really going to try and run this one
# Discard any stale failure output from a previous run of this test.
rm -f $seqres.out.bad

# check if we really should run it
if [ -s $tmp.xlist ]; then
595 if grep $seqnum $tmp.xlist > /dev/null 2>&1 ; then
# slashes now in names, sed barfs on them so use grep
# Look up how long this test took last time, for progress output.
lasttime=`grep -w ^$seqnum $check.time | awk '// {print $2}'`
if [ "X$lasttime" != X ]; then
echo -n " ${lasttime}s ..."
echo -n " " # prettier output with timestamps.
# Clear leftovers that would confuse post-run detection.
rm -f core $seqres.notrun
$timestamp && echo -n " ["`date "+%T"`"]"
[ ! -x $seq ] && chmod u+x $seq # ensure we can run it
$LOGGER_PROG "run xfstest $seqnum"
# Mark the start of the test in the kernel log when possible.
if [ -w /dev/kmsg ]; then
export date_time=`date +"%F %T"`
echo "run fstests $seqnum at $date_time" > /dev/kmsg
# _check_dmesg depends on this log in dmesg
touch ${RESULT_DIR}/check_dmesg
# Run the test, either teeing output to the console (-d) or capturing it.
if [ "$DUMP_OUTPUT" = true ]; then
./$seq 2>&1 | tee $tmp.rawout
# Because $? would get tee's return code
./$seq >$tmp.rawout 2>&1
$timestamp && _timestamp
# Normalise allocator chatter in the raw output before comparing.
_fix_malloc <$tmp.rawout >$tmp.out
# Preserve any core dump under the test's name for later debugging.
echo -n " [dumped core]"
mv core $RESULT_BASE/$seqnum.core
# A $seqres.notrun file means the test decided to skip itself.
if [ -f $seqres.notrun ]
$timestamp || echo -n " [not run] "
$timestamp && echo " [not run]" && echo -n " $seqnum -- "
notrun="$notrun $seqnum"
echo -n " [failed, exit status $sts]"
echo " - no qualified output"

# coreutils 8.16+ changed quote formats in error messages from
# `foo' to 'foo'. Filter old versions to match the new version.
sed -i "s/\`/\'/g" $tmp.out
# Compare actual output against the golden .out file.
if diff $seq.out $tmp.out >/dev/null 2>&1
# Record and display this test's runtime on success.
echo "$seqnum `expr $stop - $start`" >>$tmp.time
echo -n " `expr $stop - $start`s"
echo " - output mismatch (see $seqres.out.bad)"
mv $tmp.out $seqres.out.bad
# Show at most $DIFF_LENGTH lines of the diff (0 means show it all),
# indenting each line for readability.
$diff $seq.out $seqres.out.bad | {
if test "$DIFF_LENGTH" -le 0; then
head -n "$DIFF_LENGTH"
echo "(Run '$diff $seq.out $seqres.out.bad'" \
" to see the entire diff)"
sed -e 's/^\(.\)/ \1/'
n_try=`expr $n_try + 1`
_check_dmesg || err=true

# come here for each test, except when $showme is true
n_bad=`expr $n_bad + 1`

# Final teardown: unmount both devices; overall status is the bad count.
_test_unmount 2> /dev/null
_scratch_unmount 2> /dev/null
status=`expr $sum_bad`