--- /dev/null
+#! /bin/bash
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright (c) 2020, Oracle and/or its affiliates. All Rights Reserved.
+#
+# FS QA Test No. 516
+#
+# Update sunit and swidth and make sure that the filesystem still passes
+# xfs_repair afterwards.
+
+seq=`basename $0`
+seqres=$RESULT_DIR/$seq
+echo "QA output created by $seq"
+
+here=`pwd`
+tmp=/tmp/$$
+status=1 # failure is the default!
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+_cleanup()
+{
+ rm -f $tmp.*
+ cd /
+}
+
+# get standard environment, filters and checks
+. ./common/rc
+. ./common/fuzzy
+
+# real QA test starts here
+_supported_fs xfs
+_supported_os Linux
+_require_scratch_nocheck
+
+# Assume that if we can run scrub on the test dev we can run it on the scratch
+# fs too.
+run_scrub=0
+_supports_xfs_scrub $TEST_DIR $TEST_DEV && run_scrub=1
+
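+# Log a test step to $seqres.full and, via /dev/ttyprintk, to the kernel log so
+# that any mount-time kernel warnings in dmesg line up with the test steps.
+# tee also echoes the message to stdout, which is why the "Test:" lines appear
+# in the golden output.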
+log()
+{
+ echo "$@" | tee -a $seqres.full /dev/ttyprintk
+}
+
+__test_mount_opts()
+{
+ local mounted=0
+
+ # Try to mount the fs with our test options.
+ _try_scratch_mount "$@" >> $seqres.full 2>&1 && mounted=1
+ if [ $mounted -gt 0 ]; then
+ # Implant a sentinel file to see if repair nukes the directory
+ # later. Scrub, unmount, and check for errors.
+ echo moo > $SCRATCH_MNT/a
+ grep "$SCRATCH_MNT" /proc/mounts >> $seqres.full
+ test $run_scrub -gt 0 && \
+ _scratch_scrub -n >> $seqres.full
+ _scratch_unmount
+ _scratch_xfs_repair -n >> $seqres.full 2>&1 || \
+ echo "Repair found problems."
+ else
+ echo "mount failed" >> $seqres.full
+ fi
+ _scratch_xfs_db -c 'sb 0' -c 'p unit width' >> $seqres.full
+
+	# Run xfs_repair in repair mode to see if it can be baited into nuking
+	# the root directory on account of the sunit update.
+ _scratch_xfs_repair >> $seqres.full 2>&1
+
+ # If the previous mount succeeded, mount the fs and look for the file
+ # we implanted.
+ if [ $mounted -gt 0 ]; then
+ _scratch_mount
+ test -f $SCRATCH_MNT/a || echo "Root directory got nuked."
+ _scratch_unmount
+ fi
+
+ echo >> $seqres.full
+}
+
+test_sunit_opts()
+{
+ echo "Format with 4k stripe unit; 1x stripe width" >> $seqres.full
+ _scratch_mkfs -b size=4k -d sunit=8,swidth=8 >> $seqres.full 2>&1
+
+ __test_mount_opts "$@"
+}
+
+test_su_opts()
+{
+ echo "Format with 256k stripe unit; 4x stripe width" >> $seqres.full
+ _scratch_mkfs -b size=1k -d su=256k,sw=4 >> $seqres.full 2>&1
+
+ __test_mount_opts "$@"
+}
+
+test_repair_detection()
+{
+ local mounted=0
+
+ echo "Format with 256k stripe unit; 4x stripe width" >> $seqres.full
+ _scratch_mkfs -b size=1k -d su=256k,sw=4 >> $seqres.full 2>&1
+
+ # Try to mount the fs with our test options.
+ _try_scratch_mount >> $seqres.full 2>&1 && mounted=1
+ if [ $mounted -gt 0 ]; then
+ # Implant a sentinel file to see if repair nukes the directory
+ # later. Scrub, unmount, and check for errors.
+ echo moo > $SCRATCH_MNT/a
+ grep "$SCRATCH_MNT" /proc/mounts >> $seqres.full
+ test $run_scrub -gt 0 && \
+ _scratch_scrub -n >> $seqres.full
+ _scratch_unmount
+ _scratch_xfs_repair -n >> $seqres.full 2>&1 || \
+ echo "Repair found problems."
+ else
+ echo "mount failed" >> $seqres.full
+ fi
+
+ # Update the superblock like the kernel used to do.
+ _scratch_xfs_db -c 'sb 0' -c 'p unit width' >> $seqres.full
+ _scratch_xfs_db -x -c 'sb 0' -c 'write -d unit 256' -c 'write -d width 1024' >> $seqres.full
+ _scratch_xfs_db -c 'sb 0' -c 'p unit width' >> $seqres.full
+
+	# Run xfs_repair in repair mode to see if it can be baited into nuking
+	# the root directory on account of the sunit update.
+ _scratch_xfs_repair >> $seqres.full 2>&1
+
+ # If the previous mount succeeded, mount the fs and look for the file
+ # we implanted.
+ if [ $mounted -gt 0 ]; then
+ _scratch_mount
+ test -f $SCRATCH_MNT/a || echo "Root directory got nuked."
+ _scratch_unmount
+ fi
+
+ echo >> $seqres.full
+}
+
+# Format with a 256k stripe unit and 4x stripe width, and try various mount
+# options that want to change that and see if they blow up. Normally you
+# would never change the stripe *unit*, so it's no wonder this is not well
+# tested.
+
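+# The sunit/swidth mount options below are given in 512-byte units, so for
+# example sunit=512,swidth=2048 asks for the same 256k stripe unit and 1M (4x)
+# stripe width that mkfs set up, while swidth=2560 asks for a 1.25M (5x) width.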
+log "Test: no raid parameters"
+test_su_opts
+
+log "Test: 256k stripe unit; 4x stripe width"
+test_su_opts -o sunit=512,swidth=2048
+
+log "Test: 256k stripe unit; 5x stripe width"
+test_su_opts -o sunit=512,swidth=2560
+
+# Note: Larger stripe units probably won't mount
+log "Test: 512k stripe unit; 4x stripe width"
+test_su_opts -o sunit=1024,swidth=4096
+
+log "Test: 512k stripe unit; 3x stripe width"
+test_su_opts -o sunit=1024,swidth=3072
+
+# Note: Should succeed with kernel warnings, and should not create repair
+# failures or nuke the root directory.
+log "Test: 128k stripe unit; 8x stripe width"
+test_su_opts -o sunit=256,swidth=2048
+
+# Note: Should succeed without nuking the root dir
+log "Test: Repair of 128k stripe unit; 8x stripe width"
+test_repair_detection
+
+# Brian Foster noticed a bug in an earlier version of the patch that avoids
+# updating the ondisk sunit/swidth values if they would cause later repair
+# failures. The bug was that we wouldn't convert the kernel mount option sunit
+# value to the correct incore units until after computing the inode geometry.
+# This caused it to behave incorrectly when the filesystem was formatted with
+# sunit=1fsb and the mount options tried to increase swidth.
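+# With 4k blocks, the mkfs sunit=8 below (512-byte units) is 4k, i.e. 1fsb,
+# while the mount options ask for sunit=8 (4k) and swidth=64 (32k, an 8x
+# stripe width).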
+log "Test: Formatting with sunit=1fsb,swidth=1fsb and mounting with larger swidth"
+test_sunit_opts -o sunit=8,swidth=64
+
+# success, all done
+status=0
+exit
--- /dev/null
+QA output created by 516
+Test: no raid parameters
+Test: 256k stripe unit; 4x stripe width
+Test: 256k stripe unit; 5x stripe width
+Test: 512k stripe unit; 4x stripe width
+Test: 512k stripe unit; 3x stripe width
+Test: 128k stripe unit; 8x stripe width
+Test: Repair of 128k stripe unit; 8x stripe width
+Test: Formatting with sunit=1fsb,swidth=1fsb and mounting with larger swidth
--- /dev/null
+#! /bin/bash
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright (c) 2020 Oracle, Inc. All Rights Reserved.
+#
+# FS QA Test No. 517
+#
+# Race freeze and fsmap for a while to see if we crash or livelock.
+#
+seq=`basename $0`
+seqres=$RESULT_DIR/$seq
+echo "QA output created by $seq"
+
+here=`pwd`
+tmp=/tmp/$$
+status=1 # failure is the default!
+trap "_cleanup; exit \$status" 0 1 2 3 7 15
+
+_cleanup()
+{
+ cd /
+ $XFS_IO_PROG -x -c 'thaw' $SCRATCH_MNT > /dev/null 2>&1
+ rm -rf $tmp.*
+}
+
+# get standard environment, filters and checks
+. ./common/rc
+. ./common/filter
+. ./common/fuzzy
+. ./common/inject
+. ./common/xfs
+
+# real QA test starts here
+_supported_os Linux
+_supported_fs xfs
+_require_xfs_scratch_rmapbt
+_require_xfs_io_command "fsmap"
+_require_command "$KILLALL_PROG" killall
+
+echo "Format and populate"
+_scratch_mkfs > "$seqres.full" 2>&1
+_scratch_mount
+
+STRESS_DIR="$SCRATCH_MNT/testdir"
+mkdir -p $STRESS_DIR
+
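+# Build a 10x10x10 directory tree (1000 small files) so the fsmap loop below
+# has a nontrivial amount of metadata to walk.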
+for i in $(seq 0 9); do
+ mkdir -p $STRESS_DIR/$i
+ for j in $(seq 0 9); do
+ mkdir -p $STRESS_DIR/$i/$j
+ for k in $(seq 0 9); do
+ echo x > $STRESS_DIR/$i/$j/$k
+ done
+ done
+done
+
+cpus=$(( $(src/feature -o) * 4 * LOAD_FACTOR))
+
+echo "Concurrent fsmap and freeze"
+filter_output() {
+ egrep -v '(Device or resource busy|Invalid argument)'
+}
+freeze_loop() {
+ end="$1"
+
+ while [ "$(date +%s)" -lt $end ]; do
+ $XFS_IO_PROG -x -c 'freeze' $SCRATCH_MNT 2>&1 | filter_output
+ $XFS_IO_PROG -x -c 'thaw' $SCRATCH_MNT 2>&1 | filter_output
+ done
+}
+fsmap_loop() {
+ end="$1"
+
+ while [ "$(date +%s)" -lt $end ]; do
+ $XFS_IO_PROG -c 'fsmap -v' $SCRATCH_MNT > /dev/null
+ done
+}
+stress_loop() {
+ end="$1"
+
+ FSSTRESS_ARGS=$(_scale_fsstress_args -p 4 -d $SCRATCH_MNT -n 2000 $FSSTRESS_AVOID)
+ while [ "$(date +%s)" -lt $end ]; do
+ $FSSTRESS_PROG $FSSTRESS_ARGS >> $seqres.full
+ done
+}
+
+start=$(date +%s)
+end=$((start + (30 * TIME_FACTOR) ))
+
+echo "Loop started at $(date --date="@${start}"), ending at $(date --date="@${end}")" >> $seqres.full
+stress_loop $end &
+freeze_loop $end &
+fsmap_loop $end &
+
+# Wait until 2 seconds after the loops should have finished...
+while [ "$(date +%s)" -lt $((end + 2)) ]; do
+ sleep 1
+done
+
+# ...and clean up after the loops in case they didn't do it themselves.
+$KILLALL_PROG -TERM xfs_io fsstress >> $seqres.full 2>&1
+$XFS_IO_PROG -x -c 'thaw' $SCRATCH_MNT >> $seqres.full 2>&1
+
+echo "Loop finished at $(date)" >> $seqres.full
+echo "Test done"
+
+# success, all done
+status=0
+exit
--- /dev/null
+QA output created by 517
+Format and populate
+Concurrent fsmap and freeze
+Test done
--- /dev/null
+#! /bin/bash
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright (c) 2020, Oracle and/or its affiliates. All Rights Reserved.
+#
+# FS QA Test No. 518
+#
+# Make sure that the quota default grace period and maximum warning limits
+# survive quotacheck.
+
+seq=`basename $0`
+seqres=$RESULT_DIR/$seq
+echo "QA output created by $seq"
+
+here=`pwd`
+tmp=/tmp/$$
+status=1 # failure is the default!
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+_cleanup()
+{
+ cd /
+ rm -f $tmp.*
+}
+
+# get standard environment, filters and checks
+. ./common/rc
+. ./common/filter
+. ./common/quota
+
+# real QA test starts here
+_supported_fs xfs
+_supported_os Linux
+_require_quota
+
+rm -f $seqres.full
+
+# Format filesystem and set up quota limits
+_scratch_mkfs > $seqres.full
+_qmount_option "usrquota"
+_scratch_mount >> $seqres.full
+
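+# Set the default user quota grace period to 300 minutes; 'state' should then
+# report a 5 hour grace time for blocks, inodes, and realtime blocks.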
+$XFS_QUOTA_PROG -x -c 'timer -u 300m' $SCRATCH_MNT
+$XFS_QUOTA_PROG -x -c 'state' $SCRATCH_MNT | grep 'grace time'
+_scratch_unmount
+
+# Remount and check the limits
+_scratch_mount >> $seqres.full
+$XFS_QUOTA_PROG -x -c 'state' $SCRATCH_MNT | grep 'grace time'
+_scratch_unmount
+
+# Run repair to force quota check
+_scratch_xfs_repair >> $seqres.full 2>&1
+
+# Remount (this time to run quotacheck) and check the limits. There's a bug
+# in quotacheck where we would reset the ondisk default grace period to zero
+# while the incore copy stays at whatever was read in prior to quotacheck.
+# This will show up after the /next/ remount.
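+# If the bug is present, this mount's quotacheck zeroes the ondisk default
+# grace period, and the 'state' output after the final remount below will no
+# longer show the 5 hour setting.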
+_scratch_mount >> $seqres.full
+$XFS_QUOTA_PROG -x -c 'state' $SCRATCH_MNT | grep 'grace time'
+_scratch_unmount
+
+# Remount and check the limits
+_scratch_mount >> $seqres.full
+$XFS_QUOTA_PROG -x -c 'state' $SCRATCH_MNT | grep 'grace time'
+_scratch_unmount
+
+# success, all done
+status=0
+exit
--- /dev/null
+QA output created by 518
+Blocks grace time: [0 days 05:00:00]
+Inodes grace time: [0 days 05:00:00]
+Realtime Blocks grace time: [0 days 05:00:00]
+Blocks grace time: [0 days 05:00:00]
+Inodes grace time: [0 days 05:00:00]
+Realtime Blocks grace time: [0 days 05:00:00]
+Blocks grace time: [0 days 05:00:00]
+Inodes grace time: [0 days 05:00:00]
+Realtime Blocks grace time: [0 days 05:00:00]
+Blocks grace time: [0 days 05:00:00]
+Inodes grace time: [0 days 05:00:00]
+Realtime Blocks grace time: [0 days 05:00:00]
--- /dev/null
+#! /bin/bash
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright (c) 2020, Oracle and/or its affiliates. All Rights Reserved.
+#
+# FS QA Test No. 519
+#
+# Make sure that reflink forces the log out if we mount with wsync. We test
+# that it actually forced the log by immediately shutting down the fs without
+# flushing the log and then remounting to check file contents.
+
+seq=`basename $0`
+seqres=$RESULT_DIR/$seq
+echo "QA output created by $seq"
+
+here=`pwd`
+tmp=/tmp/$$
+status=1 # failure is the default!
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+_cleanup()
+{
+ cd /
+ rm -f $tmp.*
+}
+
+# get standard environment, filters and checks
+. ./common/rc
+. ./common/filter
+. ./common/reflink
+
+# real QA test starts here
+_supported_fs xfs
+_supported_os Linux
+_require_scratch_reflink
+_require_cp_reflink
+
+rm -f $seqres.full
+
+# Format the filesystem and mount it with wsync
+_scratch_mkfs > $seqres.full
+_scratch_mount -o wsync >> $seqres.full
+
+# Set up initial files
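+# a shares blocks with e, so a's reflink flag is already set; b is a plain
+# empty file with no reflink flag; and d is a reflink copy of c, so d's flag
+# is already set.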
+$XFS_IO_PROG -f -c 'pwrite -S 0x58 0 1m -b 1m' $SCRATCH_MNT/a >> $seqres.full
+$XFS_IO_PROG -f -c 'pwrite -S 0x59 0 1m -b 1m' $SCRATCH_MNT/c >> $seqres.full
+_cp_reflink $SCRATCH_MNT/a $SCRATCH_MNT/e
+_cp_reflink $SCRATCH_MNT/c $SCRATCH_MNT/d
+touch $SCRATCH_MNT/b
+sync
+
+# Test that setting the reflink flag on the dest file forces the log
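+# Because the fs is shut down immediately after the reflink call, b keeps its
+# new contents across the remount only if the reflink forced the log; the
+# md5sums of a and b below should match.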
+echo "test reflink flag not set"
+$XFS_IO_PROG -x -c "reflink $SCRATCH_MNT/a" -c 'shutdown' $SCRATCH_MNT/b >> $seqres.full
+_scratch_cycle_mount wsync
+md5sum $SCRATCH_MNT/a $SCRATCH_MNT/b | _filter_scratch
+
+# Test forcing the log even if both files are already reflinked
+echo "test reflink flag already set"
+$XFS_IO_PROG -x -c "reflink $SCRATCH_MNT/a" -c 'shutdown' $SCRATCH_MNT/d >> $seqres.full
+_scratch_cycle_mount wsync
+md5sum $SCRATCH_MNT/a $SCRATCH_MNT/d | _filter_scratch
+
+# success, all done
+status=0
+exit
--- /dev/null
+QA output created by 519
+test reflink flag not set
+310f146ce52077fcd3308dcbe7632bb2 SCRATCH_MNT/a
+310f146ce52077fcd3308dcbe7632bb2 SCRATCH_MNT/b
+test reflink flag already set
+310f146ce52077fcd3308dcbe7632bb2 SCRATCH_MNT/a
+310f146ce52077fcd3308dcbe7632bb2 SCRATCH_MNT/d
+++ /dev/null
-#! /bin/bash
-# SPDX-License-Identifier: GPL-2.0-or-later
-# Copyright (c) 2020, Oracle and/or its affiliates. All Rights Reserved.
-#
-# FS QA Test No. 751
-#
-# Update sunit and width and make sure that the filesystem still passes
-# xfs_repair afterwards.
-
-seq=`basename $0`
-seqres=$RESULT_DIR/$seq
-echo "QA output created by $seq"
-
-here=`pwd`
-tmp=/tmp/$$
-status=1 # failure is the default!
-trap "_cleanup; exit \$status" 0 1 2 3 15
-
-_cleanup()
-{
- rm -f $tmp.*
- cd /
-}
-
-# get standard environment, filters and checks
-. ./common/rc
-. ./common/fuzzy
-
-# real QA test starts here
-_supported_fs xfs
-_supported_os Linux
-_require_scratch_nocheck
-
-# Assume that if we can run scrub on the test dev we can run it on the scratch
-# fs too.
-run_scrub=0
-_supports_xfs_scrub $TEST_DIR $TEST_DEV && run_scrub=1
-
-log()
-{
- echo "$@" | tee -a $seqres.full /dev/ttyprintk
-}
-
-__test_mount_opts()
-{
- local mounted=0
-
- # Try to mount the fs with our test options.
- _try_scratch_mount "$@" >> $seqres.full 2>&1 && mounted=1
- if [ $mounted -gt 0 ]; then
- # Implant a sentinel file to see if repair nukes the directory
- # later. Scrub, unmount, and check for errors.
- echo moo > $SCRATCH_MNT/a
- grep "$SCRATCH_MNT" /proc/mounts >> $seqres.full
- test $run_scrub -gt 0 && \
- _scratch_scrub -n >> $seqres.full
- _scratch_unmount
- _scratch_xfs_repair -n >> $seqres.full 2>&1 || \
- echo "Repair found problems."
- else
- echo "mount failed" >> $seqres.full
- fi
- _scratch_xfs_db -c 'sb 0' -c 'p unit width' >> $seqres.full
-
- # Run xfs_repair in repair mode to see if it can be baited into nuking
- # the root filesystem on account of the sunit update.
- _scratch_xfs_repair >> $seqres.full 2>&1
-
- # If the previous mount succeeded, mount the fs and look for the file
- # we implanted.
- if [ $mounted -gt 0 ]; then
- _scratch_mount
- test -f $SCRATCH_MNT/a || echo "Root directory got nuked."
- _scratch_unmount
- fi
-
- echo >> $seqres.full
-}
-
-test_sunit_opts()
-{
- echo "Format with 4k stripe unit; 1x stripe width" >> $seqres.full
- _scratch_mkfs -b size=4k -d sunit=8,swidth=8 >> $seqres.full 2>&1
-
- __test_mount_opts "$@"
-}
-
-test_su_opts()
-{
- local mounted=0
-
- echo "Format with 256k stripe unit; 4x stripe width" >> $seqres.full
- _scratch_mkfs -b size=1k -d su=256k,sw=4 >> $seqres.full 2>&1
-
- __test_mount_opts "$@"
-}
-
-test_repair_detection()
-{
- local mounted=0
-
- echo "Format with 256k stripe unit; 4x stripe width" >> $seqres.full
- _scratch_mkfs -b size=1k -d su=256k,sw=4 >> $seqres.full 2>&1
-
- # Try to mount the fs with our test options.
- _try_scratch_mount >> $seqres.full 2>&1 && mounted=1
- if [ $mounted -gt 0 ]; then
- # Implant a sentinel file to see if repair nukes the directory
- # later. Scrub, unmount, and check for errors.
- echo moo > $SCRATCH_MNT/a
- grep "$SCRATCH_MNT" /proc/mounts >> $seqres.full
- test $run_scrub -gt 0 && \
- _scratch_scrub -n >> $seqres.full
- _scratch_unmount
- _scratch_xfs_repair -n >> $seqres.full 2>&1 || \
- echo "Repair found problems."
- else
- echo "mount failed" >> $seqres.full
- fi
-
- # Update the superblock like the kernel used to do.
- _scratch_xfs_db -c 'sb 0' -c 'p unit width' >> $seqres.full
- _scratch_xfs_db -x -c 'sb 0' -c 'write -d unit 256' -c 'write -d width 1024' >> $seqres.full
- _scratch_xfs_db -c 'sb 0' -c 'p unit width' >> $seqres.full
-
- # Run xfs_repair in repair mode to see if it can be baited into nuking
- # the root filesystem on account of the sunit update.
- _scratch_xfs_repair >> $seqres.full 2>&1
-
- # If the previous mount succeeded, mount the fs and look for the file
- # we implanted.
- if [ $mounted -gt 0 ]; then
- _scratch_mount
- test -f $SCRATCH_MNT/a || echo "Root directory got nuked."
- _scratch_unmount
- fi
-
- echo >> $seqres.full
-}
-
-# Format with a 256k stripe unit and 4x stripe width, and try various mount
-# options that want to change that and see if they blow up. Normally you
-# would never change the stripe *unit*, so it's no wonder this is not well
-# tested.
-
-log "Test: no raid parameters"
-test_su_opts
-
-log "Test: 256k stripe unit; 4x stripe width"
-test_su_opts -o sunit=512,swidth=2048
-
-log "Test: 256k stripe unit; 5x stripe width"
-test_su_opts -o sunit=512,swidth=2560
-
-# Note: Larger stripe units probably won't mount
-log "Test: 512k stripe unit; 4x stripe width"
-test_su_opts -o sunit=1024,swidth=4096
-
-log "Test: 512k stripe unit; 3x stripe width"
-test_su_opts -o sunit=1024,swidth=3072
-
-# Note: Should succeed with kernel warnings, and should not create repair
-# failures or nuke the root directory.
-log "Test: 128k stripe unit; 8x stripe width"
-test_su_opts -o sunit=256,swidth=2048
-
-# Note: Should succeed without nuking the root dir
-log "Test: Repair of 128k stripe unit; 8x stripe width"
-test_repair_detection
-
-# Brian Foster noticed a bug in an earlier version of the patch that avoids
-# updating the ondisk sunit/swidth values if they would cause later repair
-# failures. The bug was that we wouldn't convert the kernel mount option sunit
-# value to the correct incore units until after computing the inode geometry.
-# This caused it to behave incorrectly when the filesystem was formatted with
-# sunit=1fsb and the mount options try to increase swidth.
-log "Test: Formatting with sunit=1fsb,swidth=1fsb and mounting with larger swidth"
-test_sunit_opts -o sunit=8,swidth=64
-
-# success, all done
-status=0
-exit
+++ /dev/null
-QA output created by 751
-Test: no raid parameters
-Test: 256k stripe unit; 4x stripe width
-Test: 256k stripe unit; 5x stripe width
-Test: 512k stripe unit; 4x stripe width
-Test: 512k stripe unit; 3x stripe width
-Test: 128k stripe unit; 8x stripe width
-Test: Repair of 128k stripe unit; 8x stripe width
-Test: Formatting with sunit=1fsb,swidth=1fsb and mounting with larger swidth
+++ /dev/null
-#! /bin/bash
-# SPDX-License-Identifier: GPL-2.0-or-later
-# Copyright (c) 2020 Oracle, Inc. All Rights Reserved.
-#
-# FS QA Test No. 755
-#
-# Race freeze and fsmap for a while to see if we crash or livelock.
-#
-seq=`basename $0`
-seqres=$RESULT_DIR/$seq
-echo "QA output created by $seq"
-
-here=`pwd`
-tmp=/tmp/$$
-status=1 # failure is the default!
-trap "_cleanup; exit \$status" 0 1 2 3 7 15
-
-_cleanup()
-{
- cd /
- $XFS_IO_PROG -x -c 'thaw' $SCRATCH_MNT > /dev/null 2>&1
- rm -rf $tmp.*
-}
-
-# get standard environment, filters and checks
-. ./common/rc
-. ./common/filter
-. ./common/fuzzy
-. ./common/inject
-. ./common/xfs
-
-# real QA test starts here
-_supported_os Linux
-_supported_fs xfs
-_require_xfs_scratch_rmapbt
-_require_xfs_io_command "fsmap"
-_require_command "$KILLALL_PROG" killall
-
-echo "Format and populate"
-_scratch_mkfs > "$seqres.full" 2>&1
-_scratch_mount
-
-STRESS_DIR="$SCRATCH_MNT/testdir"
-mkdir -p $STRESS_DIR
-
-for i in $(seq 0 9); do
- mkdir -p $STRESS_DIR/$i
- for j in $(seq 0 9); do
- mkdir -p $STRESS_DIR/$i/$j
- for k in $(seq 0 9); do
- echo x > $STRESS_DIR/$i/$j/$k
- done
- done
-done
-
-cpus=$(( $(src/feature -o) * 4 * LOAD_FACTOR))
-
-echo "Concurrent fsmap and freeze"
-filter_output() {
- egrep -v '(Device or resource busy|Invalid argument)'
-}
-freeze_loop() {
- end="$1"
-
- while [ "$(date +%s)" -lt $end ]; do
- $XFS_IO_PROG -x -c 'freeze' $SCRATCH_MNT 2>&1 | filter_output
- $XFS_IO_PROG -x -c 'thaw' $SCRATCH_MNT 2>&1 | filter_output
- done
-}
-fsmap_loop() {
- end="$1"
-
- while [ "$(date +%s)" -lt $end ]; do
- $XFS_IO_PROG -c 'fsmap -v' $SCRATCH_MNT > /dev/null
- done
-}
-stress_loop() {
- end="$1"
-
- FSSTRESS_ARGS=$(_scale_fsstress_args -p 4 -d $SCRATCH_MNT -n 2000 $FSSTRESS_AVOID)
- while [ "$(date +%s)" -lt $end ]; do
- $FSSTRESS_PROG $FSSTRESS_ARGS >> $seqres.full
- done
-}
-
-start=$(date +%s)
-end=$((start + (30 * TIME_FACTOR) ))
-
-echo "Loop started at $(date --date="@${start}"), ending at $(date --date="@${end}")" >> $seqres.full
-stress_loop $end &
-freeze_loop $end &
-fsmap_loop $end &
-
-# Wait until 2 seconds after the loops should have finished...
-while [ "$(date +%s)" -lt $((end + 2)) ]; do
- sleep 1
-done
-
-# ...and clean up after the loops in case they didn't do it themselves.
-$KILLALL_PROG -TERM xfs_io fsstress >> $seqres.full 2>&1
-$XFS_IO_PROG -x -c 'thaw' $SCRATCH_MNT >> $seqres.full 2>&1
-
-echo "Loop finished at $(date)" >> $seqres.full
-echo "Test done"
-
-# success, all done
-status=0
-exit
+++ /dev/null
-QA output created by 755
-Format and populate
-Concurrent fsmap and freeze
-Test done
+++ /dev/null
-#! /bin/bash
-# SPDX-License-Identifier: GPL-2.0-or-later
-# Copyright (c) 2020, Oracle and/or its affiliates. All Rights Reserved.
-#
-# FS QA Test No. 913
-#
-# Make sure that the quota default grace period and maximum warning limits
-# survive quotacheck.
-
-seq=`basename $0`
-seqres=$RESULT_DIR/$seq
-echo "QA output created by $seq"
-
-here=`pwd`
-tmp=/tmp/$$
-status=1 # failure is the default!
-trap "_cleanup; exit \$status" 0 1 2 3 15
-
-_cleanup()
-{
- cd /
- rm -f $tmp.*
-}
-
-# get standard environment, filters and checks
-. ./common/rc
-. ./common/filter
-. ./common/quota
-
-# real QA test starts here
-_supported_fs xfs
-_supported_os Linux
-_require_quota
-
-rm -f $seqres.full
-
-# Format filesystem and set up quota limits
-_scratch_mkfs > $seqres.full
-_qmount_option "usrquota"
-_scratch_mount >> $seqres.full
-
-$XFS_QUOTA_PROG -x -c 'timer -u 300m' $SCRATCH_MNT
-$XFS_QUOTA_PROG -x -c 'state' $SCRATCH_MNT | grep 'grace time'
-_scratch_unmount
-
-# Remount and check the limits
-_scratch_mount >> $seqres.full
-$XFS_QUOTA_PROG -x -c 'state' $SCRATCH_MNT | grep 'grace time'
-_scratch_unmount
-
-# Run repair to force quota check
-_scratch_xfs_repair >> $seqres.full 2>&1
-
-# Remount (this time to run quotacheck) and check the limits. There's a bug
-# in quotacheck where we would reset the ondisk default grace period to zero
-# while the incore copy stays at whatever was read in prior to quotacheck.
-# This will show up after the /next/ remount.
-_scratch_mount >> $seqres.full
-$XFS_QUOTA_PROG -x -c 'state' $SCRATCH_MNT | grep 'grace time'
-_scratch_unmount
-
-# Remount and check the limits
-_scratch_mount >> $seqres.full
-$XFS_QUOTA_PROG -x -c 'state' $SCRATCH_MNT | grep 'grace time'
-_scratch_unmount
-
-# success, all done
-status=0
-exit
+++ /dev/null
-QA output created by 913
-Blocks grace time: [0 days 05:00:00]
-Inodes grace time: [0 days 05:00:00]
-Realtime Blocks grace time: [0 days 05:00:00]
-Blocks grace time: [0 days 05:00:00]
-Inodes grace time: [0 days 05:00:00]
-Realtime Blocks grace time: [0 days 05:00:00]
-Blocks grace time: [0 days 05:00:00]
-Inodes grace time: [0 days 05:00:00]
-Realtime Blocks grace time: [0 days 05:00:00]
-Blocks grace time: [0 days 05:00:00]
-Inodes grace time: [0 days 05:00:00]
-Realtime Blocks grace time: [0 days 05:00:00]
+++ /dev/null
-#! /bin/bash
-# SPDX-License-Identifier: GPL-2.0-or-later
-# Copyright (c) 2020, Oracle and/or its affiliates. All Rights Reserved.
-#
-# FS QA Test No. 914
-#
-# Make sure that reflink forces the log out if we mount with wsync. We test
-# that it actually forced the log by immediately shutting down the fs without
-# flushing the log and then remounting to check file contents.
-
-seq=`basename $0`
-seqres=$RESULT_DIR/$seq
-echo "QA output created by $seq"
-
-here=`pwd`
-tmp=/tmp/$$
-status=1 # failure is the default!
-trap "_cleanup; exit \$status" 0 1 2 3 15
-
-_cleanup()
-{
- cd /
- rm -f $tmp.*
-}
-
-# get standard environment, filters and checks
-. ./common/rc
-. ./common/filter
-. ./common/reflink
-
-# real QA test starts here
-_supported_fs xfs
-_supported_os Linux
-_require_scratch_reflink
-_require_cp_reflink
-
-rm -f $seqres.full
-
-# Format filesystem and set up quota limits
-_scratch_mkfs > $seqres.full
-_scratch_mount -o wsync >> $seqres.full
-
-# Set up initial files
-$XFS_IO_PROG -f -c 'pwrite -S 0x58 0 1m -b 1m' $SCRATCH_MNT/a >> $seqres.full
-$XFS_IO_PROG -f -c 'pwrite -S 0x59 0 1m -b 1m' $SCRATCH_MNT/c >> $seqres.full
-_cp_reflink $SCRATCH_MNT/a $SCRATCH_MNT/e
-_cp_reflink $SCRATCH_MNT/c $SCRATCH_MNT/d
-touch $SCRATCH_MNT/b
-sync
-
-# Test that setting the reflink flag on the dest file forces the log
-echo "test reflink flag not set"
-$XFS_IO_PROG -x -c "reflink $SCRATCH_MNT/a" -c 'shutdown' $SCRATCH_MNT/b >> $seqres.full
-_scratch_cycle_mount wsync
-md5sum $SCRATCH_MNT/a $SCRATCH_MNT/b | _filter_scratch
-
-# Test forcing the log even if both files are already reflinked
-echo "test reflink flag already set"
-$XFS_IO_PROG -x -c "reflink $SCRATCH_MNT/a" -c 'shutdown' $SCRATCH_MNT/d >> $seqres.full
-_scratch_cycle_mount wsync
-md5sum $SCRATCH_MNT/a $SCRATCH_MNT/d | _filter_scratch
-
-# success, all done
-status=0
-exit
+++ /dev/null
-QA output created by 914
-test reflink flag not set
-310f146ce52077fcd3308dcbe7632bb2 SCRATCH_MNT/a
-310f146ce52077fcd3308dcbe7632bb2 SCRATCH_MNT/b
-test reflink flag already set
-310f146ce52077fcd3308dcbe7632bb2 SCRATCH_MNT/a
-310f146ce52077fcd3308dcbe7632bb2 SCRATCH_MNT/d
513 auto mount
514 auto quick db
515 auto quick quota
-751 auto quick
-755 auto quick fsmap freeze
-913 auto quick quota
-914 auto quick reflink
+516 auto quick
+517 auto quick fsmap freeze
+518 auto quick quota
+519 auto quick reflink