2 # SPDX-License-Identifier: GPL-2.0
3 # Copyright (C) 2015 SUSE Linux Products GmbH. All Rights Reserved.
5 # FS QA Test No. btrfs/104
7 # Test btrfs quota group consistency operations during snapshot
8 # delete. Btrfs has had long standing issues with drop snapshot
9 # failing to properly account for quota groups. This test crafts
10 # several snapshot trees with shared and exclusive elements. One of
11 # the trees is removed and then quota group consistency is checked.
13 # This issue is fixed by the following linux kernel patches:
15 # Btrfs: use btrfs_get_fs_root in resolve_indirect_ref
16 # Btrfs: keep dropped roots in cache until transaction commit
17 # btrfs: qgroup: account shared subtree during snapshot delete
# Standard fstests boilerplate: per-test output path, fail-by-default
# status, and a trap so _cleanup runs on exit or on common signals.
# NOTE(review): the leading number on each line looks like a paste
# artifact of the original file's line numbering — confirm against the
# upstream xfstests copy of btrfs/104 before running.
20 seqres=$RESULT_DIR/$seq
21 echo "QA output created by $seq"
25 status=1 # failure is the default!
26 trap "_cleanup; exit \$status" 0 1 2 3 15
34 # get standard environment, filters and checks
38 # remove previous $seqres.full before test
41 # real QA test starts here
# NOTE(review): the usual ". ./common/rc" / ". ./common/filter" sourcing
# and _supported_fs/_require_scratch lines are not visible in this view;
# presumably they sit in the elided gaps.
44 _require_btrfs_qgroup_report
48 # Create an fs tree of a given height at a target location. This is
49 # done by aggressively creating inline extents to expand the number of
50 # nodes required. We also add a traditional extent so that
51 # drop_snapshot is forced to walk at least one extent that is not
54 # NOTE: The ability to vary tree height for this test is very useful
55 # for debugging problems with drop_snapshot(). As a result we retain
56 # that parameter even though the test below always does level 2 trees.
# Interior of _explode_fs_tree (the "_explode_fs_tree() {" line, the
# closing "fi"/"esac"/"done"/"}" and the n=... assignments are not
# visible in this view — this fragment must not be restructured).
# Bail out if the caller did not pass a target directory.
63 echo "specify location for fileset"
# Select the file count $n that empirically yields the requested btree
# height; the assignments themselves fall in the elided lines.
68 1)# this always reproduces level 1 trees
71 2)# this always reproduces level 2 trees
74 3)# this always reproduces level 3 trees
# Any other level is unsupported — report and fail.
78 echo "Can't make level $level trees";
# 4095-byte writes stay below the inline-extent threshold, so each file
# adds an inline extent and inflates the tree's node count.
84 for i in `seq -w 1 $n`; do
85 $XFS_IO_PROG -f -c "pwrite 0 4095" $loc/file$i > /dev/null 2>&1
# One 128k write creates a regular (non-inline) extent so that
# drop_snapshot has at least one real data extent to walk.
88 $XFS_IO_PROG -f -c "pwrite 0 128k" $loc/extentfile > /dev/null 2>&1
91 # Force the default leaf size as the calculations for making our btree
92 # heights are based on that.
93 _scratch_mkfs "--nodesize 16384" >> $seqres.full 2>&1
# NOTE(review): the _scratch_mount that must follow mkfs is not visible
# here — presumably in an elided line.
96 # populate the default subvolume and create a snapshot ('snap1')
97 _explode_fs_tree 1 $SCRATCH_MNT/files
98 _run_btrfs_util_prog subvolume snapshot $SCRATCH_MNT $SCRATCH_MNT/snap1
100 # create new btree nodes in this snapshot. They will become shared
101 # with the next snapshot we create.
102 _explode_fs_tree 1 $SCRATCH_MNT/snap1/files-snap1
104 # create our final snapshot ('snap2'), populate it with
105 # exclusively owned nodes.
107 # As a result of this action, snap2 will get an implied ref to the
108 # 128K extent created in the default subvolume.
109 _run_btrfs_util_prog subvolume snapshot $SCRATCH_MNT/snap1 $SCRATCH_MNT/snap2
110 _explode_fs_tree 1 $SCRATCH_MNT/snap2/files-snap2
112 # Enable qgroups now that we have our filesystem prepared. This
113 # will kick off a scan which we will have to wait for.
114 _run_btrfs_util_prog quota enable $SCRATCH_MNT
# 'rescan -w' blocks until the initial qgroup scan completes, so the
# numbers are stable before we start deleting.
115 _run_btrfs_util_prog quota rescan -w $SCRATCH_MNT
117 # Remount to clear cache, force everything to disk
# NOTE(review): the remount command itself (_scratch_cycle_mount or
# equivalent) is not visible in this view.
120 # Finally, delete snap1 to trigger btrfs_drop_snapshot(). This
121 # snapshot is most interesting to delete because it will cause some
122 # nodes to go exclusively owned for snap2, while some will stay shared
123 # with the default subvolume. That exercises a maximum of the drop
124 # snapshot/qgroup interactions.
126 # snap2's implied ref to the 128K extent in files/ can be lost by
127 # the root finding code in qgroup accounting due to snap1 no longer
128 # providing a path to it. This was fixed by the first two patches
130 _run_btrfs_util_prog subvolume delete $SCRATCH_MNT/snap1
132 # "btrfs filesystem sync" will trigger subvolume deletion
133 _run_btrfs_util_prog filesystem sync $SCRATCH_MNT
135 # Qgroup will be checked by fstest at _check_scratch_fs()