2 # SPDX-License-Identifier: GPL-2.0
3 # Copyright (C) 2015 SUSE Linux Products GmbH. All Rights Reserved.
5 # FS QA Test No. btrfs/104
7 # Test btrfs quota group consistency operations during snapshot
8 # delete. Btrfs has had long standing issues with drop snapshot
9 # failing to properly account for quota groups. This test crafts
10 # several snapshot trees with shared and exclusive elements. One of
11 # the trees is removed and then quota group consistency is checked.
13 # This issue is fixed by the following linux kernel patches:
15 # Btrfs: use btrfs_get_fs_root in resolve_indirect_ref
16 # Btrfs: keep dropped roots in cache until transaction commit
17 # btrfs: qgroup: account shared subtree during snapshot delete
# Standard fstests bookkeeping: record where this test's full output goes
# and emit the header line the harness diffs against the golden .out file.
20 seqres=$RESULT_DIR/$seq
21 echo "QA output created by $seq"
# Assume failure until the test body explicitly sets status=0 at the end
# (not visible in this chunk); the trap runs _cleanup and propagates
# $status on EXIT (0) and on HUP/INT/QUIT/TERM (1 2 3 15).
25 status=1 # failure is the default!
26 trap "_cleanup; exit \$status" 0 1 2 3 15
34 # get standard environment, filters and checks
38 # remove previous $seqres.full before test
41 # real QA test starts here
# Skip this test unless the harness is able to verify qgroup consistency
# after the run (the final check relies on it — see the last comment below).
45 _require_btrfs_qgroup_report
49 # Create an fs tree of a given height at a target location. This is
50 # done by aggressively creating inline extents to expand the number of
51 # nodes required. We also add a traditional extent so that
52 # drop_snapshot is forced to walk at least one extent that is not
55 # NOTE: The ability to vary tree height for this test is very useful
56 # for debugging problems with drop_snapshot(). As a result we retain
57 # that parameter even though the test below always does level 2 trees.
# NOTE(review): this is the interior of _explode_fs_tree — the function
# definition line, argument parsing, and the case/esac framing fall outside
# this view, so the notes below are hedged where the missing context matters.
# Usage error when the caller omits the target directory — presumably $loc
# is derived from a positional argument; confirm against the full function.
64 echo "specify location for fileset"
# Each case arm selects a file count (presumably assigned to $n in the
# elided arm bodies) tuned to force the fs tree to the requested height.
69 1)# this always reproduces level 1 trees
72 2)# this always reproduces level 2 trees
75 3)# this always reproduces level 3 trees
# Fallback arm: reject unsupported heights.
79 echo "Can't make level $level trees";
# Mass-create small files; 4095-byte writes stay under the inline-extent
# threshold, so each file consumes leaf space and inflates the node count.
85 for i in `seq -w 1 $n`; do
86 $XFS_IO_PROG -f -c "pwrite 0 4095" $loc/file$i > /dev/null 2>&1
# One real (non-inline) 128K extent so drop_snapshot must walk at least
# one extent that is not a metadata-embedded inline extent.
89 $XFS_IO_PROG -f -c "pwrite 0 128k" $loc/extentfile > /dev/null 2>&1
92 # Force the default leaf size as the calculations for making our btree
93 # heights are based on that.
94 _scratch_mkfs "--nodesize 16384" >> $seqres.full 2>&1
97 # populate the default subvolume and create a snapshot ('snap1')
98 _explode_fs_tree 1 $SCRATCH_MNT/files
99 _run_btrfs_util_prog subvolume snapshot $SCRATCH_MNT $SCRATCH_MNT/snap1
101 # create new btree nodes in this snapshot. They will become shared
102 # with the next snapshot we create.
103 _explode_fs_tree 1 $SCRATCH_MNT/snap1/files-snap1
105 # create our final snapshot ('snap2'), populate it with
106 # exclusively owned nodes.
108 # As a result of this action, snap2 will get an implied ref to the
109 # 128K extent created in the default subvolume.
110 _run_btrfs_util_prog subvolume snapshot $SCRATCH_MNT/snap1 $SCRATCH_MNT/snap2
111 _explode_fs_tree 1 $SCRATCH_MNT/snap2/files-snap2
113 # Enable qgroups now that we have our filesystem prepared. This
114 # will kick off a scan which we will have to wait for.
# 'quota rescan -w' blocks until the initial accounting scan completes, so
# the baseline qgroup numbers are stable before the interesting operations.
115 _run_btrfs_util_prog quota enable $SCRATCH_MNT
116 _run_btrfs_util_prog quota rescan -w $SCRATCH_MNT
118 # Remount to clear cache, force everything to disk
# NOTE(review): the actual remount command is elided from this chunk —
# presumably _scratch_cycle_mount or an equivalent; confirm in the full file.
121 # Finally, delete snap1 to trigger btrfs_drop_snapshot(). This
122 # snapshot is most interesting to delete because it will cause some
123 # nodes to go exclusively owned for snap2, while some will stay shared
124 # with the default subvolume. That exercises a maximum of the drop
125 # snapshot/qgroup interactions.
127 # snap2's implied ref to the 128K extent in files/ can be lost by
128 # the root finding code in qgroup accounting due to snap1 no longer
129 # providing a path to it. This was fixed by the first two patches
# listed in the header of this test.
131 _run_btrfs_util_prog subvolume delete $SCRATCH_MNT/snap1
133 # "btrfs filesystem sync" will trigger subvolume deletion
# (subvolume delete only queues the drop; the sync forces the commit that
# actually runs btrfs_drop_snapshot and the qgroup accounting under test).
134 _run_btrfs_util_prog filesystem sync $SCRATCH_MNT
136 # Qgroup will be checked by fstest at _check_scratch_fs()