2 # SPDX-License-Identifier: GPL-2.0
3 # Copyright (C) 2015 SUSE Linux Products GmbH. All Rights Reserved.
5 # FS QA Test No. btrfs/104
7 # Test btrfs quota group consistency operations during snapshot
8 # delete. Btrfs has had long standing issues with drop snapshot
9 # failing to properly account for quota groups. This test crafts
10 # several snapshot trees with shared and exclusive elements. One of
11 # the trees is removed and then quota group consistency is checked.
13 # This issue is fixed by the following linux kernel patches:
15 # Btrfs: use btrfs_get_fs_root in resolve_indirect_ref
16 # Btrfs: keep dropped roots in cache until transaciton commit
17 # btrfs: qgroup: account shared subtree during snapshot delete
20 _begin_fstest auto qgroup
22 # Import common functions.
25 # real QA test starts here
28 _require_btrfs_qgroup_report
30 # Create an fs tree of a given height at a target location. This is
31 # done by aggressively creating inline extents to expand the number of
32 # nodes required. We also add a traditional extent so that
33 # drop_snapshot is forced to walk at least one extent that is not
36 # NOTE: The ability to vary tree height for this test is very useful
37 # for debugging problems with drop_snapshot(). As a result we retain
38 # that parameter even though the test below always does level 2 trees.
# NOTE(review): this span is a fragment of _explode_fs_tree(); the
# function header, argument checks and the case-statement framing fall
# on lines not visible in this view, so comments below are scoped to
# the visible lines only.
45 echo "specify location for fileset"
# Each case arm selects a file count (assigned to a variable on lines
# not shown here — presumably $n, used by the loop below) sized so the
# resulting btree for $loc reaches the requested height.
50 1)# this always reproduces level 1 trees
53 2)# this always reproduces level 2 trees
56 3)# this always reproduces level 3 trees
60 echo "Can't make level $level trees";
# 4095-byte writes keep the data inline in btree leaves, so each file
# inflates the tree node count instead of allocating data extents.
66 for i in `seq -w 1 $n`; do
67 $XFS_IO_PROG -f -c "pwrite 0 4095" $loc/file$i > /dev/null 2>&1
# One regular (non-inline) 128K extent, so that drop_snapshot is also
# forced to walk at least one extent that is not inline.
70 $XFS_IO_PROG -f -c "pwrite 0 128k" $loc/extentfile > /dev/null 2>&1
73 # Force the default leaf size as the calculations for making our btree
74 # heights are based on that.
75 _scratch_mkfs "--nodesize 16384" >> $seqres.full 2>&1
# NOTE(review): the _scratch_mount between mkfs and the first
# _explode_fs_tree call sits on a line not visible in this view.
78 # populate the default subvolume and create a snapshot ('snap1')
79 _explode_fs_tree 1 $SCRATCH_MNT/files
80 _run_btrfs_util_prog subvolume snapshot $SCRATCH_MNT $SCRATCH_MNT/snap1
82 # create new btree nodes in this snapshot. They will become shared
83 # with the next snapshot we create.
84 _explode_fs_tree 1 $SCRATCH_MNT/snap1/files-snap1
86 # create our final snapshot ('snap2'), populate it with
87 # exclusively owned nodes.
89 # As a result of this action, snap2 will get an implied ref to the
90 # 128K extent created in the default subvolume.
91 _run_btrfs_util_prog subvolume snapshot $SCRATCH_MNT/snap1 $SCRATCH_MNT/snap2
92 _explode_fs_tree 1 $SCRATCH_MNT/snap2/files-snap2
94 # Enable qgroups now that we have our filesystem prepared. This
95 # will kick off a scan which we will have to wait for.
96 _run_btrfs_util_prog quota enable $SCRATCH_MNT
# 'rescan -w' blocks until the initial qgroup scan finishes, so the
# accounting numbers are stable before we start deleting subvolumes.
97 _run_btrfs_util_prog quota rescan -w $SCRATCH_MNT
99 # Remount to clear cache, force everything to disk
# NOTE(review): the remount commands themselves are on lines not
# visible in this view.
102 # Finally, delete snap1 to trigger btrfs_drop_snapshot(). This
103 # snapshot is most interesting to delete because it will cause some
104 # nodes to go exclusively owned for snap2, while some will stay shared
105 # with the default subvolume. That exercises a maximum of the drop
106 # snapshot/qgroup interactions.
108 # snap2's implied ref to the 128K extent in files/ can be lost by
109 # the root finding code in qgroup accounting due to snap1 no longer
110 # providing a path to it. This was fixed by the first two patches
# listed in the header at the top of this test.
112 _run_btrfs_util_prog subvolume delete $SCRATCH_MNT/snap1
114 # "btrfs filesystem sync" will trigger subvolume deletion
# Subvolume deletion is asynchronous; the sync forces the actual
# btrfs_drop_snapshot() work to run now rather than at unmount.
115 _run_btrfs_util_prog filesystem sync $SCRATCH_MNT
117 # Qgroup will be checked by fstest at _check_scratch_fs()