}
EXPORT_SYMBOL(vm_brk_flags);
+static unsigned long tear_down_vmas(struct mm_struct *mm, struct vma_iterator *vmi,
+ struct vm_area_struct *vma, unsigned long end)
+{
+ unsigned long nr_accounted = 0;
+ int count = 0;
+
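+ /*
+ * The caller must hold the mmap write lock; for the exit_mmap() caller
+ * the mm is already unreachable, so nothing else can modify the tree.
+ */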
+ mmap_assert_write_locked(mm);
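+ /*
+ * Position the iterator at the end of the first VMA so that vma_next()
+ * below walks its successors in address order.
+ */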
+ vma_iter_set(vmi, vma->vm_end);
+ do {
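+ /* A VM_ACCOUNT VMA's pages were charged as committed memory; tally them. */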
+ if (vma->vm_flags & VM_ACCOUNT)
+ nr_accounted += vma_pages(vma);
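+ /* Mark detached for per-VMA lock readers, then close and free the VMA. */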
+ vma_mark_detached(vma);
+ remove_vma(vma);
+ count++;
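+ /* Tearing down a large address space can take a while; allow preemption. */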
+ cond_resched();
+ vma = vma_next(vmi);
+ } while (vma && vma->vm_end <= end);
+
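+ /* Sanity check: we should have torn down every VMA the mm knew about. */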
+ VM_WARN_ON_ONCE(count != mm->map_count);
+ return nr_accounted;
+}
+
/* Release all mmaps. */
void exit_mmap(struct mm_struct *mm)
{
struct vm_area_struct *vma;
unsigned long nr_accounted = 0;
VMA_ITERATOR(vmi, mm, 0);
- int count = 0;
/* mm's last user has gone, and it's about to be pulled down */
mmu_notifier_release(mm);
* enabled, without holding any MM locks besides the unreachable
* mmap_write_lock.
*/
- vma_iter_set(&vmi, vma->vm_end);
- do {
- if (vma->vm_flags & VM_ACCOUNT)
- nr_accounted += vma_pages(vma);
- vma_mark_detached(vma);
- remove_vma(vma);
- count++;
- cond_resched();
- vma = vma_next(&vmi);
- } while (vma && likely(!xa_is_zero(vma)));
-
- BUG_ON(count != mm->map_count);
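+ /* end == ULONG_MAX: tear down every VMA remaining in the tree. */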
+ nr_accounted = tear_down_vmas(mm, &vmi, vma, ULONG_MAX);
destroy:
__mt_destroy(&mm->mm_mt);