static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
		struct ma_state *mas_detach, bool mm_wr_locked)
{
	/*
	 * Clear the PTEs and free the page tables for the already-detached
	 * VMAs tracked in @mas_detach.  The former open-coded sequence
	 * (tlb_gather_mmu() -> unmap_vmas() -> free_pgtables() ->
	 * tlb_finish_mmu()) is delegated to unmap_region(), driven by a
	 * struct unmap_desc.
	 *
	 * NOTE(review): this initializer looks truncated by the merge — the
	 * unmap range (vms->unmap_start / vms->unmap_end) and the
	 * mm_wr_locked flag were previously passed to free_pgtables()
	 * directly and presumably must now be carried in struct unmap_desc.
	 * Confirm the full field list against the unmap_desc definition
	 * before relying on this path.
	 */
	struct unmap_desc unmap = {
		.mas = mas_detach,
		.first = vms->vma,
	};

	if (!vms->clear_ptes) /* Nothing to do */
		return;

	/* Restart the walk of the detached-VMA tree from the first entry. */
	mas_set(mas_detach, 1);
	unmap_region(&unmap);
	vms->clear_ptes = false;
}