void deactivate_file_folio(struct folio *folio);
void folio_activate(struct folio *folio);
-void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
- struct vm_area_struct *vma, unsigned long pg_start,
- unsigned long pg_end, unsigned long vma_end,
- bool mm_wr_locked);
+void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *unmap);
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
/**
* free_pgtables() - Free a range of page tables
* @tlb: The mmu gather
- * @mas: The maple state
- * @vma: The first vma
- * @pg_start: The lowest page table address (floor)
- * @pg_end: The highest page table address (ceiling)
- * @vma_end: The highest vma tree search address
- * @mm_wr_locked: boolean indicating if the mm is write locked
+ * @unmap: The unmap_desc
*
* Note: unmap->pg_start and unmap->pg_end indicate the absolute range of the
* page tables that should be removed. This can differ from the vma mappings on
* some architectures (such as ARM), which may have page table mappings outside
* the vmas. The vma_end differs from the pg_end when a dup_mmap() failed and
* the tree has data unrelated to the mm_struct being torn down.
*/
-void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
- struct vm_area_struct *vma, unsigned long pg_start,
- unsigned long pg_end, unsigned long vma_end,
- bool mm_wr_locked)
+void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *unmap)
{
struct unlink_vma_file_batch vb;
+ struct ma_state *mas = unmap->mas;
+ struct vm_area_struct *vma = unmap->first;
/*
* Note: USER_PGTABLES_CEILING may be passed as the value of pg_end and
* may be 0. Underflow is expected in this case. Otherwise the
- * pagetable end is exclusive.
- * vma_end is exclusive.
- * The last vma address should never be larger than the pagetable end.
+ * pagetable end is exclusive. vma_end is exclusive. The last vma
+ * address should never be larger than the pagetable end.
*/
- WARN_ON_ONCE(vma_end - 1 > pg_end - 1);
+ WARN_ON_ONCE(unmap->vma_end - 1 > unmap->pg_end - 1);
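/*
 * A worked check of that underflow, assuming the generic
 * USER_PGTABLES_CEILING of 0 (most architectures; ARM LPAE overrides it,
 * see unmap_pgtable_init() further down):
 *
 *	pg_end - 1  ==  0UL - 1  ==  ULONG_MAX
 *
 * so the WARN_ON_ONCE() above compares vma_end - 1 against the very top of
 * the address space and cannot fire when the ceiling means "everything".
 */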
tlb_free_vmas(tlb);
do {
unsigned long addr = vma->vm_start;
struct vm_area_struct *next;
- next = mas_find(mas, vma_end - 1);
+ next = mas_find(mas, unmap->tree_end - 1);
/*
* Hide vma from rmap and truncate_pagecache before freeing
* pgtables
*/
- if (mm_wr_locked)
+ if (unmap->mm_wr_locked)
vma_start_write(vma);
unlink_anon_vmas(vma);
unlink_file_vma_batch_init(&vb);
unlink_file_vma_batch_add(&vb, vma);
/*
 * Optimization: gather nearby vmas into one call down
 */
while (next && next->vm_start <= vma->vm_end + PMD_SIZE) {
vma = next;
- next = mas_find(mas, vma_end - 1);
- if (mm_wr_locked)
+ next = mas_find(mas, unmap->tree_end - 1);
+ if (unmap->mm_wr_locked)
vma_start_write(vma);
unlink_anon_vmas(vma);
unlink_file_vma_batch_add(&vb, vma);
}
unlink_file_vma_batch_final(&vb);
- free_pgd_range(tlb, addr, vma->vm_end,
- pg_start, next ? next->vm_start : pg_end);
+ free_pgd_range(tlb, addr, vma->vm_end, unmap->pg_start,
+ next ? next->vm_start : unmap->pg_end);
vma = next;
} while (vma);
}
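
To make the batching loop concrete, consider a hypothetical layout (the
addresses are illustrative, and PMD_SIZE is assumed to be 2 MiB, as on x86-64
with 4 KiB pages):

	vma A: 0x1000000 - 0x1200000
	vma B: 0x1300000 - 0x1400000   /* within A->vm_end + PMD_SIZE */
	vma C: 0x4000000 - 0x4100000   /* too far past B: a new batch */

A and B are unlinked in one pass and their page tables are freed by a single
free_pgd_range(tlb, 0x1000000, 0x1400000, unmap->pg_start, 0x4000000) call,
with C->vm_start serving as the ceiling; the outer loop then restarts at C.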
/*
 * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper
 * because the memory has been already freed.
 */
mm_flags_set(MMF_OOM_SKIP, mm);
mmap_write_lock(mm);
+ unmap.mm_wr_locked = true;
mt_clear_in_rcu(&mm->mm_mt);
- vma_iter_set(&vmi, vma->vm_end);
- free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS,
- USER_PGTABLES_CEILING, USER_PGTABLES_CEILING, true);
+ unmap_pgtable_init(&unmap, &vmi);
+ free_pgtables(&tlb, &unmap);
tlb_finish_mmu(&tlb);
void unmap_region(struct unmap_desc *unmap)
{
struct mm_struct *mm = unmap->first->vm_mm;
- struct ma_state *mas = unmap->mas;
struct mmu_gather tlb;
tlb_gather_mmu(&tlb, mm);
update_hiwater_rss(mm);
unmap_vmas(&tlb, unmap);
- mas_set(mas, unmap->tree_reset);
- free_pgtables(&tlb, mas, unmap->first, unmap->pg_start, unmap->pg_end,
- unmap->tree_end, unmap->mm_wr_locked);
+ mas_set(unmap->mas, unmap->tree_reset);
+ free_pgtables(&tlb, unmap);
tlb_finish_mmu(&tlb);
}
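
As a usage sketch, a caller holding the mmap write lock might drive
unmap_region() through the UNMAP_STATE() initializer shown further down. The
variable names and range values here are illustrative, not taken from the
patch:

	VMA_ITERATOR(vmi, mm, start);
	struct vm_area_struct *vma = vma_find(&vmi, end);

	/* Pack the iterator, first vma, bounds and neighbours together. */
	UNMAP_STATE(unmap, &vmi, vma, start, end, prev, next);
	unmap.mm_wr_locked = true;	/* caller holds mmap_write_lock(mm) */
	unmap_region(&unmap);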
bool mm_wr_locked; /* If the mmap write lock is held */
};
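
Only the tail of the descriptor definition survives in this excerpt. For
orientation, here is a sketch of the fields this patch actually dereferences;
the names are taken from the uses above, while the ordering and any fields not
shown here are guesses:

	struct unmap_desc {
		struct ma_state *mas;		/* maple tree iterator state */
		struct vm_area_struct *first;	/* first vma in the range */
		unsigned long vma_start;	/* lowest vma address */
		unsigned long vma_end;		/* highest vma address (exclusive) */
		unsigned long pg_start;		/* lowest page table address (floor) */
		unsigned long pg_end;		/* highest page table address (ceiling) */
		unsigned long tree_end;		/* highest vma tree search address */
		unsigned long tree_reset;	/* where to reset the tree walk */
		bool mm_wr_locked;		/* if the mmap write lock is held */
	};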
+/*
+ * unmap_all_init() - Initialize unmap_desc to remove all vmas, pointing
+ * pg_start and pg_end at a safe location.
+ */
static inline void unmap_all_init(struct unmap_desc *unmap,
struct vma_iterator *vmi, struct vm_area_struct *vma)
{
unmap->mm_wr_locked = false;
}
+/*
+ * unmap_pgtable_init() - Initialize unmap_desc to remove all page tables within
+ * the user range.
+ *
+ * ARM can have mappings outside of vmas.
+ * See: e2cdef8c847b4 ("[PATCH] freepgt: free_pgtables from FIRST_USER_ADDRESS")
+ *
+ * ARM LPAE uses page table mappings beyond the USER_PGTABLES_CEILING
+ * See: CONFIG_ARM_LPAE in arch/arm/include/asm/pgtable.h
+ */
+static inline void unmap_pgtable_init(struct unmap_desc *unmap,
+ struct vma_iterator *vmi)
+{
+ vma_iter_set(vmi, unmap->tree_reset);
+ unmap->vma_start = FIRST_USER_ADDRESS;
+ unmap->vma_end = USER_PGTABLES_CEILING;
+ unmap->tree_end = USER_PGTABLES_CEILING;
+}
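
Putting the two initializers together, the exit_mmap() teardown this patch
implies looks roughly as follows (a sketch only; the surrounding exit_mmap()
code and error handling are omitted):

	unmap_all_init(&unmap, &vmi, vma);	/* vma-bounded teardown */
	unmap_vmas(&tlb, &unmap);
	mmap_write_lock(mm);
	unmap.mm_wr_locked = true;
	/* Widen to the full user page table range before freeing. */
	unmap_pgtable_init(&unmap, &vmi);
	free_pgtables(&tlb, &unmap);
	tlb_finish_mmu(&tlb);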
+
#define UNMAP_STATE(name, _vmi, _vma, _vma_start, _vma_end, _prev, _next) \
struct unmap_desc name = { \
.mas = &(_vmi)->mas, \
-static inline void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
- struct vm_area_struct *vma, unsigned long floor,
- unsigned long ceiling, unsigned long tree_max,
- bool mm_wr_locked)
+static inline void free_pgtables(struct mmu_gather *tlb,
+		struct unmap_desc *unmap)
{
}
static inline void mapping_unmap_writable(struct address_space *mapping)