]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-client.git/commitdiff
mm: use unmap_desc struct for freeing page tables
authorLiam R. Howlett <Liam.Howlett@oracle.com>
Wed, 21 Jan 2026 16:49:46 +0000 (11:49 -0500)
committerAndrew Morton <akpm@linux-foundation.org>
Thu, 12 Feb 2026 23:42:56 +0000 (15:42 -0800)
Pass the unmap_desc through to free_pgtables() because it already contains
almost everything necessary and is already on the stack.

Updates testing code as necessary.

No functional changes intended.

[Liam.Howlett@oracle.com: fix up unmap desc use on exit_mmap()]
Link: https://lkml.kernel.org/r/20260210214214.364856-1-Liam.Howlett@oracle.com
Link: https://lkml.kernel.org/r/20260121164946.2093480-12-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Chris Li <chrisl@kernel.org>
Cc: David Hildenbrand <david@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Kairui Song <kasong@tencent.com>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Pedro Falcato <pfalcato@suse.de>
Cc: SeongJae Park <sj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/internal.h
mm/memory.c
mm/mmap.c
mm/vma.c
mm/vma.h
tools/testing/vma/vma_internal.h

index 0f3ad8665d95e28773fc38eec444399f1cdecee6..ef71a1d9991f2d2a1861690229eb30886ad1b2d7 100644 (file)
@@ -512,10 +512,7 @@ bool __folio_end_writeback(struct folio *folio);
 void deactivate_file_folio(struct folio *folio);
 void folio_activate(struct folio *folio);
 
-void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
-                  struct vm_area_struct *vma, unsigned long pg_start,
-                  unsigned long pg_end, unsigned long vma_end,
-                  bool mm_wr_locked);
+void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *desc);
 
 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
 
index d68f8f082b1c32f2e2d8748f892bedd1d1e21b63..136b80ca357bb964a169552ef3658f964e690572 100644 (file)
@@ -373,12 +373,7 @@ void free_pgd_range(struct mmu_gather *tlb,
 /**
  * free_pgtables() - Free a range of page tables
  * @tlb: The mmu gather
- * @mas: The maple state
- * @vma: The first vma
- * @pg_start: The lowest page table address (floor)
- * @pg_end: The highest page table address (ceiling)
- * @vma_end: The highest vma tree search address
- * @mm_wr_locked: boolean indicating if the mm is write locked
+ * @unmap: The unmap_desc
  *
  * Note: pg_start and pg_end are provided to indicate the absolute range of the
  * page tables that should be removed.  This can differ from the vma mappings on
@@ -388,21 +383,19 @@ void free_pgd_range(struct mmu_gather *tlb,
  * The vma_end differs from the pg_end when a dup_mmap() failed and the tree has
  * unrelated data to the mm_struct being torn down.
  */
-void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
-                  struct vm_area_struct *vma, unsigned long pg_start,
-                  unsigned long pg_end, unsigned long vma_end,
-                  bool mm_wr_locked)
+void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *unmap)
 {
        struct unlink_vma_file_batch vb;
+       struct ma_state *mas = unmap->mas;
+       struct vm_area_struct *vma = unmap->first;
 
        /*
         * Note: USER_PGTABLES_CEILING may be passed as the value of pg_end and
         * may be 0.  Underflow is expected in this case.  Otherwise the
-        * pagetable end is exclusive.
-        * vma_end is exclusive.
-        * The last vma address should never be larger than the pagetable end.
+        * pagetable end is exclusive.  vma_end is exclusive.  The last vma
+        * address should never be larger than the pagetable end.
         */
-       WARN_ON_ONCE(vma_end - 1 > pg_end - 1);
+       WARN_ON_ONCE(unmap->vma_end - 1 > unmap->pg_end - 1);
 
        tlb_free_vmas(tlb);
 
@@ -410,13 +403,13 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
                unsigned long addr = vma->vm_start;
                struct vm_area_struct *next;
 
-               next = mas_find(mas, vma_end - 1);
+               next = mas_find(mas, unmap->tree_end - 1);
 
                /*
                 * Hide vma from rmap and truncate_pagecache before freeing
                 * pgtables
                 */
-               if (mm_wr_locked)
+               if (unmap->mm_wr_locked)
                        vma_start_write(vma);
                unlink_anon_vmas(vma);
 
@@ -428,16 +421,16 @@ void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
                 */
                while (next && next->vm_start <= vma->vm_end + PMD_SIZE) {
                        vma = next;
-                       next = mas_find(mas, vma_end - 1);
-                       if (mm_wr_locked)
+                       next = mas_find(mas, unmap->tree_end - 1);
+                       if (unmap->mm_wr_locked)
                                vma_start_write(vma);
                        unlink_anon_vmas(vma);
                        unlink_file_vma_batch_add(&vb, vma);
                }
                unlink_file_vma_batch_final(&vb);
 
-               free_pgd_range(tlb, addr, vma->vm_end,
-                       pg_start, next ? next->vm_start : pg_end);
+               free_pgd_range(tlb, addr, vma->vm_end, unmap->pg_start,
+                              next ? next->vm_start : unmap->pg_end);
                vma = next;
        } while (vma);
 }
index 042b6b4b6ab86144257ae2d3d4c24e6aa3bae304..a03b7681e13c2b2543087534d6747a34a29131f4 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1307,10 +1307,10 @@ void exit_mmap(struct mm_struct *mm)
         */
        mm_flags_set(MMF_OOM_SKIP, mm);
        mmap_write_lock(mm);
+       unmap.mm_wr_locked = true;
        mt_clear_in_rcu(&mm->mm_mt);
-       vma_iter_set(&vmi, vma->vm_end);
-       free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS,
-                     USER_PGTABLES_CEILING, USER_PGTABLES_CEILING, true);
+       unmap_pgtable_init(&unmap, &vmi);
+       free_pgtables(&tlb, &unmap);
        tlb_finish_mmu(&tlb);
 
        /*
index 876d2db5329ddf0f9032c01b8cb030e9fb7ed914..f352d5c722126f3e3709485a801694d5d21e1352 100644 (file)
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -475,15 +475,13 @@ void remove_vma(struct vm_area_struct *vma)
 void unmap_region(struct unmap_desc *unmap)
 {
        struct mm_struct *mm = unmap->first->vm_mm;
-       struct ma_state *mas = unmap->mas;
        struct mmu_gather tlb;
 
        tlb_gather_mmu(&tlb, mm);
        update_hiwater_rss(mm);
        unmap_vmas(&tlb, unmap);
-       mas_set(mas, unmap->tree_reset);
-       free_pgtables(&tlb, mas, unmap->first, unmap->pg_start, unmap->pg_end,
-                     unmap->tree_end, unmap->mm_wr_locked);
+       mas_set(unmap->mas, unmap->tree_reset);
+       free_pgtables(&tlb, unmap);
        tlb_finish_mmu(&tlb);
 }
 
index bb7fa5d2bde2534b05c8dca63e41a7eebbfbeeaa..de30c69bceaf7262d6832051c2b356ba0f14101d 100644 (file)
--- a/mm/vma.h
+++ b/mm/vma.h
@@ -167,6 +167,10 @@ struct unmap_desc {
        bool mm_wr_locked;            /* If the mmap write lock is held */
 };
 
+/*
+ * unmap_all_init() - Initialize unmap_desc to remove all vmas; pg_start and
+ * pg_end are pointed to a safe location.
+ */
 static inline void unmap_all_init(struct unmap_desc *unmap,
                struct vma_iterator *vmi, struct vm_area_struct *vma)
 {
@@ -181,6 +185,25 @@ static inline void unmap_all_init(struct unmap_desc *unmap,
        unmap->mm_wr_locked = false;
 }
 
+/*
+ * unmap_pgtable_init() - Initialize unmap_desc to remove all page tables within
+ * the user range.
+ *
+ * ARM can have mappings outside of vmas.
+ * See: e2cdef8c847b4 ("[PATCH] freepgt: free_pgtables from FIRST_USER_ADDRESS")
+ *
+ * ARM LPAE uses page table mappings beyond the USER_PGTABLES_CEILING
+ * See: CONFIG_ARM_LPAE in arch/arm/include/asm/pgtable.h
+ */
+static inline void unmap_pgtable_init(struct unmap_desc *unmap,
+                                     struct vma_iterator *vmi)
+{
+       vma_iter_set(vmi, unmap->tree_reset);
+       unmap->vma_start = FIRST_USER_ADDRESS;
+       unmap->vma_end = USER_PGTABLES_CEILING;
+       unmap->tree_end = USER_PGTABLES_CEILING;
+}
+
 #define UNMAP_STATE(name, _vmi, _vma, _vma_start, _vma_end, _prev, _next)      \
        struct unmap_desc name = {                                             \
                .mas = &(_vmi)->mas,                                           \
index 0b4918aac8d6dad2e4c613c8b7b639453458a79e..ca4eb563b29ba6306c37c454c91b6fd7cd8b2f2b 100644 (file)
@@ -1137,11 +1137,10 @@ static inline void unmap_vmas(struct mmu_gather *tlb, struct unmap_desc *unmap)
 {
 }
 
-static inline void free_pgtables(struct mmu_gather *tlb, struct ma_state *mas,
-                  struct vm_area_struct *vma, unsigned long floor,
-                  unsigned long ceiling, unsigned long tree_max,
-                  bool mm_wr_locked)
+static inline void free_pgtables(struct mmu_gather *tlb, struct unmap_desc *desc)
 {
+       (void)tlb;
+       (void)desc;
 }
 
 static inline void mapping_unmap_writable(struct address_space *mapping)