git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-client.git/commitdiff
mm: update secretmem to use VMA flags on mmap_prepare
author: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Thu, 22 Jan 2026 16:06:16 +0000 (16:06 +0000)
committer: Andrew Morton <akpm@linux-foundation.org>
Thu, 12 Feb 2026 23:42:58 +0000 (15:42 -0800)
This patch updates secretmem to use the new vma_flags_t type which will
soon supersede vm_flags_t altogether.

In order to make this change we also have to update mlock_future_ok(), we
replace the vm_flags_t parameter with a simple boolean is_vma_locked one,
which also simplifies the invocation here.

This is laying the groundwork for eliminating the vm_flags_t in
vm_area_desc and more broadly throughout the kernel.

No functional changes intended.

[lorenzo.stoakes@oracle.com: fix check_brk_limits(), per Chris]
Link: https://lkml.kernel.org/r/3aab9ab1-74b4-405e-9efb-08fc2500c06e@lucifer.local
Link: https://lkml.kernel.org/r/a243a09b0a5d0581e963d696de1735f61f5b2075.1769097829.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: David Hildenbrand <david@kernel.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Damien Le Moal <dlemoal@kernel.org>
Cc: "Darrick J. Wong" <djwong@kernel.org>
Cc: Jarkko Sakkinen <jarkko@kernel.org>
Cc: Yury Norov <ynorov@nvidia.com>
Cc: Chris Mason <clm@fb.com>
Cc: Pedro Falcato <pfalcato@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/internal.h
mm/mmap.c
mm/mremap.c
mm/secretmem.c
mm/vma.c

index ef71a1d9991f2d2a1861690229eb30886ad1b2d7..d67e8bb7573498c9feed9ee86147484b5be9ad54 100644 (file)
@@ -1046,7 +1046,7 @@ extern long populate_vma_page_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end, int *locked);
 extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
                unsigned long end, bool write, int *locked);
-bool mlock_future_ok(const struct mm_struct *mm, vm_flags_t vm_flags,
+bool mlock_future_ok(const struct mm_struct *mm, bool is_vma_locked,
                unsigned long bytes);
 
 /*
index c62abc61648511741c315f473c2b0a3cfc1cb01e..843160946aa5f481c33c6cb98a59443cc4c9d436 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -108,7 +108,8 @@ static int check_brk_limits(unsigned long addr, unsigned long len)
        if (IS_ERR_VALUE(mapped_addr))
                return mapped_addr;
 
-       return mlock_future_ok(current->mm, current->mm->def_flags, len)
+       return mlock_future_ok(current->mm,
+                             current->mm->def_flags & VM_LOCKED, len)
                ? 0 : -EAGAIN;
 }
 
@@ -225,12 +226,12 @@ static inline unsigned long round_hint_to_min(unsigned long hint)
        return hint;
 }
 
-bool mlock_future_ok(const struct mm_struct *mm, vm_flags_t vm_flags,
-                       unsigned long bytes)
+bool mlock_future_ok(const struct mm_struct *mm, bool is_vma_locked,
+                    unsigned long bytes)
 {
        unsigned long locked_pages, limit_pages;
 
-       if (!(vm_flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
+       if (!is_vma_locked || capable(CAP_IPC_LOCK))
                return true;
 
        locked_pages = bytes >> PAGE_SHIFT;
@@ -416,7 +417,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
                if (!can_do_mlock())
                        return -EPERM;
 
-       if (!mlock_future_ok(mm, vm_flags, len))
+       if (!mlock_future_ok(mm, vm_flags & VM_LOCKED, len))
                return -EAGAIN;
 
        if (file) {
index 8391ae17de645d10d8447b02a76ad735c73b406c..2be876a70cc0d03a8911dd0501f410dd70421f67 100644 (file)
@@ -1740,7 +1740,7 @@ static int check_prep_vma(struct vma_remap_struct *vrm)
        if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
                return -EFAULT;
 
-       if (!mlock_future_ok(mm, vma->vm_flags, vrm->delta))
+       if (!mlock_future_ok(mm, vma->vm_flags & VM_LOCKED, vrm->delta))
                return -EAGAIN;
 
        if (!may_expand_vm(mm, vma->vm_flags, vrm->delta >> PAGE_SHIFT))
index edf111e0a1bbba9d33278f991cdf1108d8fb0c4f..11a779c812a7f7b854bd445d6b33799e70caa746 100644 (file)
@@ -122,13 +122,12 @@ static int secretmem_mmap_prepare(struct vm_area_desc *desc)
 {
        const unsigned long len = vma_desc_size(desc);
 
-       if ((desc->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
+       if (!vma_desc_test_flags(desc, VMA_SHARED_BIT, VMA_MAYSHARE_BIT))
                return -EINVAL;
 
-       if (!mlock_future_ok(desc->mm, desc->vm_flags | VM_LOCKED, len))
+       vma_desc_set_flags(desc, VMA_LOCKED_BIT, VMA_DONTDUMP_BIT);
+       if (!mlock_future_ok(desc->mm, /*is_vma_locked=*/ true, len))
                return -EAGAIN;
-
-       desc->vm_flags |= VM_LOCKED | VM_DONTDUMP;
        desc->vm_ops = &secretmem_vm_ops;
 
        return 0;
index f352d5c722126f3e3709485a801694d5d21e1352..39dcd9ddd4bab3bfeb3105f568a1d228de00783d 100644 (file)
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -3053,7 +3053,7 @@ static int acct_stack_growth(struct vm_area_struct *vma,
                return -ENOMEM;
 
        /* mlock limit tests */
-       if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT))
+       if (!mlock_future_ok(mm, vma->vm_flags & VM_LOCKED, grow << PAGE_SHIFT))
                return -ENOMEM;
 
        /* Check to ensure the stack will not grow into a hugetlb-only region */