Update secretmem to use the new vma_flags_t type, which will
soon supersede vm_flags_t altogether.
In order to make this change we also have to update mlock_future_ok(),
replacing the vm_flags_t parameter with a simple boolean is_vma_locked
one, which also simplifies the invocation here.
This is laying the groundwork for eliminating the vm_flags_t in
vm_area_desc and more broadly throughout the kernel.
No functional changes intended.
[lorenzo.stoakes@oracle.com: fix check_brk_limits(), per Chris]
Link: https://lkml.kernel.org/r/3aab9ab1-74b4-405e-9efb-08fc2500c06e@lucifer.local
Link: https://lkml.kernel.org/r/a243a09b0a5d0581e963d696de1735f61f5b2075.1769097829.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: David Hildenbrand <david@kernel.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Damien Le Moal <dlemoal@kernel.org>
Cc: "Darrick J. Wong" <djwong@kernel.org>
Cc: Jarkko Sakkinen <jarkko@kernel.org>
Cc: Yury Norov <ynorov@nvidia.com>
Cc: Chris Mason <clm@fb.com>
Cc: Pedro Falcato <pfalcato@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
unsigned long start, unsigned long end, int *locked);
extern long faultin_page_range(struct mm_struct *mm, unsigned long start,
unsigned long end, bool write, int *locked);
-bool mlock_future_ok(const struct mm_struct *mm, vm_flags_t vm_flags,
+bool mlock_future_ok(const struct mm_struct *mm, bool is_vma_locked,
unsigned long bytes);
/*
if (IS_ERR_VALUE(mapped_addr))
return mapped_addr;
- return mlock_future_ok(current->mm, current->mm->def_flags, len)
+ return mlock_future_ok(current->mm,
+ current->mm->def_flags & VM_LOCKED, len)
? 0 : -EAGAIN;
}
return hint;
}
-bool mlock_future_ok(const struct mm_struct *mm, vm_flags_t vm_flags,
- unsigned long bytes)
+bool mlock_future_ok(const struct mm_struct *mm, bool is_vma_locked,
+ unsigned long bytes)
{
unsigned long locked_pages, limit_pages;
- if (!(vm_flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
+ if (!is_vma_locked || capable(CAP_IPC_LOCK))
return true;
locked_pages = bytes >> PAGE_SHIFT;
if (!can_do_mlock())
return -EPERM;
- if (!mlock_future_ok(mm, vm_flags, len))
+ if (!mlock_future_ok(mm, vm_flags & VM_LOCKED, len))
return -EAGAIN;
if (file) {
if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
return -EFAULT;
- if (!mlock_future_ok(mm, vma->vm_flags, vrm->delta))
+ if (!mlock_future_ok(mm, vma->vm_flags & VM_LOCKED, vrm->delta))
return -EAGAIN;
if (!may_expand_vm(mm, vma->vm_flags, vrm->delta >> PAGE_SHIFT))
{
const unsigned long len = vma_desc_size(desc);
- if ((desc->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
+ if (!vma_desc_test_flags(desc, VMA_SHARED_BIT, VMA_MAYSHARE_BIT))
return -EINVAL;
- if (!mlock_future_ok(desc->mm, desc->vm_flags | VM_LOCKED, len))
+ vma_desc_set_flags(desc, VMA_LOCKED_BIT, VMA_DONTDUMP_BIT);
+ if (!mlock_future_ok(desc->mm, /*is_vma_locked=*/ true, len))
return -EAGAIN;
-
- desc->vm_flags |= VM_LOCKED | VM_DONTDUMP;
desc->vm_ops = &secretmem_vm_ops;
return 0;
return -ENOMEM;
/* mlock limit tests */
- if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT))
+ if (!mlock_future_ok(mm, vma->vm_flags & VM_LOCKED, grow << PAGE_SHIFT))
return -ENOMEM;
/* Check to ensure the stack will not grow into a hugetlb-only region */