 void *module_alloc(unsigned long size)
 {
        return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-                               GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
+                               GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
                                __builtin_return_address(0));
 }
 #endif
 
 void *module_alloc(unsigned long size)
 {
        return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-                                   GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
-                                   __builtin_return_address(0));
+                                   GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
+                                   NUMA_NO_NODE, __builtin_return_address(0));
 }
 
 enum aarch64_reloc_op {
 
 void *module_alloc(unsigned long size)
 {
        return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
-                               GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE,
+                               GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
                                __builtin_return_address(0));
 }
 #endif
 
         * init_data correctly */
        return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
                                    GFP_KERNEL | __GFP_HIGHMEM,
-                                   PAGE_KERNEL_RWX, NUMA_NO_NODE,
+                                   PAGE_KERNEL_RWX, 0, NUMA_NO_NODE,
                                    __builtin_return_address(0));
 }
 
 
        if (PAGE_ALIGN(size) > MODULES_LEN)
                return NULL;
        return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-                                   GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE,
+                                   GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
                                    __builtin_return_address(0));
 }
 #endif
 
        if (PAGE_ALIGN(size) > MODULES_LEN)
                return NULL;
        return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-                               GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE,
+                               GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
                                __builtin_return_address(0));
 }
 #else
 
 void *module_alloc(unsigned long size)
 {
        return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-                               GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
+                               GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
                                __builtin_return_address(0));
 }
 
 
        return __vmalloc_node_range(size, 1,
                                    MODULES_VADDR + get_module_load_offset(),
                                    MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
-                                   PAGE_KERNEL_EXEC, NUMA_NO_NODE,
+                                   PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
                                    __builtin_return_address(0));
 }
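
All of the call-site hunks above are mechanical: a literal 0 is inserted for
the new vm_flags argument between prot and node, so every converted
architecture keeps its pre-patch behaviour. For reference, the argument order
after this patch (an annotated sketch, not a hunk from the patch itself):

	__vmalloc_node_range(size, align,	/* requested size, minimum alignment */
			     start, end,	/* virtual address range to allocate from */
			     gfp_mask, prot,	/* page allocator flags, page protections */
			     vm_flags,		/* new: extra VM_* area flags; 0 keeps old behaviour */
			     node, caller);	/* NUMA node, return address for tracking */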
 
 
 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
                        unsigned long start, unsigned long end, gfp_t gfp_mask,
-                       pgprot_t prot, int node, const void *caller);
+                       pgprot_t prot, unsigned long vm_flags, int node,
+                       const void *caller);
+
 extern void vfree(const void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
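
With the header change above in place, a caller that wants the new behaviour
passes a VM_* flag instead of 0. A minimal sketch, assuming this patch is
applied; the function name is hypothetical, and VM_NO_GUARD is the example
flag named in the kernel-doc below:

	/* Hypothetical caller, not part of this patch: allocate a
	 * range without the trailing guard page by passing
	 * VM_NO_GUARD through the new vm_flags argument. */
	static void *vmalloc_no_guard(unsigned long size)
	{
		return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
					    GFP_KERNEL, PAGE_KERNEL, VM_NO_GUARD,
					    NUMA_NO_NODE,
					    __builtin_return_address(0));
	}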
 
  *     @end:           vm area range end
  *     @gfp_mask:      flags for the page level allocator
  *     @prot:          protection mask for the allocated pages
+ *     @vm_flags:      additional vm area flags (e.g. %VM_NO_GUARD)
  *     @node:          node to use for allocation or NUMA_NO_NODE
  *     @caller:        caller's return address
  *
  */
 void *__vmalloc_node_range(unsigned long size, unsigned long align,
                        unsigned long start, unsigned long end, gfp_t gfp_mask,
-                       pgprot_t prot, int node, const void *caller)
+                       pgprot_t prot, unsigned long vm_flags, int node,
+                       const void *caller)
 {
        struct vm_struct *area;
        void *addr;
        if (!size || (size >> PAGE_SHIFT) > totalram_pages)
                goto fail;
 
-       area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
-                                 start, end, node, gfp_mask, caller);
+       area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
+                               vm_flags, start, end, node, gfp_mask, caller);
        if (!area)
                goto fail;
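
Because vm_flags is ORed into the area flags here, whatever bits the caller
passed end up in vm_struct::flags and can be queried later. A hedged sketch
of such a check, not part of this patch:

	/* Illustration only: flags handed in via vm_flags are
	 * visible on the vm_struct backing the mapping. */
	struct vm_struct *vm = find_vm_area(addr);

	if (vm && (vm->flags & VM_NO_GUARD))
		pr_debug("mapping at %p has no guard page\n", addr);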
 
                            int node, const void *caller)
 {
        return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
-                               gfp_mask, prot, node, caller);
+                               gfp_mask, prot, 0, node, caller);
 }
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
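
The wrappers in the last hunks (__vmalloc_node() above, and __vmalloc() and
ultimately vmalloc() through it) keep their old signatures and forward 0 for
vm_flags, so existing users are unaffected. A trivial usage sketch, assuming
the patch is applied:

	/* Existing API, unchanged semantics: this path reaches
	 * __vmalloc_node_range() with vm_flags == 0, so a guard
	 * page is still reserved after the area. */
	void *buf = vmalloc(PAGE_SIZE);

	if (buf)
		vfree(buf);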