/*
 * linux/mm/nommu.c
 *
 * Replacement code for mm functions to support CPUs that don't
 * have any form of memory management unit (thus no virtual memory).
 *
 * See Documentation/nommu-mmap.txt
 *
 * Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
 * Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 * Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 * Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/compiler.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/printk.h>

#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include "internal.h"

void *high_memory;
EXPORT_SYMBOL(high_memory);
struct page *mem_map;
unsigned long max_mapnr;
EXPORT_SYMBOL(max_mapnr);
unsigned long highest_memmap_pfn;
int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
int heap_stack_gap = 0;

atomic_long_t mmap_pages_allocated;

EXPORT_SYMBOL(mem_map);

/* list of mapped, potentially shareable regions */
static struct kmem_cache *vm_region_jar;
struct rb_root nommu_region_tree = RB_ROOT;
DECLARE_RWSEM(nommu_region_sem);

const struct vm_operations_struct generic_file_vm_ops = {
};

/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	/*
	 * If the object we have should not have ksize performed on it,
	 * return size of 0
	 */
	if (!objp || !virt_addr_valid(objp))
		return 0;

	page = virt_to_head_page(objp);

	/*
	 * If the allocator sets PageSlab, we know the pointer came from
	 * kmalloc().
	 */
	if (PageSlab(page))
		return ksize(objp);

	/*
	 * If it's not a compound page, see if we have a matching VMA
	 * region. This test is intentionally done in reverse order,
	 * so if there's no VMA, we still fall through and hand back
	 * PAGE_SIZE for 0-order pages.
	 */
	if (!PageCompound(page)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, (unsigned long)objp);
		if (vma)
			return vma->vm_end - vma->vm_start;
	}

	/*
	 * The ksize() function is only guaranteed to work for pointers
	 * returned by kmalloc(). So handle arbitrary pointers here.
	 */
	return PAGE_SIZE << compound_order(page);
}

long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		      unsigned long start, unsigned long nr_pages,
		      unsigned int foll_flags, struct page **pages,
		      struct vm_area_struct **vmas, int *nonblocking)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int i;

	/* calculate required read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (foll_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (foll_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < nr_pages; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		/* protect what we can, including chardevs */
		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				page_cache_get(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start = (start + PAGE_SIZE) & PAGE_MASK;
	}

	return i;

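	/* return the number of pages successfully looked up before the
	 * failure, or -EFAULT if the very first lookup failed */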
finish_or_fault:
	return i ? : -EFAULT;
}

/*
 * get a list of pages in an address range belonging to the specified process
 * and indicate the VMA that covers each page
 * - this is potentially dodgy as we may end up incrementing the page count of
 *   a slab page or a secondary page from a compound page
 * - don't permit access to VMAs that don't support it, such as I/O mappings
 */
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		    unsigned long start, unsigned long nr_pages,
		    int write, int force, struct page **pages,
		    struct vm_area_struct **vmas)
{
	int flags = 0;

	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
				NULL);
}
EXPORT_SYMBOL(get_user_pages);

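/*
 * On !MMU there is no page fault handling to retry, so the "locked" and
 * "unlocked" variants below simply delegate to get_user_pages().
 */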
long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
			   unsigned long start, unsigned long nr_pages,
			   int write, int force, struct page **pages,
			   int *locked)
{
	return get_user_pages(tsk, mm, start, nr_pages, write, force,
			      pages, NULL);
}
EXPORT_SYMBOL(get_user_pages_locked);

long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
			       unsigned long start, unsigned long nr_pages,
			       int write, int force, struct page **pages,
			       unsigned int gup_flags)
{
	long ret;
	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, nr_pages, write, force,
			     pages, NULL);
	up_read(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(__get_user_pages_unlocked);

long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
			     unsigned long start, unsigned long nr_pages,
			     int write, int force, struct page **pages)
{
	return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,
					 force, pages, 0);
}
EXPORT_SYMBOL(get_user_pages_unlocked);

/**
 * follow_pfn - look up PFN at a user virtual address
 * @vma: memory mapping
 * @address: user virtual address
 * @pfn: location to store found PFN
 *
 * Only IO mappings and raw PFN mappings are allowed.
 *
 * Returns zero and the pfn at @pfn on success, -ve otherwise.
 */
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn)
{
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return -EINVAL;

	*pfn = address >> PAGE_SHIFT;
	return 0;
}
EXPORT_SYMBOL(follow_pfn);

LIST_HEAD(vmap_area_list);

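/*
 * On !MMU, vmalloc() and friends are backed directly by kmalloc(), so freeing
 * is just kfree() and the vmalloc_to_page()/vmalloc_to_pfn() conversions are
 * plain direct-mapping lookups.
 */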
void vfree(const void *addr)
{
	kfree(addr);
}
EXPORT_SYMBOL(vfree);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	/*
	 * You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
	 * returns only a logical address.
	 */
	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
EXPORT_SYMBOL(__vmalloc);

void *vmalloc_user(unsigned long size)
{
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
	if (ret) {
		struct vm_area_struct *vma;

		down_write(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)ret);
		if (vma)
			vma->vm_flags |= VM_USERMAP;
		up_write(&current->mm->mmap_sem);
	}

	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

struct page *vmalloc_to_page(const void *addr)
{
	return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

unsigned long vmalloc_to_pfn(const void *addr)
{
	return page_to_pfn(virt_to_page(addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

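/*
 * vread()/vwrite() operate on directly mapped memory here; the only safety
 * check needed is clamping @count so the copy cannot wrap the end of the
 * address space.
 */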
long vread(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) buf + count < count)
		count = -(unsigned long) buf;

	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return count;
}

/*
 *	vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);

/*
 *	vzalloc - allocate virtually contiguous memory with zero fill
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *	The memory allocated is set to zero.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
}
EXPORT_SYMBOL(vzalloc);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
	return vzalloc(size);
}
EXPORT_SYMBOL(vzalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size:	allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */

void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */
void *vmalloc_32_user(unsigned long size)
{
	/*
	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
	 * but for now this can simply use vmalloc_user() directly.
	 */
	return vmalloc_user(size);
}
EXPORT_SYMBOL(vmalloc_32_user);

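/*
 * Page-array remapping interfaces cannot be implemented without an MMU, so
 * the stubs below trap with BUG() if anything ever calls them.
 */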
void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vmap);

void vunmap(const void *addr)
{
	BUG();
}
EXPORT_SYMBOL(vunmap);

void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vm_map_ram);

void vm_unmap_ram(const void *mem, unsigned int count)
{
	BUG();
}
EXPORT_SYMBOL(vm_unmap_ram);

void vm_unmap_aliases(void)
{
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __weak vmalloc_sync_all(void)
{
}

/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
 *
 *	Returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
 *	are created.  If the kernel address space is not shared
 *	between processes, it syncs the pagetable across all
 *	processes.
 */
struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	BUG();
}
EXPORT_SYMBOL_GPL(free_vm_area);

int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
		   struct page *page)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_page);

/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file.  in this case, the unmapping will need
 *  to invoke file system routines that need the global lock.
 */
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	struct mm_struct *mm = current->mm;

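	/*
	 * On !MMU the brk region is reserved up front and its limit recorded
	 * in mm->context.end_brk, so the break may only move within it.
	 */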
	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	flush_icache_range(mm->brk, brk);
	return mm->brk = brk;
}

/*
 * initialise the VMA and region record slabs
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
	VM_BUG_ON(ret);
	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * validate the region tree
 * - the caller must hold the region lock
 */
#ifdef CONFIG_DEBUG_NOMMU_REGIONS
static noinline void validate_nommu_regions(void)
{
	struct vm_region *region, *last;
	struct rb_node *p, *lastp;

	lastp = rb_first(&nommu_region_tree);
	if (!lastp)
		return;

	last = rb_entry(lastp, struct vm_region, vm_rb);
	BUG_ON(last->vm_end <= last->vm_start);
	BUG_ON(last->vm_top < last->vm_end);

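	/* walk the tree in order, checking that each region is well formed
	 * and starts at or above the previous region's vm_top (no overlaps) */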
	while ((p = rb_next(lastp))) {
		region = rb_entry(p, struct vm_region, vm_rb);
		last = rb_entry(lastp, struct vm_region, vm_rb);

		BUG_ON(region->vm_end <= region->vm_start);
		BUG_ON(region->vm_top < region->vm_end);
		BUG_ON(region->vm_start < last->vm_top);

		lastp = p;
	}
}
#else
static void validate_nommu_regions(void)
{
}
#endif

/*
 * add a region into the global tree
 */
static void add_nommu_region(struct vm_region *region)
{
	struct vm_region *pregion;
	struct rb_node **p, *parent;

	validate_nommu_regions();

	parent = NULL;
	p = &nommu_region_tree.rb_node;
	while (*p) {
		parent = *p;
		pregion = rb_entry(parent, struct vm_region, vm_rb);
		if (region->vm_start < pregion->vm_start)
			p = &(*p)->rb_left;
		else if (region->vm_start > pregion->vm_start)
			p = &(*p)->rb_right;
		else if (pregion == region)
			return;
		else
			BUG();
	}

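	/* link the new region into the slot found above and rebalance */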
	rb_link_node(&region->vm_rb, parent, p);
	rb_insert_color(&region->vm_rb, &nommu_region_tree);

	validate_nommu_regions();
}

/*
 * delete a region from the global tree
 */
static void delete_nommu_region(struct vm_region *region)
{
	BUG_ON(!nommu_region_tree.rb_node);

	validate_nommu_regions();
	rb_erase(&region->vm_rb, &nommu_region_tree);
	validate_nommu_regions();
}

/*
 * free a contiguous series of pages
 */
static void free_page_series(unsigned long from, unsigned long to)
{
	for (; from < to; from += PAGE_SIZE) {
		struct page *page = virt_to_page(from);

		atomic_long_dec(&mmap_pages_allocated);
		put_page(page);
	}
}

/*
 * release a reference to a region
 * - the caller must hold the region semaphore for writing, which this releases
 * - the region may not have been added to the tree yet, in which case vm_top
 *   will equal vm_start
 */
static void __put_nommu_region(struct vm_region *region)
	__releases(nommu_region_sem)
{
	BUG_ON(!nommu_region_tree.rb_node);

	if (--region->vm_usage == 0) {
		if (region->vm_top > region->vm_start)
			delete_nommu_region(region);
		up_write(&nommu_region_sem);

		if (region->vm_file)
			fput(region->vm_file);

		/* IO memory and memory shared directly out of the pagecache
		 * from ramfs/tmpfs mustn't be released here */
		if (region->vm_flags & VM_MAPPED_COPY)
			free_page_series(region->vm_start, region->vm_top);
		kmem_cache_free(vm_region_jar, region);
	} else {
		up_write(&nommu_region_sem);
	}
}

/*
 * release a reference to a region
 */
static void put_nommu_region(struct vm_region *region)
{
	down_write(&nommu_region_sem);
	__put_nommu_region(region);
}

/*
 * update protection on a vma
 */
static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
{
#ifdef CONFIG_MPU
	struct mm_struct *mm = vma->vm_mm;
	long start = vma->vm_start & PAGE_MASK;
	while (start < vma->vm_end) {
		protect_page(mm, start, flags);
		start += PAGE_SIZE;
	}
	update_protections(mm);
#endif
}

/*
 * add a VMA into a process's mm_struct in the appropriate place in the list
 * and tree and add to the address space's page tree also if not an anonymous
 * page
 * - should be called with mm->mmap_sem held writelocked
 */
static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *pvma, *prev;
	struct address_space *mapping;
	struct rb_node **p, *parent, *rb_prev;

	BUG_ON(!vma->vm_region);

	mm->map_count++;
	vma->vm_mm = mm;

	protect_vma(vma, vma->vm_flags);

	/* add the VMA to the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		i_mmap_lock_write(mapping);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		i_mmap_unlock_write(mapping);
	}

	/* add the VMA to the tree */
	parent = rb_prev = NULL;
	p = &mm->mm_rb.rb_node;
	while (*p) {
		parent = *p;
		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

		/* sort by: start addr, end addr, VMA struct addr in that order
		 * (the latter is necessary as we may get identical VMAs) */
		if (vma->vm_start < pvma->vm_start)
			p = &(*p)->rb_left;
		else if (vma->vm_start > pvma->vm_start) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma->vm_end < pvma->vm_end)
			p = &(*p)->rb_left;
		else if (vma->vm_end > pvma->vm_end) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma < pvma)
			p = &(*p)->rb_left;
		else if (vma > pvma) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else
			BUG();
	}

	rb_link_node(&vma->vm_rb, parent, p);
	rb_insert_color(&vma->vm_rb, &mm->mm_rb);

	/* add VMA to the VMA list also */
	prev = NULL;
	if (rb_prev)
		prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);

	__vma_link_list(mm, vma, prev, parent);
}

/*
 * delete a VMA from its owning mm_struct and address space
 */
static void delete_vma_from_mm(struct vm_area_struct *vma)
{
	int i;
	struct address_space *mapping;
	struct mm_struct *mm = vma->vm_mm;
	struct task_struct *curr = current;

	protect_vma(vma, 0);

	mm->map_count--;
	for (i = 0; i < VMACACHE_SIZE; i++) {
		/* if the vma is cached, invalidate the entire cache */
		if (curr->vmacache[i] == vma) {
			vmacache_invalidate(mm);
			break;
		}
	}

	/* remove the VMA from the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		i_mmap_lock_write(mapping);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		i_mmap_unlock_write(mapping);
	}

	/* remove from the MM's tree and list */
	rb_erase(&vma->vm_rb, &mm->mm_rb);

	if (vma->vm_prev)
		vma->vm_prev->vm_next = vma->vm_next;
	else
		mm->mmap = vma->vm_next;

	if (vma->vm_next)
		vma->vm_next->vm_prev = vma->vm_prev;
}

/*
 * destroy a VMA record
 */
static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	put_nommu_region(vma->vm_region);
	kmem_cache_free(vm_area_cachep, vma);
}

/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_sem at least held readlocked
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	/* check the cache first */
	vma = vmacache_find(mm, addr);
	if (likely(vma))
		return vma;

	/* trawl the list (there may be multiple mappings in which addr
	 * resides) */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end > addr) {
			vmacache_update(addr, vma);
			return vma;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(find_vma);

/*
 * find a VMA
 * - we don't extend stack VMAs under NOMMU conditions
 */
struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	return find_vma(mm, addr);
}

/*
 * expand a stack to a given address
 * - not supported under NOMMU conditions
 */
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return -ENOMEM;
}

/*
 * look up the first VMA that exactly matches addr
 * - should be called with mm->mmap_sem at least held readlocked
 */
static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
					     unsigned long addr,
					     unsigned long len)
{
	struct vm_area_struct *vma;
	unsigned long end = addr + len;

	/* check the cache first */
	vma = vmacache_find_exact(mm, addr, end);
	if (vma)
		return vma;

	/* trawl the list (there may be multiple mappings in which addr
	 * resides) */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start < addr)
			continue;
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end == end) {
			vmacache_update(addr, vma);
			return vma;
		}
	}

	return NULL;
}

/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */
static int validate_mmap_request(struct file *file,
				 unsigned long addr,
				 unsigned long len,
				 unsigned long prot,
				 unsigned long flags,
				 unsigned long pgoff,
				 unsigned long *_capabilities)
{
	unsigned long capabilities, rlen;
	int ret;

	/* do the simple checks first */
	if (flags & MAP_FIXED)
		return -EINVAL;

	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
	    (flags & MAP_TYPE) != MAP_SHARED)
		return -EINVAL;

	if (!len)
		return -EINVAL;

	/* Careful about overflows.. */
	rlen = PAGE_ALIGN(len);
	if (!rlen || rlen > TASK_SIZE)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	if (file) {
		/* files must support mmap */
		if (!file->f_op->mmap)
			return -ENODEV;

		/* work out if what we've got could possibly be shared
		 * - we support chardevs that provide their own "memory"
		 * - we support files/blockdevs that are memory backed
		 */
		if (file->f_op->mmap_capabilities) {
			capabilities = file->f_op->mmap_capabilities(file);
		} else {
			/* no explicit capabilities set, so assume some
			 * defaults */
			switch (file_inode(file)->i_mode & S_IFMT) {
			case S_IFREG:
			case S_IFBLK:
				capabilities = NOMMU_MAP_COPY;
				break;

			case S_IFCHR:
				capabilities =
					NOMMU_MAP_DIRECT |
					NOMMU_MAP_READ |
					NOMMU_MAP_WRITE;
				break;

			default:
				return -EINVAL;
			}
		}

		/* eliminate any capabilities that we can't support on this
		 * device */
		if (!file->f_op->get_unmapped_area)
			capabilities &= ~NOMMU_MAP_DIRECT;
		if (!(file->f_mode & FMODE_CAN_READ))
			capabilities &= ~NOMMU_MAP_COPY;

		/* The file shall have been opened with read permission. */
		if (!(file->f_mode & FMODE_READ))
			return -EACCES;

		if (flags & MAP_SHARED) {
			/* do checks for writing, appending and locking */
			if ((prot & PROT_WRITE) &&
			    !(file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (IS_APPEND(file_inode(file)) &&
			    (file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (locks_verify_locked(file))
				return -EAGAIN;

			if (!(capabilities & NOMMU_MAP_DIRECT))
				return -ENODEV;

			/* we mustn't privatise shared mappings */
			capabilities &= ~NOMMU_MAP_COPY;
		} else {
			/* we're going to read the file into private memory we
			 * allocate */
			if (!(capabilities & NOMMU_MAP_COPY))
				return -ENODEV;

			/* we don't permit a private writable mapping to be
			 * shared with the backing device */
			if (prot & PROT_WRITE)
				capabilities &= ~NOMMU_MAP_DIRECT;
		}

		if (capabilities & NOMMU_MAP_DIRECT) {
			if (((prot & PROT_READ)  && !(capabilities & NOMMU_MAP_READ))  ||
			    ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) ||
			    ((prot & PROT_EXEC)  && !(capabilities & NOMMU_MAP_EXEC))
			    ) {
				capabilities &= ~NOMMU_MAP_DIRECT;
				if (flags & MAP_SHARED) {
					pr_warn("MAP_SHARED not completely supported on !MMU\n");
					return -EINVAL;
				}
			}
		}

		/* handle executable mappings and implied executable
		 * mappings */
		if (path_noexec(&file->f_path)) {
			if (prot & PROT_EXEC)
				return -EPERM;
		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
			/* handle implication of PROT_EXEC by PROT_READ */
			if (current->personality & READ_IMPLIES_EXEC) {
				if (capabilities & NOMMU_MAP_EXEC)
					prot |= PROT_EXEC;
			}
		} else if ((prot & PROT_READ) &&
			 (prot & PROT_EXEC) &&
			 !(capabilities & NOMMU_MAP_EXEC)
			 ) {
			/* backing file is not executable, try to copy */
			capabilities &= ~NOMMU_MAP_DIRECT;
		}
	} else {
		/* anonymous mappings are always memory backed and can be
		 * privately mapped
		 */
		capabilities = NOMMU_MAP_COPY;

		/* handle PROT_EXEC implication by PROT_READ */
		if ((prot & PROT_READ) &&
		    (current->personality & READ_IMPLIES_EXEC))
			prot |= PROT_EXEC;
	}

	/* allow the security API to have its say */
	ret = security_mmap_addr(addr);
	if (ret < 0)
		return ret;

	/* looks okay */
	*_capabilities = capabilities;
	return 0;
}

/*
 * we've determined that we can make the mapping, now translate what we
 * now know into VMA flags
 */
static unsigned long determine_vm_flags(struct file *file,
					unsigned long prot,
					unsigned long flags,
					unsigned long capabilities)
{
	unsigned long vm_flags;

	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
	/* vm_flags |= mm->def_flags; */

	if (!(capabilities & NOMMU_MAP_DIRECT)) {
		/* attempt to share read-only copies of mapped file chunks */
		vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (file && !(prot & PROT_WRITE))
| 1071 | vm_flags |= VM_MAYSHARE; |
Bernd Schmidt | 3c7b204 | 2010-05-25 23:43:00 -0700 | [diff] [blame] | 1072 | } else { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1073 | /* overlay a shareable mapping on the backing device or inode |
| 1074 | * if possible - used for chardevs, ramfs/tmpfs/shmfs and |
| 1075 | * romfs/cramfs */ |
Christoph Hellwig | b4caecd | 2015-01-14 10:42:32 +0100 | [diff] [blame] | 1076 | vm_flags |= VM_MAYSHARE | (capabilities & NOMMU_VMFLAGS); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1077 | if (flags & MAP_SHARED) |
Bernd Schmidt | 3c7b204 | 2010-05-25 23:43:00 -0700 | [diff] [blame] | 1078 | vm_flags |= VM_SHARED; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1079 | } |
| 1080 | |
| 1081 | /* refuse to let anyone share private mappings with this process if |
| 1082 | * it's being traced - otherwise breakpoints set in it may interfere |
| 1083 | * with another untraced process |
| 1084 | */ |
Tejun Heo | a288eec | 2011-06-17 16:50:37 +0200 | [diff] [blame] | 1085 | if ((flags & MAP_PRIVATE) && current->ptrace) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1086 | vm_flags &= ~VM_MAYSHARE; |
| 1087 | |
| 1088 | return vm_flags; |
| 1089 | } |
| 1090 | |
| 1091 | /* |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1092 | * set up a shared mapping on a file (the driver or filesystem provides and |
| 1093 | * pins the storage) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1094 | */ |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1095 | static int do_mmap_shared_file(struct vm_area_struct *vma) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1096 | { |
| 1097 | int ret; |
| 1098 | |
| 1099 | ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); |
Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1100 | if (ret == 0) { |
| 1101 | vma->vm_region->vm_top = vma->vm_region->vm_end; |
David Howells | 645d83c | 2009-09-24 15:13:10 +0100 | [diff] [blame] | 1102 | return 0; |
Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1103 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1104 | if (ret != -ENOSYS) |
| 1105 | return ret; |
| 1106 | |
David Howells | 3fa3046 | 2010-03-23 13:35:21 -0700 | [diff] [blame] | 1107 | /* getting -ENOSYS indicates that direct mmap isn't possible (as |
| 1108 | * opposed to tried but failed) so we can only give a suitable error as |
| 1109 | * it's not possible to make a private copy if MAP_SHARED was given */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1110 | return -ENODEV; |
| 1111 | } |
| 1112 | |
| 1113 | /* |
| 1114 | * set up a private mapping or an anonymous shared mapping |
| 1115 | */ |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1116 | static int do_mmap_private(struct vm_area_struct *vma, |
| 1117 | struct vm_region *region, |
David Howells | 645d83c | 2009-09-24 15:13:10 +0100 | [diff] [blame] | 1118 | unsigned long len, |
| 1119 | unsigned long capabilities) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1120 | { |
Joonsoo Kim | dbc8358 | 2014-12-12 16:55:55 -0800 | [diff] [blame] | 1121 | unsigned long total, point; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1122 | void *base; |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1123 | int ret, order; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1124 | |
| 1125 | /* invoke the file's mapping function so that it can keep track of |
| 1126 | * shared mappings on devices or memory |
| 1127 | * - VM_MAYSHARE will be set if it may attempt to share |
| 1128 | */ |
Christoph Hellwig | b4caecd | 2015-01-14 10:42:32 +0100 | [diff] [blame] | 1129 | if (capabilities & NOMMU_MAP_DIRECT) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1130 | ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); |
Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1131 | if (ret == 0) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1132 | /* shouldn't return success if we're not sharing */ |
Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1133 | BUG_ON(!(vma->vm_flags & VM_MAYSHARE)); |
| 1134 | vma->vm_region->vm_top = vma->vm_region->vm_end; |
David Howells | 645d83c | 2009-09-24 15:13:10 +0100 | [diff] [blame] | 1135 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1136 | } |
Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1137 | if (ret != -ENOSYS) |
| 1138 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1139 | |
| 1140 | /* getting an ENOSYS error indicates that direct mmap isn't |
| 1141 | * possible (as opposed to tried but failed) so we'll try to |
| 1142 | * make a private copy of the data and map that instead */ |
| 1143 | } |
| 1144 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1145 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1146 | /* allocate some memory to hold the mapping |
| 1147 | * - note that this may not return a page-aligned address if the object |
| 1148 | * we're allocating is smaller than a page |
| 1149 | */ |
Bob Liu | f67d9b1 | 2011-05-24 17:12:56 -0700 | [diff] [blame] | 1150 | order = get_order(len); |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1151 | total = 1 << order; |
Bob Liu | f67d9b1 | 2011-05-24 17:12:56 -0700 | [diff] [blame] | 1152 | point = len >> PAGE_SHIFT; |
Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1153 | |
Joonsoo Kim | dbc8358 | 2014-12-12 16:55:55 -0800 | [diff] [blame] | 1154 | /* we don't want to allocate a power-of-2 sized page set */ |
Leon Romanovsky | 22cc877 | 2015-06-24 16:57:47 -0700 | [diff] [blame] | 1155 | if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) |
Joonsoo Kim | dbc8358 | 2014-12-12 16:55:55 -0800 | [diff] [blame] | 1156 | total = point; |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1157 | |
Joonsoo Kim | da61653 | 2015-02-27 15:51:43 -0800 | [diff] [blame] | 1158 | base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL); |
Joonsoo Kim | dbc8358 | 2014-12-12 16:55:55 -0800 | [diff] [blame] | 1159 | if (!base) |
| 1160 | goto enomem; |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1161 | |
Joonsoo Kim | dbc8358 | 2014-12-12 16:55:55 -0800 | [diff] [blame] | 1162 | atomic_long_add(total, &mmap_pages_allocated); |
| 1163 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1164 | region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY; |
| 1165 | region->vm_start = (unsigned long) base; |
Bob Liu | f67d9b1 | 2011-05-24 17:12:56 -0700 | [diff] [blame] | 1166 | region->vm_end = region->vm_start + len; |
Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1167 | region->vm_top = region->vm_start + (total << PAGE_SHIFT); |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1168 | |
| 1169 | vma->vm_start = region->vm_start; |
| 1170 | vma->vm_end = region->vm_start + len; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1171 | |
| 1172 | if (vma->vm_file) { |
| 1173 | /* read the contents of a file into the copy */ |
| 1174 | mm_segment_t old_fs; |
| 1175 | loff_t fpos; |
| 1176 | |
| 1177 | fpos = vma->vm_pgoff; |
| 1178 | fpos <<= PAGE_SHIFT; |
| 1179 | |
| 1180 | old_fs = get_fs(); |
| 1181 | set_fs(KERNEL_DS); |
Al Viro | 6e242a1 | 2015-03-31 12:35:13 -0400 | [diff] [blame] | 1182 | ret = __vfs_read(vma->vm_file, base, len, &fpos); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1183 | set_fs(old_fs); |
| 1184 | |
| 1185 | if (ret < 0) |
| 1186 | goto error_free; |
| 1187 | |
| 1188 | /* clear the last little bit */ |
Bob Liu | f67d9b1 | 2011-05-24 17:12:56 -0700 | [diff] [blame] | 1189 | if (ret < len) |
| 1190 | memset(base + ret, 0, len - ret); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1191 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1192 | } |
| 1193 | |
| 1194 | return 0; |
| 1195 | |
| 1196 | error_free: |
Namhyung Kim | 7223bb4 | 2011-05-24 17:11:26 -0700 | [diff] [blame] | 1197 | free_page_series(region->vm_start, region->vm_top); |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1198 | region->vm_start = vma->vm_start = 0; |
| 1199 | region->vm_end = vma->vm_end = 0; |
Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1200 | region->vm_top = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1201 | return ret; |
| 1202 | |
| 1203 | enomem: |
Mitchel Humpherys | b1de0d1 | 2014-06-06 14:38:30 -0700 | [diff] [blame] | 1204 | pr_err("Allocation of length %lu from process %d (%s) failed\n", |
Greg Ungerer | 05ae6fa | 2009-01-13 17:30:22 +1000 | [diff] [blame] | 1205 | len, current->pid, current->comm); |
David Rientjes | 7bf02ea | 2011-05-24 17:11:16 -0700 | [diff] [blame] | 1206 | show_free_areas(0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1207 | return -ENOMEM; |
| 1208 | } |
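A hedged illustration of the allocation rounding above, assuming 4 KiB pages: get_order() rounds the request up to a power-of-two page count, and the trailing excess is trimmed when sysctl_nr_trim_pages allows it. The sketch below is a standalone userspace mock of that arithmetic, not kernel code.

#include <stdio.h>

#define PAGE_SHIFT 12				/* assumed 4 KiB pages */

static unsigned long order_for(unsigned long pages)
{
	unsigned long order = 0;

	while ((1UL << order) < pages)
		order++;
	return order;
}

int main(void)
{
	unsigned long len = 3UL << PAGE_SHIFT;		/* a 3-page request */
	unsigned long point = len >> PAGE_SHIFT;	/* pages actually needed: 3 */
	unsigned long total = 1UL << order_for(point);	/* pages allocated: 4 */

	/* with sysctl_nr_trim_pages >= 1 the kernel would trim total back to point */
	printf("needed=%lu allocated=%lu excess=%lu\n", point, total, total - point);
	return 0;
}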
| 1209 | |
| 1210 | /* |
| 1211 | * handle mapping creation for uClinux |
| 1212 | */ |
Oleg Nesterov | 1fcfd8d | 2015-09-09 15:39:29 -0700 | [diff] [blame] | 1213 | unsigned long do_mmap(struct file *file, |
| 1214 | unsigned long addr, |
| 1215 | unsigned long len, |
| 1216 | unsigned long prot, |
| 1217 | unsigned long flags, |
| 1218 | vm_flags_t vm_flags, |
| 1219 | unsigned long pgoff, |
| 1220 | unsigned long *populate) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1221 | { |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1222 | struct vm_area_struct *vma; |
| 1223 | struct vm_region *region; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1224 | struct rb_node *rb; |
Oleg Nesterov | 1fcfd8d | 2015-09-09 15:39:29 -0700 | [diff] [blame] | 1225 | unsigned long capabilities, result; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1226 | int ret; |
| 1227 | |
Michel Lespinasse | 41badc1 | 2013-02-22 16:32:47 -0800 | [diff] [blame] | 1228 | *populate = 0; |
Michel Lespinasse | bebeb3d | 2013-02-22 16:32:37 -0800 | [diff] [blame] | 1229 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1230 | /* decide whether we should attempt the mapping, and if so what sort of |
| 1231 | * mapping */ |
| 1232 | ret = validate_mmap_request(file, addr, len, prot, flags, pgoff, |
| 1233 | &capabilities); |
Leon Romanovsky | 22cc877 | 2015-06-24 16:57:47 -0700 | [diff] [blame] | 1234 | if (ret < 0) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1235 | return ret; |
| 1236 | |
David Howells | 06aab5a | 2009-09-24 12:33:48 +0100 | [diff] [blame] | 1237 | /* we ignore the address hint */ |
| 1238 | addr = 0; |
Bob Liu | f67d9b1 | 2011-05-24 17:12:56 -0700 | [diff] [blame] | 1239 | len = PAGE_ALIGN(len); |
David Howells | 06aab5a | 2009-09-24 12:33:48 +0100 | [diff] [blame] | 1240 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1241 | /* we've determined that we can make the mapping, now translate what we |
| 1242 | * now know into VMA flags */ |
Oleg Nesterov | 1fcfd8d | 2015-09-09 15:39:29 -0700 | [diff] [blame] | 1243 | vm_flags |= determine_vm_flags(file, prot, flags, capabilities); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1244 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1245 | /* we're going to need to record the mapping */ |
| 1246 | region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL); |
| 1247 | if (!region) |
| 1248 | goto error_getting_region; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1249 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1250 | vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); |
| 1251 | if (!vma) |
| 1252 | goto error_getting_vma; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1253 | |
David Howells | 1e2ae59 | 2010-01-15 17:01:33 -0800 | [diff] [blame] | 1254 | region->vm_usage = 1; |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1255 | region->vm_flags = vm_flags; |
| 1256 | region->vm_pgoff = pgoff; |
| 1257 | |
Rik van Riel | 5beb493 | 2010-03-05 13:42:07 -0800 | [diff] [blame] | 1258 | INIT_LIST_HEAD(&vma->anon_vma_chain); |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1259 | vma->vm_flags = vm_flags; |
| 1260 | vma->vm_pgoff = pgoff; |
| 1261 | |
| 1262 | if (file) { |
Al Viro | cb0942b | 2012-08-27 14:48:26 -0400 | [diff] [blame] | 1263 | region->vm_file = get_file(file); |
| 1264 | vma->vm_file = get_file(file); |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1265 | } |
| 1266 | |
| 1267 | down_write(&nommu_region_sem); |
| 1268 | |
| 1269 | /* if we want to share, we need to check for regions created by other |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1270 | * mmap() calls that overlap with our proposed mapping |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1271 | * - we can only share with a superset match on most regular files |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1272 | * - shared mappings on character devices and memory backed files are |
| 1273 | * permitted to overlap inexactly as far as we are concerned for in |
| 1274 | * these cases, sharing is handled in the driver or filesystem rather |
| 1275 | * than here |
| 1276 | */ |
| 1277 | if (vm_flags & VM_MAYSHARE) { |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1278 | struct vm_region *pregion; |
| 1279 | unsigned long pglen, rpglen, pgend, rpgend, start; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1280 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1281 | pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; |
| 1282 | pgend = pgoff + pglen; |
David Howells | 165b239 | 2007-03-22 00:11:24 -0800 | [diff] [blame] | 1283 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1284 | for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) { |
| 1285 | pregion = rb_entry(rb, struct vm_region, vm_rb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1286 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1287 | if (!(pregion->vm_flags & VM_MAYSHARE)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1288 | continue; |
| 1289 | |
| 1290 | /* search for overlapping mappings on the same file */ |
Al Viro | 496ad9a | 2013-01-23 17:07:38 -0500 | [diff] [blame] | 1291 | if (file_inode(pregion->vm_file) != |
| 1292 | file_inode(file)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1293 | continue; |
| 1294 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1295 | if (pregion->vm_pgoff >= pgend) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1296 | continue; |
| 1297 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1298 | rpglen = pregion->vm_end - pregion->vm_start; |
| 1299 | rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT; |
| 1300 | rpgend = pregion->vm_pgoff + rpglen; |
| 1301 | if (pgoff >= rpgend) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1302 | continue; |
| 1303 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1304 | /* handle inexactly overlapping matches between |
| 1305 | * mappings */ |
| 1306 | if ((pregion->vm_pgoff != pgoff || rpglen != pglen) && |
| 1307 | !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) { |
| 1308 | /* new mapping is not a subset of the region */ |
Christoph Hellwig | b4caecd | 2015-01-14 10:42:32 +0100 | [diff] [blame] | 1309 | if (!(capabilities & NOMMU_MAP_DIRECT)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1310 | goto sharing_violation; |
| 1311 | continue; |
| 1312 | } |
| 1313 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1314 | /* we've found a region we can share */ |
David Howells | 1e2ae59 | 2010-01-15 17:01:33 -0800 | [diff] [blame] | 1315 | pregion->vm_usage++; |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1316 | vma->vm_region = pregion; |
| 1317 | start = pregion->vm_start; |
| 1318 | start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT; |
| 1319 | vma->vm_start = start; |
| 1320 | vma->vm_end = start + len; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1321 | |
Leon Romanovsky | 22cc877 | 2015-06-24 16:57:47 -0700 | [diff] [blame] | 1322 | if (pregion->vm_flags & VM_MAPPED_COPY) |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1323 | vma->vm_flags |= VM_MAPPED_COPY; |
Leon Romanovsky | 22cc877 | 2015-06-24 16:57:47 -0700 | [diff] [blame] | 1324 | else { |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1325 | ret = do_mmap_shared_file(vma); |
| 1326 | if (ret < 0) { |
| 1327 | vma->vm_region = NULL; |
| 1328 | vma->vm_start = 0; |
| 1329 | vma->vm_end = 0; |
David Howells | 1e2ae59 | 2010-01-15 17:01:33 -0800 | [diff] [blame] | 1330 | pregion->vm_usage--; |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1331 | pregion = NULL; |
| 1332 | goto error_just_free; |
| 1333 | } |
| 1334 | } |
| 1335 | fput(region->vm_file); |
| 1336 | kmem_cache_free(vm_region_jar, region); |
| 1337 | region = pregion; |
| 1338 | result = start; |
| 1339 | goto share; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1340 | } |
| 1341 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1342 | /* obtain the address at which to make a shared mapping |
| 1343 | * - this is the hook for quasi-memory character devices to |
| 1344 | * tell us the location of a shared mapping |
| 1345 | */ |
Christoph Hellwig | b4caecd | 2015-01-14 10:42:32 +0100 | [diff] [blame] | 1346 | if (capabilities & NOMMU_MAP_DIRECT) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1347 | addr = file->f_op->get_unmapped_area(file, addr, len, |
| 1348 | pgoff, flags); |
Namhyung Kim | bb005a5 | 2011-05-24 17:11:27 -0700 | [diff] [blame] | 1349 | if (IS_ERR_VALUE(addr)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1350 | ret = addr; |
Namhyung Kim | bb005a5 | 2011-05-24 17:11:27 -0700 | [diff] [blame] | 1351 | if (ret != -ENOSYS) |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1352 | goto error_just_free; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1353 | |
| 1354 | /* the driver refused to tell us where to site |
| 1355 | * the mapping so we'll have to attempt to copy |
| 1356 | * it */ |
Namhyung Kim | bb005a5 | 2011-05-24 17:11:27 -0700 | [diff] [blame] | 1357 | ret = -ENODEV; |
Christoph Hellwig | b4caecd | 2015-01-14 10:42:32 +0100 | [diff] [blame] | 1358 | if (!(capabilities & NOMMU_MAP_COPY)) |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1359 | goto error_just_free; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1360 | |
Christoph Hellwig | b4caecd | 2015-01-14 10:42:32 +0100 | [diff] [blame] | 1361 | capabilities &= ~NOMMU_MAP_DIRECT; |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1362 | } else { |
| 1363 | vma->vm_start = region->vm_start = addr; |
| 1364 | vma->vm_end = region->vm_end = addr + len; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1365 | } |
| 1366 | } |
| 1367 | } |
| 1368 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1369 | vma->vm_region = region; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1370 | |
David Howells | 645d83c | 2009-09-24 15:13:10 +0100 | [diff] [blame] | 1371 | /* set up the mapping |
Christoph Hellwig | b4caecd | 2015-01-14 10:42:32 +0100 | [diff] [blame] | 1372 | * - the region is filled in if NOMMU_MAP_DIRECT is still set |
David Howells | 645d83c | 2009-09-24 15:13:10 +0100 | [diff] [blame] | 1373 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1374 | if (file && vma->vm_flags & VM_SHARED) |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1375 | ret = do_mmap_shared_file(vma); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1376 | else |
David Howells | 645d83c | 2009-09-24 15:13:10 +0100 | [diff] [blame] | 1377 | ret = do_mmap_private(vma, region, len, capabilities); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1378 | if (ret < 0) |
David Howells | 645d83c | 2009-09-24 15:13:10 +0100 | [diff] [blame] | 1379 | goto error_just_free; |
| 1380 | add_nommu_region(region); |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1381 | |
Jie Zhang | ea63763 | 2009-12-14 18:00:02 -0800 | [diff] [blame] | 1382 | /* clear anonymous mappings that don't ask for uninitialized data */ |
| 1383 | if (!vma->vm_file && !(flags & MAP_UNINITIALIZED)) |
| 1384 | memset((void *)region->vm_start, 0, |
| 1385 | region->vm_end - region->vm_start); |
| 1386 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1387 | /* okay... we have a mapping; now we have to register it */ |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1388 | result = vma->vm_start; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1389 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1390 | current->mm->total_vm += len >> PAGE_SHIFT; |
| 1391 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1392 | share: |
| 1393 | add_vma_to_mm(current->mm, vma); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1394 | |
Mike Frysinger | cfe79c0 | 2010-01-06 17:23:23 +0000 | [diff] [blame] | 1395 | /* we flush the region from the icache only when the first executable |
| 1396 | * mapping of it is made */ |
| 1397 | if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) { |
| 1398 | flush_icache_range(region->vm_start, region->vm_end); |
| 1399 | region->vm_icache_flushed = true; |
| 1400 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1401 | |
Mike Frysinger | cfe79c0 | 2010-01-06 17:23:23 +0000 | [diff] [blame] | 1402 | up_write(&nommu_region_sem); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1403 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1404 | return result; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1405 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1406 | error_just_free: |
| 1407 | up_write(&nommu_region_sem); |
| 1408 | error: |
David Howells | 89a8640 | 2009-10-30 13:13:26 +0000 | [diff] [blame] | 1409 | if (region->vm_file) |
| 1410 | fput(region->vm_file); |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1411 | kmem_cache_free(vm_region_jar, region); |
David Howells | 89a8640 | 2009-10-30 13:13:26 +0000 | [diff] [blame] | 1412 | if (vma->vm_file) |
| 1413 | fput(vma->vm_file); |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1414 | kmem_cache_free(vm_area_cachep, vma); |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1415 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1416 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1417 | sharing_violation: |
| 1418 | up_write(&nommu_region_sem); |
Leon Romanovsky | 22cc877 | 2015-06-24 16:57:47 -0700 | [diff] [blame] | 1419 | pr_warn("Attempt to share mismatched mappings\n"); |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1420 | ret = -EINVAL; |
| 1421 | goto error; |
| 1422 | |
| 1423 | error_getting_vma: |
| 1424 | kmem_cache_free(vm_region_jar, region); |
Leon Romanovsky | 22cc877 | 2015-06-24 16:57:47 -0700 | [diff] [blame] | 1425 | pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n", |
| 1426 | len, current->pid); |
David Rientjes | 7bf02ea | 2011-05-24 17:11:16 -0700 | [diff] [blame] | 1427 | show_free_areas(0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1428 | return -ENOMEM; |
| 1429 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1430 | error_getting_region: |
Leon Romanovsky | 22cc877 | 2015-06-24 16:57:47 -0700 | [diff] [blame] | 1431 | pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n", |
| 1432 | len, current->pid); |
David Rientjes | 7bf02ea | 2011-05-24 17:11:16 -0700 | [diff] [blame] | 1433 | show_free_areas(0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1434 | return -ENOMEM; |
| 1435 | } |
Linus Torvalds | 6be5ceb | 2012-04-20 17:13:58 -0700 | [diff] [blame] | 1436 | |
Hugh Dickins | 66f0dc4 | 2009-12-30 20:17:34 +0000 | [diff] [blame] | 1437 | SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, |
| 1438 | unsigned long, prot, unsigned long, flags, |
| 1439 | unsigned long, fd, unsigned long, pgoff) |
| 1440 | { |
| 1441 | struct file *file = NULL; |
| 1442 | unsigned long retval = -EBADF; |
| 1443 | |
Al Viro | 120a795 | 2010-10-30 02:54:44 -0400 | [diff] [blame] | 1444 | audit_mmap_fd(fd, flags); |
Hugh Dickins | 66f0dc4 | 2009-12-30 20:17:34 +0000 | [diff] [blame] | 1445 | if (!(flags & MAP_ANONYMOUS)) { |
| 1446 | file = fget(fd); |
| 1447 | if (!file) |
| 1448 | goto out; |
| 1449 | } |
| 1450 | |
| 1451 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); |
| 1452 | |
Greg Ungerer | ad1ed29 | 2012-06-04 14:29:59 +1000 | [diff] [blame] | 1453 | retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff); |
Hugh Dickins | 66f0dc4 | 2009-12-30 20:17:34 +0000 | [diff] [blame] | 1454 | |
| 1455 | if (file) |
| 1456 | fput(file); |
| 1457 | out: |
| 1458 | return retval; |
| 1459 | } |
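For context, a hedged userspace sketch of how this syscall is typically consumed on a uClinux target: a MAP_PRIVATE file mapping is serviced by copying the file contents into freshly allocated memory, while MAP_SHARED only succeeds when the driver or filesystem can hand out the backing storage directly. The path and size are illustrative only.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/etc/hostname", O_RDONLY);	/* any readable file */
	if (fd < 0)
		return 1;

	/* private mapping: the kernel reads the file into a private copy */
	void *p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED)
		perror("mmap MAP_PRIVATE");
	else
		munmap(p, 4096);

	close(fd);
	return 0;
}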
| 1460 | |
Christoph Hellwig | a467937 | 2010-03-10 15:21:15 -0800 | [diff] [blame] | 1461 | #ifdef __ARCH_WANT_SYS_OLD_MMAP |
| 1462 | struct mmap_arg_struct { |
| 1463 | unsigned long addr; |
| 1464 | unsigned long len; |
| 1465 | unsigned long prot; |
| 1466 | unsigned long flags; |
| 1467 | unsigned long fd; |
| 1468 | unsigned long offset; |
| 1469 | }; |
| 1470 | |
| 1471 | SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) |
| 1472 | { |
| 1473 | struct mmap_arg_struct a; |
| 1474 | |
| 1475 | if (copy_from_user(&a, arg, sizeof(a))) |
| 1476 | return -EFAULT; |
Alexander Kuleshov | 1824cb7 | 2015-11-05 18:46:35 -0800 | [diff] [blame] | 1477 | if (offset_in_page(a.offset)) |
Christoph Hellwig | a467937 | 2010-03-10 15:21:15 -0800 | [diff] [blame] | 1478 | return -EINVAL; |
| 1479 | |
| 1480 | return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, |
| 1481 | a.offset >> PAGE_SHIFT); |
| 1482 | } |
| 1483 | #endif /* __ARCH_WANT_SYS_OLD_MMAP */ |
| 1484 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1485 | /* |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1486 | * split a vma into two pieces at address 'addr'; a new vma is allocated for |
| 1487 | * either the first part or the tail. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1488 | */ |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1489 | int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, |
| 1490 | unsigned long addr, int new_below) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1491 | { |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1492 | struct vm_area_struct *new; |
| 1493 | struct vm_region *region; |
| 1494 | unsigned long npages; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1495 | |
David Howells | 779c102 | 2010-01-15 17:01:34 -0800 | [diff] [blame] | 1496 | /* we're only permitted to split anonymous regions (these should have |
| 1497 | * only a single usage on the region) */ |
| 1498 | if (vma->vm_file) |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1499 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1500 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1501 | if (mm->map_count >= sysctl_max_map_count) |
| 1502 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1503 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1504 | region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL); |
| 1505 | if (!region) |
| 1506 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1507 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1508 | new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); |
| 1509 | if (!new) { |
| 1510 | kmem_cache_free(vm_region_jar, region); |
| 1511 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1512 | } |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1513 | |
| 1514 | /* most fields are the same, copy all, and then fixup */ |
| 1515 | *new = *vma; |
| 1516 | *region = *vma->vm_region; |
| 1517 | new->vm_region = region; |
| 1518 | |
| 1519 | npages = (addr - vma->vm_start) >> PAGE_SHIFT; |
| 1520 | |
| 1521 | if (new_below) { |
Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1522 | region->vm_top = region->vm_end = new->vm_end = addr; |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1523 | } else { |
| 1524 | region->vm_start = new->vm_start = addr; |
| 1525 | region->vm_pgoff = new->vm_pgoff += npages; |
| 1526 | } |
| 1527 | |
| 1528 | if (new->vm_ops && new->vm_ops->open) |
| 1529 | new->vm_ops->open(new); |
| 1530 | |
| 1531 | delete_vma_from_mm(vma); |
| 1532 | down_write(&nommu_region_sem); |
| 1533 | delete_nommu_region(vma->vm_region); |
| 1534 | if (new_below) { |
| 1535 | vma->vm_region->vm_start = vma->vm_start = addr; |
| 1536 | vma->vm_region->vm_pgoff = vma->vm_pgoff += npages; |
| 1537 | } else { |
| 1538 | vma->vm_region->vm_end = vma->vm_end = addr; |
Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1539 | vma->vm_region->vm_top = addr; |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1540 | } |
| 1541 | add_nommu_region(vma->vm_region); |
| 1542 | add_nommu_region(new->vm_region); |
| 1543 | up_write(&nommu_region_sem); |
| 1544 | add_vma_to_mm(mm, vma); |
| 1545 | add_vma_to_mm(mm, new); |
| 1546 | return 0; |
| 1547 | } |
| 1548 | |
| 1549 | /* |
| 1550 | * shrink a VMA by removing the specified chunk from either the beginning or |
| 1551 | * the end |
| 1552 | */ |
| 1553 | static int shrink_vma(struct mm_struct *mm, |
| 1554 | struct vm_area_struct *vma, |
| 1555 | unsigned long from, unsigned long to) |
| 1556 | { |
| 1557 | struct vm_region *region; |
| 1558 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1559 | /* adjust the VMA's pointers, which may reposition it in the MM's tree |
| 1560 | * and list */ |
| 1561 | delete_vma_from_mm(vma); |
| 1562 | if (from > vma->vm_start) |
| 1563 | vma->vm_end = from; |
| 1564 | else |
| 1565 | vma->vm_start = to; |
| 1566 | add_vma_to_mm(mm, vma); |
| 1567 | |
| 1568 | /* cut the backing region down to size */ |
| 1569 | region = vma->vm_region; |
David Howells | 1e2ae59 | 2010-01-15 17:01:33 -0800 | [diff] [blame] | 1570 | BUG_ON(region->vm_usage != 1); |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1571 | |
| 1572 | down_write(&nommu_region_sem); |
| 1573 | delete_nommu_region(region); |
Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1574 | if (from > region->vm_start) { |
| 1575 | to = region->vm_top; |
| 1576 | region->vm_top = region->vm_end = from; |
| 1577 | } else { |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1578 | region->vm_start = to; |
Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1579 | } |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1580 | add_nommu_region(region); |
| 1581 | up_write(&nommu_region_sem); |
| 1582 | |
| 1583 | free_page_series(from, to); |
| 1584 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1585 | } |
| 1586 | |
David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 1587 | /* |
| 1588 | * release a mapping |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1589 | * - under NOMMU conditions the chunk to be unmapped must be backed by a single |
| 1590 | * VMA, though it need not cover the whole VMA |
David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 1591 | */ |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1592 | int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1593 | { |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1594 | struct vm_area_struct *vma; |
Bob Liu | f67d9b1 | 2011-05-24 17:12:56 -0700 | [diff] [blame] | 1595 | unsigned long end; |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1596 | int ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1597 | |
Bob Liu | f67d9b1 | 2011-05-24 17:12:56 -0700 | [diff] [blame] | 1598 | len = PAGE_ALIGN(len); |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1599 | if (len == 0) |
| 1600 | return -EINVAL; |
| 1601 | |
Bob Liu | f67d9b1 | 2011-05-24 17:12:56 -0700 | [diff] [blame] | 1602 | end = start + len; |
| 1603 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1604 | /* find the first potentially overlapping VMA */ |
| 1605 | vma = find_vma(mm, start); |
| 1606 | if (!vma) { |
Choi Gi-yong | ac71490 | 2014-04-07 15:37:36 -0700 | [diff] [blame] | 1607 | static int limit; |
David Howells | 33e5d769 | 2009-04-02 16:56:32 -0700 | [diff] [blame] | 1608 | if (limit < 5) { |
Leon Romanovsky | 22cc877 | 2015-06-24 16:57:47 -0700 | [diff] [blame] | 1609 | pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n", |
| 1610 | current->pid, current->comm, |
| 1611 | start, start + len - 1); |
David Howells | 33e5d769 | 2009-04-02 16:56:32 -0700 | [diff] [blame] | 1612 | limit++; |
| 1613 | } |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1614 | return -EINVAL; |
David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 1615 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1616 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1617 | /* we're allowed to split an anonymous VMA but not a file-backed one */ |
| 1618 | if (vma->vm_file) { |
| 1619 | do { |
Leon Romanovsky | 22cc877 | 2015-06-24 16:57:47 -0700 | [diff] [blame] | 1620 | if (start > vma->vm_start) |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1621 | return -EINVAL; |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1622 | if (end == vma->vm_end) |
| 1623 | goto erase_whole_vma; |
Namhyung Kim | d75a310 | 2011-05-24 17:11:25 -0700 | [diff] [blame] | 1624 | vma = vma->vm_next; |
| 1625 | } while (vma); |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1626 | return -EINVAL; |
| 1627 | } else { |
| 1628 | /* the chunk must be a subset of the VMA found */ |
| 1629 | if (start == vma->vm_start && end == vma->vm_end) |
| 1630 | goto erase_whole_vma; |
Leon Romanovsky | 22cc877 | 2015-06-24 16:57:47 -0700 | [diff] [blame] | 1631 | if (start < vma->vm_start || end > vma->vm_end) |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1632 | return -EINVAL; |
Alexander Kuleshov | 1824cb7 | 2015-11-05 18:46:35 -0800 | [diff] [blame] | 1633 | if (offset_in_page(start)) |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1634 | return -EINVAL; |
Alexander Kuleshov | 1824cb7 | 2015-11-05 18:46:35 -0800 | [diff] [blame] | 1635 | if (end != vma->vm_end && offset_in_page(end)) |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1636 | return -EINVAL; |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1637 | if (start != vma->vm_start && end != vma->vm_end) { |
| 1638 | ret = split_vma(mm, vma, start, 1); |
Leon Romanovsky | 22cc877 | 2015-06-24 16:57:47 -0700 | [diff] [blame] | 1639 | if (ret < 0) |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1640 | return ret; |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1641 | } |
| 1642 | return shrink_vma(mm, vma, start, end); |
| 1643 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1644 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1645 | erase_whole_vma: |
| 1646 | delete_vma_from_mm(vma); |
| 1647 | delete_vma(mm, vma); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1648 | return 0; |
| 1649 | } |
Paul Mundt | b507317 | 2007-07-21 04:37:25 -0700 | [diff] [blame] | 1650 | EXPORT_SYMBOL(do_munmap); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1651 | |
Al Viro | bfce281 | 2012-04-20 21:57:04 -0400 | [diff] [blame] | 1652 | int vm_munmap(unsigned long addr, size_t len) |
David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 1653 | { |
Al Viro | bfce281 | 2012-04-20 21:57:04 -0400 | [diff] [blame] | 1654 | struct mm_struct *mm = current->mm; |
David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 1655 | int ret; |
David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 1656 | |
| 1657 | down_write(&mm->mmap_sem); |
| 1658 | ret = do_munmap(mm, addr, len); |
| 1659 | up_write(&mm->mmap_sem); |
| 1660 | return ret; |
| 1661 | } |
Linus Torvalds | a46ef99 | 2012-04-20 16:20:01 -0700 | [diff] [blame] | 1662 | EXPORT_SYMBOL(vm_munmap); |
| 1663 | |
| 1664 | SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) |
| 1665 | { |
Al Viro | bfce281 | 2012-04-20 21:57:04 -0400 | [diff] [blame] | 1666 | return vm_munmap(addr, len); |
Linus Torvalds | a46ef99 | 2012-04-20 16:20:01 -0700 | [diff] [blame] | 1667 | } |
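A hedged userspace sketch of the unmap rules enforced above: an anonymous mapping may be trimmed or split, but the requested range must stay page-aligned and inside a single VMA, while file-backed mappings can only be unmapped whole. Illustration only, assuming 4 KiB pages.

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	char *p = mmap(NULL, 2 * 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* trimming the tail of an anonymous mapping is permitted ... */
	if (munmap(p + 4096, 4096) != 0)
		perror("munmap tail");

	/* ... and the remainder is released separately */
	munmap(p, 4096);
	return 0;
}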
David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 1668 | |
| 1669 | /* |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1670 | * release all the mappings made in a process's VM space |
David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 1671 | */ |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1672 | void exit_mmap(struct mm_struct *mm) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1673 | { |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1674 | struct vm_area_struct *vma; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1675 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1676 | if (!mm) |
| 1677 | return; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1678 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1679 | mm->total_vm = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1680 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1681 | while ((vma = mm->mmap)) { |
| 1682 | mm->mmap = vma->vm_next; |
| 1683 | delete_vma_from_mm(vma); |
| 1684 | delete_vma(mm, vma); |
Steven J. Magnani | 04c3496 | 2010-11-24 12:56:54 -0800 | [diff] [blame] | 1685 | cond_resched(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1686 | } |
| 1687 | } |
| 1688 | |
Linus Torvalds | e4eb1ff | 2012-04-20 15:35:40 -0700 | [diff] [blame] | 1689 | unsigned long vm_brk(unsigned long addr, unsigned long len) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1690 | { |
| 1691 | return -ENOMEM; |
| 1692 | } |
| 1693 | |
| 1694 | /* |
David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1695 | * expand (or shrink) an existing mapping, potentially moving it at the same |
| 1696 | * time (controlled by the MREMAP_MAYMOVE flag and available VM space) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1697 | * |
David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1698 | * under NOMMU conditions, we only permit changing a mapping's size, and only |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1699 | * as long as it stays within the region allocated by do_mmap_private() and the |
| 1700 | * block is not shareable |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1701 | * |
David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1702 | * MREMAP_FIXED is not supported under NOMMU conditions |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1703 | */ |
Al Viro | 4b377ba | 2013-03-04 10:47:59 -0500 | [diff] [blame] | 1704 | static unsigned long do_mremap(unsigned long addr, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1705 | unsigned long old_len, unsigned long new_len, |
| 1706 | unsigned long flags, unsigned long new_addr) |
| 1707 | { |
David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1708 | struct vm_area_struct *vma; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1709 | |
| 1710 | /* insanity checks first */ |
Bob Liu | f67d9b1 | 2011-05-24 17:12:56 -0700 | [diff] [blame] | 1711 | old_len = PAGE_ALIGN(old_len); |
| 1712 | new_len = PAGE_ALIGN(new_len); |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1713 | if (old_len == 0 || new_len == 0) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1714 | return (unsigned long) -EINVAL; |
| 1715 | |
Alexander Kuleshov | 1824cb7 | 2015-11-05 18:46:35 -0800 | [diff] [blame] | 1716 | if (offset_in_page(addr)) |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1717 | return -EINVAL; |
| 1718 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1719 | if (flags & MREMAP_FIXED && new_addr != addr) |
| 1720 | return (unsigned long) -EINVAL; |
| 1721 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1722 | vma = find_vma_exact(current->mm, addr, old_len); |
David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1723 | if (!vma) |
| 1724 | return (unsigned long) -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1725 | |
David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1726 | if (vma->vm_end != vma->vm_start + old_len) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1727 | return (unsigned long) -EFAULT; |
| 1728 | |
David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1729 | if (vma->vm_flags & VM_MAYSHARE) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1730 | return (unsigned long) -EPERM; |
| 1731 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1732 | if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1733 | return (unsigned long) -ENOMEM; |
| 1734 | |
| 1735 | /* all checks complete - do it */ |
David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1736 | vma->vm_end = vma->vm_start + new_len; |
David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1737 | return vma->vm_start; |
| 1738 | } |
| 1739 | |
Heiko Carstens | 6a6160a | 2009-01-14 14:14:15 +0100 | [diff] [blame] | 1740 | SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, |
| 1741 | unsigned long, new_len, unsigned long, flags, |
| 1742 | unsigned long, new_addr) |
David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1743 | { |
| 1744 | unsigned long ret; |
| 1745 | |
| 1746 | down_write(¤t->mm->mmap_sem); |
| 1747 | ret = do_mremap(addr, old_len, new_len, flags, new_addr); |
| 1748 | up_write(¤t->mm->mmap_sem); |
| 1749 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1750 | } |
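A hedged userspace sketch of the mremap() constraints above: without an MMU a mapping can only be resized in place (within the region do_mmap_private() originally allocated), it is never moved, and shareable mappings are refused. Illustration only.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	void *p = mmap(NULL, 2 * 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* shrinking in place is allowed; growing works only while the
	 * backing region still has room, and MREMAP_FIXED is rejected */
	void *q = mremap(p, 2 * 4096, 4096, 0);
	if (q == MAP_FAILED) {
		perror("mremap");
		munmap(p, 2 * 4096);
	} else {
		munmap(q, 4096);
	}
	return 0;
}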
| 1751 | |
Michel Lespinasse | 240aade | 2013-02-22 16:35:56 -0800 | [diff] [blame] | 1752 | struct page *follow_page_mask(struct vm_area_struct *vma, |
| 1753 | unsigned long address, unsigned int flags, |
| 1754 | unsigned int *page_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1755 | { |
Michel Lespinasse | 240aade | 2013-02-22 16:35:56 -0800 | [diff] [blame] | 1756 | *page_mask = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1757 | return NULL; |
| 1758 | } |
| 1759 | |
Bob Liu | 8f3b132 | 2011-07-08 15:39:46 -0700 | [diff] [blame] | 1760 | int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, |
| 1761 | unsigned long pfn, unsigned long size, pgprot_t prot) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1762 | { |
Bob Liu | 8f3b132 | 2011-07-08 15:39:46 -0700 | [diff] [blame] | 1763 | if (addr != (pfn << PAGE_SHIFT)) |
| 1764 | return -EINVAL; |
| 1765 | |
Konstantin Khlebnikov | 314e51b | 2012-10-08 16:29:02 -0700 | [diff] [blame] | 1766 | vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; |
Greg Ungerer | 66aa2b4 | 2005-09-12 11:18:10 +1000 | [diff] [blame] | 1767 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1768 | } |
Luke Yang | 22c4af4 | 2006-07-14 00:24:09 -0700 | [diff] [blame] | 1769 | EXPORT_SYMBOL(remap_pfn_range); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1770 | |
Linus Torvalds | 3c0b9de | 2013-04-27 13:25:38 -0700 | [diff] [blame] | 1771 | int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) |
| 1772 | { |
| 1773 | unsigned long pfn = start >> PAGE_SHIFT; |
| 1774 | unsigned long vm_len = vma->vm_end - vma->vm_start; |
| 1775 | |
| 1776 | pfn += vma->vm_pgoff; |
| 1777 | return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); |
| 1778 | } |
| 1779 | EXPORT_SYMBOL(vm_iomap_memory); |
| 1780 | |
Paul Mundt | f905bc4 | 2008-02-04 22:29:59 -0800 | [diff] [blame] | 1781 | int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, |
| 1782 | unsigned long pgoff) |
| 1783 | { |
| 1784 | unsigned int size = vma->vm_end - vma->vm_start; |
| 1785 | |
| 1786 | if (!(vma->vm_flags & VM_USERMAP)) |
| 1787 | return -EINVAL; |
| 1788 | |
| 1789 | vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT)); |
| 1790 | vma->vm_end = vma->vm_start + size; |
| 1791 | |
| 1792 | return 0; |
| 1793 | } |
| 1794 | EXPORT_SYMBOL(remap_vmalloc_range); |
| 1795 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1796 | unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr, |
| 1797 | unsigned long len, unsigned long pgoff, unsigned long flags) |
| 1798 | { |
| 1799 | return -ENOMEM; |
| 1800 | } |
| 1801 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1802 | void unmap_mapping_range(struct address_space *mapping, |
| 1803 | loff_t const holebegin, loff_t const holelen, |
| 1804 | int even_cows) |
| 1805 | { |
| 1806 | } |
Luke Yang | 22c4af4 | 2006-07-14 00:24:09 -0700 | [diff] [blame] | 1807 | EXPORT_SYMBOL(unmap_mapping_range); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1808 | |
Nick Piggin | d0217ac | 2007-07-19 01:47:03 -0700 | [diff] [blame] | 1809 | int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
David Howells | b0e1519 | 2006-01-06 00:11:42 -0800 | [diff] [blame] | 1810 | { |
| 1811 | BUG(); |
Nick Piggin | d0217ac | 2007-07-19 01:47:03 -0700 | [diff] [blame] | 1812 | return 0; |
David Howells | b0e1519 | 2006-01-06 00:11:42 -0800 | [diff] [blame] | 1813 | } |
Paul Mundt | b507317 | 2007-07-21 04:37:25 -0700 | [diff] [blame] | 1814 | EXPORT_SYMBOL(filemap_fault); |
David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 1815 | |
Kirill A. Shutemov | f182036 | 2014-04-07 15:37:19 -0700 | [diff] [blame] | 1816 | void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf) |
| 1817 | { |
| 1818 | BUG(); |
| 1819 | } |
| 1820 | EXPORT_SYMBOL(filemap_map_pages); |
| 1821 | |
Mike Frysinger | f55f199 | 2011-03-29 14:05:12 +0100 | [diff] [blame] | 1822 | static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, |
| 1823 | unsigned long addr, void *buf, int len, int write) |
David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 1824 | { |
David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 1825 | struct vm_area_struct *vma; |
David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 1826 | |
| 1827 | down_read(&mm->mmap_sem); |
| 1828 | |
| 1829 | /* the access must start within one of the target process's mappings */ |
David Howells | 0159b14 | 2006-09-27 01:50:16 -0700 | [diff] [blame] | 1830 | vma = find_vma(mm, addr); |
| 1831 | if (vma) { |
David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 1832 | /* don't overrun this mapping */ |
| 1833 | if (addr + len >= vma->vm_end) |
| 1834 | len = vma->vm_end - addr; |
| 1835 | |
| 1836 | /* only read or write mappings where it is permitted */ |
David Howells | d00c7b99 | 2006-09-27 01:50:19 -0700 | [diff] [blame] | 1837 | if (write && vma->vm_flags & VM_MAYWRITE) |
Jie Zhang | 7959722 | 2010-01-06 17:23:28 +0000 | [diff] [blame] | 1838 | copy_to_user_page(vma, NULL, addr, |
| 1839 | (void *) addr, buf, len); |
David Howells | d00c7b99 | 2006-09-27 01:50:19 -0700 | [diff] [blame] | 1840 | else if (!write && vma->vm_flags & VM_MAYREAD) |
Jie Zhang | 7959722 | 2010-01-06 17:23:28 +0000 | [diff] [blame] | 1841 | copy_from_user_page(vma, NULL, addr, |
| 1842 | buf, (void *) addr, len); |
David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 1843 | else |
| 1844 | len = 0; |
| 1845 | } else { |
| 1846 | len = 0; |
| 1847 | } |
| 1848 | |
| 1849 | up_read(&mm->mmap_sem); |
Mike Frysinger | f55f199 | 2011-03-29 14:05:12 +0100 | [diff] [blame] | 1850 | |
| 1851 | return len; |
| 1852 | } |
| 1853 | |
| 1854 | /** |
| 1855 | * access_remote_vm - access another process' address space |
| 1856 | * @mm: the mm_struct of the target address space |
| 1857 | * @addr: start address to access |
| 1858 | * @buf: source or destination buffer |
| 1859 | * @len: number of bytes to transfer |
| 1860 | * @write: whether the access is a write |
| 1861 | * |
| 1862 | * The caller must hold a reference on @mm. |
| 1863 | */ |
| 1864 | int access_remote_vm(struct mm_struct *mm, unsigned long addr, |
| 1865 | void *buf, int len, int write) |
| 1866 | { |
| 1867 | return __access_remote_vm(NULL, mm, addr, buf, len, write); |
| 1868 | } |
| 1869 | |
| 1870 | /* |
| 1871 | * Access another process' address space. |
| 1872 | * - source/target buffer must be kernel space |
| 1873 | */ |
| 1874 | int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write) |
| 1875 | { |
| 1876 | struct mm_struct *mm; |
| 1877 | |
| 1878 | if (addr + len < addr) |
| 1879 | return 0; |
| 1880 | |
| 1881 | mm = get_task_mm(tsk); |
| 1882 | if (!mm) |
| 1883 | return 0; |
| 1884 | |
| 1885 | len = __access_remote_vm(tsk, mm, addr, buf, len, write); |
| 1886 | |
David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 1887 | mmput(mm); |
| 1888 | return len; |
| 1889 | } |
David Howells | 7e66087 | 2010-01-15 17:01:39 -0800 | [diff] [blame] | 1890 | |
| 1891 | /** |
| 1892 | * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode |
| 1893 | * @inode: The inode to check |
| 1894 | * @size: The current filesize of the inode |
| 1895 | * @newsize: The proposed filesize of the inode |
| 1896 | * |
| 1897 | * Check the shared mappings on an inode on behalf of a shrinking truncate to |
| 1898 | * make sure that any outstanding VMAs aren't broken and then shrink the |
| 1899 | * vm_regions that extend beyond it so that do_mmap_pgoff() doesn't |
| 1900 | * automatically grant mappings that are too large. |
| 1901 | */ |
| 1902 | int nommu_shrink_inode_mappings(struct inode *inode, size_t size, |
| 1903 | size_t newsize) |
| 1904 | { |
| 1905 | struct vm_area_struct *vma; |
David Howells | 7e66087 | 2010-01-15 17:01:39 -0800 | [diff] [blame] | 1906 | struct vm_region *region; |
| 1907 | pgoff_t low, high; |
| 1908 | size_t r_size, r_top; |
| 1909 | |
| 1910 | low = newsize >> PAGE_SHIFT; |
| 1911 | high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; |
| 1912 | |
| 1913 | down_write(&nommu_region_sem); |
Davidlohr Bueso | 1acf2e0 | 2014-12-12 16:54:39 -0800 | [diff] [blame] | 1914 | i_mmap_lock_read(inode->i_mapping); |
David Howells | 7e66087 | 2010-01-15 17:01:39 -0800 | [diff] [blame] | 1915 | |
| 1916 | /* search for VMAs that fall within the dead zone */ |
Michel Lespinasse | 6b2dbba | 2012-10-08 16:31:25 -0700 | [diff] [blame] | 1917 | vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) { |
David Howells | 7e66087 | 2010-01-15 17:01:39 -0800 | [diff] [blame] | 1918 | /* found one - only interested if it's shared out of the page |
| 1919 | * cache */ |
| 1920 | if (vma->vm_flags & VM_SHARED) { |
Davidlohr Bueso | 1acf2e0 | 2014-12-12 16:54:39 -0800 | [diff] [blame] | 1921 | i_mmap_unlock_read(inode->i_mapping); |
David Howells | 7e66087 | 2010-01-15 17:01:39 -0800 | [diff] [blame] | 1922 | up_write(&nommu_region_sem); |
| 1923 | return -ETXTBSY; /* not quite true, but near enough */ |
| 1924 | } |
| 1925 | } |
| 1926 | |
| 1927 | /* reduce any regions that overlap the dead zone - if they exist, |
| 1928 | * these will be pointed to by VMAs that don't overlap the dead zone |
| 1929 | * |
| 1930 | * we don't check for any regions that start beyond the EOF as there |
| 1931 | * shouldn't be any |
| 1932 | */ |
Davidlohr Bueso | 1acf2e0 | 2014-12-12 16:54:39 -0800 | [diff] [blame] | 1933 | vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) { |
David Howells | 7e66087 | 2010-01-15 17:01:39 -0800 | [diff] [blame] | 1934 | if (!(vma->vm_flags & VM_SHARED)) |
| 1935 | continue; |
| 1936 | |
| 1937 | region = vma->vm_region; |
| 1938 | r_size = region->vm_top - region->vm_start; |
| 1939 | r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size; |
| 1940 | |
| 1941 | if (r_top > newsize) { |
| 1942 | region->vm_top -= r_top - newsize; |
| 1943 | if (region->vm_end > region->vm_top) |
| 1944 | region->vm_end = region->vm_top; |
| 1945 | } |
| 1946 | } |
| 1947 | |
Davidlohr Bueso | 1acf2e0 | 2014-12-12 16:54:39 -0800 | [diff] [blame] | 1948 | i_mmap_unlock_read(inode->i_mapping); |
David Howells | 7e66087 | 2010-01-15 17:01:39 -0800 | [diff] [blame] | 1949 | up_write(&nommu_region_sem); |
| 1950 | return 0; |
| 1951 | } |
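/*
 * Worked example of the arithmetic above, purely for illustration: shrinking
 * a 64KB file to 16KB with 4KB pages gives low = 4 and high = 16, so any
 * shared VMA overlapping page offsets 4 and up fails the truncate with
 * -ETXTBSY.  A surviving shared VMA that maps only the first 16KB, but whose
 * backing vm_region covers 32KB from pgoff 0, has r_top = 32KB > newsize;
 * its vm_top is pulled back by 16KB (and vm_end clipped to vm_top if needed)
 * so do_mmap_pgoff() can no longer hand out the truncated tail.
 */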
Andrew Shewmaker | c9b1d09 | 2013-04-29 15:08:10 -0700 | [diff] [blame] | 1952 | |
| 1953 | /* |
| 1954 | * Initialise sysctl_user_reserve_kbytes. |
| 1955 | * |
| 1956 | * This is intended to prevent a user from starting a single memory hogging |
| 1957 | * process that leaves them unable to recover (kill the hog) in |
| 1958 | * OVERCOMMIT_NEVER mode. |
| 1959 | * |
| 1960 | * The default value is min(3% of free memory, 128MB). |
| 1961 | * 128MB is enough to recover with sshd/login, bash, and top/kill. |
| 1962 | */ |
| 1963 | static int __meminit init_user_reserve(void) |
| 1964 | { |
| 1965 | unsigned long free_kbytes; |
| 1966 | |
| 1967 | free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); |
| 1968 | |
| 1969 | sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17); |
| 1970 | return 0; |
| 1971 | } |
Paul Gortmaker | a4bc6fc | 2015-05-01 20:08:20 -0400 | [diff] [blame] | 1972 | subsys_initcall(init_user_reserve); |
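/*
 * Worked example of the sizing above, for illustration: free_kbytes / 32 is
 * roughly 3% of free memory and 1UL << 17 is 131072KB = 128MB.  With 1GB
 * free, free_kbytes = 1048576 and the reserve comes out at 32768KB (32MB);
 * with 8GB free, 3% would be 256MB, so the reserve is capped at 128MB.
 */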
Andrew Shewmaker | 4eeab4f | 2013-04-29 15:08:11 -0700 | [diff] [blame] | 1973 | |
| 1974 | /* |
| 1975 | * Initialise sysctl_admin_reserve_kbytes. |
| 1976 | * |
| 1977 | * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin |
| 1978 | * to log in and kill a memory hogging process. |
| 1979 | * |
| 1980 | * Systems with more than 256MB will reserve 8MB, enough to recover |
| 1981 | * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will |
| 1982 | * only reserve 3% of free pages by default. |
| 1983 | */ |
| 1984 | static int __meminit init_admin_reserve(void) |
| 1985 | { |
| 1986 | unsigned long free_kbytes; |
| 1987 | |
| 1988 | free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); |
| 1989 | |
| 1990 | sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13); |
| 1991 | return 0; |
| 1992 | } |
Paul Gortmaker | a4bc6fc | 2015-05-01 20:08:20 -0400 | [diff] [blame] | 1993 | subsys_initcall(init_admin_reserve); |
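/*
 * Worked example of the sizing above, for illustration: 1UL << 13 is
 * 8192KB = 8MB.  A system with 1GB free would otherwise reserve 32MB (3%),
 * so the cap keeps it at 8MB; a system with only 128MB free reserves
 * 131072 / 32 = 4096KB (4MB), i.e. 3% of free pages.
 */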