/*
 * linux/mm/nommu.c
 *
 * Replacement code for mm functions to support CPUs that don't
 * have any form of memory management unit (thus no virtual memory).
 *
 * See Documentation/nommu-mmap.txt
 *
 * Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
 * Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 * Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 * Copyright (c) 2007-2009 Paul Mundt <lethal@linux-sh.org>
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/tracehook.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include "internal.h"

static inline __attribute__((format(printf, 1, 2)))
void no_printk(const char *fmt, ...)
{
}

#if 0
#define kenter(FMT, ...) \
	printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) \
	printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) \
	printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
#else
#define kenter(FMT, ...) \
	no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) \
	no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) \
	no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
#endif
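/*
 * With the debugging variants enabled (#if 1 above), a call such as
 * kenter("%p", vma) from delete_vma() prints "==> delete_vma(0x...)" at
 * KERN_DEBUG level; the no_printk() stubs compile away to nothing while
 * still typechecking the format arguments via the printf attribute.
 */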

void *high_memory;
struct page *mem_map;
unsigned long max_mapnr;
unsigned long num_physpages;
atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
int sysctl_nr_trim_pages = 1; /* page trimming behaviour */
int heap_stack_gap = 0;

atomic_long_t mmap_pages_allocated;

EXPORT_SYMBOL(mem_map);
EXPORT_SYMBOL(num_physpages);

/* list of mapped, potentially shareable regions */
static struct kmem_cache *vm_region_jar;
struct rb_root nommu_region_tree = RB_ROOT;
DECLARE_RWSEM(nommu_region_sem);

struct vm_operations_struct generic_file_vm_ops = {
};

/*
 * Handle all mappings that got truncated by a "truncate()"
 * system call.
 *
 * NOTE! We have to be ready to update the memory sharing
 * between the file and the memory map for a potential last
 * incomplete page.  Ugly, but necessary.
 */
int vmtruncate(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned long limit;

	if (inode->i_size < offset)
		goto do_expand;
	i_size_write(inode, offset);

	truncate_inode_pages(mapping, offset);
	goto out_truncate;

do_expand:
	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY && offset > limit)
		goto out_sig;
	if (offset > inode->i_sb->s_maxbytes)
		goto out;
	i_size_write(inode, offset);

out_truncate:
	if (inode->i_op->truncate)
		inode->i_op->truncate(inode);
	return 0;
out_sig:
	send_sig(SIGXFSZ, current, 0);
out:
	return -EFBIG;
}

EXPORT_SYMBOL(vmtruncate);
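/*
 * For example, vmtruncate(inode, 0) shrinks a file to nothing: i_size is
 * set to 0, all pagecache pages are dropped, and the filesystem's
 * ->truncate() op (if any) releases the backing store.  Expanding past
 * RLIMIT_FSIZE instead raises SIGXFSZ and returns -EFBIG.
 */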

/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	/*
	 * If the object we have should not have ksize performed on it,
	 * return size of 0
	 */
	if (!objp || !virt_addr_valid(objp))
		return 0;

	page = virt_to_head_page(objp);

	/*
	 * If the allocator sets PageSlab, we know the pointer came from
	 * kmalloc().
	 */
	if (PageSlab(page))
		return ksize(objp);

	/*
	 * If it's not a compound page, see if we have a matching VMA
	 * region. This test is intentionally done in reverse order,
	 * so if there's no VMA, we still fall through and hand back
	 * PAGE_SIZE for 0-order pages.
	 */
	if (!PageCompound(page)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, (unsigned long)objp);
		if (vma)
			return vma->vm_end - vma->vm_start;
	}

	/*
	 * The ksize() function is only guaranteed to work for pointers
	 * returned by kmalloc(). So handle arbitrary pointers here.
	 */
	return PAGE_SIZE << compound_order(page);
}
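/*
 * Illustrative cases: for p = kmalloc(100, GFP_KERNEL), kobjsize(p)
 * returns ksize(p), the slab object size rather than 100; for an address
 * inside a VMA (and not part of a compound page) it reports the whole
 * VMA's extent, vm_end - vm_start; for a bare order-n page it falls
 * through to PAGE_SIZE << n.
 */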

int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long start, int len, int flags,
		     struct page **pages, struct vm_area_struct **vmas)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int i;
	int write = !!(flags & GUP_FLAGS_WRITE);
	int force = !!(flags & GUP_FLAGS_FORCE);
	int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);

	/* calculate required read or write permissions.
	 * - if 'force' is set, we only require the "MAY" flags.
	 */
	vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < len; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		/* protect what we can, including chardevs */
		if (vma->vm_flags & (VM_IO | VM_PFNMAP) ||
		    (!ignore && !(vm_flags & vma->vm_flags)))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				page_cache_get(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start += PAGE_SIZE;
	}

	return i;

finish_or_fault:
	return i ? : -EFAULT;
}


/*
 * get a list of pages in an address range belonging to the specified process
 * and indicate the VMA that covers each page
 * - this is potentially dodgy as we may end up incrementing the page count
 *   of a slab page or a secondary page from a compound page
 * - don't permit access to VMAs that don't support it, such as I/O mappings
 */
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		   unsigned long start, int len, int write, int force,
		   struct page **pages, struct vm_area_struct **vmas)
{
	int flags = 0;

	if (write)
		flags |= GUP_FLAGS_WRITE;
	if (force)
		flags |= GUP_FLAGS_FORCE;

	return __get_user_pages(tsk, mm,
				start, len, flags,
				pages, vmas);
}
EXPORT_SYMBOL(get_user_pages);
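/*
 * Illustrative sketch (not a caller in this file): pinning one page of the
 * current task for writing, assuming 'addr' lies inside a valid VMA:
 *
 *	struct page *page;
 *	int ret;
 *
 *	down_read(&current->mm->mmap_sem);
 *	ret = get_user_pages(current, current->mm, addr, 1, 1, 0,
 *			     &page, NULL);
 *	up_read(&current->mm->mmap_sem);
 *
 * On nommu this amounts to virt_to_page() plus a reference count bump,
 * since userspace and the kernel share a single flat address space.
 */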

DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

void vfree(const void *addr)
{
	kfree(addr);
}
EXPORT_SYMBOL(vfree);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	/*
	 * You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
	 * returns only a logical address.
	 */
	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
EXPORT_SYMBOL(__vmalloc);

void *vmalloc_user(unsigned long size)
{
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
	if (ret) {
		struct vm_area_struct *vma;

		down_write(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)ret);
		if (vma)
			vma->vm_flags |= VM_USERMAP;
		up_write(&current->mm->mmap_sem);
	}

	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

struct page *vmalloc_to_page(const void *addr)
{
	return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

unsigned long vmalloc_to_pfn(const void *addr)
{
	return page_to_pfn(virt_to_page(addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

long vread(char *buf, char *addr, unsigned long count)
{
	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return count;
}

/**
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
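/*
 * Usage sketch: because the nommu vmalloc() above is kmalloc()-backed,
 * the buffer it returns is physically as well as virtually contiguous:
 *
 *	void *buf = vmalloc(4 * PAGE_SIZE);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);	(equivalent to kfree() here)
 */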

void *vmalloc_node(unsigned long size, int node)
{
	return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */
void *vmalloc_32_user(unsigned long size)
{
	/*
	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
	 * but for now this can simply use vmalloc_user() directly.
	 */
	return vmalloc_user(size);
}
EXPORT_SYMBOL(vmalloc_32_user);

void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vmap);

void vunmap(const void *addr)
{
	BUG();
}
EXPORT_SYMBOL(vunmap);

void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vm_map_ram);

void vm_unmap_ram(const void *mem, unsigned int count)
{
	BUG();
}
EXPORT_SYMBOL(vm_unmap_ram);

void vm_unmap_aliases(void)
{
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}

int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
		   struct page *page)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_page);

/*
 * sys_brk() for the most part doesn't need the global kernel
 * lock, except when an application is doing something nasty
 * like trying to un-brk an area that has already been mapped
 * to a regular file. in this case, the unmapping will need
 * to invoke file system routines that need the global lock.
 */
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	return mm->brk = brk;
}
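/*
 * Worked example: the brk window [mm->start_brk, mm->context.end_brk] is
 * fixed before this is ever called, so brk(mm->brk + PAGE_SIZE) succeeds
 * only while the new break stays inside that window; a request outside it
 * just returns the current mm->brk unchanged.  Shrinking always succeeds,
 * and growing merely moves the break marker, no memory is allocated here.
 */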

/*
 * initialise the VMA and region record slabs
 */
void __init mmap_init(void)
{
	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC);
}

/*
 * validate the region tree
 * - the caller must hold the region lock
 */
#ifdef CONFIG_DEBUG_NOMMU_REGIONS
static noinline void validate_nommu_regions(void)
{
	struct vm_region *region, *last;
	struct rb_node *p, *lastp;

	lastp = rb_first(&nommu_region_tree);
	if (!lastp)
		return;

	last = rb_entry(lastp, struct vm_region, vm_rb);
	BUG_ON(unlikely(last->vm_end <= last->vm_start));
	BUG_ON(unlikely(last->vm_top < last->vm_end));

	while ((p = rb_next(lastp))) {
		region = rb_entry(p, struct vm_region, vm_rb);
		last = rb_entry(lastp, struct vm_region, vm_rb);

		BUG_ON(unlikely(region->vm_end <= region->vm_start));
		BUG_ON(unlikely(region->vm_top < region->vm_end));
		BUG_ON(unlikely(region->vm_start < last->vm_top));

		lastp = p;
	}
}
#else
static void validate_nommu_regions(void)
{
}
#endif

/*
 * add a region into the global tree
 */
static void add_nommu_region(struct vm_region *region)
{
	struct vm_region *pregion;
	struct rb_node **p, *parent;

	validate_nommu_regions();

	BUG_ON(region->vm_start & ~PAGE_MASK);

	parent = NULL;
	p = &nommu_region_tree.rb_node;
	while (*p) {
		parent = *p;
		pregion = rb_entry(parent, struct vm_region, vm_rb);
		if (region->vm_start < pregion->vm_start)
			p = &(*p)->rb_left;
		else if (region->vm_start > pregion->vm_start)
			p = &(*p)->rb_right;
		else if (pregion == region)
			return;
		else
			BUG();
	}

	rb_link_node(&region->vm_rb, parent, p);
	rb_insert_color(&region->vm_rb, &nommu_region_tree);

	validate_nommu_regions();
}

/*
 * delete a region from the global tree
 */
static void delete_nommu_region(struct vm_region *region)
{
	BUG_ON(!nommu_region_tree.rb_node);

	validate_nommu_regions();
	rb_erase(&region->vm_rb, &nommu_region_tree);
	validate_nommu_regions();
}

/*
 * free a contiguous series of pages
 */
static void free_page_series(unsigned long from, unsigned long to)
{
	for (; from < to; from += PAGE_SIZE) {
		struct page *page = virt_to_page(from);

		kdebug("- free %lx", from);
		atomic_long_dec(&mmap_pages_allocated);
		if (page_count(page) != 1)
			kdebug("free page %p: refcount not one: %d",
			       page, page_count(page));
		put_page(page);
	}
}

/*
 * release a reference to a region
 * - the caller must hold the region semaphore for writing, which this releases
 * - the region may not have been added to the tree yet, in which case vm_top
 *   will equal vm_start
 */
static void __put_nommu_region(struct vm_region *region)
	__releases(nommu_region_sem)
{
	kenter("%p{%d}", region, atomic_read(&region->vm_usage));

	BUG_ON(!nommu_region_tree.rb_node);

	if (atomic_dec_and_test(&region->vm_usage)) {
		if (region->vm_top > region->vm_start)
			delete_nommu_region(region);
		up_write(&nommu_region_sem);

		if (region->vm_file)
			fput(region->vm_file);

		/* IO memory and memory shared directly out of the pagecache
		 * from ramfs/tmpfs mustn't be released here */
		if (region->vm_flags & VM_MAPPED_COPY) {
			kdebug("free series");
			free_page_series(region->vm_start, region->vm_top);
		}
		kmem_cache_free(vm_region_jar, region);
	} else {
		up_write(&nommu_region_sem);
	}
}

/*
 * release a reference to a region
 */
static void put_nommu_region(struct vm_region *region)
{
	down_write(&nommu_region_sem);
	__put_nommu_region(region);
}

/*
 * add a VMA into a process's mm_struct in the appropriate place in the list
 * and tree, and also add it to the address space's page tree if it is not an
 * anonymous mapping
 * - should be called with mm->mmap_sem held writelocked
 */
static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *pvma, **pp;
	struct address_space *mapping;
	struct rb_node **p, *parent;

	kenter(",%p", vma);

	BUG_ON(!vma->vm_region);

	mm->map_count++;
	vma->vm_mm = mm;

	/* add the VMA to the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}

	/* add the VMA to the tree */
	parent = NULL;
	p = &mm->mm_rb.rb_node;
	while (*p) {
		parent = *p;
		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

		/* sort by: start addr, end addr, VMA struct addr in that order
		 * (the latter is necessary as we may get identical VMAs) */
		if (vma->vm_start < pvma->vm_start)
			p = &(*p)->rb_left;
		else if (vma->vm_start > pvma->vm_start)
			p = &(*p)->rb_right;
		else if (vma->vm_end < pvma->vm_end)
			p = &(*p)->rb_left;
		else if (vma->vm_end > pvma->vm_end)
			p = &(*p)->rb_right;
		else if (vma < pvma)
			p = &(*p)->rb_left;
		else if (vma > pvma)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&vma->vm_rb, parent, p);
	rb_insert_color(&vma->vm_rb, &mm->mm_rb);

	/* add VMA to the VMA list also */
	for (pp = &mm->mmap; (pvma = *pp); pp = &(*pp)->vm_next) {
		if (pvma->vm_start > vma->vm_start)
			break;
		if (pvma->vm_start < vma->vm_start)
			continue;
		if (pvma->vm_end < vma->vm_end)
			break;
	}

	vma->vm_next = *pp;
	*pp = vma;
}

/*
 * delete a VMA from its owning mm_struct and address space
 */
static void delete_vma_from_mm(struct vm_area_struct *vma)
{
	struct vm_area_struct **pp;
	struct address_space *mapping;
	struct mm_struct *mm = vma->vm_mm;

	kenter("%p", vma);

	mm->map_count--;
	if (mm->mmap_cache == vma)
		mm->mmap_cache = NULL;

	/* remove the VMA from the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}

	/* remove from the MM's tree and list */
	rb_erase(&vma->vm_rb, &mm->mm_rb);
	for (pp = &mm->mmap; *pp; pp = &(*pp)->vm_next) {
		if (*pp == vma) {
			*pp = vma->vm_next;
			break;
		}
	}

	vma->vm_mm = NULL;
}

/*
 * destroy a VMA record
 */
static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
	kenter("%p", vma);
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file) {
		fput(vma->vm_file);
		if (vma->vm_flags & VM_EXECUTABLE)
			removed_exe_file_vma(mm);
	}
	put_nommu_region(vma->vm_region);
	kmem_cache_free(vm_area_cachep, vma);
}

/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_sem at least held readlocked
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	struct rb_node *n = mm->mm_rb.rb_node;

	/* check the cache first */
	vma = mm->mmap_cache;
	if (vma && vma->vm_start <= addr && vma->vm_end > addr)
		return vma;

	/* trawl the tree (there may be multiple mappings in which addr
	 * resides) */
	for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
		vma = rb_entry(n, struct vm_area_struct, vm_rb);
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end > addr) {
			mm->mmap_cache = vma;
			return vma;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(find_vma);
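/*
 * Note that unlike the MMU version, this find_vma() never returns a VMA
 * lying wholly above addr: the caller gets either a VMA satisfying
 * vm_start <= addr < vm_end, or NULL.  Typical lookup sketch:
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, addr);
 *	if (vma)
 *		...addr is mapped...
 *	up_read(&mm->mmap_sem);
 */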

/*
 * find a VMA
 * - we don't extend stack VMAs under NOMMU conditions
 */
struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	return find_vma(mm, addr);
}

/*
 * expand a stack to a given address
 * - not supported under NOMMU conditions
 */
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return -ENOMEM;
}

/*
 * look up the first VMA that exactly matches addr
 * - should be called with mm->mmap_sem at least held readlocked
 */
static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
					     unsigned long addr,
					     unsigned long len)
{
	struct vm_area_struct *vma;
	struct rb_node *n = mm->mm_rb.rb_node;
	unsigned long end = addr + len;

	/* check the cache first */
	vma = mm->mmap_cache;
	if (vma && vma->vm_start == addr && vma->vm_end == end)
		return vma;

	/* trawl the tree (there may be multiple mappings in which addr
	 * resides) */
	for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
		vma = rb_entry(n, struct vm_area_struct, vm_rb);
		if (vma->vm_start < addr)
			continue;
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end == end) {
			mm->mmap_cache = vma;
			return vma;
		}
	}

	return NULL;
}

/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */
static int validate_mmap_request(struct file *file,
				 unsigned long addr,
				 unsigned long len,
				 unsigned long prot,
				 unsigned long flags,
				 unsigned long pgoff,
				 unsigned long *_capabilities)
{
	unsigned long capabilities, rlen;
	unsigned long reqprot = prot;
	int ret;

	/* do the simple checks first */
	if (flags & MAP_FIXED || addr) {
		printk(KERN_DEBUG
		       "%d: Can't do fixed-address/overlay mmap of RAM\n",
		       current->pid);
		return -EINVAL;
	}

	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
	    (flags & MAP_TYPE) != MAP_SHARED)
		return -EINVAL;

	if (!len)
		return -EINVAL;

	/* Careful about overflows.. */
	rlen = PAGE_ALIGN(len);
	if (!rlen || rlen > TASK_SIZE)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	if (file) {
		/* validate file mapping requests */
		struct address_space *mapping;

		/* files must support mmap */
		if (!file->f_op || !file->f_op->mmap)
			return -ENODEV;

		/* work out if what we've got could possibly be shared
		 * - we support chardevs that provide their own "memory"
		 * - we support files/blockdevs that are memory backed
		 */
		mapping = file->f_mapping;
		if (!mapping)
			mapping = file->f_path.dentry->d_inode->i_mapping;

		capabilities = 0;
		if (mapping && mapping->backing_dev_info)
			capabilities = mapping->backing_dev_info->capabilities;

		if (!capabilities) {
			/* no explicit capabilities set, so assume some
			 * defaults */
			switch (file->f_path.dentry->d_inode->i_mode & S_IFMT) {
			case S_IFREG:
			case S_IFBLK:
				capabilities = BDI_CAP_MAP_COPY;
				break;

			case S_IFCHR:
				capabilities =
					BDI_CAP_MAP_DIRECT |
					BDI_CAP_READ_MAP |
					BDI_CAP_WRITE_MAP;
				break;

			default:
				return -EINVAL;
			}
		}

		/* eliminate any capabilities that we can't support on this
		 * device */
		if (!file->f_op->get_unmapped_area)
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		if (!file->f_op->read)
			capabilities &= ~BDI_CAP_MAP_COPY;

		if (flags & MAP_SHARED) {
			/* do checks for writing, appending and locking */
			if ((prot & PROT_WRITE) &&
			    !(file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (IS_APPEND(file->f_path.dentry->d_inode) &&
			    (file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (locks_verify_locked(file->f_path.dentry->d_inode))
				return -EAGAIN;

			if (!(capabilities & BDI_CAP_MAP_DIRECT))
				return -ENODEV;

			if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
			    ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
			    ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))) {
				printk("MAP_SHARED not completely supported on !MMU\n");
				return -EINVAL;
			}

			/* we mustn't privatise shared mappings */
			capabilities &= ~BDI_CAP_MAP_COPY;
		} else {
			/* we're going to read the file into private memory we
			 * allocate */
			if (!(capabilities & BDI_CAP_MAP_COPY))
				return -ENODEV;

			/* we don't permit a private writable mapping to be
			 * shared with the backing device */
			if (prot & PROT_WRITE)
				capabilities &= ~BDI_CAP_MAP_DIRECT;
		}

		/* handle executable mappings and implied executable
		 * mappings */
		if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
			if (prot & PROT_EXEC)
				return -EPERM;
		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
			/* handle implication of PROT_EXEC by PROT_READ */
			if (current->personality & READ_IMPLIES_EXEC) {
				if (capabilities & BDI_CAP_EXEC_MAP)
					prot |= PROT_EXEC;
			}
		} else if ((prot & PROT_READ) &&
			   (prot & PROT_EXEC) &&
			   !(capabilities & BDI_CAP_EXEC_MAP)) {
			/* backing file is not executable, try to copy */
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		}
	} else {
		/* anonymous mappings are always memory backed and can be
		 * privately mapped
		 */
		capabilities = BDI_CAP_MAP_COPY;

		/* handle PROT_EXEC implication by PROT_READ */
		if ((prot & PROT_READ) &&
		    (current->personality & READ_IMPLIES_EXEC))
			prot |= PROT_EXEC;
	}

	/* allow the security API to have its say */
	ret = security_file_mmap(file, reqprot, prot, flags, addr, 0);
	if (ret < 0)
		return ret;

	/* looks okay */
	*_capabilities = capabilities;
	return 0;
}

/*
 * we've determined that we can make the mapping, now translate what we
 * now know into VMA flags
 */
static unsigned long determine_vm_flags(struct file *file,
					unsigned long prot,
					unsigned long flags,
					unsigned long capabilities)
{
	unsigned long vm_flags;

	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
	vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	/* vm_flags |= mm->def_flags; */

	if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
		/* attempt to share read-only copies of mapped file chunks */
		if (file && !(prot & PROT_WRITE))
			vm_flags |= VM_MAYSHARE;
	} else {
		/* overlay a shareable mapping on the backing device or inode
		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
		 * romfs/cramfs */
		if (flags & MAP_SHARED)
			vm_flags |= VM_MAYSHARE | VM_SHARED;
		else if ((((vm_flags & capabilities) ^ vm_flags) & BDI_CAP_VMFLAGS) == 0)
			vm_flags |= VM_MAYSHARE;
	}

	/* refuse to let anyone share private mappings with this process if
	 * it's being traced - otherwise breakpoints set in it may interfere
	 * with another untraced process
	 */
	if ((flags & MAP_PRIVATE) && tracehook_expect_breakpoints(current))
		vm_flags &= ~VM_MAYSHARE;

	return vm_flags;
}
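/*
 * Worked example: an anonymous MAP_PRIVATE PROT_READ|PROT_WRITE request
 * reaches here with capabilities == BDI_CAP_MAP_COPY.  BDI_CAP_MAP_DIRECT
 * is clear and file is NULL, so the result is just VM_READ | VM_WRITE |
 * VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; in particular VM_MAYSHARE is not
 * set, so such a region can never be shared.
 */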

/*
 * set up a shared mapping on a file (the driver or filesystem provides and
 * pins the storage)
 */
static int do_mmap_shared_file(struct vm_area_struct *vma)
{
	int ret;

	ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
	if (ret == 0) {
		vma->vm_region->vm_top = vma->vm_region->vm_end;
		return ret;
	}
	if (ret != -ENOSYS)
		return ret;

	/* getting an ENOSYS error indicates that direct mmap isn't
	 * possible (as opposed to tried but failed) so we'll fall
	 * through to making a private copy of the data and mapping
	 * that if we can */
	return -ENODEV;
}

/*
 * set up a private mapping or an anonymous shared mapping
 */
static int do_mmap_private(struct vm_area_struct *vma,
			   struct vm_region *region,
			   unsigned long len)
{
	struct page *pages;
	unsigned long total, point, n, rlen;
	void *base;
	int ret, order;

	/* invoke the file's mapping function so that it can keep track of
	 * shared mappings on devices or memory
	 * - VM_MAYSHARE will be set if it may attempt to share
	 */
	if (vma->vm_file) {
		ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
		if (ret == 0) {
			/* shouldn't return success if we're not sharing */
			BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
			vma->vm_region->vm_top = vma->vm_region->vm_end;
			return ret;
		}
		if (ret != -ENOSYS)
			return ret;

		/* getting an ENOSYS error indicates that direct mmap isn't
		 * possible (as opposed to tried but failed) so we'll try to
		 * make a private copy of the data and map that instead */
	}

	rlen = PAGE_ALIGN(len);

	/* allocate some memory to hold the mapping
	 * - note that this may not return a page-aligned address if the object
	 *   we're allocating is smaller than a page
	 */
	order = get_order(rlen);
	kdebug("alloc order %d for %lx", order, len);

	pages = alloc_pages(GFP_KERNEL, order);
	if (!pages)
		goto enomem;

	total = 1 << order;
	atomic_long_add(total, &mmap_pages_allocated);

	point = rlen >> PAGE_SHIFT;

	/* we allocated a power-of-2 sized page set, so we may want to trim off
	 * the excess */
	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
		while (total > point) {
			order = ilog2(total - point);
			n = 1 << order;
			kdebug("shave %lu/%lu @%lu", n, total - point, total);
			atomic_long_sub(n, &mmap_pages_allocated);
			total -= n;
			set_page_refcounted(pages + total);
			__free_pages(pages + total, order);
		}
	}
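	/*
	 * Worked example of the trim above: for an rlen of 5 pages, the
	 * order-3 allocation yields total = 8 pages.  The loop then frees
	 * the 3 excess tail pages as an order-1 pair followed by an
	 * order-0 page, leaving exactly the 5 pages the mapping needs.
	 */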
| 1109 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1110 | for (point = 1; point < total; point++) |
| 1111 | set_page_refcounted(&pages[point]); |
| 1112 | |
| 1113 | base = page_address(pages); |
| 1114 | region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY; |
| 1115 | region->vm_start = (unsigned long) base; |
| 1116 | region->vm_end = region->vm_start + rlen; |
Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1117 | region->vm_top = region->vm_start + (total << PAGE_SHIFT); |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1118 | |
| 1119 | vma->vm_start = region->vm_start; |
| 1120 | vma->vm_end = region->vm_start + len; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1121 | |
| 1122 | if (vma->vm_file) { |
| 1123 | /* read the contents of a file into the copy */ |
| 1124 | mm_segment_t old_fs; |
| 1125 | loff_t fpos; |
| 1126 | |
| 1127 | fpos = vma->vm_pgoff; |
| 1128 | fpos <<= PAGE_SHIFT; |
| 1129 | |
| 1130 | old_fs = get_fs(); |
| 1131 | set_fs(KERNEL_DS); |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1132 | ret = vma->vm_file->f_op->read(vma->vm_file, base, rlen, &fpos); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1133 | set_fs(old_fs); |
| 1134 | |
| 1135 | if (ret < 0) |
| 1136 | goto error_free; |
| 1137 | |
| 1138 | /* clear the last little bit */ |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1139 | if (ret < rlen) |
| 1140 | memset(base + ret, 0, rlen - ret); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1141 | |
| 1142 | } else { |
| 1143 | /* if it's an anonymous mapping, then just clear it */ |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1144 | memset(base, 0, rlen); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1145 | } |
| 1146 | |
| 1147 | return 0; |
| 1148 | |
| 1149 | error_free: |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1150 | free_page_series(region->vm_start, region->vm_end); |
| 1151 | region->vm_start = vma->vm_start = 0; |
| 1152 | region->vm_end = vma->vm_end = 0; |
Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1153 | region->vm_top = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1154 | return ret; |
| 1155 | |
| 1156 | enomem: |
Greg Ungerer | 05ae6fa | 2009-01-13 17:30:22 +1000 | [diff] [blame] | 1157 | printk("Allocation of length %lu from process %d (%s) failed\n", |
| 1158 | len, current->pid, current->comm); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1159 | show_free_areas(); |
| 1160 | return -ENOMEM; |
| 1161 | } |
| 1162 | |
| 1163 | /* |
| 1164 | * handle mapping creation for uClinux |
| 1165 | */ |
| 1166 | unsigned long do_mmap_pgoff(struct file *file, |
| 1167 | unsigned long addr, |
| 1168 | unsigned long len, |
| 1169 | unsigned long prot, |
| 1170 | unsigned long flags, |
| 1171 | unsigned long pgoff) |
| 1172 | { |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1173 | struct vm_area_struct *vma; |
| 1174 | struct vm_region *region; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1175 | struct rb_node *rb; |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1176 | unsigned long capabilities, vm_flags, result; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1177 | int ret; |
| 1178 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1179 | kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff); |
| 1180 | |
Eric Paris | 7cd9414 | 2007-11-26 18:47:40 -0500 | [diff] [blame] | 1181 | if (!(flags & MAP_FIXED)) |
| 1182 | addr = round_hint_to_min(addr); |
| 1183 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1184 | /* decide whether we should attempt the mapping, and if so what sort of |
| 1185 | * mapping */ |
| 1186 | ret = validate_mmap_request(file, addr, len, prot, flags, pgoff, |
| 1187 | &capabilities); |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1188 | if (ret < 0) { |
| 1189 | kleave(" = %d [val]", ret); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1190 | return ret; |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1191 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1192 | |
| 1193 | /* we've determined that we can make the mapping; now translate what we |
| 1194 | * know into VMA flags */ |
| 1195 | vm_flags = determine_vm_flags(file, prot, flags, capabilities); |
| 1196 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1197 | /* we're going to need to record the mapping */ |
| 1198 | region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL); |
| 1199 | if (!region) |
| 1200 | goto error_getting_region; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1201 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1202 | vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); |
| 1203 | if (!vma) |
| 1204 | goto error_getting_vma; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1205 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1206 | atomic_set(®ion->vm_usage, 1); |
| 1207 | region->vm_flags = vm_flags; |
| 1208 | region->vm_pgoff = pgoff; |
| 1209 | |
| 1210 | INIT_LIST_HEAD(&vma->anon_vma_node); |
| 1211 | vma->vm_flags = vm_flags; |
| 1212 | vma->vm_pgoff = pgoff; |
| 1213 | |
| 1214 | if (file) { |
| 1215 | region->vm_file = file; |
| 1216 | get_file(file); |
| 1217 | vma->vm_file = file; |
| 1218 | get_file(file); |
| 1219 | if (vm_flags & VM_EXECUTABLE) { |
| 1220 | added_exe_file_vma(current->mm); |
| 1221 | vma->vm_mm = current->mm; |
| 1222 | } |
| 1223 | } |
| 1224 | |
| 1225 | down_write(&nommu_region_sem); |
| 1226 | |
| 1227 | /* if we want to share, we need to check for regions created by other |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1228 | * mmap() calls that overlap with our proposed mapping |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1229 | * - we can only share with a superset match on most regular files |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1230 | * - shared mappings on character devices and memory-backed files are |
| 1231 | * permitted to overlap inexactly as far as we are concerned, since in |
| 1232 | * these cases sharing is handled in the driver or filesystem rather |
| 1233 | * than here |
| 1234 | */ |
| 1235 | if (vm_flags & VM_MAYSHARE) { |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1236 | struct vm_region *pregion; |
| 1237 | unsigned long pglen, rpglen, pgend, rpgend, start; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1238 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1239 | pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; |
| 1240 | pgend = pgoff + pglen; |
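| | /* worked example (values assumed for illustration): with PAGE_SIZE = 4096, |
| | * len = 0x2801 rounds up to pglen = 3 pages, so a request at pgoff = 2 |
| | * covers page offsets [2, 5) and pgend = 5 */ |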
David Howells | 165b239 | 2007-03-22 00:11:24 -0800 | [diff] [blame] | 1241 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1242 | for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) { |
| 1243 | pregion = rb_entry(rb, struct vm_region, vm_rb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1244 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1245 | if (!(pregion->vm_flags & VM_MAYSHARE)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1246 | continue; |
| 1247 | |
| 1248 | /* search for overlapping mappings on the same file */ |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1249 | if (pregion->vm_file->f_path.dentry->d_inode != |
| 1250 | file->f_path.dentry->d_inode) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1251 | continue; |
| 1252 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1253 | if (pregion->vm_pgoff >= pgend) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1254 | continue; |
| 1255 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1256 | rpglen = pregion->vm_end - pregion->vm_start; |
| 1257 | rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT; |
| 1258 | rpgend = pregion->vm_pgoff + rpglen; |
| 1259 | if (pgoff >= rpgend) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1260 | continue; |
| 1261 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1262 | /* handle inexactly overlapping matches between |
| 1263 | * mappings */ |
| 1264 | if ((pregion->vm_pgoff != pgoff || rpglen != pglen) && |
| 1265 | !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) { |
| 1266 | /* new mapping is not a subset of the region */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1267 | if (!(capabilities & BDI_CAP_MAP_DIRECT)) |
| 1268 | goto sharing_violation; |
| 1269 | continue; |
| 1270 | } |
| 1271 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1272 | /* we've found a region we can share */ |
| 1273 | atomic_inc(&pregion->vm_usage); |
| 1274 | vma->vm_region = pregion; |
| 1275 | start = pregion->vm_start; |
| 1276 | start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT; |
| 1277 | vma->vm_start = start; |
| 1278 | vma->vm_end = start + len; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1279 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1280 | if (pregion->vm_flags & VM_MAPPED_COPY) { |
| 1281 | kdebug("share copy"); |
| 1282 | vma->vm_flags |= VM_MAPPED_COPY; |
| 1283 | } else { |
| 1284 | kdebug("share mmap"); |
| 1285 | ret = do_mmap_shared_file(vma); |
| 1286 | if (ret < 0) { |
| 1287 | vma->vm_region = NULL; |
| 1288 | vma->vm_start = 0; |
| 1289 | vma->vm_end = 0; |
| 1290 | atomic_dec(&pregion->vm_usage); |
| 1291 | pregion = NULL; |
| 1292 | goto error_just_free; |
| 1293 | } |
| 1294 | } |
| 1295 | fput(region->vm_file); |
| 1296 | kmem_cache_free(vm_region_jar, region); |
| 1297 | region = pregion; |
| 1298 | result = start; |
| 1299 | goto share; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1300 | } |
| 1301 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1302 | /* obtain the address at which to make a shared mapping |
| 1303 | * - this is the hook for quasi-memory character devices to |
| 1304 | * tell us the location of a shared mapping |
| 1305 | */ |
| 1306 | if (file && file->f_op->get_unmapped_area) { |
| 1307 | addr = file->f_op->get_unmapped_area(file, addr, len, |
| 1308 | pgoff, flags); |
| 1309 | if (IS_ERR((void *) addr)) { |
| 1310 | ret = addr; |
| 1311 | if (ret != (unsigned long) -ENOSYS) |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1312 | goto error_just_free; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1313 | |
| 1314 | /* the driver refused to tell us where to site |
| 1315 | * the mapping so we'll have to attempt to copy |
| 1316 | * it */ |
| 1317 | ret = (unsigned long) -ENODEV; |
| 1318 | if (!(capabilities & BDI_CAP_MAP_COPY)) |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1319 | goto error_just_free; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1320 | |
| 1321 | capabilities &= ~BDI_CAP_MAP_DIRECT; |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1322 | } else { |
| 1323 | vma->vm_start = region->vm_start = addr; |
| 1324 | vma->vm_end = region->vm_end = addr + len; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1325 | } |
| 1326 | } |
| 1327 | } |
| 1328 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1329 | vma->vm_region = region; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1330 | |
| 1331 | /* set up the mapping */ |
| 1332 | if (file && vma->vm_flags & VM_SHARED) |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1333 | ret = do_mmap_shared_file(vma); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1334 | else |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1335 | ret = do_mmap_private(vma, region, len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1336 | if (ret < 0) |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1337 | goto error_put_region; |
| 1338 | |
| 1339 | add_nommu_region(region); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1340 | |
| 1341 | /* okay... we have a mapping; now we have to register it */ |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1342 | result = vma->vm_start; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1343 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1344 | current->mm->total_vm += len >> PAGE_SHIFT; |
| 1345 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1346 | share: |
| 1347 | add_vma_to_mm(current->mm, vma); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1348 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1349 | up_write(&nommu_region_sem); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1350 | |
| 1351 | if (prot & PROT_EXEC) |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1352 | flush_icache_range(result, result + len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1353 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1354 | kleave(" = %lx", result); |
| 1355 | return result; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1356 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1357 | error_put_region: |
| 1358 | __put_nommu_region(region); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1359 | if (vma) { |
Matt Helsley | 925d1c4 | 2008-04-29 01:01:36 -0700 | [diff] [blame] | 1360 | if (vma->vm_file) { |
Gavin Lambert | 3fcd03e | 2006-09-30 23:27:01 -0700 | [diff] [blame] | 1361 | fput(vma->vm_file); |
Matt Helsley | 925d1c4 | 2008-04-29 01:01:36 -0700 | [diff] [blame] | 1362 | if (vma->vm_flags & VM_EXECUTABLE) |
| 1363 | removed_exe_file_vma(vma->vm_mm); |
| 1364 | } |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1365 | kmem_cache_free(vm_area_cachep, vma); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1366 | } |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1367 | kleave(" = %d [pr]", ret); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1368 | return ret; |
| 1369 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1370 | error_just_free: |
| 1371 | up_write(&nommu_region_sem); |
| 1372 | error: |
| | /* vm_file may be NULL for an anonymous mapping; guard the puts */ |
| | if (region->vm_file) |
| 1373 | fput(region->vm_file); |
| 1374 | kmem_cache_free(vm_region_jar, region); |
| | if (vma->vm_file) |
| 1375 | fput(vma->vm_file); |
| 1376 | if (vma->vm_flags & VM_EXECUTABLE) |
| 1377 | removed_exe_file_vma(vma->vm_mm); |
| 1378 | kmem_cache_free(vm_area_cachep, vma); |
| 1379 | kleave(" = %d", ret); |
| 1380 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1381 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1382 | sharing_violation: |
| 1383 | up_write(&nommu_region_sem); |
| 1384 | printk(KERN_WARNING "Attempt to share mismatched mappings\n"); |
| 1385 | ret = -EINVAL; |
| 1386 | goto error; |
| 1387 | |
| 1388 | error_getting_vma: |
| 1389 | kmem_cache_free(vm_region_jar, region); |
| 1390 | printk(KERN_WARNING "Allocation of vma for %lu byte allocation" |
| 1391 | " from process %d failed\n", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1392 | len, current->pid); |
| 1393 | show_free_areas(); |
| 1394 | return -ENOMEM; |
| 1395 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1396 | error_getting_region: |
| 1397 | printk(KERN_WARNING "Allocation of vm region for %lu byte allocation" |
| 1398 | " from process %d failed\n", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1399 | len, current->pid); |
| 1400 | show_free_areas(); |
| 1401 | return -ENOMEM; |
| 1402 | } |
Paul Mundt | b507317 | 2007-07-21 04:37:25 -0700 | [diff] [blame] | 1403 | EXPORT_SYMBOL(do_mmap_pgoff); |
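| | /* |
| | * Illustrative sketch only (names and flags assumed, not taken from this |
| | * file): an in-kernel caller such as a binfmt loader invokes this with |
| | * mmap_sem held for writing and checks the result with IS_ERR_VALUE(): |
| | * |
| | * down_write(&current->mm->mmap_sem); |
| | * addr = do_mmap_pgoff(file, 0, size, PROT_READ | PROT_EXEC, |
| | * MAP_PRIVATE, 0); |
| | * up_write(&current->mm->mmap_sem); |
| | * if (IS_ERR_VALUE(addr)) |
| | * return addr; |
| | */ |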
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1404 | |
| 1405 | /* |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1406 | * split a vma into two pieces at address 'addr'; a new vma is allocated |
| 1407 | * either for the first part or for the tail. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1408 | */ |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1409 | int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, |
| 1410 | unsigned long addr, int new_below) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1411 | { |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1412 | struct vm_area_struct *new; |
| 1413 | struct vm_region *region; |
| 1414 | unsigned long npages; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1415 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1416 | kenter(""); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1417 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1418 | /* we're only permitted to split anonymous regions that have a single |
| 1419 | * owner */ |
| 1420 | if (vma->vm_file || |
| 1421 | atomic_read(&vma->vm_region->vm_usage) != 1) |
| 1422 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1423 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1424 | if (mm->map_count >= sysctl_max_map_count) |
| 1425 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1426 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1427 | region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL); |
| 1428 | if (!region) |
| 1429 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1430 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1431 | new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); |
| 1432 | if (!new) { |
| 1433 | kmem_cache_free(vm_region_jar, region); |
| 1434 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1435 | } |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1436 | |
| 1437 | /* most fields are the same; copy all, then fix up the differences */ |
| 1438 | *new = *vma; |
| 1439 | *region = *vma->vm_region; |
| 1440 | new->vm_region = region; |
| 1441 | |
| 1442 | npages = (addr - vma->vm_start) >> PAGE_SHIFT; |
| 1443 | |
| 1444 | if (new_below) { |
Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1445 | region->vm_top = region->vm_end = new->vm_end = addr; |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1446 | } else { |
| 1447 | region->vm_start = new->vm_start = addr; |
| 1448 | region->vm_pgoff = new->vm_pgoff += npages; |
| 1449 | } |
| 1450 | |
| 1451 | if (new->vm_ops && new->vm_ops->open) |
| 1452 | new->vm_ops->open(new); |
| 1453 | |
| 1454 | delete_vma_from_mm(vma); |
| 1455 | down_write(&nommu_region_sem); |
| 1456 | delete_nommu_region(vma->vm_region); |
| 1457 | if (new_below) { |
| 1458 | vma->vm_region->vm_start = vma->vm_start = addr; |
| 1459 | vma->vm_region->vm_pgoff = vma->vm_pgoff += npages; |
| 1460 | } else { |
| 1461 | vma->vm_region->vm_end = vma->vm_end = addr; |
Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1462 | vma->vm_region->vm_top = addr; |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1463 | } |
| 1464 | add_nommu_region(vma->vm_region); |
| 1465 | add_nommu_region(new->vm_region); |
| 1466 | up_write(&nommu_region_sem); |
| 1467 | add_vma_to_mm(mm, vma); |
| 1468 | add_vma_to_mm(mm, new); |
| 1469 | return 0; |
| 1470 | } |
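| | /* worked example of the split semantics (addresses assumed): splitting a |
| | * VMA covering [0x8000, 0x10000) at addr 0xc000 with new_below set leaves |
| | * 'vma' as [0xc000, 0x10000) and gives 'new' the range [0x8000, 0xc000); |
| | * with new_below clear the two halves are assigned the other way round */ |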
| 1471 | |
| 1472 | /* |
| 1473 | * shrink a VMA by removing the specified chunk from either the beginning or |
| 1474 | * the end |
| 1475 | */ |
| 1476 | static int shrink_vma(struct mm_struct *mm, |
| 1477 | struct vm_area_struct *vma, |
| 1478 | unsigned long from, unsigned long to) |
| 1479 | { |
| 1480 | struct vm_region *region; |
| 1481 | |
| 1482 | kenter(""); |
| 1483 | |
| 1484 | /* adjust the VMA's pointers, which may reposition it in the MM's tree |
| 1485 | * and list */ |
| 1486 | delete_vma_from_mm(vma); |
| 1487 | if (from > vma->vm_start) |
| 1488 | vma->vm_end = from; |
| 1489 | else |
| 1490 | vma->vm_start = to; |
| 1491 | add_vma_to_mm(mm, vma); |
| 1492 | |
| 1493 | /* cut the backing region down to size */ |
| 1494 | region = vma->vm_region; |
| 1495 | BUG_ON(atomic_read(®ion->vm_usage) != 1); |
| 1496 | |
| 1497 | down_write(&nommu_region_sem); |
| 1498 | delete_nommu_region(region); |
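| | /* vm_top marks the top of the pages actually allocated to the region and |
| | * may lie above vm_end, so trimming the tail must release everything up |
| | * to the old vm_top, not just to the old vm_end */ |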
Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1499 | if (from > region->vm_start) { |
| 1500 | to = region->vm_top; |
| 1501 | region->vm_top = region->vm_end = from; |
| 1502 | } else { |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1503 | region->vm_start = to; |
Paul Mundt | dd8632a | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1504 | } |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1505 | add_nommu_region(region); |
| 1506 | up_write(&nommu_region_sem); |
| 1507 | |
| 1508 | free_page_series(from, to); |
| 1509 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1510 | } |
| 1511 | |
David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 1512 | /* |
| 1513 | * release a mapping |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1514 | * - under NOMMU conditions the chunk to be unmapped must be backed by a single |
| 1515 | * VMA, though it need not cover the whole VMA |
David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 1516 | */ |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1517 | int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1518 | { |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1519 | struct vm_area_struct *vma; |
| 1520 | struct rb_node *rb; |
| 1521 | unsigned long end = start + len; |
| 1522 | int ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1523 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1524 | kenter(",%lx,%zx", start, len); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1525 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1526 | if (len == 0) |
| 1527 | return -EINVAL; |
| 1528 | |
| 1529 | /* find the first potentially overlapping VMA */ |
| 1530 | vma = find_vma(mm, start); |
| 1531 | if (!vma) { |
David Howells | 33e5d769 | 2009-04-02 16:56:32 -0700 | [diff] [blame] | 1532 | static int limit; |
| 1533 | if (limit < 5) { |
| 1534 | printk(KERN_WARNING |
| 1535 | "munmap of memory not mmapped by process %d" |
| 1536 | " (%s): 0x%lx-0x%lx\n", |
| 1537 | current->pid, current->comm, |
| 1538 | start, start + len - 1); |
| 1539 | limit++; |
| 1540 | } |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1541 | return -EINVAL; |
David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 1542 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1543 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1544 | /* we're allowed to split an anonymous VMA but not a file-backed one */ |
| 1545 | if (vma->vm_file) { |
| 1546 | do { |
| 1547 | if (start > vma->vm_start) { |
| 1548 | kleave(" = -EINVAL [miss]"); |
| 1549 | return -EINVAL; |
| 1550 | } |
| 1551 | if (end == vma->vm_end) |
| 1552 | goto erase_whole_vma; |
| 1553 | rb = rb_next(&vma->vm_rb); |
| 1554 | vma = rb_entry(rb, struct vm_area_struct, vm_rb); |
| 1555 | } while (rb); |
| 1556 | kleave(" = -EINVAL [split file]"); |
| 1557 | return -EINVAL; |
| 1558 | } else { |
| 1559 | /* the chunk must be a subset of the VMA found */ |
| 1560 | if (start == vma->vm_start && end == vma->vm_end) |
| 1561 | goto erase_whole_vma; |
| 1562 | if (start < vma->vm_start || end > vma->vm_end) { |
| 1563 | kleave(" = -EINVAL [superset]"); |
| 1564 | return -EINVAL; |
| 1565 | } |
| 1566 | if (start & ~PAGE_MASK) { |
| 1567 | kleave(" = -EINVAL [unaligned start]"); |
| 1568 | return -EINVAL; |
| 1569 | } |
| 1570 | if (end != vma->vm_end && end & ~PAGE_MASK) { |
| 1571 | kleave(" = -EINVAL [unaligned split]"); |
| 1572 | return -EINVAL; |
| 1573 | } |
| 1574 | if (start != vma->vm_start && end != vma->vm_end) { |
| 1575 | ret = split_vma(mm, vma, start, 1); |
| 1576 | if (ret < 0) { |
| 1577 | kleave(" = %d [split]", ret); |
| 1578 | return ret; |
| 1579 | } |
| 1580 | } |
| 1581 | return shrink_vma(mm, vma, start, end); |
| 1582 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1583 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1584 | erase_whole_vma: |
| 1585 | delete_vma_from_mm(vma); |
| 1586 | delete_vma(mm, vma); |
| 1587 | kleave(" = 0"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1588 | return 0; |
| 1589 | } |
Paul Mundt | b507317 | 2007-07-21 04:37:25 -0700 | [diff] [blame] | 1590 | EXPORT_SYMBOL(do_munmap); |
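| | /* |
| | * illustrative userspace consequence (sketch, sizes assumed): partial |
| | * unmaps are honoured for anonymous memory, but a file-backed VMA can |
| | * only be released whole: |
| | * |
| | * char *a = mmap(NULL, 3 * 4096, PROT_READ | PROT_WRITE, |
| | * MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); |
| | * munmap(a + 4096, 4096); - split + shrink: permitted |
| | * |
| | * char *f = mmap(NULL, 2 * 4096, PROT_READ, MAP_PRIVATE, fd, 0); |
| | * munmap(f, 4096); - partial, file-backed: -EINVAL |
| | */ |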
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1591 | |
Heiko Carstens | 6a6160a | 2009-01-14 14:14:15 +0100 | [diff] [blame] | 1592 | SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) |
David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 1593 | { |
| 1594 | int ret; |
| 1595 | struct mm_struct *mm = current->mm; |
| 1596 | |
| 1597 | down_write(&mm->mmap_sem); |
| 1598 | ret = do_munmap(mm, addr, len); |
| 1599 | up_write(&mm->mmap_sem); |
| 1600 | return ret; |
| 1601 | } |
| 1602 | |
| 1603 | /* |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1604 | * release all the mappings made in a process's VM space |
David Howells | 3034097 | 2006-09-27 01:50:20 -0700 | [diff] [blame] | 1605 | */ |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1606 | void exit_mmap(struct mm_struct *mm) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1607 | { |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1608 | struct vm_area_struct *vma; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1609 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1610 | if (!mm) |
| 1611 | return; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1612 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1613 | kenter(""); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1614 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1615 | mm->total_vm = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1616 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1617 | while ((vma = mm->mmap)) { |
| 1618 | mm->mmap = vma->vm_next; |
| 1619 | delete_vma_from_mm(vma); |
| 1620 | delete_vma(mm, vma); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1621 | } |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1622 | |
| 1623 | kleave(""); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1624 | } |
| 1625 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1626 | unsigned long do_brk(unsigned long addr, unsigned long len) |
| 1627 | { |
| 1628 | return -ENOMEM; |
| 1629 | } |
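| | /* with brk() unavailable, a NOMMU C library is expected to satisfy heap |
| | * allocations through mmap() instead (uClibc takes this approach) */ |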
| 1630 | |
| 1631 | /* |
David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1632 | * expand (or shrink) an existing mapping, potentially moving it at the same |
| 1633 | * time (controlled by the MREMAP_MAYMOVE flag and available VM space) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1634 | * |
David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1635 | * under NOMMU conditions, we only permit changing a mapping's size, and only |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1636 | * as long as it stays within the region allocated by do_mmap_private() and the |
| 1637 | * block is not shareable |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1638 | * |
David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1639 | * MREMAP_FIXED is not supported under NOMMU conditions |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1640 | */ |
| 1641 | unsigned long do_mremap(unsigned long addr, |
| 1642 | unsigned long old_len, unsigned long new_len, |
| 1643 | unsigned long flags, unsigned long new_addr) |
| 1644 | { |
David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1645 | struct vm_area_struct *vma; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1646 | |
| 1647 | /* insanity checks first */ |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1648 | if (old_len == 0 || new_len == 0) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1649 | return (unsigned long) -EINVAL; |
| 1650 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1651 | if (addr & ~PAGE_MASK) |
| 1652 | return (unsigned long) -EINVAL; |
| 1653 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1654 | if (flags & MREMAP_FIXED && new_addr != addr) |
| 1655 | return (unsigned long) -EINVAL; |
| 1656 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1657 | vma = find_vma_exact(current->mm, addr, old_len); |
David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1658 | if (!vma) |
| 1659 | return (unsigned long) -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1660 | |
David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1661 | if (vma->vm_end != vma->vm_start + old_len) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1662 | return (unsigned long) -EFAULT; |
| 1663 | |
David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1664 | if (vma->vm_flags & VM_MAYSHARE) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1665 | return (unsigned long) -EPERM; |
| 1666 | |
David Howells | 8feae13 | 2009-01-08 12:04:47 +0000 | [diff] [blame] | 1667 | if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1668 | return (unsigned long) -ENOMEM; |
| 1669 | |
| 1670 | /* all checks complete - do it */ |
David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1671 | vma->vm_end = vma->vm_start + new_len; |
David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1672 | return vma->vm_start; |
| 1673 | } |
Paul Mundt | b507317 | 2007-07-21 04:37:25 -0700 | [diff] [blame] | 1674 | EXPORT_SYMBOL(do_mremap); |
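| | /* |
| | * illustrative sketch (sizes assumed): shrinking in place always fits, and |
| | * growing succeeds only within the size of the backing region: |
| | * |
| | * void *p = mmap(NULL, 12288, PROT_READ | PROT_WRITE, |
| | * MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); |
| | * p = mremap(p, 12288, 8192, 0); - shrink: succeeds |
| | * p = mremap(p, 8192, 12288, 0); - regrow within region: succeeds |
| | * p = mremap(p, 12288, 16384, 0); - beyond the region: -ENOMEM |
| | */ |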
David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1675 | |
Heiko Carstens | 6a6160a | 2009-01-14 14:14:15 +0100 | [diff] [blame] | 1676 | SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, |
| 1677 | unsigned long, new_len, unsigned long, flags, |
| 1678 | unsigned long, new_addr) |
David Howells | 6fa5f80 | 2006-09-27 01:50:21 -0700 | [diff] [blame] | 1679 | { |
| 1680 | unsigned long ret; |
| 1681 | |
| 1682 | down_write(¤t->mm->mmap_sem); |
| 1683 | ret = do_mremap(addr, old_len, new_len, flags, new_addr); |
| 1684 | up_write(¤t->mm->mmap_sem); |
| 1685 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1686 | } |
| 1687 | |
Linus Torvalds | 6aab341 | 2005-11-28 14:34:23 -0800 | [diff] [blame] | 1688 | struct page *follow_page(struct vm_area_struct *vma, unsigned long address, |
Hugh Dickins | deceb6c | 2005-10-29 18:16:33 -0700 | [diff] [blame] | 1689 | unsigned int foll_flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1690 | { |
| 1691 | return NULL; |
| 1692 | } |
| 1693 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1694 | int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, |
| 1695 | unsigned long to, unsigned long size, pgprot_t prot) |
| 1696 | { |
Greg Ungerer | 66aa2b4 | 2005-09-12 11:18:10 +1000 | [diff] [blame] | 1697 | vma->vm_start = vma->vm_pgoff << PAGE_SHIFT; |
| 1698 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1699 | } |
Luke Yang | 22c4af4 | 2006-07-14 00:24:09 -0700 | [diff] [blame] | 1700 | EXPORT_SYMBOL(remap_pfn_range); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1701 | |
Paul Mundt | f905bc4 | 2008-02-04 22:29:59 -0800 | [diff] [blame] | 1702 | int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, |
| 1703 | unsigned long pgoff) |
| 1704 | { |
| 1705 | unsigned int size = vma->vm_end - vma->vm_start; |
| 1706 | |
| 1707 | if (!(vma->vm_flags & VM_USERMAP)) |
| 1708 | return -EINVAL; |
| 1709 | |
| 1710 | vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT)); |
| 1711 | vma->vm_end = vma->vm_start + size; |
| 1712 | |
| 1713 | return 0; |
| 1714 | } |
| 1715 | EXPORT_SYMBOL(remap_vmalloc_range); |
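| | /* |
| | * illustrative sketch (driver names assumed): a driver exposing a |
| | * vmalloc'd buffer from its mmap handler might use this as follows: |
| | * |
| | * static int foo_mmap(struct file *file, struct vm_area_struct *vma) |
| | * { |
| | * vma->vm_flags |= VM_USERMAP; |
| | * return remap_vmalloc_range(vma, foo_buffer, 0); |
| | * } |
| | */ |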
| 1716 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1717 | void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page) |
| 1718 | { |
| 1719 | } |
| 1720 | |
| 1721 | unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr, |
| 1722 | unsigned long len, unsigned long pgoff, unsigned long flags) |
| 1723 | { |
| 1724 | return -ENOMEM; |
| 1725 | } |
| 1726 | |
Wolfgang Wander | 1363c3c | 2005-06-21 17:14:49 -0700 | [diff] [blame] | 1727 | void arch_unmap_area(struct mm_struct *mm, unsigned long addr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1728 | { |
| 1729 | } |
| 1730 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1731 | void unmap_mapping_range(struct address_space *mapping, |
| 1732 | loff_t const holebegin, loff_t const holelen, |
| 1733 | int even_cows) |
| 1734 | { |
| 1735 | } |
Luke Yang | 22c4af4 | 2006-07-14 00:24:09 -0700 | [diff] [blame] | 1736 | EXPORT_SYMBOL(unmap_mapping_range); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1737 | |
| 1738 | /* |
David Howells | d56e03c | 2007-03-22 00:11:23 -0800 | [diff] [blame] | 1739 | * ask for an unmapped area at which to create a mapping on a file |
| 1740 | */ |
| 1741 | unsigned long get_unmapped_area(struct file *file, unsigned long addr, |
| 1742 | unsigned long len, unsigned long pgoff, |
| 1743 | unsigned long flags) |
| 1744 | { |
| 1745 | unsigned long (*get_area)(struct file *, unsigned long, unsigned long, |
| 1746 | unsigned long, unsigned long); |
| 1747 | |
| 1748 | get_area = current->mm->get_unmapped_area; |
| 1749 | if (file && file->f_op && file->f_op->get_unmapped_area) |
| 1750 | get_area = file->f_op->get_unmapped_area; |
| 1751 | |
| 1752 | if (!get_area) |
| 1753 | return -ENOSYS; |
| 1754 | |
| 1755 | return get_area(file, addr, len, pgoff, flags); |
| 1756 | } |
David Howells | d56e03c | 2007-03-22 00:11:23 -0800 | [diff] [blame] | 1757 | EXPORT_SYMBOL(get_unmapped_area); |
| 1758 | |
| 1759 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1760 | * Check that a process has enough memory to allocate a new virtual |
| 1761 | * mapping. 0 means there is enough memory for the allocation to |
| 1762 | * succeed and -ENOMEM implies there is not. |
| 1763 | * |
| 1764 | * We currently support three overcommit policies, which are set via the |
| 1765 | * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting |
| 1766 | * |
| 1767 | * Strict overcommit modes added 2002 Feb 26 by Alan Cox. |
| 1768 | * Additional code 2002 Jul 20 by Robert Love. |
| 1769 | * |
| 1770 | * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise. |
| 1771 | * |
| 1772 | * Note this is a helper function intended to be used by LSMs which |
| 1773 | * wish to use this logic. |
| 1774 | */ |
Alan Cox | 34b4e4a | 2007-08-22 14:01:28 -0700 | [diff] [blame] | 1775 | int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1776 | { |
| 1777 | unsigned long free, allowed; |
| 1778 | |
| 1779 | vm_acct_memory(pages); |
| 1780 | |
| 1781 | /* |
| 1782 | * Sometimes we want to use more memory than we have |
| 1783 | */ |
| 1784 | if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS) |
| 1785 | return 0; |
| 1786 | |
| 1787 | if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) { |
| 1788 | unsigned long n; |
| 1789 | |
Christoph Lameter | 347ce43 | 2006-06-30 01:55:35 -0700 | [diff] [blame] | 1790 | free = global_page_state(NR_FILE_PAGES); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1791 | free += nr_swap_pages; |
| 1792 | |
| 1793 | /* |
| 1794 | * Any slabs which are created with the |
| 1795 | * SLAB_RECLAIM_ACCOUNT flag claim to have contents |
| 1796 | * which are reclaimable, under pressure. The dentry |
| 1797 | * cache and most inode caches should fall into this category. |
| 1798 | */ |
Christoph Lameter | 972d1a7 | 2006-09-25 23:31:51 -0700 | [diff] [blame] | 1799 | free += global_page_state(NR_SLAB_RECLAIMABLE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1800 | |
| 1801 | /* |
| 1802 | * Leave the last 3% for root |
| 1803 | */ |
| 1804 | if (!cap_sys_admin) |
| 1805 | free -= free / 32; |
| 1806 | |
| 1807 | if (free > pages) |
| 1808 | return 0; |
| 1809 | |
| 1810 | /* |
| 1811 | * nr_free_pages() is very expensive on large systems, |
| 1812 | * only call if we're about to fail. |
| 1813 | */ |
| 1814 | n = nr_free_pages(); |
Hideo AOKI | d5ddc79 | 2006-04-10 22:53:01 -0700 | [diff] [blame] | 1815 | |
| 1816 | /* |
| 1817 | * Leave reserved pages. The pages are not for anonymous pages. |
| 1818 | */ |
| 1819 | if (n <= totalreserve_pages) |
| 1820 | goto error; |
| 1821 | else |
| 1822 | n -= totalreserve_pages; |
| 1823 | |
| 1824 | /* |
| 1825 | * Leave the last 3% for root |
| 1826 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1827 | if (!cap_sys_admin) |
| 1828 | n -= n / 32; |
| 1829 | free += n; |
| 1830 | |
| 1831 | if (free > pages) |
| 1832 | return 0; |
Hideo AOKI | d5ddc79 | 2006-04-10 22:53:01 -0700 | [diff] [blame] | 1833 | |
| 1834 | goto error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1835 | } |
| 1836 | |
| 1837 | allowed = totalram_pages * sysctl_overcommit_ratio / 100; |
| 1838 | /* |
| 1839 | * Leave the last 3% for root |
| 1840 | */ |
| 1841 | if (!cap_sys_admin) |
| 1842 | allowed -= allowed / 32; |
| 1843 | allowed += total_swap_pages; |
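| | /* worked example (numbers assumed): with 4096 pages of RAM, the default |
| | * 50% ratio and 1024 pages of swap, a non-root process reaches this point |
| | * with allowed = 4096 * 50/100 = 2048, less 3% (giving 1984), plus 1024 |
| | * pages of swap = 3008 pages */ |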
| 1844 | |
| 1845 | /* Don't let a single process grow too big: |
| 1846 | * leave 3% of the size of this process for other processes */ |
Alan Cox | 731572d | 2008-10-29 14:01:20 -0700 | [diff] [blame] | 1847 | if (mm) |
| 1848 | allowed -= mm->total_vm / 32; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1849 | |
Simon Derr | 2f60f8d | 2005-08-04 19:52:03 -0700 | [diff] [blame] | 1850 | /* |
| 1851 | * cast `allowed' as a signed long because vm_committed_space |
| 1852 | * sometimes has a negative value |
| 1853 | */ |
Alan Cox | 80119ef | 2008-05-23 13:04:31 -0700 | [diff] [blame] | 1854 | if (atomic_long_read(&vm_committed_space) < (long)allowed) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1855 | return 0; |
Hideo AOKI | d5ddc79 | 2006-04-10 22:53:01 -0700 | [diff] [blame] | 1856 | error: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1857 | vm_unacct_memory(pages); |
| 1858 | |
| 1859 | return -ENOMEM; |
| 1860 | } |
| 1861 | |
| 1862 | int in_gate_area_no_task(unsigned long addr) |
| 1863 | { |
| 1864 | return 0; |
| 1865 | } |
David Howells | b0e1519 | 2006-01-06 00:11:42 -0800 | [diff] [blame] | 1866 | |
Nick Piggin | d0217ac | 2007-07-19 01:47:03 -0700 | [diff] [blame] | 1867 | int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
David Howells | b0e1519 | 2006-01-06 00:11:42 -0800 | [diff] [blame] | 1868 | { |
| 1869 | BUG(); |
Nick Piggin | d0217ac | 2007-07-19 01:47:03 -0700 | [diff] [blame] | 1870 | return 0; |
David Howells | b0e1519 | 2006-01-06 00:11:42 -0800 | [diff] [blame] | 1871 | } |
Paul Mundt | b507317 | 2007-07-21 04:37:25 -0700 | [diff] [blame] | 1872 | EXPORT_SYMBOL(filemap_fault); |
David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 1873 | |
| 1874 | /* |
| 1875 | * Access another process's address space. |
| 1876 | * - source/target buffer must be kernel space |
| 1877 | */ |
| 1878 | int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write) |
| 1879 | { |
David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 1880 | struct vm_area_struct *vma; |
| 1881 | struct mm_struct *mm; |
| 1882 | |
| 1883 | if (addr + len < addr) |
| 1884 | return 0; |
| 1885 | |
| 1886 | mm = get_task_mm(tsk); |
| 1887 | if (!mm) |
| 1888 | return 0; |
| 1889 | |
| 1890 | down_read(&mm->mmap_sem); |
| 1891 | |
| 1892 | /* the access must start within one of the target process's mappings */ |
David Howells | 0159b14 | 2006-09-27 01:50:16 -0700 | [diff] [blame] | 1893 | vma = find_vma(mm, addr); |
| 1894 | if (vma) { |
David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 1895 | /* don't overrun this mapping */ |
| 1896 | if (addr + len >= vma->vm_end) |
| 1897 | len = vma->vm_end - addr; |
| 1898 | |
| 1899 | /* only read or write mappings where it is permitted */ |
David Howells | d00c7b99 | 2006-09-27 01:50:19 -0700 | [diff] [blame] | 1900 | if (write && vma->vm_flags & VM_MAYWRITE) |
David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 1901 | len -= copy_to_user((void *) addr, buf, len); |
David Howells | d00c7b99 | 2006-09-27 01:50:19 -0700 | [diff] [blame] | 1902 | else if (!write && vma->vm_flags & VM_MAYREAD) |
David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 1903 | len -= copy_from_user(buf, (void *) addr, len); |
| 1904 | else |
| 1905 | len = 0; |
| 1906 | } else { |
| 1907 | len = 0; |
| 1908 | } |
| 1909 | |
| 1910 | up_read(&mm->mmap_sem); |
| 1911 | mmput(mm); |
| 1912 | return len; |
| 1913 | } |
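| | /* |
| | * illustrative note: ptrace() word peeks and pokes take this route; |
| | * kernel/ptrace.c's ptrace_readdata() and ptrace_writedata() both call |
| | * access_process_vm() |
| | */ |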