/*
 * mm/mmap.c
 *
 * Written by obz.
 *
 * Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 */

#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/profile.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/perf_event.h>
#include <linux/audit.h>
#include <linux/khugepaged.h>
#include <linux/uprobes.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

#include "internal.h"

#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags)	(0)
#endif

#ifndef arch_rebalance_pgtables
#define arch_rebalance_pgtables(addr, len)	(addr)
#endif

static void unmap_region(struct mm_struct *mm,
		struct vm_area_struct *vma, struct vm_area_struct *prev,
		unsigned long start, unsigned long end);

/* description of effects of mapping type and prot in current implementation.
 * this is due to the limited x86 page protection hardware.  The expected
 * behavior is in parens:
 *
 * map_type	prot
 *		PROT_NONE	PROT_READ	PROT_WRITE	PROT_EXEC
 * MAP_SHARED	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
 *		w: (no) no	w: (no) no	w: (yes) yes	w: (no) no
 *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
 *
 * MAP_PRIVATE	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
 *		w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
 *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
 *
 */
pgprot_t protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(pgprot_val(protection_map[vm_flags &
				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
			pgprot_val(arch_vm_get_page_prot(vm_flags)));
}
EXPORT_SYMBOL(vm_get_page_prot);
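
/*
 * For example: a MAP_PRIVATE mapping requested with PROT_READ|PROT_WRITE
 * ends up with VM_READ|VM_WRITE set and VM_SHARED clear, so the index
 * into protection_map is 0x3 and the copy-on-write entry __P011 from
 * the table above is selected.
 */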

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;  /* heuristic overcommit */
int sysctl_overcommit_ratio __read_mostly = 50;	/* default is 50% */
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
/*
 * Make sure vm_committed_as is in one cacheline and not shared with
 * other variables. It can be updated frequently by several CPUs.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system is a metric that
 * can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for
 * dynamically balancing memory across the competing virtual machines
 * it hosts. Several metrics drive this policy engine, including the
 * guest-reported memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	unsigned long free, allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_page_state(NR_FREE_PAGES);
		free += global_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_page_state(NR_SHMEM);

		free += nr_swap_pages;

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this
		 * category.
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave out reserved pages: they are not available
		 * for anonymous pages.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Leave the last 3% for root
		 */
		if (!cap_sys_admin)
			free -= free / 32;

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = (totalram_pages - hugetlb_total_pages())
		* sysctl_overcommit_ratio / 100;
	/*
	 * Leave the last 3% for root
	 */
	if (!cap_sys_admin)
		allowed -= allowed / 32;
	allowed += total_swap_pages;

	/* Don't let a single process grow too big:
	   leave 3% of the size of this process for other processes */
	if (mm)
		allowed -= mm->total_vm / 32;

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}
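
/*
 * Worked example of the OVERCOMMIT_NEVER limit computed above: with
 * totalram_pages = 1000000, no hugetlb pages and the default
 * sysctl_overcommit_ratio of 50, allowed starts at 500000 pages,
 * drops by 1/32nd (the "last 3%") for callers without cap_sys_admin,
 * and then gains total_swap_pages; an existing mm is charged a
 * further mm->total_vm / 32 so a single process cannot consume the
 * whole commit limit.
 */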

/*
 * Requires inode->i_mapping->i_mmap_mutex
 */
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
		struct file *file, struct address_space *mapping)
{
	if (vma->vm_flags & VM_DENYWRITE)
		atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
	if (vma->vm_flags & VM_SHARED)
		mapping->i_mmap_writable--;

	flush_dcache_mmap_lock(mapping);
	if (unlikely(vma->vm_flags & VM_NONLINEAR))
		list_del_init(&vma->shared.nonlinear);
	else
		vma_interval_tree_remove(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}

/*
 * Unlink a file-based vm structure from its interval tree, to hide
 * vma from rmap and vmtruncate before freeing its page tables.
 */
void unlink_file_vma(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;

	if (file) {
		struct address_space *mapping = file->f_mapping;
		mutex_lock(&mapping->i_mmap_mutex);
		__remove_shared_vm_struct(vma, file, mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}
}

/*
 * Close a vm structure and free it, returning the next.
 */
static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *next = vma->vm_next;

	might_sleep();
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	mpol_put(vma_policy(vma));
	kmem_cache_free(vm_area_cachep, vma);
	return next;
}

static unsigned long do_brk(unsigned long addr, unsigned long len);

SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	unsigned long rlim, retval;
	unsigned long newbrk, oldbrk;
	struct mm_struct *mm = current->mm;
	unsigned long min_brk;

	down_write(&mm->mmap_sem);

#ifdef CONFIG_COMPAT_BRK
	/*
	 * CONFIG_COMPAT_BRK can still be overridden by setting
	 * randomize_va_space to 2, which will still cause mm->start_brk
	 * to be arbitrarily shifted
	 */
	if (current->brk_randomized)
		min_brk = mm->start_brk;
	else
		min_brk = mm->end_data;
#else
	min_brk = mm->start_brk;
#endif
	if (brk < min_brk)
		goto out;

	/*
	 * Check against rlimit here. If this check is done later after the
	 * test of oldbrk with newbrk then it can escape the test and let the
	 * data segment grow beyond its set limit in the case where the limit
	 * is not page aligned -Ram Gupta
	 */
	rlim = rlimit(RLIMIT_DATA);
	if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
			(mm->end_data - mm->start_data) > rlim)
		goto out;

	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk)
		goto set_brk;

	/* Always allow shrinking brk. */
	if (brk <= mm->brk) {
		if (!do_munmap(mm, newbrk, oldbrk-newbrk))
			goto set_brk;
		goto out;
	}

	/* Check against existing mmap mappings. */
	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
		goto out;

	/* Ok, looks good - let it rip. */
	if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
		goto out;
set_brk:
	mm->brk = brk;
out:
	retval = mm->brk;
	up_write(&mm->mmap_sem);
	return retval;
}
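
/*
 * Example (assuming 4KB pages): with mm->brk == 0x601000, a call to
 * brk(0x603500) computes oldbrk = 0x601000 and newbrk = 0x604000, so
 * do_brk() is asked to map three new pages at oldbrk; on success
 * mm->brk records the unaligned 0x603500 that userspace asked for.
 */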

#ifdef CONFIG_DEBUG_VM_RB
static int browse_rb(struct rb_root *root)
{
	int i = 0, j;
	struct rb_node *nd, *pn = NULL;
	unsigned long prev = 0, pend = 0;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		struct vm_area_struct *vma;
		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
		if (vma->vm_start < prev)
			printk("vm_start %lx prev %lx\n", vma->vm_start, prev), i = -1;
		if (vma->vm_start < pend)
			printk("vm_start %lx pend %lx\n", vma->vm_start, pend);
		if (vma->vm_start > vma->vm_end)
			printk("vm_end %lx < vm_start %lx\n", vma->vm_end, vma->vm_start);
		i++;
		pn = nd;
		prev = vma->vm_start;
		pend = vma->vm_end;
	}
	j = 0;
	for (nd = pn; nd; nd = rb_prev(nd)) {
		j++;
	}
	if (i != j)
		printk("backwards %d, forwards %d\n", j, i), i = 0;
	return i;
}

void validate_mm(struct mm_struct *mm)
{
	int bug = 0;
	int i = 0;
	struct vm_area_struct *vma = mm->mmap;
	while (vma) {
		struct anon_vma_chain *avc;
		list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
			anon_vma_interval_tree_verify(avc);
		vma = vma->vm_next;
		i++;
	}
	if (i != mm->map_count)
		printk("map_count %d vm_next %d\n", mm->map_count, i), bug = 1;
	i = browse_rb(&mm->mm_rb);
	if (i != mm->map_count)
		printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
	BUG_ON(bug);
}
#else
#define validate_mm(mm) do { } while (0)
#endif

/*
 * vma has some anon_vma assigned, and is already inserted on that
 * anon_vma's interval trees.
 *
 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
 * vma must be removed from the anon_vma's interval trees using
 * anon_vma_interval_tree_pre_update_vma().
 *
 * After the update, the vma will be reinserted using
 * anon_vma_interval_tree_post_update_vma().
 *
 * The entire update must be protected by exclusive mmap_sem and by
 * the root anon_vma's mutex.
 */
static inline void
anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
}

static inline void
anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}

static int find_vma_links(struct mm_struct *mm, unsigned long addr,
		unsigned long end, struct vm_area_struct **pprev,
		struct rb_node ***rb_link, struct rb_node **rb_parent)
{
	struct rb_node **__rb_link, *__rb_parent, *rb_prev;

	__rb_link = &mm->mm_rb.rb_node;
	rb_prev = __rb_parent = NULL;

	while (*__rb_link) {
		struct vm_area_struct *vma_tmp;

		__rb_parent = *__rb_link;
		vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);

		if (vma_tmp->vm_end > addr) {
			/* Fail if an existing vma overlaps the area */
			if (vma_tmp->vm_start < end)
				return -ENOMEM;
			__rb_link = &__rb_parent->rb_left;
		} else {
			rb_prev = __rb_parent;
			__rb_link = &__rb_parent->rb_right;
		}
	}

	*pprev = NULL;
	if (rb_prev)
		*pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
	*rb_link = __rb_link;
	*rb_parent = __rb_parent;
	return 0;
}
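
/*
 * find_vma_links() is the common lookup step for inserting a new vma:
 * mmap_region() below, for instance, retries it after do_munmap()
 * until the range [addr, addr + len) is clear, then feeds the
 * resulting pprev/rb_link/rb_parent straight to vma_link().
 */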

void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
		struct rb_node **rb_link, struct rb_node *rb_parent)
{
	rb_link_node(&vma->vm_rb, rb_parent, rb_link);
	rb_insert_color(&vma->vm_rb, &mm->mm_rb);
}

static void __vma_link_file(struct vm_area_struct *vma)
{
	struct file *file;

	file = vma->vm_file;
	if (file) {
		struct address_space *mapping = file->f_mapping;

		if (vma->vm_flags & VM_DENYWRITE)
			atomic_dec(&file->f_path.dentry->d_inode->i_writecount);
		if (vma->vm_flags & VM_SHARED)
			mapping->i_mmap_writable++;

		flush_dcache_mmap_lock(mapping);
		if (unlikely(vma->vm_flags & VM_NONLINEAR))
			vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
		else
			vma_interval_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}
}

static void
__vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
	struct vm_area_struct *prev, struct rb_node **rb_link,
	struct rb_node *rb_parent)
{
	__vma_link_list(mm, vma, prev, rb_parent);
	__vma_link_rb(mm, vma, rb_link, rb_parent);
}

static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
			struct vm_area_struct *prev, struct rb_node **rb_link,
			struct rb_node *rb_parent)
{
	struct address_space *mapping = NULL;

	if (vma->vm_file)
		mapping = vma->vm_file->f_mapping;

	if (mapping)
		mutex_lock(&mapping->i_mmap_mutex);

	__vma_link(mm, vma, prev, rb_link, rb_parent);
	__vma_link_file(vma);

	if (mapping)
		mutex_unlock(&mapping->i_mmap_mutex);

	mm->map_count++;
	validate_mm(mm);
}

/*
 * Helper for vma_adjust() in the split_vma insert case: insert a vma into the
 * mm's list and rbtree. It has already been inserted into the interval tree.
 */
static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev;
	struct rb_node **rb_link, *rb_parent;

	if (find_vma_links(mm, vma->vm_start, vma->vm_end,
			   &prev, &rb_link, &rb_parent))
		BUG();
	__vma_link(mm, vma, prev, rb_link, rb_parent);
	mm->map_count++;
}

static inline void
__vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev)
{
	struct vm_area_struct *next = vma->vm_next;

	prev->vm_next = next;
	if (next)
		next->vm_prev = prev;
	rb_erase(&vma->vm_rb, &mm->mm_rb);
	if (mm->mmap_cache == vma)
		mm->mmap_cache = prev;
}

/*
 * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
 * is already present in an i_mmap tree without adjusting the tree.
 * The following helper function should be used when such adjustments
 * are necessary. The "insert" vma (if any) is to be inserted
 * before we drop the necessary locks.
 */
int vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *next = vma->vm_next;
	struct vm_area_struct *importer = NULL;
	struct address_space *mapping = NULL;
	struct rb_root *root = NULL;
	struct anon_vma *anon_vma = NULL;
	struct file *file = vma->vm_file;
	long adjust_next = 0;
	int remove_next = 0;

	if (next && !insert) {
		struct vm_area_struct *exporter = NULL;

		if (end >= next->vm_end) {
			/*
			 * vma expands, overlapping all the next, and
			 * perhaps the one after too (mprotect case 6).
			 */
again:			remove_next = 1 + (end > next->vm_end);
			end = next->vm_end;
			exporter = next;
			importer = vma;
		} else if (end > next->vm_start) {
			/*
			 * vma expands, overlapping part of the next:
			 * mprotect case 5 shifting the boundary up.
			 */
			adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
			exporter = next;
			importer = vma;
		} else if (end < vma->vm_end) {
			/*
			 * vma shrinks, and !insert tells it's not
			 * split_vma inserting another: so it must be
			 * mprotect case 4 shifting the boundary down.
			 */
			adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT);
			exporter = vma;
			importer = next;
		}

		/*
		 * Easily overlooked: when mprotect shifts the boundary,
		 * make sure the expanding vma has anon_vma set if the
		 * shrinking vma had, to cover any anon pages imported.
		 */
		if (exporter && exporter->anon_vma && !importer->anon_vma) {
			if (anon_vma_clone(importer, exporter))
				return -ENOMEM;
			importer->anon_vma = exporter->anon_vma;
		}
	}

	if (file) {
		mapping = file->f_mapping;
		if (!(vma->vm_flags & VM_NONLINEAR)) {
			root = &mapping->i_mmap;
			uprobe_munmap(vma, vma->vm_start, vma->vm_end);

			if (adjust_next)
				uprobe_munmap(next, next->vm_start,
							next->vm_end);
		}

		mutex_lock(&mapping->i_mmap_mutex);
		if (insert) {
			/*
			 * Put into interval tree now, so instantiated pages
			 * are visible to arm/parisc __flush_dcache_page
			 * throughout; but we cannot insert into address
			 * space until vma start or end is updated.
			 */
			__vma_link_file(insert);
		}
	}

	vma_adjust_trans_huge(vma, start, end, adjust_next);

	anon_vma = vma->anon_vma;
	if (!anon_vma && adjust_next)
		anon_vma = next->anon_vma;
	if (anon_vma) {
		VM_BUG_ON(adjust_next && next->anon_vma &&
			  anon_vma != next->anon_vma);
		anon_vma_lock(anon_vma);
		anon_vma_interval_tree_pre_update_vma(vma);
		if (adjust_next)
			anon_vma_interval_tree_pre_update_vma(next);
	}

	if (root) {
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_remove(vma, root);
		if (adjust_next)
			vma_interval_tree_remove(next, root);
	}

	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
	if (adjust_next) {
		next->vm_start += adjust_next << PAGE_SHIFT;
		next->vm_pgoff += adjust_next;
	}

	if (root) {
		if (adjust_next)
			vma_interval_tree_insert(next, root);
		vma_interval_tree_insert(vma, root);
		flush_dcache_mmap_unlock(mapping);
	}

	if (remove_next) {
		/*
		 * vma_merge has merged next into vma, and needs
		 * us to remove next before dropping the locks.
		 */
		__vma_unlink(mm, next, vma);
		if (file)
			__remove_shared_vm_struct(next, file, mapping);
	} else if (insert) {
		/*
		 * split_vma has split insert from vma, and needs
		 * us to insert it before dropping the locks
		 * (it may either follow vma or precede it).
		 */
		__insert_vm_struct(mm, insert);
	}

	if (anon_vma) {
		anon_vma_interval_tree_post_update_vma(vma);
		if (adjust_next)
			anon_vma_interval_tree_post_update_vma(next);
		anon_vma_unlock(anon_vma);
	}
	if (mapping)
		mutex_unlock(&mapping->i_mmap_mutex);

	if (root) {
		uprobe_mmap(vma);

		if (adjust_next)
			uprobe_mmap(next);
	}

	if (remove_next) {
		if (file) {
			uprobe_munmap(next, next->vm_start, next->vm_end);
			fput(file);
		}
		if (next->anon_vma)
			anon_vma_merge(vma, next);
		mm->map_count--;
		mpol_put(vma_policy(next));
		kmem_cache_free(vm_area_cachep, next);
		/*
		 * In mprotect's case 6 (see comments on vma_merge),
		 * we must remove another next too. It would clutter
		 * up the code too much to do both in one go.
		 */
		if (remove_next == 2) {
			next = vma->vm_next;
			goto again;
		}
	}
	if (insert && file)
		uprobe_mmap(insert);

	validate_mm(mm);

	return 0;
}
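
/*
 * Example of the "case 5" path above: when mprotect makes the first
 * part of next mergeable with vma, vma_merge() calls vma_adjust(prev,
 * prev->vm_start, end, prev->vm_pgoff, NULL) with end inside next;
 * adjust_next then shifts next->vm_start and next->vm_pgoff up by the
 * same number of pages that vma grew, keeping the two vmas adjacent.
 */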

/*
 * If the vma has a ->close operation then the driver probably needs to release
 * per-vma resources, so we don't attempt to merge those.
 */
static inline int is_mergeable_vma(struct vm_area_struct *vma,
			struct file *file, unsigned long vm_flags)
{
	if (vma->vm_flags ^ vm_flags)
		return 0;
	if (vma->vm_file != file)
		return 0;
	if (vma->vm_ops && vma->vm_ops->close)
		return 0;
	return 1;
}

static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
					struct anon_vma *anon_vma2,
					struct vm_area_struct *vma)
{
	/*
	 * The list_is_singular() test is to avoid merging VMA cloned from
	 * parents. This can improve scalability by reducing anon_vma lock
	 * contention.
	 */
	if ((!anon_vma1 || !anon_vma2) && (!vma ||
		list_is_singular(&vma->anon_vma_chain)))
		return 1;
	return anon_vma1 == anon_vma2;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * in front of (at a lower virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 *
 * We don't check here for the merged mmap wrapping around the end of pagecache
 * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which
 * wrap, nor mmaps which cover the final page at index -1UL.
 */
static int
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
	if (is_mergeable_vma(vma, file, vm_flags) &&
	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
		if (vma->vm_pgoff == vm_pgoff)
			return 1;
	}
	return 0;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * beyond (at a higher virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 */
static int
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
	if (is_mergeable_vma(vma, file, vm_flags) &&
	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
		pgoff_t vm_pglen;
		vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
		if (vma->vm_pgoff + vm_pglen == vm_pgoff)
			return 1;
	}
	return 0;
}

/*
 * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
 * whether that can be merged with its predecessor or its successor.
 * Or both (it neatly fills a hole).
 *
 * In most cases - when called for mmap, brk or mremap - [addr,end) is
 * certain not to be mapped by the time vma_merge is called; but when
 * called for mprotect, it is certain to be already mapped (either at
 * an offset within prev, or at the start of next), and the flags of
 * this area are about to be changed to vm_flags - and the no-change
 * case has already been eliminated.
 *
 * The following mprotect cases have to be considered, where AAAA is
 * the area passed down from mprotect_fixup, never extending beyond one
 * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
 *
 *     AAAA             AAAA                AAAA          AAAA
 *    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPNNNNXXXX
 *    cannot merge    might become    might become    might become
 *                    PPNNNNNNNNNN    PPPPPPPPPPNN    PPPPPPPPPPPP 6 or
 *    mmap, brk or    case 4 below    case 5 below    PPPPPPPPXXXX 7 or
 *    mremap move:                                    PPPPNNNNNNNN 8
 *        AAAA
 *    PPPP    NNNN    PPPPPPPPPPPP    PPPPPPPPNNNN    PPPPNNNNNNNN
 *    might become    case 1 below    case 2 below    case 3 below
 *
 * Odd one out? Case 8, because it extends NNNN but needs flags of XXXX:
 * mprotect_fixup updates vm_flags & vm_page_prot on successful return.
 */
struct vm_area_struct *vma_merge(struct mm_struct *mm,
			struct vm_area_struct *prev, unsigned long addr,
			unsigned long end, unsigned long vm_flags,
			struct anon_vma *anon_vma, struct file *file,
			pgoff_t pgoff, struct mempolicy *policy)
{
	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
	struct vm_area_struct *area, *next;
	int err;

	/*
	 * We later require that vma->vm_flags == vm_flags,
	 * so this tests vma->vm_flags & VM_SPECIAL, too.
	 */
	if (vm_flags & VM_SPECIAL)
		return NULL;

	if (prev)
		next = prev->vm_next;
	else
		next = mm->mmap;
	area = next;
	if (next && next->vm_end == end)		/* cases 6, 7, 8 */
		next = next->vm_next;

	/*
	 * Can it merge with the predecessor?
	 */
	if (prev && prev->vm_end == addr &&
			mpol_equal(vma_policy(prev), policy) &&
			can_vma_merge_after(prev, vm_flags,
						anon_vma, file, pgoff)) {
		/*
		 * OK, it can.  Can we now merge in the successor as well?
		 */
		if (next && end == next->vm_start &&
				mpol_equal(policy, vma_policy(next)) &&
				can_vma_merge_before(next, vm_flags,
					anon_vma, file, pgoff+pglen) &&
				is_mergeable_anon_vma(prev->anon_vma,
						      next->anon_vma, NULL)) {
							/* cases 1, 6 */
			err = vma_adjust(prev, prev->vm_start,
				next->vm_end, prev->vm_pgoff, NULL);
		} else					/* cases 2, 5, 7 */
			err = vma_adjust(prev, prev->vm_start,
				end, prev->vm_pgoff, NULL);
		if (err)
			return NULL;
		khugepaged_enter_vma_merge(prev);
		return prev;
	}

	/*
	 * Can this new request be merged in front of next?
	 */
	if (next && end == next->vm_start &&
			mpol_equal(policy, vma_policy(next)) &&
			can_vma_merge_before(next, vm_flags,
					anon_vma, file, pgoff+pglen)) {
		if (prev && addr < prev->vm_end)	/* case 4 */
			err = vma_adjust(prev, prev->vm_start,
				addr, prev->vm_pgoff, NULL);
		else					/* cases 3, 8 */
			err = vma_adjust(area, addr, next->vm_end,
				next->vm_pgoff - pglen, NULL);
		if (err)
			return NULL;
		khugepaged_enter_vma_merge(area);
		return area;
	}

	return NULL;
}
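
/*
 * Typical call, as issued by mmap_region() below: vma_merge(mm, prev,
 * addr, addr + len, vm_flags, NULL, file, pgoff, NULL) returns the
 * expanded vma when the new range can be folded into prev and/or next,
 * and NULL when the caller must allocate and link a fresh vma.
 */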

/*
 * Rough compatibility check to quickly see if it's even worth looking
 * at sharing an anon_vma.
 *
 * They need to have the same vm_file, and the flags can only differ
 * in things that mprotect may change.
 *
 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
 * we can merge the two vma's. For example, we refuse to merge a vma if
 * there is a vm_ops->close() function, because that indicates that the
 * driver is doing some kind of reference counting. But that doesn't
 * really matter for the anon_vma sharing case.
 */
static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
{
	return a->vm_end == b->vm_start &&
		mpol_equal(vma_policy(a), vma_policy(b)) &&
		a->vm_file == b->vm_file &&
		!((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC)) &&
		b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
}

/*
 * Do some basic sanity checking to see if we can re-use the anon_vma
 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
 * the same as 'old', the other will be the new one that is trying
 * to share the anon_vma.
 *
 * NOTE! This runs with mm_sem held for reading, so it is possible that
 * the anon_vma of 'old' is concurrently in the process of being set up
 * by another page fault trying to merge _that_. But that's ok: if it
 * is being set up, that automatically means that it will be a singleton
 * acceptable for merging, so we can do all of this optimistically. But
 * we do that ACCESS_ONCE() to make sure that we never re-load the pointer.
 *
 * IOW: that the "list_is_singular()" test on the anon_vma_chain only
 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
 * is to return an anon_vma that is "complex" due to having gone through
 * a fork).
 *
 * We also make sure that the two vma's are compatible (adjacent,
 * and with the same memory policies). That's all stable, even with just
 * a read lock on the mm_sem.
 */
static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
{
	if (anon_vma_compatible(a, b)) {
		struct anon_vma *anon_vma = ACCESS_ONCE(old->anon_vma);

		if (anon_vma && list_is_singular(&old->anon_vma_chain))
			return anon_vma;
	}
	return NULL;
}

/*
 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
 * neighbouring vmas for a suitable anon_vma, before it goes off
 * to allocate a new anon_vma.  It checks because a repetitive
 * sequence of mprotects and faults may otherwise lead to distinct
 * anon_vmas being allocated, preventing vma merge in subsequent
 * mprotect.
 */
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *near;

	near = vma->vm_next;
	if (!near)
		goto try_prev;

	anon_vma = reusable_anon_vma(near, vma, near);
	if (anon_vma)
		return anon_vma;
try_prev:
	near = vma->vm_prev;
	if (!near)
		goto none;

	anon_vma = reusable_anon_vma(near, near, vma);
	if (anon_vma)
		return anon_vma;
none:
	/*
	 * There's no absolute need to look only at touching neighbours:
	 * we could search further afield for "compatible" anon_vmas.
	 * But it would probably just be a waste of time searching,
	 * or lead to too many vmas hanging off the same anon_vma.
	 * We're trying to allow mprotect remerging later on,
	 * not trying to minimize memory used for anon_vmas.
	 */
	return NULL;
}

#ifdef CONFIG_PROC_FS
void vm_stat_account(struct mm_struct *mm, unsigned long flags,
						struct file *file, long pages)
{
	const unsigned long stack_flags
		= VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);

	mm->total_vm += pages;

	if (file) {
		mm->shared_vm += pages;
		if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
			mm->exec_vm += pages;
	} else if (flags & stack_flags)
		mm->stack_vm += pages;
}
#endif /* CONFIG_PROC_FS */

/*
 * If a hint addr is less than mmap_min_addr, change hint to be as
 * low as possible but still greater than mmap_min_addr
 */
static inline unsigned long round_hint_to_min(unsigned long hint)
{
	hint &= PAGE_MASK;
	if (((void *)hint != NULL) &&
	    (hint < mmap_min_addr))
		return PAGE_ALIGN(mmap_min_addr);
	return hint;
}
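
/*
 * Example: with vm.mmap_min_addr set to 0x10000, a non-NULL hint of
 * 0x8000 is rounded up to 0x10000, while a NULL hint passes through
 * unchanged so the kernel stays free to pick any address.
 */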

/*
 * The caller must hold down_write(&current->mm->mmap_sem).
 */

unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
			unsigned long len, unsigned long prot,
			unsigned long flags, unsigned long pgoff)
{
	struct mm_struct *mm = current->mm;
	struct inode *inode;
	vm_flags_t vm_flags;

	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
	 *
	 * (the exception is when the underlying filesystem is noexec
	 *  mounted, in which case we don't add PROT_EXEC.)
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
			prot |= PROT_EXEC;

	if (!len)
		return -EINVAL;

	if (!(flags & MAP_FIXED))
		addr = round_hint_to_min(addr);

	/* Careful about overflows.. */
	len = PAGE_ALIGN(len);
	if (!len)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	/* Too many mappings? */
	if (mm->map_count > sysctl_max_map_count)
		return -ENOMEM;

	/* Obtain the address to map to. We verify (or select) it and ensure
	 * that it represents a valid section of the address space.
	 */
	addr = get_unmapped_area(file, addr, len, pgoff, flags);
	if (addr & ~PAGE_MASK)
		return addr;

	/* Do simple checking here so the lower-level routines won't have
	 * to. We assume access permissions have been handled by the open
	 * of the memory object, so we don't do any here.
	 */
	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

	if (flags & MAP_LOCKED)
		if (!can_do_mlock())
			return -EPERM;

	/* mlock MCL_FUTURE? */
	if (vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = len >> PAGE_SHIFT;
		locked += mm->locked_vm;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		lock_limit >>= PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return -EAGAIN;
	}

	inode = file ? file->f_path.dentry->d_inode : NULL;

	if (file) {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
				return -EACCES;

			/*
			 * Make sure we don't allow writing to an append-only
			 * file..
			 */
			if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
				return -EACCES;

			/*
			 * Make sure there are no mandatory locks on the file.
			 */
			if (locks_verify_locked(inode))
				return -EAGAIN;

			vm_flags |= VM_SHARED | VM_MAYSHARE;
			if (!(file->f_mode & FMODE_WRITE))
				vm_flags &= ~(VM_MAYWRITE | VM_SHARED);

			/* fall through */
		case MAP_PRIVATE:
			if (!(file->f_mode & FMODE_READ))
				return -EACCES;
			if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
				if (vm_flags & VM_EXEC)
					return -EPERM;
				vm_flags &= ~VM_MAYEXEC;
			}

			if (!file->f_op || !file->f_op->mmap)
				return -ENODEV;
			break;

		default:
			return -EINVAL;
		}
	} else {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			/*
			 * Ignore pgoff.
			 */
			pgoff = 0;
			vm_flags |= VM_SHARED | VM_MAYSHARE;
			break;
		case MAP_PRIVATE:
			/*
			 * Set pgoff according to addr for anon_vma.
			 */
			pgoff = addr >> PAGE_SHIFT;
			break;
		default:
			return -EINVAL;
		}
	}

	return mmap_region(file, addr, len, flags, vm_flags, pgoff);
}

SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	struct file *file = NULL;
	unsigned long retval = -EBADF;

	if (!(flags & MAP_ANONYMOUS)) {
		audit_mmap_fd(fd, flags);
		if (unlikely(flags & MAP_HUGETLB))
			return -EINVAL;
		file = fget(fd);
		if (!file)
			goto out;
	} else if (flags & MAP_HUGETLB) {
		struct user_struct *user = NULL;
		/*
		 * VM_NORESERVE is used because the reservations will be
		 * taken when vm_ops->mmap() is called.
		 * A dummy user value is used because we are not locking
		 * memory, so no accounting is necessary.
		 */
		file = hugetlb_file_setup(HUGETLB_ANON_FILE, addr, len,
						VM_NORESERVE, &user,
						HUGETLB_ANONHUGE_INODE);
		if (IS_ERR(file))
			return PTR_ERR(file);
	}

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	if (file)
		fput(file);
out:
	return retval;
}

#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (a.offset & ~PAGE_MASK)
		return -EINVAL;

	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			      a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */

/*
 * Some shared mappings will want the pages marked read-only
 * to track write events. If so, we'll downgrade vm_page_prot
 * to the private version (using protection_map[] without the
 * VM_SHARED bit).
 */
int vma_wants_writenotify(struct vm_area_struct *vma)
{
	vm_flags_t vm_flags = vma->vm_flags;

	/* If it was private or non-writable, the write bit is already clear */
	if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
		return 0;

	/* The backer wishes to know when pages are first written to? */
	if (vma->vm_ops && vma->vm_ops->page_mkwrite)
		return 1;

	/* The open routine did something to the protections already? */
	if (pgprot_val(vma->vm_page_prot) !=
	    pgprot_val(vm_get_page_prot(vm_flags)))
		return 0;

	/* Specialty mapping? */
	if (vm_flags & VM_PFNMAP)
		return 0;

	/* Can the mapping track the dirty pages? */
	return vma->vm_file && vma->vm_file->f_mapping &&
		mapping_cap_account_dirty(vma->vm_file->f_mapping);
}

/*
 * We account for memory if it's a private writeable mapping,
 * not hugepages and VM_NORESERVE wasn't set.
 */
static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
{
	/*
	 * hugetlb has its own accounting separate from the core VM.
	 * VM_HUGETLB may not be set yet so we cannot check for that flag.
	 */
	if (file && is_file_hugepages(file))
		return 0;

	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
}
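
/*
 * Example: a private PROT_READ|PROT_WRITE mapping has VM_WRITE set and
 * VM_SHARED/VM_NORESERVE clear, so the expression above is true and
 * the mapping is charged against the overcommit limits; MAP_SHARED or
 * MAP_NORESERVE mappings (and hugetlb files) are not charged here.
 */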
1254
Miklos Szeredi0165ab42007-07-15 23:38:26 -07001255unsigned long mmap_region(struct file *file, unsigned long addr,
1256 unsigned long len, unsigned long flags,
KOSAKI Motohiroca16d142011-05-26 19:16:19 +09001257 vm_flags_t vm_flags, unsigned long pgoff)
Miklos Szeredi0165ab42007-07-15 23:38:26 -07001258{
1259 struct mm_struct *mm = current->mm;
1260 struct vm_area_struct *vma, *prev;
1261 int correct_wcount = 0;
1262 int error;
1263 struct rb_node **rb_link, *rb_parent;
1264 unsigned long charged = 0;
1265 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
1266
Linus Torvalds1da177e2005-04-16 15:20:36 -07001267 /* Clear old maps */
1268 error = -ENOMEM;
1269munmap_back:
Hugh Dickins6597d782012-10-08 16:29:07 -07001270 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001271 if (do_munmap(mm, addr, len))
1272 return -ENOMEM;
1273 goto munmap_back;
1274 }
1275
1276 /* Check against address space limit. */
akpm@osdl.org119f6572005-05-01 08:58:35 -07001277 if (!may_expand_vm(mm, len >> PAGE_SHIFT))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278 return -ENOMEM;
1279
Linus Torvaldsfc8744a2009-01-31 15:08:56 -08001280 /*
1281 * Set 'VM_NORESERVE' if we should not account for the
Mel Gorman5a6fe122009-02-10 14:02:27 +00001282 * memory use of this mapping.
Linus Torvaldsfc8744a2009-01-31 15:08:56 -08001283 */
Mel Gorman5a6fe122009-02-10 14:02:27 +00001284 if ((flags & MAP_NORESERVE)) {
1285 /* We honor MAP_NORESERVE if allowed to overcommit */
1286 if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
1287 vm_flags |= VM_NORESERVE;
1288
1289 /* hugetlb applies strict overcommit unless MAP_NORESERVE */
1290 if (file && is_file_hugepages(file))
1291 vm_flags |= VM_NORESERVE;
1292 }
Andy Whitcroftcdfd4322008-07-23 21:27:28 -07001293
Linus Torvaldsfc8744a2009-01-31 15:08:56 -08001294 /*
1295 * Private writable mapping: check memory availability
1296 */
Mel Gorman5a6fe122009-02-10 14:02:27 +00001297 if (accountable_mapping(file, vm_flags)) {
Linus Torvaldsfc8744a2009-01-31 15:08:56 -08001298 charged = len >> PAGE_SHIFT;
Al Viro191c5422012-02-13 03:58:52 +00001299 if (security_vm_enough_memory_mm(mm, charged))
Linus Torvaldsfc8744a2009-01-31 15:08:56 -08001300 return -ENOMEM;
1301 vm_flags |= VM_ACCOUNT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302 }
1303
1304 /*
Linus Torvaldsde33c8d2009-01-29 17:46:42 -08001305 * Can we just expand an old mapping?
Linus Torvalds1da177e2005-04-16 15:20:36 -07001306 */
Linus Torvaldsde33c8d2009-01-29 17:46:42 -08001307 vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
1308 if (vma)
1309 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310
1311 /*
1312 * Determine the object being mapped and call the appropriate
1313 * specific mapper. the address has already been validated, but
1314 * not unmapped, but the maps are removed from the list.
1315 */
Pekka Enbergc5e3b832006-03-25 03:06:43 -08001316 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001317 if (!vma) {
1318 error = -ENOMEM;
1319 goto unacct_error;
1320 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001321
1322 vma->vm_mm = mm;
1323 vma->vm_start = addr;
1324 vma->vm_end = addr + len;
1325 vma->vm_flags = vm_flags;
Coly Li3ed75eb2007-10-18 23:39:15 -07001326 vma->vm_page_prot = vm_get_page_prot(vm_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327 vma->vm_pgoff = pgoff;
Rik van Riel5beb4932010-03-05 13:42:07 -08001328 INIT_LIST_HEAD(&vma->anon_vma_chain);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329
Hugh Dickinsce8fea72012-03-06 12:28:52 -08001330 error = -EINVAL; /* when rejecting VM_GROWSDOWN|VM_GROWSUP */
1331
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332 if (file) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001333 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1334 goto free_vma;
1335 if (vm_flags & VM_DENYWRITE) {
1336 error = deny_write_access(file);
1337 if (error)
1338 goto free_vma;
1339 correct_wcount = 1;
1340 }
Al Virocb0942b2012-08-27 14:48:26 -04001341 vma->vm_file = get_file(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342 error = file->f_op->mmap(file, vma);
1343 if (error)
1344 goto unmap_and_free_vma;
Huang Shijief8dbf0a72009-09-21 17:03:41 -07001345
1346 /* Can addr have changed??
1347 *
1348 * Answer: Yes, several device drivers can do it in their
1349 * f_op->mmap method. -DaveM
1350 */
1351 addr = vma->vm_start;
1352 pgoff = vma->vm_pgoff;
1353 vm_flags = vma->vm_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354 } else if (vm_flags & VM_SHARED) {
Al Viro835ee792012-03-05 06:39:47 +00001355 if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
1356 goto free_vma;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357 error = shmem_zero_setup(vma);
1358 if (error)
1359 goto free_vma;
1360 }
1361
Magnus Dammc9d0bf22009-12-14 17:59:49 -08001362 if (vma_wants_writenotify(vma)) {
1363 pgprot_t pprot = vma->vm_page_prot;
1364
1365 /* Can vma->vm_page_prot have changed??
1366 *
1367 * Answer: Yes, drivers may have changed it in their
1368 * f_op->mmap method.
1369 *
1370 * Ensures that vmas marked as uncached stay that way.
1371 */
Hugh Dickins1ddd4392007-10-22 20:45:12 -07001372 vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
Magnus Dammc9d0bf22009-12-14 17:59:49 -08001373 if (pgprot_val(pprot) == pgprot_val(pgprot_noncached(pprot)))
1374 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1375 }
Peter Zijlstrad08b3852006-09-25 23:30:57 -07001376
Linus Torvaldsde33c8d2009-01-29 17:46:42 -08001377 vma_link(mm, vma, prev, rb_link, rb_parent);
1378 file = vma->vm_file;
Oleg Nesterov4d3d5b42008-04-28 02:12:10 -07001379
1380 /* Once vma denies write, undo our temporary denial count */
1381 if (correct_wcount)
1382 atomic_inc(&inode->i_writecount);
1383out:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001384 perf_event_mmap(vma);
Peter Zijlstra0a4a9392009-03-30 19:07:05 +02001385
Hugh Dickinsab50b8e2005-10-29 18:15:56 -07001386 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001387 if (vm_flags & VM_LOCKED) {
KOSAKI Motohiro06f9d8c2010-03-05 13:41:43 -08001388 if (!mlock_vma_pages_range(vma, addr, addr + len))
1389 mm->locked_vm += (len >> PAGE_SHIFT);
Rik van Rielba470de2008-10-18 20:26:50 -07001390 } else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
Nick Piggin54cb8822007-07-19 01:46:59 -07001391 make_pages_present(addr, addr + len);
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301392
Oleg Nesterovc7a3a882012-08-19 19:10:42 +02001393 if (file)
1394 uprobe_mmap(vma);
Srikar Dronamraju2b144492012-02-09 14:56:42 +05301395
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396 return addr;
1397
1398unmap_and_free_vma:
1399 if (correct_wcount)
1400 atomic_inc(&inode->i_writecount);
1401 vma->vm_file = NULL;
1402 fput(file);
1403
1404 /* Undo any partial mapping done by a device driver. */
Hugh Dickinse0da3822005-04-19 13:29:15 -07001405 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
1406 charged = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407free_vma:
1408 kmem_cache_free(vm_area_cachep, vma);
1409unacct_error:
1410 if (charged)
1411 vm_unacct_memory(charged);
1412 return error;
1413}
1414
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415/* Get an address range which is currently unmapped.
1416 * For shmat() with addr=0.
1417 *
1418 * Ugly calling convention alert:
1419 * Return value with the low bits set means error value,
1420 * i.e.
1421 * if (ret & ~PAGE_MASK)
1422 * error = ret;
1423 *
1424 * This function "knows" that -ENOMEM has the bits set.
1425 */
1426#ifndef HAVE_ARCH_UNMAPPED_AREA
1427unsigned long
1428arch_get_unmapped_area(struct file *filp, unsigned long addr,
1429 unsigned long len, unsigned long pgoff, unsigned long flags)
1430{
1431 struct mm_struct *mm = current->mm;
1432 struct vm_area_struct *vma;
1433 unsigned long start_addr;
1434
1435 if (len > TASK_SIZE)
1436 return -ENOMEM;
1437
Benjamin Herrenschmidt06abdfb2007-05-06 14:50:13 -07001438 if (flags & MAP_FIXED)
1439 return addr;
1440
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441 if (addr) {
1442 addr = PAGE_ALIGN(addr);
1443 vma = find_vma(mm, addr);
1444 if (TASK_SIZE - len >= addr &&
1445 (!vma || addr + len <= vma->vm_start))
1446 return addr;
1447 }
Wolfgang Wander1363c3c2005-06-21 17:14:49 -07001448 if (len > mm->cached_hole_size) {
1449 start_addr = addr = mm->free_area_cache;
1450 } else {
1451 start_addr = addr = TASK_UNMAPPED_BASE;
1452 mm->cached_hole_size = 0;
1453 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454
1455full_search:
1456 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1457 /* At this point: (!vma || addr < vma->vm_end). */
1458 if (TASK_SIZE - len < addr) {
1459 /*
1460 * Start a new search - just in case we missed
1461 * some holes.
1462 */
1463 if (start_addr != TASK_UNMAPPED_BASE) {
Wolfgang Wander1363c3c2005-06-21 17:14:49 -07001464 addr = TASK_UNMAPPED_BASE;
1465 start_addr = addr;
1466 mm->cached_hole_size = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467 goto full_search;
1468 }
1469 return -ENOMEM;
1470 }
1471 if (!vma || addr + len <= vma->vm_start) {
1472 /*
1473 * Remember the place where we stopped the search:
1474 */
1475 mm->free_area_cache = addr + len;
1476 return addr;
1477 }
Wolfgang Wander1363c3c2005-06-21 17:14:49 -07001478 if (addr + mm->cached_hole_size < vma->vm_start)
1479 mm->cached_hole_size = vma->vm_start - addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480 addr = vma->vm_end;
1481 }
1482}
1483#endif
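/*
 * Editorial sketch, not part of the original file: both allocators in this
 * file repeat the same "does [addr, addr+len) fit before the next vma?"
 * test. This hypothetical helper makes the invariant explicit: find_vma()
 * returns the first vma with addr < vm_end, so the range is free iff it
 * ends at or before that vma's vm_start. Caller must hold mmap_sem.
 */
static inline bool sample_gap_fits(struct mm_struct *mm, unsigned long addr,
                                   unsigned long len)
{
        struct vm_area_struct *vma = find_vma(mm, addr);

        /* TASK_SIZE - len >= addr is the overflow-safe form of the bound */
        return TASK_SIZE - len >= addr &&
               (!vma || addr + len <= vma->vm_start);
}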
1484
Wolfgang Wander1363c3c2005-06-21 17:14:49 -07001485void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486{
1487 /*
1488 * Is this a new hole at the lowest possible address?
1489 */
Xiao Guangrongf44d2192012-03-21 16:33:56 -07001490 if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
Wolfgang Wander1363c3c2005-06-21 17:14:49 -07001491 mm->free_area_cache = addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001492}
1493
1494/*
1495 * This mmap-allocator allocates new areas top-down from below the
1496 * stack's low limit (the base):
1497 */
1498#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1499unsigned long
1500arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
1501 const unsigned long len, const unsigned long pgoff,
1502 const unsigned long flags)
1503{
1504 struct vm_area_struct *vma;
1505 struct mm_struct *mm = current->mm;
Xiao Guangrongb716ad92012-03-21 16:33:56 -07001506 unsigned long addr = addr0, start_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507
1508 /* requested length too big for entire address space */
1509 if (len > TASK_SIZE)
1510 return -ENOMEM;
1511
Benjamin Herrenschmidt06abdfb2007-05-06 14:50:13 -07001512 if (flags & MAP_FIXED)
1513 return addr;
1514
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515 /* requesting a specific address */
1516 if (addr) {
1517 addr = PAGE_ALIGN(addr);
1518 vma = find_vma(mm, addr);
1519 if (TASK_SIZE - len >= addr &&
1520 (!vma || addr + len <= vma->vm_start))
1521 return addr;
1522 }
1523
Wolfgang Wander1363c3c2005-06-21 17:14:49 -07001524 /* check if free_area_cache is useful for us */
1525 if (len <= mm->cached_hole_size) {
1526 mm->cached_hole_size = 0;
1527 mm->free_area_cache = mm->mmap_base;
1528 }
1529
Xiao Guangrongb716ad92012-03-21 16:33:56 -07001530try_again:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531 /* either no address requested or can't fit in requested address hole */
Xiao Guangrongb716ad92012-03-21 16:33:56 -07001532 start_addr = addr = mm->free_area_cache;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533
Xiao Guangrongb716ad92012-03-21 16:33:56 -07001534 if (addr < len)
1535 goto fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536
Xiao Guangrongb716ad92012-03-21 16:33:56 -07001537 addr -= len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001538 do {
1539 /*
1540 * Lookup failure means no vma is above this address,
1541 * else if new region fits below vma->vm_start,
1542 * return with success:
1543 */
1544 vma = find_vma(mm, addr);
1545 if (!vma || addr+len <= vma->vm_start)
1546 /* remember the address as a hint for next time */
1547 return (mm->free_area_cache = addr);
1548
Wolfgang Wander1363c3c2005-06-21 17:14:49 -07001549 /* remember the largest hole we saw so far */
1550 if (addr + mm->cached_hole_size < vma->vm_start)
1551 mm->cached_hole_size = vma->vm_start - addr;
1552
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553 /* try just below the current vma->vm_start */
1554 addr = vma->vm_start-len;
Linus Torvalds49a43872005-05-18 15:39:33 -07001555 } while (len < vma->vm_start);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556
Xiao Guangrongb716ad92012-03-21 16:33:56 -07001557fail:
1558 /*
1559 * If the hint left us with no space for the requested
1560 * mapping, then try again:
1561 *
1562 * Note: this differs from the bottom-up case, which does a
1563 * full linear search; here we use find_vma, which can skip
1564 * some holes.
1565 */
1566 if (start_addr != mm->mmap_base) {
1567 mm->free_area_cache = mm->mmap_base;
1568 mm->cached_hole_size = 0;
1569 goto try_again;
1570 }
1571
Linus Torvalds1da177e2005-04-16 15:20:36 -07001572 /*
1573 * A failed mmap() very likely causes application failure,
1574 * so fall back to the bottom-up function here. This scenario
1575 * can happen with large stack limits and large mmap()
1576 * allocations.
1577 */
Wolfgang Wander1363c3c2005-06-21 17:14:49 -07001578 mm->cached_hole_size = ~0UL;
1579 mm->free_area_cache = TASK_UNMAPPED_BASE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
1581 /*
1582 * Restore the topdown base:
1583 */
1584 mm->free_area_cache = mm->mmap_base;
Wolfgang Wander1363c3c2005-06-21 17:14:49 -07001585 mm->cached_hole_size = ~0UL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586
1587 return addr;
1588}
1589#endif
1590
Wolfgang Wander1363c3c2005-06-21 17:14:49 -07001591void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592{
1593 /*
1594 * Is this a new hole at the highest possible address?
1595 */
Wolfgang Wander1363c3c2005-06-21 17:14:49 -07001596 if (addr > mm->free_area_cache)
1597 mm->free_area_cache = addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598
1599 /* don't allow allocations above current base */
Wolfgang Wander1363c3c2005-06-21 17:14:49 -07001600 if (mm->free_area_cache > mm->mmap_base)
1601 mm->free_area_cache = mm->mmap_base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001602}
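/*
 * Editorial sketch, not in the original file: which allocator pair a process
 * uses is a per-mm decision made at exec time by the architecture's
 * arch_pick_mmap_layout(). This hypothetical fragment shows only the
 * function-pointer wiring (real implementations also compute mm->mmap_base).
 */
static inline void sample_pick_layout(struct mm_struct *mm, int legacy)
{
        if (legacy) {
                mm->get_unmapped_area = arch_get_unmapped_area;
                mm->unmap_area = arch_unmap_area;
        } else {
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
                mm->unmap_area = arch_unmap_area_topdown;
        }
}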
1603
1604unsigned long
1605get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
1606 unsigned long pgoff, unsigned long flags)
1607{
Benjamin Herrenschmidt06abdfb2007-05-06 14:50:13 -07001608 unsigned long (*get_area)(struct file *, unsigned long,
1609 unsigned long, unsigned long, unsigned long);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610
Al Viro9206de92009-12-03 15:23:11 -05001611 unsigned long error = arch_mmap_check(addr, len, flags);
1612 if (error)
1613 return error;
1614
1615 /* Careful about overflows.. */
1616 if (len > TASK_SIZE)
1617 return -ENOMEM;
1618
Benjamin Herrenschmidt06abdfb2007-05-06 14:50:13 -07001619 get_area = current->mm->get_unmapped_area;
1620 if (file && file->f_op && file->f_op->get_unmapped_area)
1621 get_area = file->f_op->get_unmapped_area;
1622 addr = get_area(file, addr, len, pgoff, flags);
1623 if (IS_ERR_VALUE(addr))
1624 return addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625
Linus Torvalds07ab67c2005-05-19 22:43:37 -07001626 if (addr > TASK_SIZE - len)
1627 return -ENOMEM;
1628 if (addr & ~PAGE_MASK)
1629 return -EINVAL;
Benjamin Herrenschmidt06abdfb2007-05-06 14:50:13 -07001630
Al Viro9ac4ed42012-05-30 17:13:15 -04001631 addr = arch_rebalance_pgtables(addr, len);
1632 error = security_mmap_addr(addr);
1633 return error ? error : addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634}
1635
1636EXPORT_SYMBOL(get_unmapped_area);
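/*
 * Editorial example, not part of the original file: decoding the "low bits
 * set means error value" convention documented above. sample_probe_area()
 * is a hypothetical helper; it assumes current->mm is valid and that the
 * caller holds mmap_sem, as do_mmap_pgoff() does.
 */
static inline long sample_probe_area(struct file *file, unsigned long len)
{
        unsigned long addr = get_unmapped_area(file, 0, len, 0, 0);

        if (addr & ~PAGE_MASK)          /* low bits set: addr encodes -errno */
                return (long)addr;
        return 0;                       /* addr is a usable, page-aligned address */
}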
1637
1638/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
ZhenwenXu48aae422009-01-06 14:40:21 -08001639struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640{
1641 struct vm_area_struct *vma = NULL;
1642
Rajman Mekaco841e31e2012-05-29 15:06:21 -07001643 if (WARN_ON_ONCE(!mm)) /* Remove this in linux-3.6 */
1644 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645
Rajman Mekaco841e31e2012-05-29 15:06:21 -07001646 /* Check the cache first. */
1647 /* (Cache hit rate is typically around 35%.) */
1648 vma = mm->mmap_cache;
1649 if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
1650 struct rb_node *rb_node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651
Rajman Mekaco841e31e2012-05-29 15:06:21 -07001652 rb_node = mm->mm_rb.rb_node;
1653 vma = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654
Rajman Mekaco841e31e2012-05-29 15:06:21 -07001655 while (rb_node) {
1656 struct vm_area_struct *vma_tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657
Rajman Mekaco841e31e2012-05-29 15:06:21 -07001658 vma_tmp = rb_entry(rb_node,
1659 struct vm_area_struct, vm_rb);
1660
1661 if (vma_tmp->vm_end > addr) {
1662 vma = vma_tmp;
1663 if (vma_tmp->vm_start <= addr)
1664 break;
1665 rb_node = rb_node->rb_left;
1666 } else
1667 rb_node = rb_node->rb_right;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668 }
Rajman Mekaco841e31e2012-05-29 15:06:21 -07001669 if (vma)
1670 mm->mmap_cache = vma;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671 }
1672 return vma;
1673}
1674
1675EXPORT_SYMBOL(find_vma);
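/*
 * Editorial example, not part of the original file: find_vma() only
 * guarantees addr < vma->vm_end, so a caller that needs "addr is actually
 * mapped" must also compare against vm_start, as this hypothetical helper
 * does.
 */
static inline bool sample_addr_is_mapped(struct mm_struct *mm,
                                         unsigned long addr)
{
        struct vm_area_struct *vma;
        bool mapped;

        down_read(&mm->mmap_sem);       /* the lookup is only stable under mmap_sem */
        vma = find_vma(mm, addr);
        mapped = vma && addr >= vma->vm_start;
        up_read(&mm->mmap_sem);
        return mapped;
}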
1676
KOSAKI Motohiro6bd48372012-01-10 15:08:07 -08001677/*
1678 * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
KOSAKI Motohiro6bd48372012-01-10 15:08:07 -08001679 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680struct vm_area_struct *
1681find_vma_prev(struct mm_struct *mm, unsigned long addr,
1682 struct vm_area_struct **pprev)
1683{
KOSAKI Motohiro6bd48372012-01-10 15:08:07 -08001684 struct vm_area_struct *vma;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685
KOSAKI Motohiro6bd48372012-01-10 15:08:07 -08001686 vma = find_vma(mm, addr);
Mikulas Patocka83cd9042012-03-04 19:52:03 -05001687 if (vma) {
1688 *pprev = vma->vm_prev;
1689 } else {
1690 struct rb_node *rb_node = mm->mm_rb.rb_node;
1691 *pprev = NULL;
1692 while (rb_node) {
1693 *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
1694 rb_node = rb_node->rb_right;
1695 }
1696 }
KOSAKI Motohiro6bd48372012-01-10 15:08:07 -08001697 return vma;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698}
1699
1700/*
1701 * Verify that the stack growth is acceptable and
1702 * update accounting. This is shared with both the
1703 * grow-up and grow-down cases.
1704 */
ZhenwenXu48aae422009-01-06 14:40:21 -08001705static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706{
1707 struct mm_struct *mm = vma->vm_mm;
1708 struct rlimit *rlim = current->signal->rlim;
Adam Litke0d59a012007-01-30 14:35:39 -08001709 unsigned long new_start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710
1711 /* address space limit tests */
akpm@osdl.org119f6572005-05-01 08:58:35 -07001712 if (!may_expand_vm(mm, grow))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713 return -ENOMEM;
1714
1715 /* Stack limit test */
Jiri Slaby59e99e52010-03-05 13:41:44 -08001716 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717 return -ENOMEM;
1718
1719 /* mlock limit tests */
1720 if (vma->vm_flags & VM_LOCKED) {
1721 unsigned long locked;
1722 unsigned long limit;
1723 locked = mm->locked_vm + grow;
Jiri Slaby59e99e52010-03-05 13:41:44 -08001724 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
1725 limit >>= PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726 if (locked > limit && !capable(CAP_IPC_LOCK))
1727 return -ENOMEM;
1728 }
1729
Adam Litke0d59a012007-01-30 14:35:39 -08001730 /* Check to ensure the stack will not grow into a hugetlb-only region */
1731 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
1732 vma->vm_end - size;
1733 if (is_hugepage_only_range(vma->vm_mm, new_start, size))
1734 return -EFAULT;
1735
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736 /*
1737 * Overcommit.. This must be the final test, as it will
1738 * update security statistics.
1739 */
Hugh Dickins05fa1992009-04-16 21:58:12 +01001740 if (security_vm_enough_memory_mm(mm, grow))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001741 return -ENOMEM;
1742
1743 /* Ok, everything looks good - let it rip */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744 if (vma->vm_flags & VM_LOCKED)
1745 mm->locked_vm += grow;
Hugh Dickinsab50b8e2005-10-29 18:15:56 -07001746 vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747 return 0;
1748}
1749
Hugh Dickins46dea3d2005-10-29 18:16:20 -07001750#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751/*
Hugh Dickins46dea3d2005-10-29 18:16:20 -07001752 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
1753 * vma is the last one with address > vma->vm_end. Have to extend vma.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754 */
Hugh Dickins46dea3d2005-10-29 18:16:20 -07001755int expand_upwards(struct vm_area_struct *vma, unsigned long address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001756{
1757 int error;
1758
1759 if (!(vma->vm_flags & VM_GROWSUP))
1760 return -EFAULT;
1761
1762 /*
1763 * We must make sure the anon_vma is allocated
1764 * so that the anon_vma locking is not a noop.
1765 */
1766 if (unlikely(anon_vma_prepare(vma)))
1767 return -ENOMEM;
Rik van Rielbb4a3402010-08-09 17:18:37 -07001768 vma_lock_anon_vma(vma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769
1770 /*
1771 * vma->vm_start/vm_end cannot change under us because the caller
1772 * is required to hold the mmap_sem in read mode. We need the
1773 * anon_vma lock to serialize against concurrent expand_stacks.
Helge Deller06b32f32006-12-19 19:28:33 +01001774 * Also guard against wrapping around to address 0.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775 */
Helge Deller06b32f32006-12-19 19:28:33 +01001776 if (address < PAGE_ALIGN(address+4))
1777 address = PAGE_ALIGN(address+4);
1778 else {
Rik van Rielbb4a3402010-08-09 17:18:37 -07001779 vma_unlock_anon_vma(vma);
Helge Deller06b32f32006-12-19 19:28:33 +01001780 return -ENOMEM;
1781 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782 error = 0;
1783
1784 /* Somebody else might have raced and expanded it already */
1785 if (address > vma->vm_end) {
1786 unsigned long size, grow;
1787
1788 size = address - vma->vm_start;
1789 grow = (address - vma->vm_end) >> PAGE_SHIFT;
1790
Hugh Dickins42c36f62011-05-09 17:44:42 -07001791 error = -ENOMEM;
1792 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
1793 error = acct_stack_growth(vma, size, grow);
1794 if (!error) {
Michel Lespinassebf181b92012-10-08 16:31:39 -07001795 anon_vma_interval_tree_pre_update_vma(vma);
Hugh Dickins42c36f62011-05-09 17:44:42 -07001796 vma->vm_end = address;
Michel Lespinassebf181b92012-10-08 16:31:39 -07001797 anon_vma_interval_tree_post_update_vma(vma);
Hugh Dickins42c36f62011-05-09 17:44:42 -07001798 perf_event_mmap(vma);
1799 }
Eric B Munson3af9e852010-05-18 15:30:49 +01001800 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 }
Rik van Rielbb4a3402010-08-09 17:18:37 -07001802 vma_unlock_anon_vma(vma);
Andrea Arcangelib15d00b2011-01-13 15:46:59 -08001803 khugepaged_enter_vma_merge(vma);
Michel Lespinasseed8ea812012-10-08 16:31:45 -07001804 validate_mm(vma->vm_mm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805 return error;
1806}
Hugh Dickins46dea3d2005-10-29 18:16:20 -07001807#endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
1808
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809/*
1810 * vma is the first one with address < vma->vm_start. Have to extend vma.
1811 */
Michal Hockod05f3162011-05-24 17:11:44 -07001812int expand_downwards(struct vm_area_struct *vma,
Ollie Wildb6a2fea2007-07-19 01:48:16 -07001813 unsigned long address)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814{
1815 int error;
1816
1817 /*
1818 * We must make sure the anon_vma is allocated
1819 * so that the anon_vma locking is not a noop.
1820 */
1821 if (unlikely(anon_vma_prepare(vma)))
1822 return -ENOMEM;
Eric Paris88694772007-11-26 18:47:26 -05001823
1824 address &= PAGE_MASK;
Al Viroe5467852012-05-30 13:30:51 -04001825 error = security_mmap_addr(address);
Eric Paris88694772007-11-26 18:47:26 -05001826 if (error)
1827 return error;
1828
Rik van Rielbb4a3402010-08-09 17:18:37 -07001829 vma_lock_anon_vma(vma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830
1831 /*
1832 * vma->vm_start/vm_end cannot change under us because the caller
1833 * is required to hold the mmap_sem in read mode. We need the
1834 * anon_vma lock to serialize against concurrent expand_stacks.
1835 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836
1837 /* Somebody else might have raced and expanded it already */
1838 if (address < vma->vm_start) {
1839 unsigned long size, grow;
1840
1841 size = vma->vm_end - address;
1842 grow = (vma->vm_start - address) >> PAGE_SHIFT;
1843
Linus Torvaldsa626ca62011-04-13 08:07:28 -07001844 error = -ENOMEM;
1845 if (grow <= vma->vm_pgoff) {
1846 error = acct_stack_growth(vma, size, grow);
1847 if (!error) {
Michel Lespinassebf181b92012-10-08 16:31:39 -07001848 anon_vma_interval_tree_pre_update_vma(vma);
Linus Torvaldsa626ca62011-04-13 08:07:28 -07001849 vma->vm_start = address;
1850 vma->vm_pgoff -= grow;
Michel Lespinassebf181b92012-10-08 16:31:39 -07001851 anon_vma_interval_tree_post_update_vma(vma);
Linus Torvaldsa626ca62011-04-13 08:07:28 -07001852 perf_event_mmap(vma);
1853 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854 }
1855 }
Rik van Rielbb4a3402010-08-09 17:18:37 -07001856 vma_unlock_anon_vma(vma);
Andrea Arcangelib15d00b2011-01-13 15:46:59 -08001857 khugepaged_enter_vma_merge(vma);
Michel Lespinasseed8ea812012-10-08 16:31:45 -07001858 validate_mm(vma->vm_mm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859 return error;
1860}
1861
Ollie Wildb6a2fea2007-07-19 01:48:16 -07001862#ifdef CONFIG_STACK_GROWSUP
1863int expand_stack(struct vm_area_struct *vma, unsigned long address)
1864{
1865 return expand_upwards(vma, address);
1866}
1867
1868struct vm_area_struct *
1869find_extend_vma(struct mm_struct *mm, unsigned long addr)
1870{
1871 struct vm_area_struct *vma, *prev;
1872
1873 addr &= PAGE_MASK;
1874 vma = find_vma_prev(mm, addr, &prev);
1875 if (vma && (vma->vm_start <= addr))
1876 return vma;
Denys Vlasenko1c127182008-11-12 01:24:41 +01001877 if (!prev || expand_stack(prev, addr))
Ollie Wildb6a2fea2007-07-19 01:48:16 -07001878 return NULL;
Rik van Rielba470de2008-10-18 20:26:50 -07001879 if (prev->vm_flags & VM_LOCKED) {
KOSAKI Motohiroc58267c2010-03-05 13:41:43 -08001880 mlock_vma_pages_range(prev, addr, prev->vm_end);
Rik van Rielba470de2008-10-18 20:26:50 -07001881 }
Ollie Wildb6a2fea2007-07-19 01:48:16 -07001882 return prev;
1883}
1884#else
1885int expand_stack(struct vm_area_struct *vma, unsigned long address)
1886{
1887 return expand_downwards(vma, address);
1888}
1889
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890struct vm_area_struct *
1891find_extend_vma(struct mm_struct * mm, unsigned long addr)
1892{
1893 struct vm_area_struct * vma;
1894 unsigned long start;
1895
1896 addr &= PAGE_MASK;
1897 vma = find_vma(mm,addr);
1898 if (!vma)
1899 return NULL;
1900 if (vma->vm_start <= addr)
1901 return vma;
1902 if (!(vma->vm_flags & VM_GROWSDOWN))
1903 return NULL;
1904 start = vma->vm_start;
1905 if (expand_stack(vma, addr))
1906 return NULL;
Rik van Rielba470de2008-10-18 20:26:50 -07001907 if (vma->vm_flags & VM_LOCKED) {
KOSAKI Motohiroc58267c2010-03-05 13:41:43 -08001908 mlock_vma_pages_range(vma, addr, start);
Rik van Rielba470de2008-10-18 20:26:50 -07001909 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001910 return vma;
1911}
1912#endif
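/*
 * Editorial sketch, not part of the original file: the pattern used by
 * callers such as __get_user_pages(). find_extend_vma() behaves like
 * find_vma() but transparently grows a VM_GROWSDOWN (or VM_GROWSUP) vma
 * when addr falls just outside it. Caller holds mmap_sem for reading;
 * sample_fault_lookup() is a hypothetical name.
 */
static inline int sample_fault_lookup(struct mm_struct *mm,
                                      unsigned long address)
{
        struct vm_area_struct *vma = find_extend_vma(mm, address);

        return vma ? 0 : -EFAULT;       /* not mapped and not expandable */
}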
1913
Linus Torvalds1da177e2005-04-16 15:20:36 -07001914/*
Hugh Dickins2c0b3812005-10-29 18:15:56 -07001915 * Ok - we have the memory areas we should free on the vma list,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916 * so release them, and do the vma updates.
Hugh Dickins2c0b3812005-10-29 18:15:56 -07001917 *
1918 * Called with the mm semaphore held.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 */
Hugh Dickins2c0b3812005-10-29 18:15:56 -07001920static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921{
Linus Torvalds4f74d2c2012-05-06 13:54:06 -07001922 unsigned long nr_accounted = 0;
1923
Hugh Dickins365e9c872005-10-29 18:16:18 -07001924 /* Update high watermark before we lower total_vm */
1925 update_hiwater_vm(mm);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926 do {
Hugh Dickins2c0b3812005-10-29 18:15:56 -07001927 long nrpages = vma_pages(vma);
1928
Linus Torvalds4f74d2c2012-05-06 13:54:06 -07001929 if (vma->vm_flags & VM_ACCOUNT)
1930 nr_accounted += nrpages;
Hugh Dickins2c0b3812005-10-29 18:15:56 -07001931 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
Hugh Dickinsa8fb5612005-10-29 18:15:57 -07001932 vma = remove_vma(vma);
Hugh Dickins146425a2005-04-19 13:29:18 -07001933 } while (vma);
Linus Torvalds4f74d2c2012-05-06 13:54:06 -07001934 vm_unacct_memory(nr_accounted);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935 validate_mm(mm);
1936}
1937
1938/*
1939 * Get rid of page table information in the indicated region.
1940 *
Paolo 'Blaisorblade' Giarrussof10df682005-09-21 09:55:37 -07001941 * Called with the mm semaphore held.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942 */
1943static void unmap_region(struct mm_struct *mm,
Hugh Dickinse0da3822005-04-19 13:29:15 -07001944 struct vm_area_struct *vma, struct vm_area_struct *prev,
1945 unsigned long start, unsigned long end)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946{
Hugh Dickinse0da3822005-04-19 13:29:15 -07001947 struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
Peter Zijlstrad16dfc52011-05-24 17:11:45 -07001948 struct mmu_gather tlb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949
1950 lru_add_drain();
Peter Zijlstrad16dfc52011-05-24 17:11:45 -07001951 tlb_gather_mmu(&tlb, mm, 0);
Hugh Dickins365e9c872005-10-29 18:16:18 -07001952 update_hiwater_rss(mm);
Linus Torvalds4f74d2c2012-05-06 13:54:06 -07001953 unmap_vmas(&tlb, vma, start, end);
Peter Zijlstrad16dfc52011-05-24 17:11:45 -07001954 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
1955 next ? next->vm_start : 0);
1956 tlb_finish_mmu(&tlb, start, end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957}
1958
1959/*
1960 * Create a list of vmas touched by the unmap, removing them from the mm's
1961 * vma list as we go..
1962 */
1963static void
1964detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
1965 struct vm_area_struct *prev, unsigned long end)
1966{
1967 struct vm_area_struct **insertion_point;
1968 struct vm_area_struct *tail_vma = NULL;
Wolfgang Wander1363c3c2005-06-21 17:14:49 -07001969 unsigned long addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970
1971 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
Linus Torvalds297c5ee2010-08-20 16:24:55 -07001972 vma->vm_prev = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973 do {
1974 rb_erase(&vma->vm_rb, &mm->mm_rb);
1975 mm->map_count--;
1976 tail_vma = vma;
1977 vma = vma->vm_next;
1978 } while (vma && vma->vm_start < end);
1979 *insertion_point = vma;
Linus Torvalds297c5ee2010-08-20 16:24:55 -07001980 if (vma)
1981 vma->vm_prev = prev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001982 tail_vma->vm_next = NULL;
Wolfgang Wander1363c3c2005-06-21 17:14:49 -07001983 if (mm->unmap_area == arch_unmap_area)
1984 addr = prev ? prev->vm_end : mm->mmap_base;
1985 else
1986 addr = vma ? vma->vm_start : mm->mmap_base;
1987 mm->unmap_area(mm, addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988 mm->mmap_cache = NULL; /* Kill the cache. */
1989}
1990
1991/*
KOSAKI Motohiro659ace52009-12-14 17:57:56 -08001992 * __split_vma() bypasses sysctl_max_map_count checking. We use this on the
1993 * munmap path where it doesn't make sense to fail.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994 */
KOSAKI Motohiro659ace52009-12-14 17:57:56 -08001995static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996 unsigned long addr, int new_below)
1997{
1998 struct mempolicy *pol;
1999 struct vm_area_struct *new;
Rik van Riel5beb4932010-03-05 13:42:07 -08002000 int err = -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001
Andi Kleena5516432008-07-23 21:27:41 -07002002 if (is_vm_hugetlb_page(vma) && (addr &
2003 ~(huge_page_mask(hstate_vma(vma)))))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004 return -EINVAL;
2005
Christoph Lametere94b1762006-12-06 20:33:17 -08002006 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007 if (!new)
Rik van Riel5beb4932010-03-05 13:42:07 -08002008 goto out_err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002009
2010 /* most fields are the same, copy all, and then fixup */
2011 *new = *vma;
2012
Rik van Riel5beb4932010-03-05 13:42:07 -08002013 INIT_LIST_HEAD(&new->anon_vma_chain);
2014
Linus Torvalds1da177e2005-04-16 15:20:36 -07002015 if (new_below)
2016 new->vm_end = addr;
2017 else {
2018 new->vm_start = addr;
2019 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
2020 }
2021
Lee Schermerhorn846a16b2008-04-28 02:13:09 -07002022 pol = mpol_dup(vma_policy(vma));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023 if (IS_ERR(pol)) {
Rik van Riel5beb4932010-03-05 13:42:07 -08002024 err = PTR_ERR(pol);
2025 goto out_free_vma;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026 }
2027 vma_set_policy(new, pol);
2028
Rik van Riel5beb4932010-03-05 13:42:07 -08002029 if (anon_vma_clone(new, vma))
2030 goto out_free_mpol;
2031
Konstantin Khlebnikove9714ac2012-10-08 16:28:54 -07002032 if (new->vm_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033 get_file(new->vm_file);
2034
2035 if (new->vm_ops && new->vm_ops->open)
2036 new->vm_ops->open(new);
2037
2038 if (new_below)
Rik van Riel5beb4932010-03-05 13:42:07 -08002039 err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
Linus Torvalds1da177e2005-04-16 15:20:36 -07002040 ((addr - new->vm_start) >> PAGE_SHIFT), new);
2041 else
Rik van Riel5beb4932010-03-05 13:42:07 -08002042 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043
Rik van Riel5beb4932010-03-05 13:42:07 -08002044 /* Success. */
2045 if (!err)
2046 return 0;
2047
2048 /* Clean everything up if vma_adjust failed. */
Rik van Riel58927532010-04-26 12:33:03 -04002049 if (new->vm_ops && new->vm_ops->close)
2050 new->vm_ops->close(new);
Konstantin Khlebnikove9714ac2012-10-08 16:28:54 -07002051 if (new->vm_file)
Rik van Riel5beb4932010-03-05 13:42:07 -08002052 fput(new->vm_file);
Andrea Arcangeli2aeadc32010-09-22 13:05:12 -07002053 unlink_anon_vmas(new);
Rik van Riel5beb4932010-03-05 13:42:07 -08002054 out_free_mpol:
2055 mpol_put(pol);
2056 out_free_vma:
2057 kmem_cache_free(vm_area_cachep, new);
2058 out_err:
2059 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060}
2061
KOSAKI Motohiro659ace52009-12-14 17:57:56 -08002062/*
2063 * Split a vma into two pieces at address 'addr', a new vma is allocated
2064 * either for the first part or the tail.
2065 */
2066int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
2067 unsigned long addr, int new_below)
2068{
2069 if (mm->map_count >= sysctl_max_map_count)
2070 return -ENOMEM;
2071
2072 return __split_vma(mm, vma, addr, new_below);
2073}
2074
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075/* Munmap is split into 2 main parts -- this part which finds
2076 * what needs doing, and the areas themselves, which do the
2077 * work. This now handles partial unmappings.
2078 * Jeremy Fitzhardinge <jeremy@goop.org>
2079 */
2080int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
2081{
2082 unsigned long end;
Hugh Dickins146425a2005-04-19 13:29:18 -07002083 struct vm_area_struct *vma, *prev, *last;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084
2085 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
2086 return -EINVAL;
2087
2088 if ((len = PAGE_ALIGN(len)) == 0)
2089 return -EINVAL;
2090
2091 /* Find the first overlapping VMA */
Linus Torvalds9be34c92011-06-16 00:35:09 -07002092 vma = find_vma(mm, start);
Hugh Dickins146425a2005-04-19 13:29:18 -07002093 if (!vma)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094 return 0;
Linus Torvalds9be34c92011-06-16 00:35:09 -07002095 prev = vma->vm_prev;
Hugh Dickins146425a2005-04-19 13:29:18 -07002096 /* we have start < vma->vm_end */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097
2098 /* if it doesn't overlap, we have nothing.. */
2099 end = start + len;
Hugh Dickins146425a2005-04-19 13:29:18 -07002100 if (vma->vm_start >= end)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101 return 0;
2102
2103 /*
2104 * If we need to split any vma, do it now to save pain later.
2105 *
2106 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
2107 * unmapped vm_area_struct will remain in use: so lower split_vma
2108 * places tmp vma above, and higher split_vma places tmp vma below.
2109 */
Hugh Dickins146425a2005-04-19 13:29:18 -07002110 if (start > vma->vm_start) {
KOSAKI Motohiro659ace52009-12-14 17:57:56 -08002111 int error;
2112
2113 /*
2114 * Make sure that map_count on return from munmap() will
2115 * not exceed its limit; but let map_count go just above
2116 * its limit temporarily, to help free resources as expected.
2117 */
2118 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
2119 return -ENOMEM;
2120
2121 error = __split_vma(mm, vma, start, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122 if (error)
2123 return error;
Hugh Dickins146425a2005-04-19 13:29:18 -07002124 prev = vma;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125 }
2126
2127 /* Does it split the last one? */
2128 last = find_vma(mm, end);
2129 if (last && end > last->vm_start) {
KOSAKI Motohiro659ace52009-12-14 17:57:56 -08002130 int error = __split_vma(mm, last, end, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131 if (error)
2132 return error;
2133 }
Hugh Dickins146425a2005-04-19 13:29:18 -07002134 vma = prev? prev->vm_next: mm->mmap;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135
2136 /*
Rik van Rielba470de2008-10-18 20:26:50 -07002137 * unlock any mlock()ed ranges before detaching vmas
2138 */
2139 if (mm->locked_vm) {
2140 struct vm_area_struct *tmp = vma;
2141 while (tmp && tmp->vm_start < end) {
2142 if (tmp->vm_flags & VM_LOCKED) {
2143 mm->locked_vm -= vma_pages(tmp);
2144 munlock_vma_pages_all(tmp);
2145 }
2146 tmp = tmp->vm_next;
2147 }
2148 }
2149
2150 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151 * Remove the vma's, and unmap the actual pages
2152 */
Hugh Dickins146425a2005-04-19 13:29:18 -07002153 detach_vmas_to_be_unmapped(mm, vma, prev, end);
2154 unmap_region(mm, vma, prev, start, end);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155
2156 /* Fix up all other VM information */
Hugh Dickins2c0b3812005-10-29 18:15:56 -07002157 remove_vma_list(mm, vma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158
2159 return 0;
2160}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161
Al Virobfce2812012-04-20 21:57:04 -04002162int vm_munmap(unsigned long start, size_t len)
Linus Torvaldsa46ef992012-04-20 16:20:01 -07002163{
2164 int ret;
Al Virobfce2812012-04-20 21:57:04 -04002165 struct mm_struct *mm = current->mm;
Linus Torvaldsa46ef992012-04-20 16:20:01 -07002166
2167 down_write(&mm->mmap_sem);
2168 ret = do_munmap(mm, start, len);
2169 up_write(&mm->mmap_sem);
2170 return ret;
2171}
2172EXPORT_SYMBOL(vm_munmap);
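/*
 * Editorial example, not in the original file: in-kernel callers use
 * vm_munmap() rather than taking mmap_sem and calling do_munmap() by hand,
 * e.g. to drop a region previously set up with vm_mmap(). Hypothetical
 * helper; must not be called with mmap_sem already held.
 */
static inline int sample_drop_page(unsigned long addr)
{
        return vm_munmap(addr, PAGE_SIZE);      /* 0 on success, -errno on failure */
}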
2173
Heiko Carstens6a6160a2009-01-14 14:14:15 +01002174SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002175{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176 profile_munmap(addr);
Al Virobfce2812012-04-20 21:57:04 -04002177 return vm_munmap(addr, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178}
2179
2180static inline void verify_mm_writelocked(struct mm_struct *mm)
2181{
Paul E. McKenneya241ec62005-10-30 15:03:12 -08002182#ifdef CONFIG_DEBUG_VM
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183 if (unlikely(down_read_trylock(&mm->mmap_sem))) {
2184 WARN_ON(1);
2185 up_read(&mm->mmap_sem);
2186 }
2187#endif
2188}
2189
2190/*
2191 * This is really a simplified "do_mmap"; it only handles
2192 * anonymous maps. Eventually we may be able to do some
2193 * brk-specific accounting here.
2194 */
Linus Torvaldse4eb1ff2012-04-20 15:35:40 -07002195static unsigned long do_brk(unsigned long addr, unsigned long len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196{
2197 struct mm_struct * mm = current->mm;
2198 struct vm_area_struct * vma, * prev;
2199 unsigned long flags;
2200 struct rb_node ** rb_link, * rb_parent;
2201 pgoff_t pgoff = addr >> PAGE_SHIFT;
Kirill Korotaev3a459752006-09-07 14:17:04 +04002202 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203
2204 len = PAGE_ALIGN(len);
2205 if (!len)
2206 return addr;
2207
Kirill Korotaev3a459752006-09-07 14:17:04 +04002208 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
2209
Al Viro2c6a1012009-12-03 19:40:46 -05002210 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
2211 if (error & ~PAGE_MASK)
Kirill Korotaev3a459752006-09-07 14:17:04 +04002212 return error;
2213
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214 /*
2215 * mlock MCL_FUTURE?
2216 */
2217 if (mm->def_flags & VM_LOCKED) {
2218 unsigned long locked, lock_limit;
Chris Wright93ea1d02005-05-01 08:58:38 -07002219 locked = len >> PAGE_SHIFT;
2220 locked += mm->locked_vm;
Jiri Slaby59e99e52010-03-05 13:41:44 -08002221 lock_limit = rlimit(RLIMIT_MEMLOCK);
Chris Wright93ea1d02005-05-01 08:58:38 -07002222 lock_limit >>= PAGE_SHIFT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
2224 return -EAGAIN;
2225 }
2226
2227 /*
2228 * mm->mmap_sem is required to protect against another thread
2229 * changing the mappings in case we sleep.
2230 */
2231 verify_mm_writelocked(mm);
2232
2233 /*
2234 * Clear old maps. This also does some error checking for us
2235 */
2236 munmap_back:
Hugh Dickins6597d782012-10-08 16:29:07 -07002237 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238 if (do_munmap(mm, addr, len))
2239 return -ENOMEM;
2240 goto munmap_back;
2241 }
2242
2243 /* Check against address space limits *after* clearing old maps... */
akpm@osdl.org119f6572005-05-01 08:58:35 -07002244 if (!may_expand_vm(mm, len >> PAGE_SHIFT))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245 return -ENOMEM;
2246
2247 if (mm->map_count > sysctl_max_map_count)
2248 return -ENOMEM;
2249
Al Viro191c5422012-02-13 03:58:52 +00002250 if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251 return -ENOMEM;
2252
Linus Torvalds1da177e2005-04-16 15:20:36 -07002253 /* Can we just expand an old private anonymous mapping? */
Rik van Rielba470de2008-10-18 20:26:50 -07002254 vma = vma_merge(mm, prev, addr, addr + len, flags,
2255 NULL, NULL, pgoff, NULL);
2256 if (vma)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257 goto out;
2258
2259 /*
2260 * create a vma struct for an anonymous mapping
2261 */
Pekka Enbergc5e3b832006-03-25 03:06:43 -08002262 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263 if (!vma) {
2264 vm_unacct_memory(len >> PAGE_SHIFT);
2265 return -ENOMEM;
2266 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267
Rik van Riel5beb4932010-03-05 13:42:07 -08002268 INIT_LIST_HEAD(&vma->anon_vma_chain);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269 vma->vm_mm = mm;
2270 vma->vm_start = addr;
2271 vma->vm_end = addr + len;
2272 vma->vm_pgoff = pgoff;
2273 vma->vm_flags = flags;
Coly Li3ed75eb2007-10-18 23:39:15 -07002274 vma->vm_page_prot = vm_get_page_prot(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275 vma_link(mm, vma, prev, rb_link, rb_parent);
2276out:
Eric B Munson3af9e852010-05-18 15:30:49 +01002277 perf_event_mmap(vma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278 mm->total_vm += len >> PAGE_SHIFT;
2279 if (flags & VM_LOCKED) {
Rik van Rielba470de2008-10-18 20:26:50 -07002280 if (!mlock_vma_pages_range(vma, addr, addr + len))
2281 mm->locked_vm += (len >> PAGE_SHIFT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282 }
2283 return addr;
2284}
2285
Linus Torvaldse4eb1ff2012-04-20 15:35:40 -07002286unsigned long vm_brk(unsigned long addr, unsigned long len)
2287{
2288 struct mm_struct *mm = current->mm;
2289 unsigned long ret;
2290
2291 down_write(&mm->mmap_sem);
2292 ret = do_brk(addr, len);
2293 up_write(&mm->mmap_sem);
2294 return ret;
2295}
2296EXPORT_SYMBOL(vm_brk);
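/*
 * Editorial example, not part of the original file: binary loaders use
 * vm_brk() to create fixed-address anonymous mappings (e.g. for bss). Like
 * get_unmapped_area(), it reports errors in the low bits of the return
 * value. sample_map_bss() is a hypothetical helper.
 */
static inline int sample_map_bss(unsigned long start, unsigned long len)
{
        unsigned long ret = vm_brk(start, len);

        return (ret & ~PAGE_MASK) ? (int)ret : 0;
}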
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297
2298/* Release all mmaps. */
2299void exit_mmap(struct mm_struct *mm)
2300{
Peter Zijlstrad16dfc52011-05-24 17:11:45 -07002301 struct mmu_gather tlb;
Rik van Rielba470de2008-10-18 20:26:50 -07002302 struct vm_area_struct *vma;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303 unsigned long nr_accounted = 0;
2304
Jeremy Fitzhardinged6dd61c2007-05-02 19:27:14 +02002305 /* mm's last user has gone, and it's about to be pulled down */
Andrea Arcangelicddb8a52008-07-28 15:46:29 -07002306 mmu_notifier_release(mm);
Jeremy Fitzhardinged6dd61c2007-05-02 19:27:14 +02002307
Rik van Rielba470de2008-10-18 20:26:50 -07002308 if (mm->locked_vm) {
2309 vma = mm->mmap;
2310 while (vma) {
2311 if (vma->vm_flags & VM_LOCKED)
2312 munlock_vma_pages_all(vma);
2313 vma = vma->vm_next;
2314 }
2315 }
Jeremy Fitzhardinge9480c532009-02-11 13:04:41 -08002316
2317 arch_exit_mmap(mm);
2318
Rik van Rielba470de2008-10-18 20:26:50 -07002319 vma = mm->mmap;
Jeremy Fitzhardinge9480c532009-02-11 13:04:41 -08002320 if (!vma) /* Can happen if dup_mmap() received an OOM */
2321 return;
2322
Linus Torvalds1da177e2005-04-16 15:20:36 -07002323 lru_add_drain();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002324 flush_cache_mm(mm);
Peter Zijlstrad16dfc52011-05-24 17:11:45 -07002325 tlb_gather_mmu(&tlb, mm, 1);
Oleg Nesterov901608d2009-01-06 14:40:29 -08002326 /* update_hiwater_rss(mm) here? but nobody should be looking */
Hugh Dickinse0da3822005-04-19 13:29:15 -07002327 /* Use -1 here to ensure all VMAs in the mm are unmapped */
Linus Torvalds4f74d2c2012-05-06 13:54:06 -07002328 unmap_vmas(&tlb, vma, 0, -1);
Hugh Dickins9ba69292009-09-21 17:02:20 -07002329
Peter Zijlstrad16dfc52011-05-24 17:11:45 -07002330 free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
Al Viro853f5e22012-03-05 14:03:47 -05002331 tlb_finish_mmu(&tlb, 0, -1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002332
Linus Torvalds1da177e2005-04-16 15:20:36 -07002333 /*
Hugh Dickins8f4f8c12005-10-29 18:16:29 -07002334 * Walk the list again, actually closing and freeing it,
2335 * with preemption enabled, without holding any MM locks.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002336 */
Linus Torvalds4f74d2c2012-05-06 13:54:06 -07002337 while (vma) {
2338 if (vma->vm_flags & VM_ACCOUNT)
2339 nr_accounted += vma_pages(vma);
Hugh Dickinsa8fb5612005-10-29 18:15:57 -07002340 vma = remove_vma(vma);
Linus Torvalds4f74d2c2012-05-06 13:54:06 -07002341 }
2342 vm_unacct_memory(nr_accounted);
Hugh Dickinse0da3822005-04-19 13:29:15 -07002343
Hugh Dickinsf9aed622012-08-21 16:15:45 -07002344 WARN_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002345}
2346
2347/* Insert vm structure into process list sorted by address
2348 * and into the inode's i_mmap tree. If vm_file is non-NULL
Peter Zijlstra3d48ae42011-05-24 17:12:06 -07002349 * then i_mmap_mutex is taken here.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002350 */
Hugh Dickins6597d782012-10-08 16:29:07 -07002351int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002352{
Hugh Dickins6597d782012-10-08 16:29:07 -07002353 struct vm_area_struct *prev;
2354 struct rb_node **rb_link, *rb_parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002355
2356 /*
2357 * The vm_pgoff of a purely anonymous vma should be irrelevant
2358 * until its first write fault, when the page's anon_vma and index
2359 * are set. But now set the vm_pgoff it will almost certainly
2360 * end up with (unless mremap moves it elsewhere before that
2361 * first write fault), so /proc/pid/maps tells a consistent story.
2362 *
2363 * By setting it to reflect the virtual start address of the
2364 * vma, merges and splits can happen in a seamless way, just
2365 * using the existing file pgoff checks and manipulations.
2366 * Similarly in do_mmap_pgoff and in do_brk.
2367 */
2368 if (!vma->vm_file) {
2369 BUG_ON(vma->anon_vma);
2370 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
2371 }
Hugh Dickins6597d782012-10-08 16:29:07 -07002372 if (find_vma_links(mm, vma->vm_start, vma->vm_end,
2373 &prev, &rb_link, &rb_parent))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002374 return -ENOMEM;
Hugh Dickins2fd4ef82005-09-14 06:13:02 +01002375 if ((vma->vm_flags & VM_ACCOUNT) &&
Alan Cox34b4e4a2007-08-22 14:01:28 -07002376 security_vm_enough_memory_mm(mm, vma_pages(vma)))
Hugh Dickins2fd4ef82005-09-14 06:13:02 +01002377 return -ENOMEM;
Srikar Dronamraju2b144492012-02-09 14:56:42 +05302378
Linus Torvalds1da177e2005-04-16 15:20:36 -07002379 vma_link(mm, vma, prev, rb_link, rb_parent);
2380 return 0;
2381}
2382
2383/*
2384 * Copy the vma structure to a new location in the same mm,
2385 * prior to moving page table entries, to effect an mremap move.
2386 */
2387struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
Michel Lespinasse38a76012012-10-08 16:31:50 -07002388 unsigned long addr, unsigned long len, pgoff_t pgoff,
2389 bool *need_rmap_locks)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002390{
2391 struct vm_area_struct *vma = *vmap;
2392 unsigned long vma_start = vma->vm_start;
2393 struct mm_struct *mm = vma->vm_mm;
2394 struct vm_area_struct *new_vma, *prev;
2395 struct rb_node **rb_link, *rb_parent;
2396 struct mempolicy *pol;
Andrea Arcangeli948f0172012-01-10 15:08:05 -08002397 bool faulted_in_anon_vma = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002398
2399 /*
2400 * If anonymous vma has not yet been faulted, update new pgoff
2401 * to match new location, to increase its chance of merging.
2402 */
Andrea Arcangeli948f0172012-01-10 15:08:05 -08002403 if (unlikely(!vma->vm_file && !vma->anon_vma)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002404 pgoff = addr >> PAGE_SHIFT;
Andrea Arcangeli948f0172012-01-10 15:08:05 -08002405 faulted_in_anon_vma = false;
2406 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002407
Hugh Dickins6597d782012-10-08 16:29:07 -07002408 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
2409 return NULL; /* should never get here */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410 new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
2411 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
2412 if (new_vma) {
2413 /*
2414 * Source vma may have been merged into new_vma
2415 */
Andrea Arcangeli948f0172012-01-10 15:08:05 -08002416 if (unlikely(vma_start >= new_vma->vm_start &&
2417 vma_start < new_vma->vm_end)) {
2418 /*
2419 * The only way we can get a vma_merge with
2420 * self during an mremap is if the vma hasn't
2421 * been faulted in yet and we were allowed to
2422 * reset the dst vma->vm_pgoff to the
2423 * destination address of the mremap to allow
2424 * the merge to happen. mremap must change the
2425 * vm_pgoff linearity between src and dst vmas
2426 * (in turn preventing a vma_merge) to be
2427 * safe. It is only safe to keep the vm_pgoff
2428 * linear if there are no pages mapped yet.
2429 */
2430 VM_BUG_ON(faulted_in_anon_vma);
Michel Lespinasse38a76012012-10-08 16:31:50 -07002431 *vmap = vma = new_vma;
Michel Lespinasse108d6642012-10-08 16:31:36 -07002432 }
Michel Lespinasse38a76012012-10-08 16:31:50 -07002433 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434 } else {
Christoph Lametere94b1762006-12-06 20:33:17 -08002435 new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002436 if (new_vma) {
2437 *new_vma = *vma;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002438 new_vma->vm_start = addr;
2439 new_vma->vm_end = addr + len;
2440 new_vma->vm_pgoff = pgoff;
Michel Lespinasse523d4e22012-10-08 16:31:48 -07002441 pol = mpol_dup(vma_policy(vma));
2442 if (IS_ERR(pol))
2443 goto out_free_vma;
2444 vma_set_policy(new_vma, pol);
2445 INIT_LIST_HEAD(&new_vma->anon_vma_chain);
2446 if (anon_vma_clone(new_vma, vma))
2447 goto out_free_mempol;
Konstantin Khlebnikove9714ac2012-10-08 16:28:54 -07002448 if (new_vma->vm_file)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449 get_file(new_vma->vm_file);
2450 if (new_vma->vm_ops && new_vma->vm_ops->open)
2451 new_vma->vm_ops->open(new_vma);
2452 vma_link(mm, new_vma, prev, rb_link, rb_parent);
Michel Lespinasse38a76012012-10-08 16:31:50 -07002453 *need_rmap_locks = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454 }
2455 }
2456 return new_vma;
Rik van Riel5beb4932010-03-05 13:42:07 -08002457
2458 out_free_mempol:
2459 mpol_put(pol);
2460 out_free_vma:
2461 kmem_cache_free(vm_area_cachep, new_vma);
2462 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463}
akpm@osdl.org119f6572005-05-01 08:58:35 -07002464
2465/*
2466 * Return true if the calling process may expand its vm space by the passed
2467 * number of pages
2468 */
2469int may_expand_vm(struct mm_struct *mm, unsigned long npages)
2470{
2471 unsigned long cur = mm->total_vm; /* pages */
2472 unsigned long lim;
2473
Jiri Slaby59e99e52010-03-05 13:41:44 -08002474 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
akpm@osdl.org119f6572005-05-01 08:58:35 -07002475
2476 if (cur + npages > lim)
2477 return 0;
2478 return 1;
2479}
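/*
 * Editorial sketch, not in the original file: may_expand_vm() takes a page
 * count, so byte lengths must be converted first, as the callers above do
 * with len >> PAGE_SHIFT. Hypothetical helper for a byte-sized request.
 */
static inline int sample_check_expand(struct mm_struct *mm,
                                      unsigned long bytes)
{
        if (!may_expand_vm(mm, PAGE_ALIGN(bytes) >> PAGE_SHIFT))
                return -ENOMEM;
        return 0;
}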
Roland McGrathfa5dc222007-02-08 14:20:41 -08002480
2481
Nick Pigginb1d0e4f2008-02-09 01:15:19 +01002482static int special_mapping_fault(struct vm_area_struct *vma,
2483 struct vm_fault *vmf)
Roland McGrathfa5dc222007-02-08 14:20:41 -08002484{
Nick Pigginb1d0e4f2008-02-09 01:15:19 +01002485 pgoff_t pgoff;
Roland McGrathfa5dc222007-02-08 14:20:41 -08002486 struct page **pages;
2487
Nick Pigginb1d0e4f2008-02-09 01:15:19 +01002488 /*
2489 * special mappings have no vm_file, and in that case, the mm
2490 * uses vm_pgoff internally. So we have to subtract it from here.
2491 * We are allowed to do this because we are the mm; do not copy
2492 * this code into drivers!
2493 */
2494 pgoff = vmf->pgoff - vma->vm_pgoff;
Roland McGrathfa5dc222007-02-08 14:20:41 -08002495
Nick Pigginb1d0e4f2008-02-09 01:15:19 +01002496 for (pages = vma->vm_private_data; pgoff && *pages; ++pages)
2497 pgoff--;
Roland McGrathfa5dc222007-02-08 14:20:41 -08002498
2499 if (*pages) {
2500 struct page *page = *pages;
2501 get_page(page);
Nick Pigginb1d0e4f2008-02-09 01:15:19 +01002502 vmf->page = page;
2503 return 0;
Roland McGrathfa5dc222007-02-08 14:20:41 -08002504 }
2505
Nick Pigginb1d0e4f2008-02-09 01:15:19 +01002506 return VM_FAULT_SIGBUS;
Roland McGrathfa5dc222007-02-08 14:20:41 -08002507}
2508
2509/*
2510 * Having a close hook prevents vma merging regardless of flags.
2511 */
2512static void special_mapping_close(struct vm_area_struct *vma)
2513{
2514}
2515
Alexey Dobriyanf0f37e22009-09-27 22:29:37 +04002516static const struct vm_operations_struct special_mapping_vmops = {
Roland McGrathfa5dc222007-02-08 14:20:41 -08002517 .close = special_mapping_close,
Nick Pigginb1d0e4f2008-02-09 01:15:19 +01002518 .fault = special_mapping_fault,
Roland McGrathfa5dc222007-02-08 14:20:41 -08002519};
2520
2521/*
2522 * Called with mm->mmap_sem held for writing.
2523 * Insert a new vma covering the given region, with the given flags.
2524 * Its pages are supplied by the given array of struct page *.
2525 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
2526 * The region past the last page supplied will always produce SIGBUS.
2527 * The array pointer and the pages it points to are assumed to stay alive
2528 * for as long as this mapping might exist.
2529 */
2530int install_special_mapping(struct mm_struct *mm,
2531 unsigned long addr, unsigned long len,
2532 unsigned long vm_flags, struct page **pages)
2533{
Tavis Ormandy462e635e2010-12-09 15:29:42 +01002534 int ret;
Roland McGrathfa5dc222007-02-08 14:20:41 -08002535 struct vm_area_struct *vma;
2536
2537 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
2538 if (unlikely(vma == NULL))
2539 return -ENOMEM;
2540
Rik van Riel5beb4932010-03-05 13:42:07 -08002541 INIT_LIST_HEAD(&vma->anon_vma_chain);
Roland McGrathfa5dc222007-02-08 14:20:41 -08002542 vma->vm_mm = mm;
2543 vma->vm_start = addr;
2544 vma->vm_end = addr + len;
2545
Nick Piggin2f987352008-02-02 03:08:53 +01002546 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
Coly Li3ed75eb2007-10-18 23:39:15 -07002547 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
Roland McGrathfa5dc222007-02-08 14:20:41 -08002548
2549 vma->vm_ops = &special_mapping_vmops;
2550 vma->vm_private_data = pages;
2551
Tavis Ormandy462e635e2010-12-09 15:29:42 +01002552 ret = insert_vm_struct(mm, vma);
2553 if (ret)
2554 goto out;
Roland McGrathfa5dc222007-02-08 14:20:41 -08002555
2556 mm->total_vm += len >> PAGE_SHIFT;
2557
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002558 perf_event_mmap(vma);
Peter Zijlstra089dd792009-06-05 14:04:55 +02002559
Roland McGrathfa5dc222007-02-08 14:20:41 -08002560 return 0;
Tavis Ormandy462e635e2010-12-09 15:29:42 +01002561
2562out:
2563 kmem_cache_free(vm_area_cachep, vma);
2564 return ret;
Roland McGrathfa5dc222007-02-08 14:20:41 -08002565}
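/*
 * Editorial example, not part of the original file: the classic user of
 * install_special_mapping() is vDSO setup. The page array must be
 * NULL-terminated and must outlive the mapping. sample_vdso_pages and the
 * chosen address are hypothetical; pages[0] must be filled in before use.
 */
static struct page *sample_vdso_pages[2];       /* one page + NULL terminator */

static inline int sample_install_vdso(struct mm_struct *mm, unsigned long addr)
{
        return install_special_mapping(mm, addr, PAGE_SIZE,
                                       VM_READ | VM_EXEC |
                                       VM_MAYREAD | VM_MAYEXEC,
                                       sample_vdso_pages);
}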
Andrea Arcangeli7906d002008-07-28 15:46:26 -07002566
2567static DEFINE_MUTEX(mm_all_locks_mutex);
2568
Peter Zijlstra454ed842008-08-11 09:30:25 +02002569static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
Andrea Arcangeli7906d002008-07-28 15:46:26 -07002570{
Michel Lespinassebf181b92012-10-08 16:31:39 -07002571 if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) {
Andrea Arcangeli7906d002008-07-28 15:46:26 -07002572 /*
2573 * The LSB of head.next can't change from under us
2574 * because we hold the mm_all_locks_mutex.
2575 */
Peter Zijlstra2b575eb2011-05-24 17:12:11 -07002576 mutex_lock_nest_lock(&anon_vma->root->mutex, &mm->mmap_sem);
Andrea Arcangeli7906d002008-07-28 15:46:26 -07002577 /*
2578 * We can safely modify head.next after taking the
Peter Zijlstra2b575eb2011-05-24 17:12:11 -07002579 * anon_vma->root->mutex. If some other vma in this mm shares
Andrea Arcangeli7906d002008-07-28 15:46:26 -07002580 * the same anon_vma we won't take it again.
2581 *
2582 * No need of atomic instructions here, head.next
2583 * can't change from under us thanks to the
Peter Zijlstra2b575eb2011-05-24 17:12:11 -07002584 * anon_vma->root->mutex.
Andrea Arcangeli7906d002008-07-28 15:46:26 -07002585 */
2586 if (__test_and_set_bit(0, (unsigned long *)
Michel Lespinassebf181b92012-10-08 16:31:39 -07002587 &anon_vma->root->rb_root.rb_node))
Andrea Arcangeli7906d002008-07-28 15:46:26 -07002588 BUG();
2589 }
2590}
2591
Peter Zijlstra454ed842008-08-11 09:30:25 +02002592static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
Andrea Arcangeli7906d002008-07-28 15:46:26 -07002593{
2594 if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
2595 /*
2596 * AS_MM_ALL_LOCKS can't change from under us because
2597 * we hold the mm_all_locks_mutex.
2598 *
2599 * Operations on ->flags have to be atomic because
2600 * even if AS_MM_ALL_LOCKS is stable thanks to the
2601 * mm_all_locks_mutex, there may be other cpus
2602 * changing other bitflags in parallel to us.
2603 */
2604 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
2605 BUG();
Peter Zijlstra3d48ae42011-05-24 17:12:06 -07002606 mutex_lock_nest_lock(&mapping->i_mmap_mutex, &mm->mmap_sem);
Andrea Arcangeli7906d002008-07-28 15:46:26 -07002607 }
2608}
2609
2610/*
2611 * This operation locks against the VM for all pte/vma/mm related
2612 * operations that could ever happen on a certain mm. This includes
2613 * vmtruncate, try_to_unmap, and all page faults.
2614 *
2615 * The caller must take the mmap_sem in write mode before calling
2616 * mm_take_all_locks(). The caller isn't allowed to release the
2617 * mmap_sem until mm_drop_all_locks() returns.
2618 *
2619 * mmap_sem in write mode is required in order to block all operations
2620 * that could modify pagetables and free pages without need of
2621 * altering the vma layout (for example populate_range() with
2622 * nonlinear vmas). It's also needed in write mode to prevent new
2623 * anon_vmas from being associated with existing vmas.
2624 *
2625 * A single task can't take more than one mm_take_all_locks() in a row
2626 * or it would deadlock.
2627 *
Michel Lespinassebf181b92012-10-08 16:31:39 -07002628 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
Andrea Arcangeli7906d002008-07-28 15:46:26 -07002629 * mapping->flags avoid taking the same lock twice if more than one
2630 * vma in this mm is backed by the same anon_vma or address_space.
2631 *
2632 * We can take all the locks in random order because the VM code
Peter Zijlstra2b575eb2011-05-24 17:12:11 -07002633 * taking i_mmap_mutex or anon_vma->mutex outside the mmap_sem never
Andrea Arcangeli7906d002008-07-28 15:46:26 -07002634 * takes more than one of them at a time. We're also protected
2635 * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
2636 *
2637 * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
2638 * that may have to take thousands of locks.
2639 *
2640 * mm_take_all_locks() can fail if it's interrupted by signals.
2641 */
2642int mm_take_all_locks(struct mm_struct *mm)
2643{
2644 struct vm_area_struct *vma;
Rik van Riel5beb4932010-03-05 13:42:07 -08002645 struct anon_vma_chain *avc;
Andrea Arcangeli7906d002008-07-28 15:46:26 -07002646
2647 BUG_ON(down_read_trylock(&mm->mmap_sem));
2648
2649 mutex_lock(&mm_all_locks_mutex);
2650
2651 for (vma = mm->mmap; vma; vma = vma->vm_next) {
2652 if (signal_pending(current))
2653 goto out_unlock;
Andrea Arcangeli7906d002008-07-28 15:46:26 -07002654 if (vma->vm_file && vma->vm_file->f_mapping)
Peter Zijlstra454ed842008-08-11 09:30:25 +02002655 vm_lock_mapping(mm, vma->vm_file->f_mapping);
Andrea Arcangeli7906d002008-07-28 15:46:26 -07002656 }
Peter Zijlstra7cd5a022008-08-11 09:30:25 +02002657
2658 for (vma = mm->mmap; vma; vma = vma->vm_next) {
2659 if (signal_pending(current))
2660 goto out_unlock;
2661 if (vma->anon_vma)
Rik van Riel5beb4932010-03-05 13:42:07 -08002662 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
2663 vm_lock_anon_vma(mm, avc->anon_vma);
Peter Zijlstra7cd5a022008-08-11 09:30:25 +02002664 }
2665
Kautuk Consul584cff52011-10-31 17:08:59 -07002666 return 0;
Andrea Arcangeli7906d002008-07-28 15:46:26 -07002667
2668out_unlock:
Kautuk Consul584cff52011-10-31 17:08:59 -07002669 mm_drop_all_locks(mm);
2670 return -EINTR;
Andrea Arcangeli7906d002008-07-28 15:46:26 -07002671}
2672
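/*
 * Usage sketch with a hypothetical caller (modeled on how mmu_notifier
 * registration uses this interface): mmap_sem must already be held for
 * writing, the -EINTR case must be handled, and mmap_sem may only be
 * released after mm_drop_all_locks() has returned.
 */
static int example_freeze_address_space(struct mm_struct *mm)
{
	int ret;

	down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (ret)
		goto out;	/* -EINTR: interrupted by a signal */

	/* ... every vma, anon_vma and address_space is now locked ... */

	mm_drop_all_locks(mm);
out:
	up_write(&mm->mmap_sem);
	return ret;
}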
2673static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
2674{
Michel Lespinassebf181b92012-10-08 16:31:39 -07002675 if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) {
Andrea Arcangeli7906d002008-07-28 15:46:26 -07002676 /*
2677		 * The LSB of the root rb_node can't change to 0 from under
2678 * us because we hold the mm_all_locks_mutex.
2679 *
2680		 * We must however clear the bitflag before unlocking
Michel Lespinassebf181b92012-10-08 16:31:39 -07002681		 * the anon_vma so that users of the anon_vma->rb_root
Andrea Arcangeli7906d002008-07-28 15:46:26 -07002682		 * will never see our bitflag.
2683 *
2684		 * No need for atomic instructions here: the root rb_node
2685 * can't change from under us until we release the
Peter Zijlstra2b575eb2011-05-24 17:12:11 -07002686 * anon_vma->root->mutex.
Andrea Arcangeli7906d002008-07-28 15:46:26 -07002687 */
2688 if (!__test_and_clear_bit(0, (unsigned long *)
Michel Lespinassebf181b92012-10-08 16:31:39 -07002689 &anon_vma->root->rb_root.rb_node))
Andrea Arcangeli7906d002008-07-28 15:46:26 -07002690 BUG();
Rik van Rielcba48b92010-08-09 17:18:38 -07002691 anon_vma_unlock(anon_vma);
Andrea Arcangeli7906d002008-07-28 15:46:26 -07002692 }
2693}
2694
2695static void vm_unlock_mapping(struct address_space *mapping)
2696{
2697 if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
2698 /*
2699 * AS_MM_ALL_LOCKS can't change to 0 from under us
2700 * because we hold the mm_all_locks_mutex.
2701 */
Peter Zijlstra3d48ae42011-05-24 17:12:06 -07002702 mutex_unlock(&mapping->i_mmap_mutex);
Andrea Arcangeli7906d002008-07-28 15:46:26 -07002703 if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
2704 &mapping->flags))
2705 BUG();
2706 }
2707}
2708
2709/*
2710 * The mmap_sem cannot be released by the caller until
2711 * mm_drop_all_locks() returns.
2712 */
2713void mm_drop_all_locks(struct mm_struct *mm)
2714{
2715 struct vm_area_struct *vma;
Rik van Riel5beb4932010-03-05 13:42:07 -08002716 struct anon_vma_chain *avc;
Andrea Arcangeli7906d002008-07-28 15:46:26 -07002717
2718 BUG_ON(down_read_trylock(&mm->mmap_sem));
2719 BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
2720
2721 for (vma = mm->mmap; vma; vma = vma->vm_next) {
2722 if (vma->anon_vma)
Rik van Riel5beb4932010-03-05 13:42:07 -08002723 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
2724 vm_unlock_anon_vma(avc->anon_vma);
Andrea Arcangeli7906d002008-07-28 15:46:26 -07002725 if (vma->vm_file && vma->vm_file->f_mapping)
2726 vm_unlock_mapping(vma->vm_file->f_mapping);
2727 }
2728
2729 mutex_unlock(&mm_all_locks_mutex);
2730}
David Howells8feae132009-01-08 12:04:47 +00002731
2732/*
2733 * initialise the percpu counter for VM
2734 */
2735void __init mmap_init(void)
2736{
KOSAKI Motohiro00a62ce2009-04-30 15:08:51 -07002737 int ret;
2738
2739 ret = percpu_counter_init(&vm_committed_as, 0);
2740 VM_BUG_ON(ret);
David Howells8feae132009-01-08 12:04:47 +00002741}
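/*
 * Illustrative sketch, not part of mmap.c: the percpu_counter pattern
 * used for vm_committed_as above.  Updates hit a cheap per-cpu batch and
 * are only folded into the global count occasionally, so plain reads are
 * approximate.  The example_* names are hypothetical.
 */
static struct percpu_counter example_pages;

static int __init example_pages_init(void)
{
	int ret = percpu_counter_init(&example_pages, 0);

	if (ret)
		return ret;
	/* Fast path: usually touches only this cpu's batch counter. */
	percpu_counter_add(&example_pages, 1);
	/* percpu_counter_read_positive() would give an approximate total. */
	return 0;
}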