/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999 Linus Torvalds
 * Copyright (C) 2002 Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/hugetlb.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/file.h>

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_NODUMP;
		break;
	case MADV_DODUMP:
		new_flags &= ~VM_NODUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out;
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma_get_anon_name(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto out;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto out;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;

out:
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

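/*
 * Illustrative userspace sketch (not part of this file): one common reason
 * an application reaches the MADV_DONTFORK case above is a buffer whose
 * pages will be pinned with get_user_pages(), e.g. for RDMA or device DMA.
 * Marking the region VM_DONTCOPY keeps a later fork() from COW-breaking the
 * pinned pages in the parent.  Buffer size and error handling below are
 * assumptions for the example only.
 *
 *	void *buf;
 *	size_t len = 1UL << 20;
 *
 *	if (posix_memalign(&buf, sysconf(_SC_PAGESIZE), len) != 0)
 *		return -1;
 *	if (madvise(buf, len, MADV_DONTFORK) != 0)
 *		return -1;
 *	... register buf with the device, then fork() worker processes ...
 */
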
/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

	if (!file)
		return -EBADF;

	if (file->f_mapping->a_ops->get_xip_mem) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	*prev = vma;
	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping, file, start, end - start);
	return 0;
}

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do. This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
		return -EINVAL;

	if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
		struct zap_details details = {
			.nonlinear_vma = vma,
			.last_index = ULONG_MAX,
		};
		zap_page_range(vma, start, end - start, &details);
	} else
		zap_page_range(vma, start, end - start, NULL);
	return 0;
}

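/*
 * Illustrative userspace sketch (not part of this file): the pattern the
 * comment above describes, i.e. a process discarding a scratch region it no
 * longer needs rather than letting dirty pages drift out to swap.  For an
 * anonymous private mapping, touching the range after the call yields fresh
 * zero-filled pages.  The arena size is an assumption for the example only.
 *
 *	size_t len = 64UL << 20;
 *	char *arena = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	if (arena == MAP_FAILED)
 *		return -1;
 *	... fill and use the arena, commit results elsewhere ...
 *	if (madvise(arena, len, MADV_DONTNEED) != 0)
 *		return -1;
 *	assert(arena[0] == 0);	... pages now read back as zeroes ...
 */
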
/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 *
 * NOTE: Currently, only shmfs/tmpfs is supported for this operation.
 * Other filesystems return -ENOSYS.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	struct address_space *mapping;
	loff_t offset, endoff;
	int error;
	struct file *f;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host) {
		return -EINVAL;
	}

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	mapping = vma->vm_file->f_mapping;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	endoff = (loff_t)(end - vma->vm_start - 1)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * vmtruncate_range may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	get_file(f);
	up_read(&current->mm->mmap_sem);
	error = vmtruncate_range(mapping->host, offset, endoff);
	fput(f);
	down_read(&current->mm->mmap_sem);
	return error;
}

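/*
 * Illustrative userspace sketch (not part of this file): punching a hole in
 * the middle of a tmpfs file through a shared, writable mapping, which is
 * the only configuration madvise_remove() accepts above.  The path and
 * sizes are assumptions for the example only.
 *
 *	size_t len = 16UL << 20;
 *	int fd = open("/dev/shm/scratch", O_RDWR | O_CREAT, 0600);
 *
 *	if (fd < 0 || ftruncate(fd, len) != 0)
 *		return -1;
 *	char *map = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	if (map == MAP_FAILED)
 *		return -1;
 *	... write data, then drop the middle 4 MB and its backing store ...
 *	if (madvise(map + (4UL << 20), 4UL << 20, MADV_REMOVE) != 0)
 *		return -1;
 */
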
#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
{
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	for (; start < end; start += PAGE_SIZE) {
		struct page *p;
		int ret = get_user_pages_fast(start, 1, 0, &p);
		if (ret != 1)
			return ret;
		if (bhv == MADV_SOFT_OFFLINE) {
			printk(KERN_INFO "Soft offlining page %lx at %lx\n",
				page_to_pfn(p), start);
			ret = soft_offline_page(p, MF_COUNT_INCREASED);
			if (ret)
				break;
			continue;
		}
		printk(KERN_INFO "Injecting memory failure for page %lx at %lx\n",
		       page_to_pfn(p), start);
		/* Ignore return value for now */
		memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
	}
	return ret;
}
#endif

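/*
 * Illustrative userspace sketch (not part of this file): MADV_HWPOISON and
 * MADV_SOFT_OFFLINE are error-injection hooks for exercising the
 * memory-failure paths, gated on CAP_SYS_ADMIN above; they are test tooling,
 * not ordinary application advice.  A privileged test harness might poison
 * one of its own pages roughly like this (the setup details are assumptions
 * for the example only):
 *
 *	long psz = sysconf(_SC_PAGESIZE);
 *	char *page = mmap(NULL, psz, PROT_READ | PROT_WRITE,
 *			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	if (page == MAP_FAILED)
 *		return -1;
 *	page[0] = 1;
 *	if (madvise(page, psz, MADV_HWPOISON) != 0)
 *		return -1;
 *	... the process typically receives SIGBUS on further access ...
 */
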
static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_DONTNEED:
		return madvise_dontneed(vma, prev, start, end);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

static int
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
		return 1;

	default:
		return 0;
	}
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the appli-
 *		cation will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_hwpoison(behavior, start, start+len_in);
#endif
	if (!madvise_behavior_valid(behavior))
		return error;

	write = madvise_need_mmap_write(behavior);
	if (write)
		down_write(&current->mm->mmap_sem);
	else
		down_read(&current->mm->mmap_sem);

	if (start & ~PAGE_MASK)
		goto out;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		goto out;

	end = start + len;
	if (end < start)
		goto out;

	error = 0;
	if (end == start)
		goto out;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}
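
/*
 * Illustrative userspace sketch (not part of this file), tying the advisory
 * values documented above together: a reader maps a large file, declares a
 * sequential access pattern, asks for readahead on the first chunk, and
 * releases its mapping of the pages once the scan is done.  The path and
 * chunk size are assumptions for the example only; all three calls are
 * advice, and munmap()/close() are still required as usual.
 *
 *	struct stat st;
 *	int fd = open("/var/log/big.log", O_RDONLY);
 *
 *	if (fd < 0 || fstat(fd, &st) != 0)
 *		return -1;
 *	char *map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
 *	if (map == MAP_FAILED)
 *		return -1;
 *	madvise(map, st.st_size, MADV_SEQUENTIAL);
 *	madvise(map, 1UL << 20, MADV_WILLNEED);
 *	... scan the mapping from start to end ...
 *	madvise(map, st.st_size, MADV_DONTNEED);
 */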