// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

#include "internal.h"

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_WIPEONFORK:
		/* MADV_WIPEONFORK is only supported on anonymous memory. */
		if (vma->vm_file || vma->vm_flags & VM_SHARED) {
			error = -EINVAL;
			goto out;
		}
		new_flags |= VM_WIPEONFORK;
		break;
	case MADV_KEEPONFORK:
		new_flags &= ~VM_WIPEONFORK;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, start, 1);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
	}

	if (end != vma->vm_end) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, end, 0);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vm_write_begin(vma);
	WRITE_ONCE(vma->vm_flags, new_flags);
	vm_write_end(vma);
out:
	return error;
}
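
/*
 * Illustrative userspace sketch (not part of this file): how the
 * fork-related hints handled above might be used. A process keeping
 * secrets or PRNG state in an anonymous mapping can ask that children
 * see the region zero-filled instead of a copy (the MADV_WIPEONFORK
 * case above). The buffer name and size are assumptions for the
 * example, and MADV_WIPEONFORK needs kernel/libc header support.
 *
 *	#include <sys/mman.h>
 *
 *	void *state = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	// After fork(), the child reads zeroes here, not the parent's data.
 *	madvise(state, 4096, MADV_WIPEONFORK);
 */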

#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
	unsigned long end, struct mm_walk *walk)
{
	pte_t *orig_pte;
	struct vm_area_struct *vma = walk->private;
	unsigned long index;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;

		orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
		pte = *(orig_pte + ((index - start) / PAGE_SIZE));
		pte_unmap_unlock(orig_pte, ptl);

		if (pte_present(pte) || pte_none(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
					     vma, index, false);
		if (page)
			put_page(page);
	}

	return 0;
}

static void force_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct mm_walk walk = {
		.mm = vma->vm_mm,
		.pmd_entry = swapin_walk_pmd_entry,
		.private = vma,
	};

	walk_page_range(start, end, &walk);

	lru_add_drain();	/* Push any new pages onto the LRU now */
}

static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	pgoff_t index;
	struct page *page;
	swp_entry_t swap;

	for (; start < end; start += PAGE_SIZE) {
		index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

		page = find_get_entry(mapping, index);
		if (!radix_tree_exceptional_entry(page)) {
			if (page)
				put_page(page);
			continue;
		}
		swap = radix_to_swp_entry(page);
		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
					     NULL, 0, false);
		if (page)
			put_page(page);
	}

	lru_add_drain();	/* Push any new pages onto the LRU now */
}
#endif	/* CONFIG_SWAP */
271
Linus Torvalds1da177e2005-04-16 15:20:36 -0700272/*
273 * Schedule all required I/O operations. Do not wait for completion.
274 */
Vladimir Cernovec9bed92013-09-11 14:20:15 -0700275static long madvise_willneed(struct vm_area_struct *vma,
276 struct vm_area_struct **prev,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700277 unsigned long start, unsigned long end)
278{
279 struct file *file = vma->vm_file;
280
chenjie6ea8d952017-11-29 16:10:54 -0800281 *prev = vma;
Shaohua Li1998cc02013-02-22 16:32:31 -0800282#ifdef CONFIG_SWAP
Christoph Hellwig97b713b2015-01-14 10:42:31 +0100283 if (!file) {
Christoph Hellwig97b713b2015-01-14 10:42:31 +0100284 force_swapin_readahead(vma, start, end);
Shaohua Li1998cc02013-02-22 16:32:31 -0800285 return 0;
286 }
Shaohua Li1998cc02013-02-22 16:32:31 -0800287
Christoph Hellwig97b713b2015-01-14 10:42:31 +0100288 if (shmem_mapping(file->f_mapping)) {
Christoph Hellwig97b713b2015-01-14 10:42:31 +0100289 force_shm_swapin_readahead(vma, start, end,
290 file->f_mapping);
291 return 0;
292 }
293#else
Suzuki1bef4002005-10-11 08:29:06 -0700294 if (!file)
295 return -EBADF;
Christoph Hellwig97b713b2015-01-14 10:42:31 +0100296#endif
Suzuki1bef4002005-10-11 08:29:06 -0700297
Matthew Wilcoxe748dcd2015-02-16 15:59:12 -0800298 if (IS_DAX(file_inode(file))) {
Carsten Ottefe77ba62005-06-23 22:05:29 -0700299 /* no bad return value, but ignore advice */
300 return 0;
301 }
302
Linus Torvalds1da177e2005-04-16 15:20:36 -0700303 start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
304 if (end > vma->vm_end)
305 end = vma->vm_end;
306 end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
307
Wu Fengguangf7e839d2009-06-16 15:31:20 -0700308 force_page_cache_readahead(file->f_mapping, file, start, end - start);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700309 return 0;
310}
311
Minchan Kim854e9ed2016-01-15 16:54:53 -0800312static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
313 unsigned long end, struct mm_walk *walk)
314
315{
316 struct mmu_gather *tlb = walk->private;
317 struct mm_struct *mm = tlb->mm;
318 struct vm_area_struct *vma = walk->vma;
319 spinlock_t *ptl;
320 pte_t *orig_pte, *pte, ptent;
321 struct page *page;
Minchan Kim64b42bc2016-01-15 16:55:06 -0800322 int nr_swap = 0;
Minchan Kimb8d3c4c2016-01-15 16:55:42 -0800323 unsigned long next;
Minchan Kim854e9ed2016-01-15 16:54:53 -0800324
Minchan Kimb8d3c4c2016-01-15 16:55:42 -0800325 next = pmd_addr_end(addr, end);
326 if (pmd_trans_huge(*pmd))
327 if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
328 goto next;
329
Minchan Kim854e9ed2016-01-15 16:54:53 -0800330 if (pmd_trans_unstable(pmd))
331 return 0;
332
Aneesh Kumar K.V07e32662016-12-12 16:42:40 -0800333 tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
Minchan Kim854e9ed2016-01-15 16:54:53 -0800334 orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
Mel Gorman3ea27712017-08-02 13:31:52 -0700335 flush_tlb_batched_pending(mm);
Minchan Kim854e9ed2016-01-15 16:54:53 -0800336 arch_enter_lazy_mmu_mode();
337 for (; addr != end; pte++, addr += PAGE_SIZE) {
338 ptent = *pte;
339
Minchan Kim64b42bc2016-01-15 16:55:06 -0800340 if (pte_none(ptent))
Minchan Kim854e9ed2016-01-15 16:54:53 -0800341 continue;
Minchan Kim64b42bc2016-01-15 16:55:06 -0800342 /*
343 * If the pte has swp_entry, just clear page table to
344 * prevent swap-in which is more expensive rather than
345 * (page allocation + zeroing).
346 */
347 if (!pte_present(ptent)) {
348 swp_entry_t entry;
349
350 entry = pte_to_swp_entry(ptent);
351 if (non_swap_entry(entry))
352 continue;
353 nr_swap--;
354 free_swap_and_cache(entry);
355 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
356 continue;
357 }
Minchan Kim854e9ed2016-01-15 16:54:53 -0800358
Jérôme Glissedf6ad692017-09-08 16:12:24 -0700359 page = _vm_normal_page(vma, addr, ptent, true);
Minchan Kim854e9ed2016-01-15 16:54:53 -0800360 if (!page)
361 continue;
362
363 /*
364 * If pmd isn't transhuge but the page is THP and
365 * is owned by only this process, split it and
366 * deactivate all pages.
367 */
368 if (PageTransCompound(page)) {
369 if (page_mapcount(page) != 1)
370 goto out;
371 get_page(page);
372 if (!trylock_page(page)) {
373 put_page(page);
374 goto out;
375 }
376 pte_unmap_unlock(orig_pte, ptl);
377 if (split_huge_page(page)) {
378 unlock_page(page);
379 put_page(page);
380 pte_offset_map_lock(mm, pmd, addr, &ptl);
381 goto out;
382 }
Minchan Kim854e9ed2016-01-15 16:54:53 -0800383 unlock_page(page);
Eric Biggers263630e2017-08-25 15:55:39 -0700384 put_page(page);
Minchan Kim854e9ed2016-01-15 16:54:53 -0800385 pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
386 pte--;
387 addr -= PAGE_SIZE;
388 continue;
389 }
390
391 VM_BUG_ON_PAGE(PageTransCompound(page), page);
392
393 if (PageSwapCache(page) || PageDirty(page)) {
394 if (!trylock_page(page))
395 continue;
396 /*
397 * If page is shared with others, we couldn't clear
398 * PG_dirty of the page.
399 */
400 if (page_mapcount(page) != 1) {
401 unlock_page(page);
402 continue;
403 }
404
405 if (PageSwapCache(page) && !try_to_free_swap(page)) {
406 unlock_page(page);
407 continue;
408 }
409
410 ClearPageDirty(page);
411 unlock_page(page);
412 }
413
414 if (pte_young(ptent) || pte_dirty(ptent)) {
415 /*
416 * Some of architecture(ex, PPC) don't update TLB
417 * with set_pte_at and tlb_remove_tlb_entry so for
418 * the portability, remap the pte with old|clean
419 * after pte clearing.
420 */
421 ptent = ptep_get_and_clear_full(mm, addr, pte,
422 tlb->fullmm);
423
424 ptent = pte_mkold(ptent);
425 ptent = pte_mkclean(ptent);
426 set_pte_at(mm, addr, pte, ptent);
427 tlb_remove_tlb_entry(tlb, pte, addr);
428 }
Shaohua Li802a3a92017-05-03 14:52:32 -0700429 mark_page_lazyfree(page);
Minchan Kim854e9ed2016-01-15 16:54:53 -0800430 }
431out:
Minchan Kim64b42bc2016-01-15 16:55:06 -0800432 if (nr_swap) {
433 if (current->mm == mm)
434 sync_mm_rss(mm);
435
436 add_mm_counter(mm, MM_SWAPENTS, nr_swap);
437 }
Minchan Kim854e9ed2016-01-15 16:54:53 -0800438 arch_leave_lazy_mmu_mode();
439 pte_unmap_unlock(orig_pte, ptl);
440 cond_resched();
Minchan Kimb8d3c4c2016-01-15 16:55:42 -0800441next:
Minchan Kim854e9ed2016-01-15 16:54:53 -0800442 return 0;
443}
444
445static void madvise_free_page_range(struct mmu_gather *tlb,
446 struct vm_area_struct *vma,
447 unsigned long addr, unsigned long end)
448{
449 struct mm_walk free_walk = {
450 .pmd_entry = madvise_free_pte_range,
451 .mm = vma->vm_mm,
452 .private = tlb,
453 };
454
Laurent Dufour3cfc37d2018-04-17 16:33:15 +0200455 vm_write_begin(vma);
Minchan Kim854e9ed2016-01-15 16:54:53 -0800456 tlb_start_vma(tlb, vma);
457 walk_page_range(addr, end, &free_walk);
458 tlb_end_vma(tlb, vma);
Laurent Dufour3cfc37d2018-04-17 16:33:15 +0200459 vm_write_end(vma);
Minchan Kim854e9ed2016-01-15 16:54:53 -0800460}
461
462static int madvise_free_single_vma(struct vm_area_struct *vma,
463 unsigned long start_addr, unsigned long end_addr)
464{
465 unsigned long start, end;
466 struct mm_struct *mm = vma->vm_mm;
467 struct mmu_gather tlb;
468
Minchan Kim854e9ed2016-01-15 16:54:53 -0800469 /* MADV_FREE works for only anon vma at the moment */
470 if (!vma_is_anonymous(vma))
471 return -EINVAL;
472
473 start = max(vma->vm_start, start_addr);
474 if (start >= vma->vm_end)
475 return -EINVAL;
476 end = min(vma->vm_end, end_addr);
477 if (end <= vma->vm_start)
478 return -EINVAL;
479
480 lru_add_drain();
481 tlb_gather_mmu(&tlb, mm, start, end);
482 update_hiwater_rss(mm);
483
484 mmu_notifier_invalidate_range_start(mm, start, end);
485 madvise_free_page_range(&tlb, vma, start, end);
486 mmu_notifier_invalidate_range_end(mm, start, end);
487 tlb_finish_mmu(&tlb, start, end);
488
489 return 0;
490}
491
Linus Torvalds1da177e2005-04-16 15:20:36 -0700492/*
493 * Application no longer needs these pages. If the pages are dirty,
494 * it's OK to just throw them away. The app will be more careful about
495 * data it wants to keep. Be sure to free swap resources too. The
Fernando Luis Vazquez Cao7e6cbea2008-07-29 22:33:39 -0700496 * zap_page_range call sets things up for shrink_active_list to actually free
Linus Torvalds1da177e2005-04-16 15:20:36 -0700497 * these pages later if no one else has touched them in the meantime,
498 * although we could add these pages to a global reuse list for
Fernando Luis Vazquez Cao7e6cbea2008-07-29 22:33:39 -0700499 * shrink_active_list to pick up before reclaiming other pages.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700500 *
501 * NB: This interface discards data rather than pushes it out to swap,
502 * as some implementations do. This has performance implications for
503 * applications like large transactional databases which want to discard
504 * pages in anonymous maps after committing to backing store the data
505 * that was kept in them. There is no reason to write this data out to
506 * the swap area if the application is discarding it.
507 *
508 * An interface that causes the system to free clean pages and flush
509 * dirty pages is already available as msync(MS_INVALIDATE).
510 */
Mike Rapoport230ca982017-07-10 15:49:02 -0700511static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
512 unsigned long start, unsigned long end)
513{
514 zap_page_range(vma, start, end - start);
515 return 0;
516}
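
/*
 * Illustrative userspace sketch (not part of this file): a user-level
 * allocator returning memory to the kernel is the classic caller of
 * the two paths above. MADV_FREE is the lazy variant: pages are only
 * reclaimed under memory pressure, and the data stays readable until
 * then. The chunk pointer and length are assumptions for the example.
 *
 *	// Drop the contents now; the next touch maps fresh zero pages.
 *	madvise(chunk, chunk_len, MADV_DONTNEED);
 *
 *	// Or mark lazily freeable; cheaper when the chunk may be reused.
 *	madvise(chunk, chunk_len, MADV_FREE);
 */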

static long madvise_dontneed_free(struct vm_area_struct *vma,
				  struct vm_area_struct **prev,
				  unsigned long start, unsigned long end,
				  int behavior)
{
	*prev = vma;
	if (!can_madv_dontneed_vma(vma))
		return -EINVAL;

	if (!userfaultfd_remove(vma, start, end)) {
		*prev = NULL; /* mmap_sem has been dropped, prev is stale */

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, start);
		if (!vma)
			return -ENOMEM;
		if (start < vma->vm_start) {
			/*
			 * This "vma" under revalidation is the one
			 * with the lowest vma->vm_start where start
			 * is also < vma->vm_end. If start <
			 * vma->vm_start it means a hole materialized
			 * in the user address space within the
			 * virtual range passed to MADV_DONTNEED
			 * or MADV_FREE.
			 */
			return -ENOMEM;
		}
		if (!can_madv_dontneed_vma(vma))
			return -EINVAL;
		if (end > vma->vm_end) {
			/*
			 * Don't fail if end > vma->vm_end. If the old
			 * vma was split while the mmap_sem was
			 * released, the concurrent operation must not
			 * leave madvise() with an undefined result.
			 * There may be an adjacent next vma that we'll
			 * walk next. userfaultfd_remove() will
			 * generate an UFFD_EVENT_REMOVE repetition on
			 * the end-vma->vm_end range, but the manager
			 * can handle a repetition fine.
			 */
			end = vma->vm_end;
		}
		VM_WARN_ON(start >= end);
	}

	if (behavior == MADV_DONTNEED)
		return madvise_dontneed_single_vma(vma, start, end);
	else if (behavior == MADV_FREE)
		return madvise_free_single_vma(vma, start, end);
	else
		return -EINVAL;
}

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host)
		return -EINVAL;

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	get_file(f);
	if (userfaultfd_remove(vma, start, end)) {
		/* mmap_sem was not released by userfaultfd_remove() */
		up_read(&current->mm->mmap_sem);
	}
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	down_read(&current->mm->mmap_sem);
	return error;
}
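
/*
 * Illustrative userspace sketch (not part of this file): as the call to
 * vfs_fallocate() above shows, MADV_REMOVE on a shared, writable file
 * mapping is serviced by the same hole-punching path as fallocate(2),
 * so the two calls below have roughly the same effect for a region
 * mapped MAP_SHARED from fd at file offset off. The variable names are
 * assumptions for the example.
 *
 *	madvise(addr, len, MADV_REMOVE);
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  off, len);
 */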

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_inject_error(int behavior,
		unsigned long start, unsigned long end)
{
	struct page *page;
	struct zone *zone;
	unsigned int order;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	for (; start < end; start += PAGE_SIZE << order) {
		unsigned long pfn;
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &page);
		if (ret != 1)
			return ret;
		pfn = page_to_pfn(page);

		/*
		 * When soft offlining hugepages, after migrating the page
		 * we dissolve it, therefore in the second loop "page" will
		 * no longer be a compound page, and order will be 0.
		 */
		order = compound_order(compound_head(page));

		if (PageHWPoison(page)) {
			put_page(page);
			continue;
		}

		if (behavior == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
					pfn, start);

			ret = soft_offline_page(page, MF_COUNT_INCREASED);
			if (ret)
				return ret;
			continue;
		}

		pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
				pfn, start);

		/*
		 * Drop the page reference taken by get_user_pages_fast(). In
		 * the absence of MF_COUNT_INCREASED the memory_failure()
		 * routine is responsible for pinning the page to prevent it
		 * from being released back to the page allocator.
		 */
		put_page(page);
		ret = memory_failure(pfn, 0);
		if (ret)
			return ret;
	}

	/* Ensure that all poisoned pages are removed from per-cpu lists */
	for_each_populated_zone(zone)
		drain_all_pages(zone);

	return 0;
}
#endif

static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_FREE:
	case MADV_DONTNEED:
		return madvise_dontneed_free(vma, prev, start, end, behavior);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
	case MADV_WIPEONFORK:
	case MADV_KEEPONFORK:
#ifdef CONFIG_MEMORY_FAILURE
	case MADV_SOFT_OFFLINE:
	case MADV_HWPOISON:
#endif
		return true;

	default:
		return false;
	}
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the
 *		application will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from the child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_WIPEONFORK - present the child process with zero-filled memory in this
 *		range after a fork.
 *  MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK.
 *  MADV_HWPOISON - trigger the memory error handler as if the given memory
 *		range were corrupted by unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future. Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, the application
 *		is attempting to release locked or shared pages,
 *		or the specified address range includes file, Huge TLB,
 *		MAP_SHARED or VM_PFNMAP range.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;
	struct blk_plug plug;

	start = untagged_addr(start);

	if (!madvise_behavior_valid(behavior))
		return error;

	if (start & ~PAGE_MASK)
		return error;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return error;

	end = start + len;
	if (end < start)
		return error;

	error = 0;
	if (end == start)
		return error;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_inject_error(behavior, start, start + len_in);
#endif

	write = madvise_need_mmap_write(behavior);
	if (write) {
		if (down_write_killable(&current->mm->mmap_sem))
			return -EINTR;
	} else {
		down_read(&current->mm->mmap_sem);
	}

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	blk_start_plug(&plug);
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	blk_finish_plug(&plug);
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}
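
/*
 * Illustrative userspace sketch (not part of this file): a complete
 * caller exercising the argument rules documented above: start must be
 * page-aligned, and len is rounded up to a page multiple by the
 * kernel. The mapping size is an assumption for the example.
 *
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 1 << 20;
 *		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		if (p == MAP_FAILED)
 *			return 1;
 *		// Hint sequential access, then discard the contents.
 *		if (madvise(p, len, MADV_SEQUENTIAL))
 *			perror("madvise");
 *		if (madvise(p, len, MADV_DONTNEED))
 *			perror("madvise");
 *		return munmap(p, len);
 *	}
 */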