/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

#include "internal.h"

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out;
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto out;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto out;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */

	vm_write_begin(vma);
	WRITE_ONCE(vma->vm_flags, new_flags);
	vm_write_end(vma);
out:
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}
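
/*
 * Illustrative only (an editorial sketch, not an original source comment):
 * on a three-page anonymous mapping at "base", a call such as
 *
 *	madvise(base + PAGE_SIZE, PAGE_SIZE, MADV_DONTFORK);
 *
 * typically reaches madvise_behavior() above, fails to merge the new flags
 * into the neighbouring vmas, and therefore split_vma()s the region,
 * leaving three vmas with VM_DONTCOPY set only on the middle one.
 */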

#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
	unsigned long end, struct mm_walk *walk)
{
	pte_t *orig_pte;
	struct vm_area_struct *vma = walk->private;
	unsigned long index;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;

		orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
		pte = *(orig_pte + ((index - start) / PAGE_SIZE));
		pte_unmap_unlock(orig_pte, ptl);

		if (pte_present(pte) || pte_none(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
							vma, index);
		if (page)
			put_page(page);
	}

	return 0;
}

static void force_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct mm_walk walk = {
		.mm = vma->vm_mm,
		.pmd_entry = swapin_walk_pmd_entry,
		.private = vma,
	};

	walk_page_range(start, end, &walk);

	lru_add_drain();	/* Push any new pages onto the LRU now */
}

static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	pgoff_t index;
	struct page *page;
	swp_entry_t swap;

	for (; start < end; start += PAGE_SIZE) {
		index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

		page = find_get_entry(mapping, index);
		if (!radix_tree_exceptional_entry(page)) {
			if (page)
				put_page(page);
			continue;
		}
		swap = radix_to_swp_entry(page);
		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
							NULL, 0);
		if (page)
			put_page(page);
	}

	lru_add_drain();	/* Push any new pages onto the LRU now */
}
#endif		/* CONFIG_SWAP */

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

	*prev = vma;
#ifdef CONFIG_SWAP
	if (!file) {
		force_swapin_readahead(vma, start, end);
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		force_shm_swapin_readahead(vma, start, end,
						file->f_mapping);
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping, file, start, end - start);
	return 0;
}
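
/*
 * Illustrative only (an editorial sketch, not an original source comment):
 * from userspace, MADV_WILLNEED is typically used to warm up a mapping
 * before it is touched, e.g.
 *
 *	void *map = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *	madvise(map, len, MADV_WILLNEED);
 *
 * which schedules page cache readahead (or swap-in for anonymous and shmem
 * mappings when CONFIG_SWAP is enabled) without blocking on the I/O.
 */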

static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)

{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte, *pte, ptent;
	struct page *page;
	int nr_swap = 0;
	unsigned long next;

	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			goto next;

	if (pmd_trans_unstable(pmd))
		return 0;

	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;
		/*
		 * If the pte holds a swap entry, just clear the page table
		 * entry: swapping the page back in would be more expensive
		 * than (page allocation + zeroing).
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (non_swap_entry(entry))
				continue;
			nr_swap--;
			free_swap_and_cache(entry);
			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			continue;
		}

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/*
		 * If the pmd isn't transhuge but the page is THP and
		 * is owned by only this process, split it and
		 * deactivate all pages.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				goto out;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				goto out;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				pte_offset_map_lock(mm, pmd, addr, &ptl);
				goto out;
			}
			unlock_page(page);
			put_page(page);
			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (PageSwapCache(page) || PageDirty(page)) {
			if (!trylock_page(page))
				continue;
			/*
			 * If the page is shared with others, we cannot clear
			 * its PG_dirty flag.
			 */
			if (page_mapcount(page) != 1) {
				unlock_page(page);
				continue;
			}

			if (PageSwapCache(page) && !try_to_free_swap(page)) {
				unlock_page(page);
				continue;
			}

			ClearPageDirty(page);
			unlock_page(page);
		}

		if (pte_young(ptent) || pte_dirty(ptent)) {
			/*
			 * Some architectures (e.g. PPC) don't update the TLB
			 * on set_pte_at() and tlb_remove_tlb_entry(), so for
			 * portability re-install the pte as old and clean
			 * after clearing it.
			 */
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);

			ptent = pte_mkold(ptent);
			ptent = pte_mkclean(ptent);
			set_pte_at(mm, addr, pte, ptent);
			if (PageActive(page))
				deactivate_page(page);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}
	}
out:
	if (nr_swap) {
		if (current->mm == mm)
			sync_mm_rss(mm);

		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
next:
	return 0;
}

static void madvise_free_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct mm_walk free_walk = {
		.pmd_entry = madvise_free_pte_range,
		.mm = vma->vm_mm,
		.private = tlb,
	};

	vm_write_begin(vma);
	tlb_start_vma(tlb, vma);
	walk_page_range(addr, end, &free_walk);
	tlb_end_vma(tlb, vma);
	vm_write_end(vma);
}

static int madvise_free_single_vma(struct vm_area_struct *vma,
			unsigned long start_addr, unsigned long end_addr)
{
	unsigned long start, end;
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
		return -EINVAL;

	/* MADV_FREE works for only anon vma at the moment */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

	start = max(vma->vm_start, start_addr);
	if (start >= vma->vm_end)
		return -EINVAL;
	end = min(vma->vm_end, end_addr);
	if (end <= vma->vm_start)
		return -EINVAL;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start, end);
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(mm, start, end);
	madvise_free_page_range(&tlb, vma, start, end);
	mmu_notifier_invalidate_range_end(mm, start, end);
	tlb_finish_mmu(&tlb, start, end);

	return 0;
}

static long madvise_free(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	return madvise_free_single_vma(vma, start, end);
}
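
/*
 * Illustrative only (an editorial sketch, not an original source comment):
 * a userspace allocator returning memory to the system lazily might do
 *
 *	madvise(buf, len, MADV_FREE);
 *
 * The contents of buf stay valid until the kernel actually reclaims the
 * pages, and a later store to a page cancels its lazily-freeable state.
 * Unlike MADV_DONTNEED below, nothing is discarded up front.
 */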

/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do. This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	if (!can_madv_dontneed_vma(vma))
		return -EINVAL;

	zap_page_range(vma, start, end - start, NULL);
	return 0;
}

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host) {
		return -EINVAL;
	}

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	get_file(f);
	up_read(&current->mm->mmap_sem);
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	down_read(&current->mm->mmap_sem);
	return error;
}
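
/*
 * Illustrative only (an editorial sketch, not an original source comment):
 * for a writable shared mapping of a regular or tmpfs file,
 *
 *	madvise(map + off, len, MADV_REMOVE);
 *
 * has essentially the effect of fallocate(fd, FALLOC_FL_PUNCH_HOLE |
 * FALLOC_FL_KEEP_SIZE, ...) on the corresponding file range, which is
 * exactly what madvise_remove() above ends up calling.
 */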

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
{
	struct page *p;
	struct zone *zone;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	for (; start < end; start += PAGE_SIZE <<
				compound_order(compound_head(p))) {
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &p);
		if (ret != 1)
			return ret;

		if (PageHWPoison(p)) {
			put_page(p);
			continue;
		}
		if (bhv == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining page %#lx at %#lx\n",
				page_to_pfn(p), start);
			ret = soft_offline_page(p, MF_COUNT_INCREASED);
			if (ret)
				return ret;
			continue;
		}
		pr_info("Injecting memory failure for page %#lx at %#lx\n",
			page_to_pfn(p), start);
		ret = memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
		if (ret)
			return ret;
	}

	/* Ensure that all poisoned pages are removed from per-cpu lists */
	for_each_populated_zone(zone)
		drain_all_pages(zone);

	return 0;
}
#endif

static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_FREE:
		/*
		 * XXX: In this implementation, MADV_FREE works like
		 * MADV_DONTNEED on a swapless system or when swap is full.
		 */
		if (get_nr_swap_pages() > 0)
			return madvise_free(vma, prev, start, end);
		/* passthrough */
	case MADV_DONTNEED:
		return madvise_dontneed(vma, prev, start, end);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
		return true;

	default:
		return false;
	}
}

/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the application
 *		will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_HWPOISON - trigger memory error handler as if the given memory range
 *		were corrupted by unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future. Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;
	struct blk_plug plug;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_hwpoison(behavior, start, start+len_in);
#endif
	if (!madvise_behavior_valid(behavior))
		return error;

	if (start & ~PAGE_MASK)
		return error;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return error;

	end = start + len;
	if (end < start)
		return error;

	error = 0;
	if (end == start)
		return error;

	write = madvise_need_mmap_write(behavior);
	if (write) {
		if (down_write_killable(&current->mm->mmap_sem))
			return -EINTR;
	} else {
		down_read(&current->mm->mmap_sem);
	}

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	blk_start_plug(&plug);
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	blk_finish_plug(&plug);
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}
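
/*
 * Illustrative only (an editorial sketch, not an original source comment):
 * a typical userspace caller checks the return value of the syscall
 * documented above, e.g.
 *
 *	if (madvise(addr, length, MADV_DONTNEED) != 0)
 *		perror("madvise");
 *
 * where addr must be page-aligned and length is rounded up to a multiple
 * of the page size, as enforced at the top of SYSCALL_DEFINE3(madvise, ...).
 */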