#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/hugetlb.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @page_mask: on output, *page_mask is set according to the size of the page
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	*page_mask = 0;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		goto out;
	}

	page = NULL;
	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto no_page_table;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		goto no_page_table;
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		if (flags & FOLL_GET)
			goto out;
		page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
		goto out;
	}
	if (unlikely(pud_bad(*pud)))
		goto no_page_table;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		goto no_page_table;
	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
		if (flags & FOLL_GET) {
			/*
			 * Refcounts on tail pages are not well-defined and
			 * shouldn't be taken. The caller should handle a NULL
			 * return when trying to follow tail pages.
			 */
			if (PageHead(page))
				get_page(page);
			else {
				page = NULL;
				goto out;
			}
		}
		goto out;
	}
	if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
		goto no_page_table;
	if (pmd_trans_huge(*pmd)) {
		if (flags & FOLL_SPLIT) {
			split_huge_page_pmd(vma, address, pmd);
			goto split_fallthrough;
		}
		ptl = pmd_lock(mm, pmd);
		if (likely(pmd_trans_huge(*pmd))) {
			if (unlikely(pmd_trans_splitting(*pmd))) {
				spin_unlock(ptl);
				wait_split_huge_page(vma->anon_vma, pmd);
			} else {
				page = follow_trans_huge_pmd(vma, address,
							     pmd, flags);
				spin_unlock(ptl);
				*page_mask = HPAGE_PMD_NR - 1;
				goto out;
			}
		} else
			spin_unlock(ptl);
		/* fall through */
	}
split_fallthrough:
	if (unlikely(pmd_bad(*pmd)))
		goto no_page_table;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);

	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte) || pte_file(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto split_fallthrough;
	}
	if ((flags & FOLL_NUMA) && pte_numa(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !pte_write(pte))
		goto unlock;

	page = vm_normal_page(vma, address, pte);
	if (unlikely(!page)) {
		if ((flags & FOLL_DUMP) ||
		    !is_zero_pfn(pte_pfn(pte)))
			goto bad_page;
		page = pte_page(pte);
	}

	if (flags & FOLL_GET)
		get_page_foll(page);
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();	/* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return page;

bad_page:
	pte_unmap_unlock(ptep, ptl);
	return ERR_PTR(-EFAULT);

no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return page;

no_page_table:
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) &&
	    (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return page;
}
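
/*
 * A minimal usage sketch, assuming the caller already holds mmap_sem for
 * read and has looked up @vma for @addr (the surrounding function is
 * hypothetical, not part of this file):
 *
 *	unsigned int page_mask;
 *	struct page *page;
 *
 *	page = follow_page_mask(vma, addr, FOLL_GET, &page_mask);
 *	if (IS_ERR_OR_NULL(page))
 *		return page ? PTR_ERR(page) : -EFAULT;
 *	...use the page; a nonzero page_mask means @addr landed in a huge
 *	page spanning page_mask + 1 base pages...
 *	put_page(page);
 *
 * Callers that do not care about huge page geometry can use the
 * follow_page() wrapper from <linux/mm.h>, which discards the mask.
 */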

static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
{
	return stack_guard_page_start(vma, addr) ||
	       stack_guard_page_end(vma, addr+PAGE_SIZE);
}

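/*
 * Look up the gate page (e.g. the x86 vsyscall page) for @address.  Gate
 * pages sit outside the normal VMA tree, so follow_page_mask() cannot
 * resolve them; instead we walk the kernel (or gate) page tables directly.
 * On success returns 0 with *@vma set and, if @page is non-NULL, *@page
 * set with a reference held; otherwise returns -EFAULT.
 */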
static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);
	}
	get_page(*page);
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of target task
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (e.g. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 * or mmap_sem contention, and if waiting is needed to pin all pages,
 * *@nonblocking will be set to 0.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *nonblocking)
{
	long i;
	unsigned long vm_flags;
	unsigned int page_mask;

	if (!nr_pages)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * If FOLL_FORCE is set then do not force a full fault as the hinting
	 * fault information is unrelated to the reference behaviour of a task
	 * using the address space
	 */
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;

	i = 0;

	do {
		struct vm_area_struct *vma;

		vma = find_extend_vma(mm, start);
		if (!vma && in_gate_area(mm, start)) {
			int ret;
			ret = get_gate_page(mm, start & PAGE_MASK, gup_flags,
					&vma, pages ? &pages[i] : NULL);
			if (ret)
				goto efault;
			page_mask = 0;
			goto next_page;
		}

		if (!vma)
			goto efault;
		vm_flags = vma->vm_flags;
		if (vm_flags & (VM_IO | VM_PFNMAP))
			goto efault;

		if (gup_flags & FOLL_WRITE) {
			if (!(vm_flags & VM_WRITE)) {
				if (!(gup_flags & FOLL_FORCE))
					goto efault;
				/*
				 * We used to let the write,force case do COW
				 * in a VM_MAYWRITE VM_SHARED !VM_WRITE vma, so
				 * ptrace could set a breakpoint in a read-only
				 * mapping of an executable, without corrupting
				 * the file (yet only when that file had been
				 * opened for writing!). Anon pages in shared
				 * mappings are surprising: now just reject it.
				 */
				if (!is_cow_mapping(vm_flags)) {
					WARN_ON_ONCE(vm_flags & VM_MAYWRITE);
					goto efault;
				}
			}
		} else {
			if (!(vm_flags & VM_READ)) {
				if (!(gup_flags & FOLL_FORCE))
					goto efault;
				/*
				 * Is there actually any vma we can reach here
				 * which does not have VM_MAYREAD set?
				 */
				if (!(vm_flags & VM_MAYREAD))
					goto efault;
			}
		}

		if (is_vm_hugetlb_page(vma)) {
			i = follow_hugetlb_page(mm, vma, pages, vmas,
					&start, &nr_pages, i, gup_flags);
			continue;
		}

		do {
			struct page *page;
			unsigned int foll_flags = gup_flags;
			unsigned int page_increm;

			/*
			 * If we have a pending SIGKILL, don't keep faulting
			 * pages and potentially allocating memory.
			 */
			if (unlikely(fatal_signal_pending(current)))
				return i ? i : -ERESTARTSYS;

			cond_resched();
			while (!(page = follow_page_mask(vma, start,
						foll_flags, &page_mask))) {
				int ret;
				unsigned int fault_flags = 0;

				/* For mlock, just skip the stack guard page. */
				if (foll_flags & FOLL_MLOCK) {
					if (stack_guard_page(vma, start))
						goto next_page;
				}
				if (foll_flags & FOLL_WRITE)
					fault_flags |= FAULT_FLAG_WRITE;
				if (nonblocking)
					fault_flags |= FAULT_FLAG_ALLOW_RETRY;
				if (foll_flags & FOLL_NOWAIT)
					fault_flags |= (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT);

				ret = handle_mm_fault(mm, vma, start,
							fault_flags);

				if (ret & VM_FAULT_ERROR) {
					if (ret & VM_FAULT_OOM)
						return i ? i : -ENOMEM;
					if (ret & (VM_FAULT_HWPOISON |
						   VM_FAULT_HWPOISON_LARGE)) {
						if (i)
							return i;
						else if (gup_flags & FOLL_HWPOISON)
							return -EHWPOISON;
						else
							return -EFAULT;
					}
					if (ret & VM_FAULT_SIGBUS)
						goto efault;
					BUG();
				}

				if (tsk) {
					if (ret & VM_FAULT_MAJOR)
						tsk->maj_flt++;
					else
						tsk->min_flt++;
				}

				if (ret & VM_FAULT_RETRY) {
					if (nonblocking)
						*nonblocking = 0;
					return i;
				}

				/*
				 * The VM_FAULT_WRITE bit tells us that
				 * do_wp_page has broken COW when necessary,
				 * even if maybe_mkwrite decided not to set
				 * pte_write. We can thus safely do subsequent
				 * page lookups as if they were reads. But only
				 * do so when looping for pte_write is futile:
				 * in some cases userspace may also be wanting
				 * to write to the gotten user page, which a
				 * read fault here might prevent (a readonly
				 * page might get reCOWed by userspace write).
				 */
				if ((ret & VM_FAULT_WRITE) &&
				    !(vma->vm_flags & VM_WRITE))
					foll_flags &= ~FOLL_WRITE;

				cond_resched();
			}
			if (IS_ERR(page))
				return i ? i : PTR_ERR(page);
			if (pages) {
				pages[i] = page;

				flush_anon_page(vma, page, start);
				flush_dcache_page(page);
				page_mask = 0;
			}
next_page:
			if (vmas) {
				vmas[i] = vma;
				page_mask = 0;
			}
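			/*
			 * follow_page_mask() reports huge pages via a nonzero
			 * page_mask: advance past all base pages of the huge
			 * page that still lie within this request, rather
			 * than re-walking the page tables for each one.
			 */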
			page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
			if (page_increm > nr_pages)
				page_increm = nr_pages;
			i += page_increm;
			start += page_increm * PAGE_SIZE;
			nr_pages -= page_increm;
		} while (nr_pages && start < vma->vm_end);
	} while (nr_pages);
	return i;
efault:
	return i ? : -EFAULT;
}
EXPORT_SYMBOL(__get_user_pages);
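
/*
 * A minimal sketch of a __get_user_pages() caller that pins a user range
 * for writing and is willing to block (the function name is hypothetical,
 * not part of this file):
 *
 *	long pin_range_for_write(unsigned long start, unsigned long nr_pages,
 *				 struct page **pages)
 *	{
 *		long got;
 *
 *		down_read(&current->mm->mmap_sem);
 *		got = __get_user_pages(current, current->mm, start, nr_pages,
 *				       FOLL_GET | FOLL_TOUCH | FOLL_WRITE,
 *				       pages, NULL, NULL);
 *		up_read(&current->mm->mmap_sem);
 *		return got;
 *	}
 *
 * Each of pages[0..got-1] must later be released with put_page().  Most
 * callers want get_user_pages() below, which assembles these FOLL_ flags
 * from simple write/force arguments.
 */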

/*
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * This is meant to be called in the specific scenario where for locking
 * reasons we try to access user memory in atomic context (within a
 * pagefault_disable() section), the access then returns -EFAULT, and we
 * want to resolve the user fault before trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software.
 * On such architectures, gup() will not be enough to make a subsequent
 * access succeed.
 *
 * This should be called with the mmap_sem held for read.
 */
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags)
{
	struct vm_area_struct *vma;
	vm_flags_t vm_flags;
	int ret;

	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
	if (!(vm_flags & vma->vm_flags))
		return -EFAULT;

	ret = handle_mm_fault(mm, vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return -EHWPOISON;
		if (ret & VM_FAULT_SIGBUS)
			return -EFAULT;
		BUG();
	}
	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}
	return 0;
}
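
/*
 * A sketch of the intended calling pattern, modelled on the futex code's
 * fault_in_user_writeable() (simplified here; @uaddr handling and the
 * retry loop are the caller's business):
 *
 *	down_read(&mm->mmap_sem);
 *	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
 *			       FAULT_FLAG_WRITE);
 *	up_read(&mm->mmap_sem);
 *	if (!ret)
 *		goto retry;	...the atomic user access is tried again...
 */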

/*
 * get_user_pages() - pin user pages in memory
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to by the caller
 * @force:	whether to force access even when user mapping is currently
 *		protected (but never forces write access to shared mapping).
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages returns, and there may even be a completely different
 * page there in some cases (e.g. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If write=0, the page must not be written to. If the page is written to,
 * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
 * after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual
 * addresses. The pages may be submitted for DMA to devices or accessed via
 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 * use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 */
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages, int write,
		int force, struct page **pages, struct vm_area_struct **vmas)
{
	int flags = FOLL_TOUCH;

	if (pages)
		flags |= FOLL_GET;
	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
				NULL);
}
EXPORT_SYMBOL(get_user_pages);
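
/*
 * A minimal sketch of typical use for direct-IO-style pinning (variable
 * names are illustrative; error handling trimmed):
 *
 *	down_read(&current->mm->mmap_sem);
 *	got = get_user_pages(current, current->mm, uaddr & PAGE_MASK,
 *			     nr_pages, 1, 0, pages, NULL);
 *	up_read(&current->mm->mmap_sem);
 *
 *	for (i = 0; i < got; i++) {
 *		...DMA into pages[i], or access it via kmap()...
 *		set_page_dirty_lock(pages[i]);
 *		put_page(pages[i]);
 *	}
 *
 * Here write=1 and force=0; set_page_dirty_lock() is the variant to use
 * when the caller does not already hold the page lock.
 */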

/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by page_cache_release() or put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	if (__get_user_pages(current, current->mm, addr, 1,
			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
			     NULL) < 1)
		return NULL;
	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
#endif /* CONFIG_ELF_CORE */
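
/*
 * A sketch of how the ELF core dumper consumes this, modelled on
 * fs/binfmt_elf.c (details simplified; dump_emit()/dump_skip() write page
 * data or a hole to the core file):
 *
 *	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
 *		struct page *page = get_dump_page(addr);
 *
 *		if (page) {
 *			void *kaddr = kmap(page);
 *			stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
 *			kunmap(page);
 *			page_cache_release(page);
 *		} else
 *			stop = !dump_skip(cprm, PAGE_SIZE);
 *	}
 */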