/*
 * linux/mm/memory.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *		Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/pfn_t.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/kallsyms.h>
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/string.h>
#include <linux/dma-debug.h>
#include <linux/debugfs.h>
#include <linux/userfaultfd_k.h>

#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#include "internal.h"

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
#endif

#ifndef CONFIG_NEED_MULTIPLE_NODES
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
struct page *mem_map;

EXPORT_SYMBOL(max_mapnr);
EXPORT_SYMBOL(mem_map);
#endif

/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, the end
 * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 * and ZONE_HIGHMEM.
 */
void * high_memory;

EXPORT_SYMBOL(high_memory);

/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
					1;
#else
					2;
#endif

static int __init disable_randmaps(char *s)
{
	randomize_va_space = 0;
	return 1;
}
__setup("norandmaps", disable_randmaps);

unsigned long zero_pfn __read_mostly;
unsigned long highest_memmap_pfn __read_mostly;

EXPORT_SYMBOL(zero_pfn);

/*
 * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
 */
static int __init init_zero_pfn(void)
{
	zero_pfn = page_to_pfn(ZERO_PAGE(0));
	return 0;
}
core_initcall(init_zero_pfn);


#if defined(SPLIT_RSS_COUNTING)

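/*
 * Fold this task's cached per-thread RSS deltas back into the mm-wide
 * counters and reset the per-thread cache and event count to zero.
 */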
void sync_mm_rss(struct mm_struct *mm)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		if (current->rss_stat.count[i]) {
			add_mm_counter(mm, i, current->rss_stat.count[i]);
			current->rss_stat.count[i] = 0;
		}
	}
	current->rss_stat.events = 0;
}

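/*
 * Fast path for the common case: when the current task owns @mm, batch
 * the delta in the task's cached rss_stat instead of touching the shared
 * mm counters; otherwise fall back to add_mm_counter().
 */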
static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
{
	struct task_struct *task = current;

	if (likely(task->mm == mm))
		task->rss_stat.count[member] += val;
	else
		add_mm_counter(mm, member, val);
}
#define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
#define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)

/* sync counter once per 64 page faults */
#define TASK_RSS_EVENTS_THRESH	(64)
static void check_sync_rss_stat(struct task_struct *task)
{
	if (unlikely(task != current))
		return;
	if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
		sync_mm_rss(task->mm);
}
#else /* SPLIT_RSS_COUNTING */

#define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
#define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)

static void check_sync_rss_stat(struct task_struct *task)
{
}

#endif /* SPLIT_RSS_COUNTING */

#ifdef HAVE_GENERIC_MMU_GATHER

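/*
 * Advance to the next page batch in the gather, allocating a fresh one
 * if necessary.  Returns false when no further batch can be used, either
 * because MAX_GATHER_BATCH_COUNT was reached or the allocation failed;
 * the caller must then flush before queueing more pages.
 */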
static bool tlb_next_batch(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	batch = tlb->active;
	if (batch->next) {
		tlb->active = batch->next;
		return true;
	}

	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
		return false;

	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
	if (!batch)
		return false;

	tlb->batch_count++;
	batch->next = NULL;
	batch->nr = 0;
	batch->max = MAX_GATHER_BATCH;

	tlb->active->next = batch;
	tlb->active = batch;

	return true;
}

/* tlb_gather_mmu
 *	Called to initialize an (on-stack) mmu_gather structure for page-table
 *	tear-down from @mm. The @fullmm argument is used when @mm is without
 *	users and we're going to destroy the full address space (exit/execve).
 */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
	tlb->mm = mm;

	/* Is it from 0 to ~0? */
	tlb->fullmm = !(start | (end+1));
	tlb->need_flush_all = 0;
	tlb->local.next = NULL;
	tlb->local.nr = 0;
	tlb->local.max = ARRAY_SIZE(tlb->__pages);
	tlb->active = &tlb->local;
	tlb->batch_count = 0;

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb->batch = NULL;
#endif

	__tlb_reset_range(tlb);
}

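/*
 * Flush the TLB (and notify MMU notifiers) for the range gathered so far,
 * then reset the gather range; freeing the gathered pages themselves is
 * left to tlb_flush_mmu_free().
 */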
static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	if (!tlb->end)
		return;

	tlb_flush(tlb);
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb_table_flush(tlb);
#endif
	__tlb_reset_range(tlb);
}

static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
		free_pages_and_swap_cache(batch->pages, batch->nr);
		batch->nr = 0;
	}
	tlb->active = &tlb->local;
}

void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

/* tlb_finish_mmu
 *	Called at the end of the shootdown operation to free up any resources
 *	that were required.
 */
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	struct mmu_gather_batch *batch, *next;

	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	for (batch = tlb->local.next; batch; batch = next) {
		next = batch->next;
		free_pages((unsigned long)batch, 0);
	}
	tlb->local.next = NULL;
}

/* __tlb_remove_page
 *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
 *	handling the additional races in SMP caused by other CPUs caching valid
 *	mappings in their TLBs. Returns the number of free page slots left.
 *	When out of page slots we must call tlb_flush_mmu().
 */
int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	struct mmu_gather_batch *batch;

	VM_BUG_ON(!tlb->end);

	batch = tlb->active;
	batch->pages[batch->nr++] = page;
	if (batch->nr == batch->max) {
		if (!tlb_next_batch(tlb))
			return 0;
		batch = tlb->active;
	}
	VM_BUG_ON_PAGE(batch->nr > batch->max, page);

	return batch->max - batch->nr;
}

#endif /* HAVE_GENERIC_MMU_GATHER */

#ifdef CONFIG_HAVE_RCU_TABLE_FREE

/*
 * See the comment near struct mmu_table_batch.
 */

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely on
	 * IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

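/*
 * Queue a page-table page for deferred freeing.  If nobody else can be
 * walking this mm's page tables (mm_users < 2) the table is freed
 * immediately; otherwise it is batched and only released after an
 * RCU-sched grace period (or an IPI broadcast when no batch page can be
 * allocated), so that lockless page-table walkers that rely on interrupt
 * disabling cannot still be traversing it.
 */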
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	/*
	 * When there are fewer than two users of this mm there cannot be a
	 * concurrent page-table walk.
	 */
	if (atomic_read(&tlb->mm->mm_users) < 2) {
		__tlb_remove_table(table);
		return;
	}

	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);
}

#endif /* CONFIG_HAVE_RCU_TABLE_FREE */

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	atomic_long_dec(&tlb->mm->nr_ptes);
}

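/*
 * Free the pte pages mapped by a pmd range, then free the pmd page itself
 * when the floor/ceiling bounds show that no neighbouring vma can still
 * be using it.
 */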
static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}

/*
 * This function frees user-level page tables of a process.
 */
void free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing PMD* at this top level? Because often
	 * there will be no work to do at all, and we'd prefer not to
	 * go all the way down to the bottom just to discover that.
	 *
	 * Why all these "- 1"s? Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests. But what about where end is brought down
	 * by PMD_SIZE below? no, end can't go down to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		free_pud_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
		unsigned long floor, unsigned long ceiling)
{
	while (vma) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long addr = vma->vm_start;

		/*
		 * Hide vma from rmap and truncate_pagecache before freeing
		 * pgtables
		 */
		unlink_anon_vmas(vma);
		unlink_file_vma(vma);

		if (is_vm_hugetlb_page(vma)) {
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		} else {
			/*
			 * Optimization: gather nearby vmas into one call down
			 */
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
			       && !is_vm_hugetlb_page(next)) {
				vma = next;
				next = vma->vm_next;
				unlink_anon_vmas(vma);
				unlink_file_vma(vma);
			}
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		}
		vma = next;
	}
}

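/*
 * Allocate a new pte page and install it in @pmd, taking care of the race
 * with another thread populating the pmd first; the extra page is freed
 * again if we lose that race.
 */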
int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		pmd_t *pmd, unsigned long address)
{
	spinlock_t *ptl;
	pgtable_t new = pte_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	/*
	 * Ensure all pte setup (eg. pte page lock and page clearing) are
	 * visible before the pte is made visible to other CPUs by being
	 * put into page tables.
	 *
	 * The other side of the story is the pointer chasing in the page
	 * table walking code (when walking the page table without locking;
	 * ie. most of the time). Fortunately, these data accesses consist
	 * of a chain of data-dependent loads, meaning most CPUs (alpha
	 * being the notable exception) will already guarantee loads are
	 * seen in-order. See the alpha page table accessors for the
	 * smp_read_barrier_depends() barriers in page table walking code.
	 */
	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */

	ptl = pmd_lock(mm, pmd);
	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		atomic_long_inc(&mm->nr_ptes);
		pmd_populate(mm, pmd, new);
		new = NULL;
	}
	spin_unlock(ptl);
	if (new)
		pte_free(mm, new);
	return 0;
}

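/*
 * Same as __pte_alloc(), but for kernel page tables in init_mm, where
 * init_mm.page_table_lock serializes against concurrent populators.
 */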
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm, address);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */

	spin_lock(&init_mm.page_table_lock);
	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
		pmd_populate_kernel(&init_mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (new)
		pte_free_kernel(&init_mm, new);
	return 0;
}

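/*
 * Helpers for batching RSS updates: callers accumulate per-counter deltas
 * in a local vector and fold them into the mm in one go, instead of
 * touching the shared counters for every pte processed.
 */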
static inline void init_rss_vec(int *rss)
{
	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
}

static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
{
	int i;

	if (current->mm == mm)
		sync_mm_rss(mm);
	for (i = 0; i < NR_MM_COUNTERS; i++)
		if (rss[i])
			add_mm_counter(mm, i, rss[i]);
}

/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
			  pte_t pte, struct page *page)
{
	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);
	struct address_space *mapping;
	pgoff_t index;
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			return;
		}
		if (nr_unshown) {
			printk(KERN_ALERT
				"BUG: Bad page map: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
	index = linear_page_index(vma, addr);

	printk(KERN_ALERT
		"BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n",
		current->comm,
		(long long)pte_val(pte), (long long)pmd_val(*pmd));
	if (page)
		dump_page(page, "bad pte");
	printk(KERN_ALERT
		"addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
		(void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
	/*
	 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
	 */
	pr_alert("file:%pD fault:%pf mmap:%pf readpage:%pf\n",
		 vma->vm_file,
		 vma->vm_ops ? vma->vm_ops->fault : NULL,
		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
		 mapping ? mapping->a_ops->readpage : NULL);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 *
 * "Special" mappings do not wish to be associated with a "struct page" (either
 * it doesn't exist, or it exists but they don't want to touch it). In this
 * case, NULL is returned here. "Normal" mappings do have a struct page.
 *
 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 * pte bit, in which case this function is trivial. Secondly, an architecture
 * may not have a spare pte bit, which requires a more complicated scheme,
 * described below.
 *
 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 * special mapping (even if there are underlying and valid "struct pages").
 * COWed pages of a VM_PFNMAP are always normal.
 *
 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 * mapping will always honor the rule
 *
 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * And for normal mappings this is false.
 *
 * This restricts such mappings to be a linear translation from virtual address
 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 * as the vma is not a COW mapping; in that case, we know that all ptes are
 * special (because none can have been COWed).
 *
 *
 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 *
 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 * page" backing, however the difference is that _all_ pages with a struct
 * page (that is, those where pfn_valid is true) are refcounted and considered
 * normal pages by the VM. The disadvantage is that pages are refcounted
 * (which can be slower and simply not an option for some PFNMAP users). The
 * advantage is that we don't have to follow the strict linearity rule of
 * PFNMAP mappings in order to support COWable mappings.
 *
 */
#ifdef __HAVE_ARCH_PTE_SPECIAL
# define HAVE_PTE_SPECIAL 1
#else
# define HAVE_PTE_SPECIAL 0
#endif
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
				pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (HAVE_PTE_SPECIAL) {
		if (likely(!pte_special(pte)))
			goto check_pfn;
		if (vma->vm_ops && vma->vm_ops->find_special_page)
			return vma->vm_ops->find_special_page(vma, addr);
		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			return NULL;
		if (!is_zero_pfn(pfn))
			print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/* !HAVE_PTE_SPECIAL case follows: */

	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	if (is_zero_pfn(pfn))
		return NULL;
check_pfn:
	if (unlikely(pfn > highest_memmap_pfn)) {
		print_bad_pte(vma, addr, pte, NULL);
		return NULL;
	}

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	return pfn_to_page(pfn);
}

/*
 * copy one vm_area from one task to the other. Assumes that any page
 * tables already present in the new task have been cleared in the whole
 * range covered by this vma.
 */

static inline unsigned long
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
		unsigned long addr, int *rss)
{
	unsigned long vm_flags = vma->vm_flags;
	pte_t pte = *src_pte;
	struct page *page;

	/* pte contains position in swap or file, so copy. */
	if (unlikely(!pte_present(pte))) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (likely(!non_swap_entry(entry))) {
			if (swap_duplicate(entry) < 0)
				return entry.val;

			/* make sure dst_mm is on swapoff's mmlist. */
			if (unlikely(list_empty(&dst_mm->mmlist))) {
				spin_lock(&mmlist_lock);
				if (list_empty(&dst_mm->mmlist))
					list_add(&dst_mm->mmlist,
							&src_mm->mmlist);
				spin_unlock(&mmlist_lock);
			}
			rss[MM_SWAPENTS]++;
		} else if (is_migration_entry(entry)) {
			page = migration_entry_to_page(entry);

			rss[mm_counter(page)]++;

			if (is_write_migration_entry(entry) &&
					is_cow_mapping(vm_flags)) {
				/*
				 * COW mappings require pages in both
				 * parent and child to be set to read.
				 */
				make_migration_entry_read(&entry);
				pte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(*src_pte))
					pte = pte_swp_mksoft_dirty(pte);
				set_pte_at(src_mm, addr, src_pte, pte);
			}
		}
		goto out_set_pte;
	}

	/*
	 * If it's a COW mapping, write protect it both
	 * in the parent and the child
	 */
	if (is_cow_mapping(vm_flags)) {
		ptep_set_wrprotect(src_mm, addr, src_pte);
		pte = pte_wrprotect(pte);
	}

	/*
	 * If it's a shared mapping, mark it clean in
	 * the child
	 */
	if (vm_flags & VM_SHARED)
		pte = pte_mkclean(pte);
	pte = pte_mkold(pte);

	page = vm_normal_page(vma, addr, pte);
	if (page) {
		get_page(page);
		page_dup_rmap(page, false);
		rss[mm_counter(page)]++;
	}

out_set_pte:
	set_pte_at(dst_mm, addr, dst_pte, pte);
	return 0;
}

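/*
 * Copy one pte page worth of mappings from the parent to the child,
 * periodically dropping both page-table locks so other tasks do not see
 * excessive latency, and restarting when a swap-count continuation has
 * to be allocated.
 */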
static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		   pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
		   unsigned long addr, unsigned long end)
{
	pte_t *orig_src_pte, *orig_dst_pte;
	pte_t *src_pte, *dst_pte;
	spinlock_t *src_ptl, *dst_ptl;
	int progress = 0;
	int rss[NR_MM_COUNTERS];
	swp_entry_t entry = (swp_entry_t){0};

again:
	init_rss_vec(rss);

	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	if (!dst_pte)
		return -ENOMEM;
	src_pte = pte_offset_map(src_pmd, addr);
	src_ptl = pte_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
	orig_src_pte = src_pte;
	orig_dst_pte = dst_pte;
	arch_enter_lazy_mmu_mode();

	do {
		/*
		 * We are holding two locks at this point - either of them
		 * could generate latencies in another task on another CPU.
		 */
		if (progress >= 32) {
			progress = 0;
			if (need_resched() ||
			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
				break;
		}
		if (pte_none(*src_pte)) {
			progress++;
			continue;
		}
		entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
							vma, addr, rss);
		if (entry.val)
			break;
		progress += 8;
	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

	arch_leave_lazy_mmu_mode();
	spin_unlock(src_ptl);
	pte_unmap(orig_src_pte);
	add_mm_rss_vec(dst_mm, rss);
	pte_unmap_unlock(orig_dst_pte, dst_ptl);
	cond_resched();

	if (entry.val) {
		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
			return -ENOMEM;
		progress = 0;
	}
	if (addr != end)
		goto again;
	return 0;
}

static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pmd_t *src_pmd, *dst_pmd;
	unsigned long next;

	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
	if (!dst_pmd)
		return -ENOMEM;
	src_pmd = pmd_offset(src_pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_trans_huge(*src_pmd) || pmd_devmap(*src_pmd)) {
			int err;
			VM_BUG_ON(next-addr != HPAGE_PMD_SIZE);
			err = copy_huge_pmd(dst_mm, src_mm,
					    dst_pmd, src_pmd, addr, vma);
			if (err == -ENOMEM)
				return -ENOMEM;
			if (!err)
				continue;
			/* fall through */
		}
		if (pmd_none_or_clear_bad(src_pmd))
			continue;
		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
	return 0;
}

static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pud_t *src_pud, *dst_pud;
	unsigned long next;

	dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
	if (!dst_pud)
		return -ENOMEM;
	src_pud = pud_offset(src_pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(src_pud))
			continue;
		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pud++, src_pud++, addr = next, addr != end);
	return 0;
}

| 995 | int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, |
| 996 | struct vm_area_struct *vma) |
| 997 | { |
| 998 | pgd_t *src_pgd, *dst_pgd; |
| 999 | unsigned long next; |
| 1000 | unsigned long addr = vma->vm_start; |
| 1001 | unsigned long end = vma->vm_end; |
Sagi Grimberg | 2ec74c3 | 2012-10-08 16:33:33 -0700 | [diff] [blame] | 1002 | unsigned long mmun_start; /* For mmu_notifiers */ |
| 1003 | unsigned long mmun_end; /* For mmu_notifiers */ |
| 1004 | bool is_cow; |
Andrea Arcangeli | cddb8a5 | 2008-07-28 15:46:29 -0700 | [diff] [blame] | 1005 | int ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1006 | |
Nick Piggin | d992895 | 2005-08-28 16:49:11 +1000 | [diff] [blame] | 1007 | /* |
| 1008 | * Don't copy ptes where a page fault will fill them correctly. |
| 1009 | * Fork becomes much lighter when there are big shared or private |
| 1010 | * readonly mappings. The tradeoff is that copy_page_range is more |
| 1011 | * efficient than faulting. |
| 1012 | */ |
Kirill A. Shutemov | 0661a33 | 2015-02-10 14:10:04 -0800 | [diff] [blame] | 1013 | if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) && |
| 1014 | !vma->anon_vma) |
| 1015 | return 0; |
Nick Piggin | d992895 | 2005-08-28 16:49:11 +1000 | [diff] [blame] | 1016 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1017 | if (is_vm_hugetlb_page(vma)) |
| 1018 | return copy_hugetlb_page_range(dst_mm, src_mm, vma); |
| 1019 | |
Konstantin Khlebnikov | b3b9c29 | 2012-10-08 16:28:34 -0700 | [diff] [blame] | 1020 | if (unlikely(vma->vm_flags & VM_PFNMAP)) { |
venkatesh.pallipadi@intel.com | 2ab6403 | 2008-12-18 11:41:29 -0800 | [diff] [blame] | 1021 | /* |
| 1022 | * We do not free on error cases below as remove_vma |
| 1023 | * gets called on error from a higher-level routine |
| 1024 | */ |
Suresh Siddha | 5180da4 | 2012-10-08 16:28:29 -0700 | [diff] [blame] | 1025 | ret = track_pfn_copy(vma); |
venkatesh.pallipadi@intel.com | 2ab6403 | 2008-12-18 11:41:29 -0800 | [diff] [blame] | 1026 | if (ret) |
| 1027 | return ret; |
| 1028 | } |
| 1029 | |
Andrea Arcangeli | cddb8a5 | 2008-07-28 15:46:29 -0700 | [diff] [blame] | 1030 | /* |
| 1031 | * We need to invalidate the secondary MMU mappings only when |
| 1032 | * there could be a permission downgrade on the ptes of the |
| 1033 | * parent mm. And a permission downgrade will only happen if |
| 1034 | * is_cow_mapping() returns true. |
| 1035 | */ |
Sagi Grimberg | 2ec74c3 | 2012-10-08 16:33:33 -0700 | [diff] [blame] | 1036 | is_cow = is_cow_mapping(vma->vm_flags); |
| 1037 | mmun_start = addr; |
| 1038 | mmun_end = end; |
| 1039 | if (is_cow) |
| 1040 | mmu_notifier_invalidate_range_start(src_mm, mmun_start, |
| 1041 | mmun_end); |
Andrea Arcangeli | cddb8a5 | 2008-07-28 15:46:29 -0700 | [diff] [blame] | 1042 | |
| 1043 | ret = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1044 | dst_pgd = pgd_offset(dst_mm, addr); |
| 1045 | src_pgd = pgd_offset(src_mm, addr); |
| 1046 | do { |
| 1047 | next = pgd_addr_end(addr, end); |
| 1048 | if (pgd_none_or_clear_bad(src_pgd)) |
| 1049 | continue; |
Andrea Arcangeli | cddb8a5 | 2008-07-28 15:46:29 -0700 | [diff] [blame] | 1050 | if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd, |
| 1051 | vma, addr, next))) { |
| 1052 | ret = -ENOMEM; |
| 1053 | break; |
| 1054 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1055 | } while (dst_pgd++, src_pgd++, addr = next, addr != end); |
Andrea Arcangeli | cddb8a5 | 2008-07-28 15:46:29 -0700 | [diff] [blame] | 1056 | |
Sagi Grimberg | 2ec74c3 | 2012-10-08 16:33:33 -0700 | [diff] [blame] | 1057 | if (is_cow) |
| 1058 | mmu_notifier_invalidate_range_end(src_mm, mmun_start, mmun_end); |
Andrea Arcangeli | cddb8a5 | 2008-07-28 15:46:29 -0700 | [diff] [blame] | 1059 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1060 | } |
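
/*
 * Illustrative sketch (editor's example, not part of memory.c): during
 * fork(), kernel/fork.c:dup_mmap() walks the parent's vma list and, for
 * each vma it duplicates, calls copy_page_range() roughly like this.
 * mm is the child mm, oldmm the parent mm, mpnt the freshly copied vma.
 */
	retval = copy_page_range(mm, oldmm, mpnt);
	if (retval)
		goto out;	/* fork fails, typically with -ENOMEM */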
| 1061 | |
Robin Holt | 51c6f66 | 2005-11-13 16:06:42 -0800 | [diff] [blame] | 1062 | static unsigned long zap_pte_range(struct mmu_gather *tlb, |
Nick Piggin | b581003 | 2005-10-29 18:16:12 -0700 | [diff] [blame] | 1063 | struct vm_area_struct *vma, pmd_t *pmd, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1064 | unsigned long addr, unsigned long end, |
Peter Zijlstra | 97a8941 | 2011-05-24 17:12:04 -0700 | [diff] [blame] | 1065 | struct zap_details *details) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1066 | { |
Nick Piggin | b581003 | 2005-10-29 18:16:12 -0700 | [diff] [blame] | 1067 | struct mm_struct *mm = tlb->mm; |
Peter Zijlstra | d16dfc5 | 2011-05-24 17:11:45 -0700 | [diff] [blame] | 1068 | int force_flush = 0; |
KAMEZAWA Hiroyuki | d559db0 | 2010-03-05 13:41:39 -0800 | [diff] [blame] | 1069 | int rss[NR_MM_COUNTERS]; |
Peter Zijlstra | 97a8941 | 2011-05-24 17:12:04 -0700 | [diff] [blame] | 1070 | spinlock_t *ptl; |
Steven Rostedt | 5f1a190 | 2011-06-15 15:08:23 -0700 | [diff] [blame] | 1071 | pte_t *start_pte; |
Peter Zijlstra | 97a8941 | 2011-05-24 17:12:04 -0700 | [diff] [blame] | 1072 | pte_t *pte; |
Kirill A. Shutemov | 8a5f14a | 2015-02-10 14:09:49 -0800 | [diff] [blame] | 1073 | swp_entry_t entry; |
KAMEZAWA Hiroyuki | d559db0 | 2010-03-05 13:41:39 -0800 | [diff] [blame] | 1074 | |
Peter Zijlstra | d16dfc5 | 2011-05-24 17:11:45 -0700 | [diff] [blame] | 1075 | again: |
Peter Zijlstra | e303297 | 2011-05-24 17:12:01 -0700 | [diff] [blame] | 1076 | init_rss_vec(rss); |
Steven Rostedt | 5f1a190 | 2011-06-15 15:08:23 -0700 | [diff] [blame] | 1077 | start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); |
| 1078 | pte = start_pte; |
Zachary Amsden | 6606c3e | 2006-09-30 23:29:33 -0700 | [diff] [blame] | 1079 | arch_enter_lazy_mmu_mode(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1080 | do { |
| 1081 | pte_t ptent = *pte; |
Robin Holt | 51c6f66 | 2005-11-13 16:06:42 -0800 | [diff] [blame] | 1082 | if (pte_none(ptent)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1083 | continue; |
Robin Holt | 51c6f66 | 2005-11-13 16:06:42 -0800 | [diff] [blame] | 1084 | } |
Hugh Dickins | 6f5e6b9 | 2006-03-16 23:04:09 -0800 | [diff] [blame] | 1085 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1086 | if (pte_present(ptent)) { |
Hugh Dickins | ee498ed | 2005-11-21 21:32:18 -0800 | [diff] [blame] | 1087 | struct page *page; |
Robin Holt | 51c6f66 | 2005-11-13 16:06:42 -0800 | [diff] [blame] | 1088 | |
Linus Torvalds | 6aab341 | 2005-11-28 14:34:23 -0800 | [diff] [blame] | 1089 | page = vm_normal_page(vma, addr, ptent); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1090 | if (unlikely(details) && page) { |
| 1091 | /* |
| 1092 | * unmap_shared_mapping_pages() wants to |
| 1093 | * invalidate cache without truncating: |
| 1094 | * unmap shared but keep private pages. |
| 1095 | */ |
| 1096 | if (details->check_mapping && |
| 1097 | details->check_mapping != page->mapping) |
| 1098 | continue; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1099 | } |
Nick Piggin | b581003 | 2005-10-29 18:16:12 -0700 | [diff] [blame] | 1100 | ptent = ptep_get_and_clear_full(mm, addr, pte, |
Zachary Amsden | a600388 | 2005-09-03 15:55:04 -0700 | [diff] [blame] | 1101 | tlb->fullmm); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1102 | tlb_remove_tlb_entry(tlb, pte, addr); |
| 1103 | if (unlikely(!page)) |
| 1104 | continue; |
Jerome Marchand | eca56ff | 2016-01-14 15:19:26 -0800 | [diff] [blame] | 1105 | |
| 1106 | if (!PageAnon(page)) { |
Linus Torvalds | 1cf35d4 | 2014-04-25 16:05:40 -0700 | [diff] [blame] | 1107 | if (pte_dirty(ptent)) { |
| 1108 | force_flush = 1; |
Hugh Dickins | 6237bcd | 2005-10-29 18:15:54 -0700 | [diff] [blame] | 1109 | set_page_dirty(page); |
Linus Torvalds | 1cf35d4 | 2014-04-25 16:05:40 -0700 | [diff] [blame] | 1110 | } |
Johannes Weiner | 4917e5d | 2009-01-06 14:39:17 -0800 | [diff] [blame] | 1111 | if (pte_young(ptent) && |
Joe Perches | 64363aa | 2013-07-08 16:00:18 -0700 | [diff] [blame] | 1112 | likely(!(vma->vm_flags & VM_SEQ_READ))) |
Nick Piggin | bf3f3bc | 2009-01-06 14:38:55 -0800 | [diff] [blame] | 1113 | mark_page_accessed(page); |
Hugh Dickins | 6237bcd | 2005-10-29 18:15:54 -0700 | [diff] [blame] | 1114 | } |
Jerome Marchand | eca56ff | 2016-01-14 15:19:26 -0800 | [diff] [blame] | 1115 | rss[mm_counter(page)]--; |
Kirill A. Shutemov | d281ee6 | 2016-01-15 16:52:16 -0800 | [diff] [blame] | 1116 | page_remove_rmap(page, false); |
Hugh Dickins | 3dc1474 | 2009-01-06 14:40:08 -0800 | [diff] [blame] | 1117 | if (unlikely(page_mapcount(page) < 0)) |
| 1118 | print_bad_pte(vma, addr, ptent, page); |
Linus Torvalds | 1cf35d4 | 2014-04-25 16:05:40 -0700 | [diff] [blame] | 1119 | if (unlikely(!__tlb_remove_page(tlb, page))) { |
| 1120 | force_flush = 1; |
Will Deacon | ce9ec37 | 2014-10-28 13:16:28 -0700 | [diff] [blame] | 1121 | addr += PAGE_SIZE; |
Peter Zijlstra | d16dfc5 | 2011-05-24 17:11:45 -0700 | [diff] [blame] | 1122 | break; |
Linus Torvalds | 1cf35d4 | 2014-04-25 16:05:40 -0700 | [diff] [blame] | 1123 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1124 | continue; |
| 1125 | } |
Kirill A. Shutemov | 8a5f14a | 2015-02-10 14:09:49 -0800 | [diff] [blame] | 1126 | /* If details->check_mapping, we leave swap entries. */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1127 | if (unlikely(details)) |
| 1128 | continue; |
KAMEZAWA Hiroyuki | b084d43 | 2010-03-05 13:41:42 -0800 | [diff] [blame] | 1129 | |
Kirill A. Shutemov | 8a5f14a | 2015-02-10 14:09:49 -0800 | [diff] [blame] | 1130 | entry = pte_to_swp_entry(ptent); |
| 1131 | if (!non_swap_entry(entry)) |
| 1132 | rss[MM_SWAPENTS]--; |
| 1133 | else if (is_migration_entry(entry)) { |
| 1134 | struct page *page; |
Konstantin Khlebnikov | 9f9f1ac | 2012-01-20 14:34:24 -0800 | [diff] [blame] | 1135 | |
Kirill A. Shutemov | 8a5f14a | 2015-02-10 14:09:49 -0800 | [diff] [blame] | 1136 | page = migration_entry_to_page(entry); |
Jerome Marchand | eca56ff | 2016-01-14 15:19:26 -0800 | [diff] [blame] | 1137 | rss[mm_counter(page)]--; |
KAMEZAWA Hiroyuki | b084d43 | 2010-03-05 13:41:42 -0800 | [diff] [blame] | 1138 | } |
Kirill A. Shutemov | 8a5f14a | 2015-02-10 14:09:49 -0800 | [diff] [blame] | 1139 | if (unlikely(!free_swap_and_cache(entry))) |
| 1140 | print_bad_pte(vma, addr, ptent, NULL); |
Zachary Amsden | 9888a1c | 2006-09-30 23:29:31 -0700 | [diff] [blame] | 1141 | pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); |
Peter Zijlstra | 97a8941 | 2011-05-24 17:12:04 -0700 | [diff] [blame] | 1142 | } while (pte++, addr += PAGE_SIZE, addr != end); |
Hugh Dickins | ae85976 | 2005-10-29 18:16:05 -0700 | [diff] [blame] | 1143 | |
KAMEZAWA Hiroyuki | d559db0 | 2010-03-05 13:41:39 -0800 | [diff] [blame] | 1144 | add_mm_rss_vec(mm, rss); |
Zachary Amsden | 6606c3e | 2006-09-30 23:29:33 -0700 | [diff] [blame] | 1145 | arch_leave_lazy_mmu_mode(); |
Robin Holt | 51c6f66 | 2005-11-13 16:06:42 -0800 | [diff] [blame] | 1146 | |
Linus Torvalds | 1cf35d4 | 2014-04-25 16:05:40 -0700 | [diff] [blame] | 1147 | /* Do the actual TLB flush before dropping ptl */ |
Will Deacon | fb7332a | 2014-10-29 10:03:09 +0000 | [diff] [blame] | 1148 | if (force_flush) |
Linus Torvalds | 1cf35d4 | 2014-04-25 16:05:40 -0700 | [diff] [blame] | 1149 | tlb_flush_mmu_tlbonly(tlb); |
Linus Torvalds | 1cf35d4 | 2014-04-25 16:05:40 -0700 | [diff] [blame] | 1150 | pte_unmap_unlock(start_pte, ptl); |
| 1151 | |
| 1152 | /* |
| 1153 | * If we forced a TLB flush (either due to running out of |
| 1154 | * batch buffers or because we needed to flush dirty TLB |
| 1155 | * entries before releasing the ptl), free the batched |
| 1156 | * memory too. Restart if we didn't do everything. |
| 1157 | */ |
| 1158 | if (force_flush) { |
| 1159 | force_flush = 0; |
| 1160 | tlb_flush_mmu_free(tlb); |
Linus Torvalds | 2b04725 | 2013-08-15 11:42:25 -0700 | [diff] [blame] | 1161 | |
| 1162 | if (addr != end) |
Peter Zijlstra | d16dfc5 | 2011-05-24 17:11:45 -0700 | [diff] [blame] | 1163 | goto again; |
| 1164 | } |
| 1165 | |
Robin Holt | 51c6f66 | 2005-11-13 16:06:42 -0800 | [diff] [blame] | 1166 | return addr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1167 | } |
| 1168 | |
Robin Holt | 51c6f66 | 2005-11-13 16:06:42 -0800 | [diff] [blame] | 1169 | static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, |
Nick Piggin | b581003 | 2005-10-29 18:16:12 -0700 | [diff] [blame] | 1170 | struct vm_area_struct *vma, pud_t *pud, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1171 | unsigned long addr, unsigned long end, |
Peter Zijlstra | 97a8941 | 2011-05-24 17:12:04 -0700 | [diff] [blame] | 1172 | struct zap_details *details) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1173 | { |
| 1174 | pmd_t *pmd; |
| 1175 | unsigned long next; |
| 1176 | |
| 1177 | pmd = pmd_offset(pud, addr); |
| 1178 | do { |
| 1179 | next = pmd_addr_end(addr, end); |
Dan Williams | 5c7fb56 | 2016-01-15 16:56:52 -0800 | [diff] [blame] | 1180 | if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { |
Andrea Arcangeli | 1a5a990 | 2012-03-21 16:33:42 -0700 | [diff] [blame] | 1181 | if (next - addr != HPAGE_PMD_SIZE) { |
David Rientjes | e0897d7 | 2012-06-20 12:53:00 -0700 | [diff] [blame] | 1182 | #ifdef CONFIG_DEBUG_VM |
| 1183 | if (!rwsem_is_locked(&tlb->mm->mmap_sem)) { |
| 1184 | pr_err("%s: mmap_sem is unlocked! addr=0x%lx end=0x%lx vma->vm_start=0x%lx vma->vm_end=0x%lx\n", |
| 1185 | __func__, addr, end, |
| 1186 | vma->vm_start, |
| 1187 | vma->vm_end); |
| 1188 | BUG(); |
| 1189 | } |
| 1190 | #endif |
Kirill A. Shutemov | 78ddc53 | 2016-01-15 16:52:42 -0800 | [diff] [blame] | 1191 | split_huge_pmd(vma, pmd, addr); |
Shaohua Li | f21760b | 2012-01-12 17:19:16 -0800 | [diff] [blame] | 1192 | } else if (zap_huge_pmd(tlb, vma, pmd, addr)) |
Andrea Arcangeli | 1a5a990 | 2012-03-21 16:33:42 -0700 | [diff] [blame] | 1193 | goto next; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1194 | /* fall through */ |
| 1195 | } |
Andrea Arcangeli | 1a5a990 | 2012-03-21 16:33:42 -0700 | [diff] [blame] | 1196 | /* |
| 1197 | * Here there can be other concurrent MADV_DONTNEED or |
| 1198 | * trans huge page faults running, and if the pmd is |
| 1199 | * none or trans huge it can change under us. This is |
| 1200 | * because MADV_DONTNEED holds the mmap_sem in read |
| 1201 | * mode. |
| 1202 | */ |
| 1203 | if (pmd_none_or_trans_huge_or_clear_bad(pmd)) |
| 1204 | goto next; |
Peter Zijlstra | 97a8941 | 2011-05-24 17:12:04 -0700 | [diff] [blame] | 1205 | next = zap_pte_range(tlb, vma, pmd, addr, next, details); |
Andrea Arcangeli | 1a5a990 | 2012-03-21 16:33:42 -0700 | [diff] [blame] | 1206 | next: |
Peter Zijlstra | 97a8941 | 2011-05-24 17:12:04 -0700 | [diff] [blame] | 1207 | cond_resched(); |
| 1208 | } while (pmd++, addr = next, addr != end); |
Robin Holt | 51c6f66 | 2005-11-13 16:06:42 -0800 | [diff] [blame] | 1209 | |
| 1210 | return addr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1211 | } |
| 1212 | |
Robin Holt | 51c6f66 | 2005-11-13 16:06:42 -0800 | [diff] [blame] | 1213 | static inline unsigned long zap_pud_range(struct mmu_gather *tlb, |
Nick Piggin | b581003 | 2005-10-29 18:16:12 -0700 | [diff] [blame] | 1214 | struct vm_area_struct *vma, pgd_t *pgd, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1215 | unsigned long addr, unsigned long end, |
Peter Zijlstra | 97a8941 | 2011-05-24 17:12:04 -0700 | [diff] [blame] | 1216 | struct zap_details *details) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1217 | { |
| 1218 | pud_t *pud; |
| 1219 | unsigned long next; |
| 1220 | |
| 1221 | pud = pud_offset(pgd, addr); |
| 1222 | do { |
| 1223 | next = pud_addr_end(addr, end); |
Peter Zijlstra | 97a8941 | 2011-05-24 17:12:04 -0700 | [diff] [blame] | 1224 | if (pud_none_or_clear_bad(pud)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1225 | continue; |
Peter Zijlstra | 97a8941 | 2011-05-24 17:12:04 -0700 | [diff] [blame] | 1226 | next = zap_pmd_range(tlb, vma, pud, addr, next, details); |
| 1227 | } while (pud++, addr = next, addr != end); |
Robin Holt | 51c6f66 | 2005-11-13 16:06:42 -0800 | [diff] [blame] | 1228 | |
| 1229 | return addr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1230 | } |
| 1231 | |
Al Viro | 038c7aa | 2012-03-05 13:25:09 -0500 | [diff] [blame] | 1232 | static void unmap_page_range(struct mmu_gather *tlb, |
| 1233 | struct vm_area_struct *vma, |
| 1234 | unsigned long addr, unsigned long end, |
| 1235 | struct zap_details *details) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1236 | { |
| 1237 | pgd_t *pgd; |
| 1238 | unsigned long next; |
| 1239 | |
Kirill A. Shutemov | 8a5f14a | 2015-02-10 14:09:49 -0800 | [diff] [blame] | 1240 | if (details && !details->check_mapping) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1241 | details = NULL; |
| 1242 | |
| 1243 | BUG_ON(addr >= end); |
| 1244 | tlb_start_vma(tlb, vma); |
| 1245 | pgd = pgd_offset(vma->vm_mm, addr); |
| 1246 | do { |
| 1247 | next = pgd_addr_end(addr, end); |
Peter Zijlstra | 97a8941 | 2011-05-24 17:12:04 -0700 | [diff] [blame] | 1248 | if (pgd_none_or_clear_bad(pgd)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1249 | continue; |
Peter Zijlstra | 97a8941 | 2011-05-24 17:12:04 -0700 | [diff] [blame] | 1250 | next = zap_pud_range(tlb, vma, pgd, addr, next, details); |
| 1251 | } while (pgd++, addr = next, addr != end); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1252 | tlb_end_vma(tlb, vma); |
| 1253 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1254 | |
Al Viro | f5cc4ee | 2012-03-05 14:14:20 -0500 | [diff] [blame] | 1255 | |
| 1256 | static void unmap_single_vma(struct mmu_gather *tlb, |
| 1257 | struct vm_area_struct *vma, unsigned long start_addr, |
Linus Torvalds | 4f74d2c | 2012-05-06 13:54:06 -0700 | [diff] [blame] | 1258 | unsigned long end_addr, |
Al Viro | f5cc4ee | 2012-03-05 14:14:20 -0500 | [diff] [blame] | 1259 | struct zap_details *details) |
| 1260 | { |
| 1261 | unsigned long start = max(vma->vm_start, start_addr); |
| 1262 | unsigned long end; |
| 1263 | |
| 1264 | if (start >= vma->vm_end) |
| 1265 | return; |
| 1266 | end = min(vma->vm_end, end_addr); |
| 1267 | if (end <= vma->vm_start) |
| 1268 | return; |
| 1269 | |
Srikar Dronamraju | cbc91f7 | 2012-04-11 16:05:27 +0530 | [diff] [blame] | 1270 | if (vma->vm_file) |
| 1271 | uprobe_munmap(vma, start, end); |
| 1272 | |
Konstantin Khlebnikov | b3b9c29 | 2012-10-08 16:28:34 -0700 | [diff] [blame] | 1273 | if (unlikely(vma->vm_flags & VM_PFNMAP)) |
Suresh Siddha | 5180da4 | 2012-10-08 16:28:29 -0700 | [diff] [blame] | 1274 | untrack_pfn(vma, 0, 0); |
Al Viro | f5cc4ee | 2012-03-05 14:14:20 -0500 | [diff] [blame] | 1275 | |
| 1276 | if (start != end) { |
| 1277 | if (unlikely(is_vm_hugetlb_page(vma))) { |
| 1278 | /* |
| 1279 | * It is undesirable to test vma->vm_file as it |
| 1280 | * should be non-null for valid hugetlb area. |
| 1281 | * However, vm_file will be NULL in the error |
Davidlohr Bueso | 7aa6b4a | 2014-04-07 15:37:01 -0700 | [diff] [blame] | 1282 | * cleanup path of mmap_region. When |
Al Viro | f5cc4ee | 2012-03-05 14:14:20 -0500 | [diff] [blame] | 1283 | * hugetlbfs ->mmap method fails, |
Davidlohr Bueso | 7aa6b4a | 2014-04-07 15:37:01 -0700 | [diff] [blame] | 1284 | * mmap_region() nullifies vma->vm_file |
Al Viro | f5cc4ee | 2012-03-05 14:14:20 -0500 | [diff] [blame] | 1285 | * before calling this function to clean up. |
| 1286 | * Since no pte has actually been set up, it is |
| 1287 | * safe to do nothing in this case. |
| 1288 | */ |
Aneesh Kumar K.V | 24669e5 | 2012-07-31 16:42:03 -0700 | [diff] [blame] | 1289 | if (vma->vm_file) { |
Davidlohr Bueso | 83cde9e | 2014-12-12 16:54:21 -0800 | [diff] [blame] | 1290 | i_mmap_lock_write(vma->vm_file->f_mapping); |
Mel Gorman | d833352 | 2012-07-31 16:46:20 -0700 | [diff] [blame] | 1291 | __unmap_hugepage_range_final(tlb, vma, start, end, NULL); |
Davidlohr Bueso | 83cde9e | 2014-12-12 16:54:21 -0800 | [diff] [blame] | 1292 | i_mmap_unlock_write(vma->vm_file->f_mapping); |
Aneesh Kumar K.V | 24669e5 | 2012-07-31 16:42:03 -0700 | [diff] [blame] | 1293 | } |
Al Viro | f5cc4ee | 2012-03-05 14:14:20 -0500 | [diff] [blame] | 1294 | } else |
| 1295 | unmap_page_range(tlb, vma, start, end, details); |
| 1296 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1297 | } |
| 1298 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1299 | /** |
| 1300 | * unmap_vmas - unmap a range of memory covered by a list of vma's |
Randy Dunlap | 0164f69 | 2011-06-15 15:08:09 -0700 | [diff] [blame] | 1301 | * @tlb: address of the caller's struct mmu_gather |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1302 | * @vma: the starting vma |
| 1303 | * @start_addr: virtual address at which to start unmapping |
| 1304 | * @end_addr: virtual address at which to end unmapping |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1305 | * |
Hugh Dickins | 508034a | 2005-10-29 18:16:30 -0700 | [diff] [blame] | 1306 | * Unmap all pages in the vma list. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1307 | * |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1308 | * Only addresses between @start_addr and @end_addr will be unmapped. |
| 1309 | * |
| 1310 | * The VMA list must be sorted in ascending virtual address order. |
| 1311 | * |
| 1312 | * unmap_vmas() assumes that the caller will flush the whole unmapped address |
| 1313 | * range after unmap_vmas() returns. So the only responsibility here is to |
| 1314 | * ensure that any thus-far unmapped pages are flushed before unmap_vmas() |
| 1315 | * drops the lock and schedules. |
| 1316 | */ |
Al Viro | 6e8bb01 | 2012-03-05 13:41:15 -0500 | [diff] [blame] | 1317 | void unmap_vmas(struct mmu_gather *tlb, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1318 | struct vm_area_struct *vma, unsigned long start_addr, |
Linus Torvalds | 4f74d2c | 2012-05-06 13:54:06 -0700 | [diff] [blame] | 1319 | unsigned long end_addr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1320 | { |
Andrea Arcangeli | cddb8a5 | 2008-07-28 15:46:29 -0700 | [diff] [blame] | 1321 | struct mm_struct *mm = vma->vm_mm; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1322 | |
Andrea Arcangeli | cddb8a5 | 2008-07-28 15:46:29 -0700 | [diff] [blame] | 1323 | mmu_notifier_invalidate_range_start(mm, start_addr, end_addr); |
Al Viro | f5cc4ee | 2012-03-05 14:14:20 -0500 | [diff] [blame] | 1324 | for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) |
Linus Torvalds | 4f74d2c | 2012-05-06 13:54:06 -0700 | [diff] [blame] | 1325 | unmap_single_vma(tlb, vma, start_addr, end_addr, NULL); |
Andrea Arcangeli | cddb8a5 | 2008-07-28 15:46:29 -0700 | [diff] [blame] | 1326 | mmu_notifier_invalidate_range_end(mm, start_addr, end_addr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1327 | } |
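
/*
 * Illustrative sketch (hypothetical, not from this file): an exit_mmap()
 * style teardown of a whole address space does roughly the following
 * before the page tables themselves are freed.  tlb, mm and vma (the
 * first vma in the mm) are assumed to be set up by the caller.
 */
	tlb_gather_mmu(&tlb, mm, 0, -1);
	unmap_vmas(&tlb, vma, 0, -1);
	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
	tlb_finish_mmu(&tlb, 0, -1);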
| 1328 | |
| 1329 | /** |
| 1330 | * zap_page_range - remove user pages in a given range |
| 1331 | * @vma: vm_area_struct holding the applicable pages |
Randy Dunlap | eb4546b | 2012-06-20 12:53:02 -0700 | [diff] [blame] | 1332 | * @start: starting address of pages to zap |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1333 | * @size: number of bytes to zap |
Kirill A. Shutemov | 8a5f14a | 2015-02-10 14:09:49 -0800 | [diff] [blame] | 1334 | * @details: details of shared cache invalidation |
Al Viro | f5cc4ee | 2012-03-05 14:14:20 -0500 | [diff] [blame] | 1335 | * |
| 1336 | * Caller must protect the VMA list |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1337 | */ |
Linus Torvalds | 7e027b1 | 2012-05-06 13:43:15 -0700 | [diff] [blame] | 1338 | void zap_page_range(struct vm_area_struct *vma, unsigned long start, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1339 | unsigned long size, struct zap_details *details) |
| 1340 | { |
| 1341 | struct mm_struct *mm = vma->vm_mm; |
Peter Zijlstra | d16dfc5 | 2011-05-24 17:11:45 -0700 | [diff] [blame] | 1342 | struct mmu_gather tlb; |
Linus Torvalds | 7e027b1 | 2012-05-06 13:43:15 -0700 | [diff] [blame] | 1343 | unsigned long end = start + size; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1344 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1345 | lru_add_drain(); |
Linus Torvalds | 2b04725 | 2013-08-15 11:42:25 -0700 | [diff] [blame] | 1346 | tlb_gather_mmu(&tlb, mm, start, end); |
Hugh Dickins | 365e9c87 | 2005-10-29 18:16:18 -0700 | [diff] [blame] | 1347 | update_hiwater_rss(mm); |
Linus Torvalds | 7e027b1 | 2012-05-06 13:43:15 -0700 | [diff] [blame] | 1348 | mmu_notifier_invalidate_range_start(mm, start, end); |
| 1349 | for ( ; vma && vma->vm_start < end; vma = vma->vm_next) |
Linus Torvalds | 4f74d2c | 2012-05-06 13:54:06 -0700 | [diff] [blame] | 1350 | unmap_single_vma(&tlb, vma, start, end, details); |
Linus Torvalds | 7e027b1 | 2012-05-06 13:43:15 -0700 | [diff] [blame] | 1351 | mmu_notifier_invalidate_range_end(mm, start, end); |
| 1352 | tlb_finish_mmu(&tlb, start, end); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1353 | } |
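
/*
 * Illustrative sketch (editor's example): madvise(MADV_DONTNEED) drops
 * the ptes of a range with zap_page_range(); the pages are repopulated
 * lazily by the fault handler on the next access.
 */
	zap_page_range(vma, start, end - start, NULL);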
| 1354 | |
Jack Steiner | c627f9c | 2008-07-29 22:33:53 -0700 | [diff] [blame] | 1355 | /** |
Al Viro | f5cc4ee | 2012-03-05 14:14:20 -0500 | [diff] [blame] | 1356 | * zap_page_range_single - remove user pages in a given range |
| 1357 | * @vma: vm_area_struct holding the applicable pages |
| 1358 | * @address: starting address of pages to zap |
| 1359 | * @size: number of bytes to zap |
Kirill A. Shutemov | 8a5f14a | 2015-02-10 14:09:49 -0800 | [diff] [blame] | 1360 | * @details: details of shared cache invalidation |
Al Viro | f5cc4ee | 2012-03-05 14:14:20 -0500 | [diff] [blame] | 1361 | * |
| 1362 | * The range must fit into one VMA. |
| 1363 | */ |
| 1364 | static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, |
| 1365 | unsigned long size, struct zap_details *details) |
| 1366 | { |
| 1367 | struct mm_struct *mm = vma->vm_mm; |
| 1368 | struct mmu_gather tlb; |
| 1369 | unsigned long end = address + size; |
Al Viro | f5cc4ee | 2012-03-05 14:14:20 -0500 | [diff] [blame] | 1370 | |
| 1371 | lru_add_drain(); |
Linus Torvalds | 2b04725 | 2013-08-15 11:42:25 -0700 | [diff] [blame] | 1372 | tlb_gather_mmu(&tlb, mm, address, end); |
Al Viro | f5cc4ee | 2012-03-05 14:14:20 -0500 | [diff] [blame] | 1373 | update_hiwater_rss(mm); |
| 1374 | mmu_notifier_invalidate_range_start(mm, address, end); |
Linus Torvalds | 4f74d2c | 2012-05-06 13:54:06 -0700 | [diff] [blame] | 1375 | unmap_single_vma(&tlb, vma, address, end, details); |
Al Viro | f5cc4ee | 2012-03-05 14:14:20 -0500 | [diff] [blame] | 1376 | mmu_notifier_invalidate_range_end(mm, address, end); |
| 1377 | tlb_finish_mmu(&tlb, address, end); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1378 | } |
| 1379 | |
Jack Steiner | c627f9c | 2008-07-29 22:33:53 -0700 | [diff] [blame] | 1380 | /** |
| 1381 | * zap_vma_ptes - remove ptes mapping the vma |
| 1382 | * @vma: vm_area_struct holding ptes to be zapped |
| 1383 | * @address: starting address of pages to zap |
| 1384 | * @size: number of bytes to zap |
| 1385 | * |
| 1386 | * This function only unmaps ptes assigned to VM_PFNMAP vmas. |
| 1387 | * |
| 1388 | * The entire address range must be fully contained within the vma. |
| 1389 | * |
| 1390 | * Returns 0 if successful. |
| 1391 | */ |
| 1392 | int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, |
| 1393 | unsigned long size) |
| 1394 | { |
| 1395 | if (address < vma->vm_start || address + size > vma->vm_end || |
| 1396 | !(vma->vm_flags & VM_PFNMAP)) |
| 1397 | return -1; |
Al Viro | f5cc4ee | 2012-03-05 14:14:20 -0500 | [diff] [blame] | 1398 | zap_page_range_single(vma, address, size, NULL); |
Jack Steiner | c627f9c | 2008-07-29 22:33:53 -0700 | [diff] [blame] | 1399 | return 0; |
| 1400 | } |
| 1401 | EXPORT_SYMBOL_GPL(zap_vma_ptes); |
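
/*
 * Illustrative sketch (hypothetical driver code): a driver that mapped a
 * hardware window with VM_PFNMAP can revoke the user mapping before the
 * window is reassigned.  vma is assumed to describe that window.
 */
	if (zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start))
		pr_warn("zap_vma_ptes failed\n");	/* range outside vma, or not VM_PFNMAP */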
| 1402 | |
Namhyung Kim | 25ca1d6 | 2010-10-26 14:21:59 -0700 | [diff] [blame] | 1403 | pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, |
Harvey Harrison | 920c7a5 | 2008-02-04 22:29:26 -0800 | [diff] [blame] | 1404 | spinlock_t **ptl) |
Linus Torvalds | c9cfcdd | 2005-11-29 14:03:14 -0800 | [diff] [blame] | 1405 | { |
| 1406 | pgd_t * pgd = pgd_offset(mm, addr); |
| 1407 | pud_t * pud = pud_alloc(mm, pgd, addr); |
| 1408 | if (pud) { |
Trond Myklebust | 49c91fb | 2005-11-29 19:27:22 -0500 | [diff] [blame] | 1409 | pmd_t * pmd = pmd_alloc(mm, pud, addr); |
Andrea Arcangeli | f66055ab | 2011-01-13 15:46:54 -0800 | [diff] [blame] | 1410 | if (pmd) { |
| 1411 | VM_BUG_ON(pmd_trans_huge(*pmd)); |
Linus Torvalds | c9cfcdd | 2005-11-29 14:03:14 -0800 | [diff] [blame] | 1412 | return pte_alloc_map_lock(mm, pmd, addr, ptl); |
Andrea Arcangeli | f66055ab | 2011-01-13 15:46:54 -0800 | [diff] [blame] | 1413 | } |
Linus Torvalds | c9cfcdd | 2005-11-29 14:03:14 -0800 | [diff] [blame] | 1414 | } |
| 1415 | return NULL; |
| 1416 | } |
| 1417 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1418 | /* |
Linus Torvalds | 238f58d | 2005-11-29 13:01:56 -0800 | [diff] [blame] | 1419 | * This is the old fallback for page remapping. |
| 1420 | * |
| 1421 | * For historical reasons, it only allows reserved pages. Only |
| 1422 | * old drivers should use this, and they needed to mark their |
| 1423 | * pages reserved for the old functions anyway. |
| 1424 | */ |
Nick Piggin | 423bad6 | 2008-04-28 02:13:01 -0700 | [diff] [blame] | 1425 | static int insert_page(struct vm_area_struct *vma, unsigned long addr, |
| 1426 | struct page *page, pgprot_t prot) |
Linus Torvalds | 238f58d | 2005-11-29 13:01:56 -0800 | [diff] [blame] | 1427 | { |
Nick Piggin | 423bad6 | 2008-04-28 02:13:01 -0700 | [diff] [blame] | 1428 | struct mm_struct *mm = vma->vm_mm; |
Linus Torvalds | 238f58d | 2005-11-29 13:01:56 -0800 | [diff] [blame] | 1429 | int retval; |
Linus Torvalds | c9cfcdd | 2005-11-29 14:03:14 -0800 | [diff] [blame] | 1430 | pte_t *pte; |
Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 1431 | spinlock_t *ptl; |
| 1432 | |
Linus Torvalds | 238f58d | 2005-11-29 13:01:56 -0800 | [diff] [blame] | 1433 | retval = -EINVAL; |
Linus Torvalds | a145dd4 | 2005-11-30 09:35:19 -0800 | [diff] [blame] | 1434 | if (PageAnon(page)) |
KAMEZAWA Hiroyuki | 5b4e655 | 2008-10-18 20:28:10 -0700 | [diff] [blame] | 1435 | goto out; |
Linus Torvalds | 238f58d | 2005-11-29 13:01:56 -0800 | [diff] [blame] | 1436 | retval = -ENOMEM; |
| 1437 | flush_dcache_page(page); |
Linus Torvalds | c9cfcdd | 2005-11-29 14:03:14 -0800 | [diff] [blame] | 1438 | pte = get_locked_pte(mm, addr, &ptl); |
Linus Torvalds | 238f58d | 2005-11-29 13:01:56 -0800 | [diff] [blame] | 1439 | if (!pte) |
KAMEZAWA Hiroyuki | 5b4e655 | 2008-10-18 20:28:10 -0700 | [diff] [blame] | 1440 | goto out; |
Linus Torvalds | 238f58d | 2005-11-29 13:01:56 -0800 | [diff] [blame] | 1441 | retval = -EBUSY; |
| 1442 | if (!pte_none(*pte)) |
| 1443 | goto out_unlock; |
| 1444 | |
| 1445 | /* Ok, finally just insert the thing.. */ |
| 1446 | get_page(page); |
Jerome Marchand | eca56ff | 2016-01-14 15:19:26 -0800 | [diff] [blame] | 1447 | inc_mm_counter_fast(mm, mm_counter_file(page)); |
Linus Torvalds | 238f58d | 2005-11-29 13:01:56 -0800 | [diff] [blame] | 1448 | page_add_file_rmap(page); |
| 1449 | set_pte_at(mm, addr, pte, mk_pte(page, prot)); |
| 1450 | |
| 1451 | retval = 0; |
Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 1452 | pte_unmap_unlock(pte, ptl); |
| 1453 | return retval; |
Linus Torvalds | 238f58d | 2005-11-29 13:01:56 -0800 | [diff] [blame] | 1454 | out_unlock: |
| 1455 | pte_unmap_unlock(pte, ptl); |
| 1456 | out: |
| 1457 | return retval; |
| 1458 | } |
| 1459 | |
Rolf Eike Beer | bfa5bf6 | 2006-09-25 23:31:22 -0700 | [diff] [blame] | 1460 | /** |
| 1461 | * vm_insert_page - insert single page into user vma |
| 1462 | * @vma: user vma to map to |
| 1463 | * @addr: target user address of this page |
| 1464 | * @page: source kernel page |
| 1465 | * |
Linus Torvalds | a145dd4 | 2005-11-30 09:35:19 -0800 | [diff] [blame] | 1466 | * This allows drivers to insert individual pages they've allocated |
| 1467 | * into a user vma. |
| 1468 | * |
| 1469 | * The page has to be a nice clean _individual_ kernel allocation. |
| 1470 | * If you allocate a compound page, you need to have marked it as |
| 1471 | * such (__GFP_COMP), or manually just split the page up yourself |
Nick Piggin | 8dfcc9b | 2006-03-22 00:08:05 -0800 | [diff] [blame] | 1472 | * (see split_page()). |
Linus Torvalds | a145dd4 | 2005-11-30 09:35:19 -0800 | [diff] [blame] | 1473 | * |
| 1474 | * NOTE! Traditionally this was done with "remap_pfn_range()" which |
| 1475 | * took an arbitrary page protection parameter. This doesn't allow |
| 1476 | * that. Your vma protection will have to be set up correctly, which |
| 1477 | * means that if you want a shared writable mapping, you'd better |
| 1478 | * ask for a shared writable mapping! |
| 1479 | * |
| 1480 | * The page does not need to be reserved. |
Konstantin Khlebnikov | 4b6e1e3 | 2012-10-08 16:28:40 -0700 | [diff] [blame] | 1481 | * |
| 1482 | * Usually this function is called from f_op->mmap() handler |
| 1483 | * under mm->mmap_sem write-lock, so it can change vma->vm_flags. |
| 1484 | * Caller must set VM_MIXEDMAP on vma if it wants to call this |
| 1485 | * function from other places, for example from page-fault handler. |
Linus Torvalds | a145dd4 | 2005-11-30 09:35:19 -0800 | [diff] [blame] | 1486 | */ |
Nick Piggin | 423bad6 | 2008-04-28 02:13:01 -0700 | [diff] [blame] | 1487 | int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, |
| 1488 | struct page *page) |
Linus Torvalds | a145dd4 | 2005-11-30 09:35:19 -0800 | [diff] [blame] | 1489 | { |
| 1490 | if (addr < vma->vm_start || addr >= vma->vm_end) |
| 1491 | return -EFAULT; |
| 1492 | if (!page_count(page)) |
| 1493 | return -EINVAL; |
Konstantin Khlebnikov | 4b6e1e3 | 2012-10-08 16:28:40 -0700 | [diff] [blame] | 1494 | if (!(vma->vm_flags & VM_MIXEDMAP)) { |
| 1495 | BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem)); |
| 1496 | BUG_ON(vma->vm_flags & VM_PFNMAP); |
| 1497 | vma->vm_flags |= VM_MIXEDMAP; |
| 1498 | } |
Nick Piggin | 423bad6 | 2008-04-28 02:13:01 -0700 | [diff] [blame] | 1499 | return insert_page(vma, addr, page, vma->vm_page_prot); |
Linus Torvalds | a145dd4 | 2005-11-30 09:35:19 -0800 | [diff] [blame] | 1500 | } |
Linus Torvalds | e3c3374 | 2005-12-03 20:48:11 -0800 | [diff] [blame] | 1501 | EXPORT_SYMBOL(vm_insert_page); |
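
/*
 * Illustrative sketch (hypothetical, not part of memory.c): a driver
 * ->mmap handler inserting pages it allocated itself.  my_buf_pages and
 * my_buf_npages are made-up driver state holding individually allocated,
 * non-compound pages.
 */
static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	int i, err;

	for (i = 0; i < my_buf_npages && uaddr < vma->vm_end; i++) {
		err = vm_insert_page(vma, uaddr, my_buf_pages[i]);
		if (err)
			return err;
		uaddr += PAGE_SIZE;
	}
	return 0;
}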
Linus Torvalds | a145dd4 | 2005-11-30 09:35:19 -0800 | [diff] [blame] | 1502 | |
Nick Piggin | 423bad6 | 2008-04-28 02:13:01 -0700 | [diff] [blame] | 1503 | static int insert_pfn(struct vm_area_struct *vma, unsigned long addr, |
Dan Williams | 01c8f1c | 2016-01-15 16:56:40 -0800 | [diff] [blame] | 1504 | pfn_t pfn, pgprot_t prot) |
Nick Piggin | 423bad6 | 2008-04-28 02:13:01 -0700 | [diff] [blame] | 1505 | { |
| 1506 | struct mm_struct *mm = vma->vm_mm; |
| 1507 | int retval; |
| 1508 | pte_t *pte, entry; |
| 1509 | spinlock_t *ptl; |
| 1510 | |
| 1511 | retval = -ENOMEM; |
| 1512 | pte = get_locked_pte(mm, addr, &ptl); |
| 1513 | if (!pte) |
| 1514 | goto out; |
| 1515 | retval = -EBUSY; |
| 1516 | if (!pte_none(*pte)) |
| 1517 | goto out_unlock; |
| 1518 | |
| 1519 | /* Ok, finally just insert the thing.. */ |
Dan Williams | 01c8f1c | 2016-01-15 16:56:40 -0800 | [diff] [blame] | 1520 | if (pfn_t_devmap(pfn)) |
| 1521 | entry = pte_mkdevmap(pfn_t_pte(pfn, prot)); |
| 1522 | else |
| 1523 | entry = pte_mkspecial(pfn_t_pte(pfn, prot)); |
Nick Piggin | 423bad6 | 2008-04-28 02:13:01 -0700 | [diff] [blame] | 1524 | set_pte_at(mm, addr, pte, entry); |
Russell King | 4b3073e | 2009-12-18 16:40:18 +0000 | [diff] [blame] | 1525 | update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */ |
Nick Piggin | 423bad6 | 2008-04-28 02:13:01 -0700 | [diff] [blame] | 1526 | |
| 1527 | retval = 0; |
| 1528 | out_unlock: |
| 1529 | pte_unmap_unlock(pte, ptl); |
| 1530 | out: |
| 1531 | return retval; |
| 1532 | } |
| 1533 | |
Nick Piggin | e0dc0d8 | 2007-02-12 00:51:36 -0800 | [diff] [blame] | 1534 | /** |
| 1535 | * vm_insert_pfn - insert single pfn into user vma |
| 1536 | * @vma: user vma to map to |
| 1537 | * @addr: target user address of this page |
| 1538 | * @pfn: source kernel pfn |
| 1539 | * |
Robert P. J. Day | c462f17 | 2012-10-08 16:33:43 -0700 | [diff] [blame] | 1540 | * Similar to vm_insert_page, this allows drivers to insert individual pages |
Nick Piggin | e0dc0d8 | 2007-02-12 00:51:36 -0800 | [diff] [blame] | 1541 | * they've allocated into a user vma. Same comments apply. |
| 1542 | * |
| 1543 | * This function should only be called from a vm_ops->fault handler, and |
| 1544 | * in that case the handler should return NULL. |
Nick Piggin | 0d71d10a | 2008-07-23 21:27:05 -0700 | [diff] [blame] | 1545 | * |
| 1546 | * vma cannot be a COW mapping. |
| 1547 | * |
| 1548 | * As this is called only for pages that do not currently exist, we |
| 1549 | * do not need to flush old virtual caches or the TLB. |
Nick Piggin | e0dc0d8 | 2007-02-12 00:51:36 -0800 | [diff] [blame] | 1550 | */ |
| 1551 | int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, |
Nick Piggin | 423bad6 | 2008-04-28 02:13:01 -0700 | [diff] [blame] | 1552 | unsigned long pfn) |
Nick Piggin | e0dc0d8 | 2007-02-12 00:51:36 -0800 | [diff] [blame] | 1553 | { |
venkatesh.pallipadi@intel.com | 2ab6403 | 2008-12-18 11:41:29 -0800 | [diff] [blame] | 1554 | int ret; |
venkatesh.pallipadi@intel.com | e4b866e | 2009-01-09 16:13:11 -0800 | [diff] [blame] | 1555 | pgprot_t pgprot = vma->vm_page_prot; |
Nick Piggin | 7e67513 | 2008-04-28 02:13:00 -0700 | [diff] [blame] | 1556 | /* |
| 1557 | * Technically, architectures with pte_special can avoid all these |
| 1558 | * restrictions (same for remap_pfn_range). However we would like |
| 1559 | * consistency in testing and feature parity among all, so we should |
| 1560 | * try to keep these invariants in place for everybody. |
| 1561 | */ |
Jared Hulbert | b379d79 | 2008-04-28 02:12:58 -0700 | [diff] [blame] | 1562 | BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); |
| 1563 | BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == |
| 1564 | (VM_PFNMAP|VM_MIXEDMAP)); |
| 1565 | BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); |
| 1566 | BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); |
Nick Piggin | e0dc0d8 | 2007-02-12 00:51:36 -0800 | [diff] [blame] | 1567 | |
Nick Piggin | 423bad6 | 2008-04-28 02:13:01 -0700 | [diff] [blame] | 1568 | if (addr < vma->vm_start || addr >= vma->vm_end) |
| 1569 | return -EFAULT; |
Dan Williams | f25748e3 | 2016-01-15 16:56:43 -0800 | [diff] [blame] | 1570 | if (track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV))) |
venkatesh.pallipadi@intel.com | 2ab6403 | 2008-12-18 11:41:29 -0800 | [diff] [blame] | 1571 | return -EINVAL; |
| 1572 | |
Dan Williams | 01c8f1c | 2016-01-15 16:56:40 -0800 | [diff] [blame] | 1573 | ret = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot); |
venkatesh.pallipadi@intel.com | 2ab6403 | 2008-12-18 11:41:29 -0800 | [diff] [blame] | 1574 | |
venkatesh.pallipadi@intel.com | 2ab6403 | 2008-12-18 11:41:29 -0800 | [diff] [blame] | 1575 | return ret; |
Nick Piggin | e0dc0d8 | 2007-02-12 00:51:36 -0800 | [diff] [blame] | 1576 | } |
| 1577 | EXPORT_SYMBOL(vm_insert_pfn); |
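
/*
 * Illustrative sketch (hypothetical): a VM_PFNMAP driver's ->fault
 * handler inserting the pfn that backs the faulting offset, using the
 * fault handler signature of this kernel generation.  my_offset_to_pfn()
 * is a made-up helper; -EBUSY means another thread raced us and already
 * installed the pte, which is fine.
 */
static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long pfn = my_offset_to_pfn(vma, vmf->pgoff);
	int err;

	err = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}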
| 1578 | |
Nick Piggin | 423bad6 | 2008-04-28 02:13:01 -0700 | [diff] [blame] | 1579 | int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, |
Dan Williams | 01c8f1c | 2016-01-15 16:56:40 -0800 | [diff] [blame] | 1580 | pfn_t pfn) |
Nick Piggin | 423bad6 | 2008-04-28 02:13:01 -0700 | [diff] [blame] | 1581 | { |
| 1582 | BUG_ON(!(vma->vm_flags & VM_MIXEDMAP)); |
| 1583 | |
| 1584 | if (addr < vma->vm_start || addr >= vma->vm_end) |
| 1585 | return -EFAULT; |
| 1586 | |
| 1587 | /* |
| 1588 | * If we don't have pte special, then we have to use the pfn_valid() |
| 1589 | * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must* |
| 1590 | * refcount the page if pfn_valid is true (hence insert_page rather |
Hugh Dickins | 62eede6 | 2009-09-21 17:03:34 -0700 | [diff] [blame] | 1591 | * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP |
| 1592 | * without pte special, it would be refcounted there as a normal page. |
Nick Piggin | 423bad6 | 2008-04-28 02:13:01 -0700 | [diff] [blame] | 1593 | */ |
Dan Williams | 01c8f1c | 2016-01-15 16:56:40 -0800 | [diff] [blame] | 1594 | if (!HAVE_PTE_SPECIAL && pfn_t_valid(pfn)) { |
Nick Piggin | 423bad6 | 2008-04-28 02:13:01 -0700 | [diff] [blame] | 1595 | struct page *page; |
| 1596 | |
Dan Williams | 01c8f1c | 2016-01-15 16:56:40 -0800 | [diff] [blame] | 1597 | page = pfn_t_to_page(pfn); |
Nick Piggin | 423bad6 | 2008-04-28 02:13:01 -0700 | [diff] [blame] | 1598 | return insert_page(vma, addr, page, vma->vm_page_prot); |
| 1599 | } |
| 1600 | return insert_pfn(vma, addr, pfn, vma->vm_page_prot); |
| 1601 | } |
| 1602 | EXPORT_SYMBOL(vm_insert_mixed); |
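
/*
 * Illustrative sketch: same shape as the vm_insert_pfn() example above,
 * but for a VM_MIXEDMAP mapping and a pfn_t (which may carry PFN_DEV /
 * PFN_MAP bits).  my_offset_to_pfn_t() is a made-up helper.
 */
	err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			      my_offset_to_pfn_t(vma, vmf->pgoff));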
| 1603 | |
Linus Torvalds | a145dd4 | 2005-11-30 09:35:19 -0800 | [diff] [blame] | 1604 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1605 | * Maps a range of physical memory into the requested pages. The old |
| 1606 | * mappings are removed. Any references to nonexistent pages result |
| 1607 | * in null mappings (currently treated as "copy-on-access"). |
| 1608 | */ |
| 1609 | static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, |
| 1610 | unsigned long addr, unsigned long end, |
| 1611 | unsigned long pfn, pgprot_t prot) |
| 1612 | { |
| 1613 | pte_t *pte; |
Hugh Dickins | c74df32 | 2005-10-29 18:16:23 -0700 | [diff] [blame] | 1614 | spinlock_t *ptl; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1615 | |
Hugh Dickins | c74df32 | 2005-10-29 18:16:23 -0700 | [diff] [blame] | 1616 | pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1617 | if (!pte) |
| 1618 | return -ENOMEM; |
Zachary Amsden | 6606c3e | 2006-09-30 23:29:33 -0700 | [diff] [blame] | 1619 | arch_enter_lazy_mmu_mode(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1620 | do { |
| 1621 | BUG_ON(!pte_none(*pte)); |
Nick Piggin | 7e67513 | 2008-04-28 02:13:00 -0700 | [diff] [blame] | 1622 | set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot))); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1623 | pfn++; |
| 1624 | } while (pte++, addr += PAGE_SIZE, addr != end); |
Zachary Amsden | 6606c3e | 2006-09-30 23:29:33 -0700 | [diff] [blame] | 1625 | arch_leave_lazy_mmu_mode(); |
Hugh Dickins | c74df32 | 2005-10-29 18:16:23 -0700 | [diff] [blame] | 1626 | pte_unmap_unlock(pte - 1, ptl); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1627 | return 0; |
| 1628 | } |
| 1629 | |
| 1630 | static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, |
| 1631 | unsigned long addr, unsigned long end, |
| 1632 | unsigned long pfn, pgprot_t prot) |
| 1633 | { |
| 1634 | pmd_t *pmd; |
| 1635 | unsigned long next; |
| 1636 | |
| 1637 | pfn -= addr >> PAGE_SHIFT; |
| 1638 | pmd = pmd_alloc(mm, pud, addr); |
| 1639 | if (!pmd) |
| 1640 | return -ENOMEM; |
Andrea Arcangeli | f66055ab | 2011-01-13 15:46:54 -0800 | [diff] [blame] | 1641 | VM_BUG_ON(pmd_trans_huge(*pmd)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1642 | do { |
| 1643 | next = pmd_addr_end(addr, end); |
| 1644 | if (remap_pte_range(mm, pmd, addr, next, |
| 1645 | pfn + (addr >> PAGE_SHIFT), prot)) |
| 1646 | return -ENOMEM; |
| 1647 | } while (pmd++, addr = next, addr != end); |
| 1648 | return 0; |
| 1649 | } |
| 1650 | |
| 1651 | static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd, |
| 1652 | unsigned long addr, unsigned long end, |
| 1653 | unsigned long pfn, pgprot_t prot) |
| 1654 | { |
| 1655 | pud_t *pud; |
| 1656 | unsigned long next; |
| 1657 | |
| 1658 | pfn -= addr >> PAGE_SHIFT; |
| 1659 | pud = pud_alloc(mm, pgd, addr); |
| 1660 | if (!pud) |
| 1661 | return -ENOMEM; |
| 1662 | do { |
| 1663 | next = pud_addr_end(addr, end); |
| 1664 | if (remap_pmd_range(mm, pud, addr, next, |
| 1665 | pfn + (addr >> PAGE_SHIFT), prot)) |
| 1666 | return -ENOMEM; |
| 1667 | } while (pud++, addr = next, addr != end); |
| 1668 | return 0; |
| 1669 | } |
| 1670 | |
Rolf Eike Beer | bfa5bf6 | 2006-09-25 23:31:22 -0700 | [diff] [blame] | 1671 | /** |
| 1672 | * remap_pfn_range - remap kernel memory to userspace |
| 1673 | * @vma: user vma to map to |
| 1674 | * @addr: target user address to start at |
| 1675 | * @pfn: physical address of kernel memory |
| 1676 | * @size: size of map area |
| 1677 | * @prot: page protection flags for this mapping |
| 1678 | * |
| 1679 | * Note: this is only safe if the mm semaphore is held when called. |
| 1680 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1681 | int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, |
| 1682 | unsigned long pfn, unsigned long size, pgprot_t prot) |
| 1683 | { |
| 1684 | pgd_t *pgd; |
| 1685 | unsigned long next; |
Hugh Dickins | 2d15cab | 2005-06-25 14:54:33 -0700 | [diff] [blame] | 1686 | unsigned long end = addr + PAGE_ALIGN(size); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1687 | struct mm_struct *mm = vma->vm_mm; |
| 1688 | int err; |
| 1689 | |
| 1690 | /* |
| 1691 | * Physically remapped pages are special. Tell the |
| 1692 | * rest of the world about it: |
| 1693 | * VM_IO tells people not to look at these pages |
| 1694 | * (accesses can have side effects). |
Linus Torvalds | 6aab341 | 2005-11-28 14:34:23 -0800 | [diff] [blame] | 1695 | * VM_PFNMAP tells the core MM that the base pages are just |
| 1696 | * raw PFN mappings, and do not have a "struct page" associated |
| 1697 | * with them. |
Konstantin Khlebnikov | 314e51b | 2012-10-08 16:29:02 -0700 | [diff] [blame] | 1698 | * VM_DONTEXPAND |
| 1699 | * Disable vma merging and expanding with mremap(). |
| 1700 | * VM_DONTDUMP |
| 1701 | * Omit vma from core dump, even when VM_IO turned off. |
Linus Torvalds | fb155c1 | 2005-12-11 19:46:02 -0800 | [diff] [blame] | 1702 | * |
| 1703 | * There's a horrible special case to handle copy-on-write |
| 1704 | * behaviour that some programs depend on. We mark the "original" |
| 1705 | * un-COW'ed pages by matching them up with "vma->vm_pgoff". |
Konstantin Khlebnikov | b3b9c29 | 2012-10-08 16:28:34 -0700 | [diff] [blame] | 1706 | * See vm_normal_page() for details. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1707 | */ |
Konstantin Khlebnikov | b3b9c29 | 2012-10-08 16:28:34 -0700 | [diff] [blame] | 1708 | if (is_cow_mapping(vma->vm_flags)) { |
| 1709 | if (addr != vma->vm_start || end != vma->vm_end) |
| 1710 | return -EINVAL; |
Linus Torvalds | fb155c1 | 2005-12-11 19:46:02 -0800 | [diff] [blame] | 1711 | vma->vm_pgoff = pfn; |
Konstantin Khlebnikov | b3b9c29 | 2012-10-08 16:28:34 -0700 | [diff] [blame] | 1712 | } |
| 1713 | |
| 1714 | err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size)); |
| 1715 | if (err) |
venkatesh.pallipadi@intel.com | 3c8bb73 | 2008-12-18 11:41:27 -0800 | [diff] [blame] | 1716 | return -EINVAL; |
Linus Torvalds | fb155c1 | 2005-12-11 19:46:02 -0800 | [diff] [blame] | 1717 | |
Konstantin Khlebnikov | 314e51b | 2012-10-08 16:29:02 -0700 | [diff] [blame] | 1718 | vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1719 | |
| 1720 | BUG_ON(addr >= end); |
| 1721 | pfn -= addr >> PAGE_SHIFT; |
| 1722 | pgd = pgd_offset(mm, addr); |
| 1723 | flush_cache_range(vma, addr, end); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1724 | do { |
| 1725 | next = pgd_addr_end(addr, end); |
| 1726 | err = remap_pud_range(mm, pgd, addr, next, |
| 1727 | pfn + (addr >> PAGE_SHIFT), prot); |
| 1728 | if (err) |
| 1729 | break; |
| 1730 | } while (pgd++, addr = next, addr != end); |
venkatesh.pallipadi@intel.com | 2ab6403 | 2008-12-18 11:41:29 -0800 | [diff] [blame] | 1731 | |
| 1732 | if (err) |
Suresh Siddha | 5180da4 | 2012-10-08 16:28:29 -0700 | [diff] [blame] | 1733 | untrack_pfn(vma, pfn, PAGE_ALIGN(size)); |
venkatesh.pallipadi@intel.com | 2ab6403 | 2008-12-18 11:41:29 -0800 | [diff] [blame] | 1734 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1735 | return err; |
| 1736 | } |
| 1737 | EXPORT_SYMBOL(remap_pfn_range); |
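
/*
 * Illustrative sketch (hypothetical, not part of memory.c): the classic
 * driver ->mmap handler exposing one physically contiguous buffer.
 * my_buf_phys is a made-up, page-aligned physical address.
 */
static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	return remap_pfn_range(vma, vma->vm_start,
			       my_buf_phys >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}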
| 1738 | |
Linus Torvalds | b4cbb19 | 2013-04-16 13:45:37 -0700 | [diff] [blame] | 1739 | /** |
| 1740 | * vm_iomap_memory - remap memory to userspace |
| 1741 | * @vma: user vma to map to |
| 1742 | * @start: start of area |
| 1743 | * @len: size of area |
| 1744 | * |
| 1745 | * This is a simplified io_remap_pfn_range() for common driver use. The |
| 1746 | * driver just needs to give us the physical memory range to be mapped, |
| 1747 | * we'll figure out the rest from the vma information. |
| 1748 | * |
| 1749 | * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get |
| 1750 | * whatever write-combining details or similar. |
| 1751 | */ |
| 1752 | int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) |
| 1753 | { |
| 1754 | unsigned long vm_len, pfn, pages; |
| 1755 | |
| 1756 | /* Check that the physical memory area passed in looks valid */ |
| 1757 | if (start + len < start) |
| 1758 | return -EINVAL; |
| 1759 | /* |
| 1760 | * You *really* shouldn't map things that aren't page-aligned, |
| 1761 | * but we've historically allowed it because IO memory might |
| 1762 | * just have smaller alignment. |
| 1763 | */ |
| 1764 | len += start & ~PAGE_MASK; |
| 1765 | pfn = start >> PAGE_SHIFT; |
| 1766 | pages = (len + ~PAGE_MASK) >> PAGE_SHIFT; |
| 1767 | if (pfn + pages < pfn) |
| 1768 | return -EINVAL; |
| 1769 | |
| 1770 | /* We start the mapping 'vm_pgoff' pages into the area */ |
| 1771 | if (vma->vm_pgoff > pages) |
| 1772 | return -EINVAL; |
| 1773 | pfn += vma->vm_pgoff; |
| 1774 | pages -= vma->vm_pgoff; |
| 1775 | |
| 1776 | /* Can we fit all of the mapping? */ |
| 1777 | vm_len = vma->vm_end - vma->vm_start; |
| 1778 | if (vm_len >> PAGE_SHIFT > pages) |
| 1779 | return -EINVAL; |
| 1780 | |
| 1781 | /* Ok, let it rip */ |
| 1782 | return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); |
| 1783 | } |
| 1784 | EXPORT_SYMBOL(vm_iomap_memory); |
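
/*
 * Illustrative sketch: with vm_iomap_memory() the same kind of driver
 * ->mmap handler shrinks to a single call, with the offset and size
 * checks against the vma done for the caller.  my_aperture_phys and
 * my_aperture_len are hypothetical.
 */
static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	return vm_iomap_memory(vma, my_aperture_phys, my_aperture_len);
}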
| 1785 | |
Jeremy Fitzhardinge | aee16b3 | 2007-05-06 14:48:54 -0700 | [diff] [blame] | 1786 | static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, |
| 1787 | unsigned long addr, unsigned long end, |
| 1788 | pte_fn_t fn, void *data) |
| 1789 | { |
| 1790 | pte_t *pte; |
| 1791 | int err; |
Martin Schwidefsky | 2f569af | 2008-02-08 04:22:04 -0800 | [diff] [blame] | 1792 | pgtable_t token; |
Borislav Petkov | 9490991 | 2007-05-06 14:49:17 -0700 | [diff] [blame] | 1793 | spinlock_t *uninitialized_var(ptl); |
Jeremy Fitzhardinge | aee16b3 | 2007-05-06 14:48:54 -0700 | [diff] [blame] | 1794 | |
| 1795 | pte = (mm == &init_mm) ? |
| 1796 | pte_alloc_kernel(pmd, addr) : |
| 1797 | pte_alloc_map_lock(mm, pmd, addr, &ptl); |
| 1798 | if (!pte) |
| 1799 | return -ENOMEM; |
| 1800 | |
| 1801 | BUG_ON(pmd_huge(*pmd)); |
| 1802 | |
Jeremy Fitzhardinge | 38e0edb | 2009-01-06 14:39:21 -0800 | [diff] [blame] | 1803 | arch_enter_lazy_mmu_mode(); |
| 1804 | |
Martin Schwidefsky | 2f569af | 2008-02-08 04:22:04 -0800 | [diff] [blame] | 1805 | token = pmd_pgtable(*pmd); |
Jeremy Fitzhardinge | aee16b3 | 2007-05-06 14:48:54 -0700 | [diff] [blame] | 1806 | |
| 1807 | do { |
Daisuke Nishimura | c36987e | 2009-10-26 16:50:23 -0700 | [diff] [blame] | 1808 | err = fn(pte++, token, addr, data); |
Jeremy Fitzhardinge | aee16b3 | 2007-05-06 14:48:54 -0700 | [diff] [blame] | 1809 | if (err) |
| 1810 | break; |
Daisuke Nishimura | c36987e | 2009-10-26 16:50:23 -0700 | [diff] [blame] | 1811 | } while (addr += PAGE_SIZE, addr != end); |
Jeremy Fitzhardinge | aee16b3 | 2007-05-06 14:48:54 -0700 | [diff] [blame] | 1812 | |
Jeremy Fitzhardinge | 38e0edb | 2009-01-06 14:39:21 -0800 | [diff] [blame] | 1813 | arch_leave_lazy_mmu_mode(); |
| 1814 | |
Jeremy Fitzhardinge | aee16b3 | 2007-05-06 14:48:54 -0700 | [diff] [blame] | 1815 | if (mm != &init_mm) |
| 1816 | pte_unmap_unlock(pte-1, ptl); |
| 1817 | return err; |
| 1818 | } |
| 1819 | |
| 1820 | static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, |
| 1821 | unsigned long addr, unsigned long end, |
| 1822 | pte_fn_t fn, void *data) |
| 1823 | { |
| 1824 | pmd_t *pmd; |
| 1825 | unsigned long next; |
| 1826 | int err; |
| 1827 | |
Andi Kleen | ceb8687 | 2008-07-23 21:27:50 -0700 | [diff] [blame] | 1828 | BUG_ON(pud_huge(*pud)); |
| 1829 | |
Jeremy Fitzhardinge | aee16b3 | 2007-05-06 14:48:54 -0700 | [diff] [blame] | 1830 | pmd = pmd_alloc(mm, pud, addr); |
| 1831 | if (!pmd) |
| 1832 | return -ENOMEM; |
| 1833 | do { |
| 1834 | next = pmd_addr_end(addr, end); |
| 1835 | err = apply_to_pte_range(mm, pmd, addr, next, fn, data); |
| 1836 | if (err) |
| 1837 | break; |
| 1838 | } while (pmd++, addr = next, addr != end); |
| 1839 | return err; |
| 1840 | } |
| 1841 | |
| 1842 | static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd, |
| 1843 | unsigned long addr, unsigned long end, |
| 1844 | pte_fn_t fn, void *data) |
| 1845 | { |
| 1846 | pud_t *pud; |
| 1847 | unsigned long next; |
| 1848 | int err; |
| 1849 | |
| 1850 | pud = pud_alloc(mm, pgd, addr); |
| 1851 | if (!pud) |
| 1852 | return -ENOMEM; |
| 1853 | do { |
| 1854 | next = pud_addr_end(addr, end); |
| 1855 | err = apply_to_pmd_range(mm, pud, addr, next, fn, data); |
| 1856 | if (err) |
| 1857 | break; |
| 1858 | } while (pud++, addr = next, addr != end); |
| 1859 | return err; |
| 1860 | } |
| 1861 | |
| 1862 | /* |
| 1863 | * Scan a region of virtual memory, filling in page tables as necessary |
| 1864 | * and calling a provided function on each leaf page table. |
| 1865 | */ |
| 1866 | int apply_to_page_range(struct mm_struct *mm, unsigned long addr, |
| 1867 | unsigned long size, pte_fn_t fn, void *data) |
| 1868 | { |
| 1869 | pgd_t *pgd; |
| 1870 | unsigned long next; |
Jeremy Fitzhardinge | 57250a5 | 2010-08-09 17:19:52 -0700 | [diff] [blame] | 1871 | unsigned long end = addr + size; |
Jeremy Fitzhardinge | aee16b3 | 2007-05-06 14:48:54 -0700 | [diff] [blame] | 1872 | int err; |
| 1873 | |
| 1874 | BUG_ON(addr >= end); |
| 1875 | pgd = pgd_offset(mm, addr); |
| 1876 | do { |
| 1877 | next = pgd_addr_end(addr, end); |
| 1878 | err = apply_to_pud_range(mm, pgd, addr, next, fn, data); |
| 1879 | if (err) |
| 1880 | break; |
| 1881 | } while (pgd++, addr = next, addr != end); |
Jeremy Fitzhardinge | 57250a5 | 2010-08-09 17:19:52 -0700 | [diff] [blame] | 1882 | |
Jeremy Fitzhardinge | aee16b3 | 2007-05-06 14:48:54 -0700 | [diff] [blame] | 1883 | return err; |
| 1884 | } |
| 1885 | EXPORT_SYMBOL_GPL(apply_to_page_range); |
| 1886 | |
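/*
 * Illustrative sketch (not part of this file): a typical caller passes a
 * pte_fn_t callback that installs or inspects each leaf pte over a kernel
 * virtual range.  The callback name my_map_one_pte() and the page-cursor
 * argument are hypothetical, assuming pages[] holds one struct page per
 * PAGE_SIZE step of the range:
 *
 *	static int my_map_one_pte(pte_t *pte, pgtable_t token,
 *				  unsigned long addr, void *data)
 *	{
 *		struct page ***cursor = data;
 *
 *		set_pte_at(&init_mm, addr, pte,
 *			   mk_pte(**cursor, PAGE_KERNEL));
 *		(*cursor)++;
 *		return 0;
 *	}
 *
 *	struct page **p = pages;
 *	err = apply_to_page_range(&init_mm, start, nr << PAGE_SHIFT,
 *				  my_map_one_pte, &p);
 */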
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1887 | /* |
Kirill A. Shutemov | 9b4bdd2 | 2015-02-10 14:09:51 -0800 | [diff] [blame] | 1888 | * handle_pte_fault chooses page fault handler according to an entry which was |
| 1889 | * read non-atomically. Before making any commitment, on those architectures |
| 1890 | * or configurations (e.g. i386 with PAE) which might give a mix of unmatched |
| 1891 | * parts, do_swap_page must check under lock before unmapping the pte and |
| 1892 | * proceeding (but do_wp_page is only called after already making such a check; |
Ryota Ozaki | a335b2e | 2011-02-10 13:56:28 +0900 | [diff] [blame] | 1893 | * and do_anonymous_page can safely check later on). |
Hugh Dickins | 8f4e210 | 2005-10-29 18:16:26 -0700 | [diff] [blame] | 1894 | */ |
Hugh Dickins | 4c21e2f | 2005-10-29 18:16:40 -0700 | [diff] [blame] | 1895 | static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd, |
Hugh Dickins | 8f4e210 | 2005-10-29 18:16:26 -0700 | [diff] [blame] | 1896 | pte_t *page_table, pte_t orig_pte) |
| 1897 | { |
| 1898 | int same = 1; |
| 1899 | #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) |
| 1900 | if (sizeof(pte_t) > sizeof(unsigned long)) { |
Hugh Dickins | 4c21e2f | 2005-10-29 18:16:40 -0700 | [diff] [blame] | 1901 | spinlock_t *ptl = pte_lockptr(mm, pmd); |
| 1902 | spin_lock(ptl); |
Hugh Dickins | 8f4e210 | 2005-10-29 18:16:26 -0700 | [diff] [blame] | 1903 | same = pte_same(*page_table, orig_pte); |
Hugh Dickins | 4c21e2f | 2005-10-29 18:16:40 -0700 | [diff] [blame] | 1904 | spin_unlock(ptl); |
Hugh Dickins | 8f4e210 | 2005-10-29 18:16:26 -0700 | [diff] [blame] | 1905 | } |
| 1906 | #endif |
| 1907 | pte_unmap(page_table); |
| 1908 | return same; |
| 1909 | } |
| 1910 | |
Atsushi Nemoto | 9de455b | 2006-12-12 17:14:55 +0000 | [diff] [blame] | 1911 | static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) |
Linus Torvalds | 6aab341 | 2005-11-28 14:34:23 -0800 | [diff] [blame] | 1912 | { |
Dan Williams | 0abdd7a | 2014-01-21 15:48:12 -0800 | [diff] [blame] | 1913 | debug_dma_assert_idle(src); |
| 1914 | |
Linus Torvalds | 6aab341 | 2005-11-28 14:34:23 -0800 | [diff] [blame] | 1915 | /* |
| 1916 | * If the source page was a PFN mapping, we don't have |
| 1917 | * a "struct page" for it. We do a best-effort copy by |
| 1918 | * just copying from the original user address. If that |
| 1919 | * fails, we just zero-fill it. Live with it. |
| 1920 | */ |
| 1921 | if (unlikely(!src)) { |
Cong Wang | 9b04c5f | 2011-11-25 23:14:39 +0800 | [diff] [blame] | 1922 | void *kaddr = kmap_atomic(dst); |
Linus Torvalds | 5d2a2dbbc | 2005-11-29 14:07:55 -0800 | [diff] [blame] | 1923 | void __user *uaddr = (void __user *)(va & PAGE_MASK); |
| 1924 | |
| 1925 | /* |
| 1926 | * This really shouldn't fail, because the page is there |
| 1927 | * in the page tables. But it might just be unreadable, |
| 1928 | * in which case we just give up and fill the result with |
| 1929 | * zeroes. |
| 1930 | */ |
| 1931 | if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) |
Jan Beulich | 3ecb01d | 2010-10-26 14:22:27 -0700 | [diff] [blame] | 1932 | clear_page(kaddr); |
Cong Wang | 9b04c5f | 2011-11-25 23:14:39 +0800 | [diff] [blame] | 1933 | kunmap_atomic(kaddr); |
Dmitriy Monakhov | c4ec7b0 | 2006-10-19 23:29:08 -0700 | [diff] [blame] | 1934 | flush_dcache_page(dst); |
Nick Piggin | 0ed361d | 2008-02-04 22:29:34 -0800 | [diff] [blame] | 1935 | } else |
| 1936 | copy_user_highpage(dst, src, va, vma); |
Linus Torvalds | 6aab341 | 2005-11-28 14:34:23 -0800 | [diff] [blame] | 1937 | } |
| 1938 | |
Michal Hocko | c20cd45 | 2016-01-14 15:20:12 -0800 | [diff] [blame] | 1939 | static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma) |
| 1940 | { |
| 1941 | struct file *vm_file = vma->vm_file; |
| 1942 | |
| 1943 | if (vm_file) |
| 1944 | return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO; |
| 1945 | |
| 1946 | /* |
| 1947 | * Special mappings (e.g. VDSO) do not have any file so fake |
| 1948 | * a default GFP_KERNEL for them. |
| 1949 | */ |
| 1950 | return GFP_KERNEL; |
| 1951 | } |
| 1952 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1953 | /* |
Kirill A. Shutemov | fb09a46 | 2014-04-03 14:48:15 -0700 | [diff] [blame] | 1954 | * Notify the address space that the page is about to become writable so that |
| 1955 | * it can prohibit this or wait for the page to get into an appropriate state. |
| 1956 | * |
| 1957 | * We do this without the lock held, so that it can sleep if it needs to. |
| 1958 | */ |
| 1959 | static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page, |
| 1960 | unsigned long address) |
| 1961 | { |
| 1962 | struct vm_fault vmf; |
| 1963 | int ret; |
| 1964 | |
| 1965 | vmf.virtual_address = (void __user *)(address & PAGE_MASK); |
| 1966 | vmf.pgoff = page->index; |
| 1967 | vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; |
Michal Hocko | c20cd45 | 2016-01-14 15:20:12 -0800 | [diff] [blame] | 1968 | vmf.gfp_mask = __get_fault_gfp_mask(vma); |
Kirill A. Shutemov | fb09a46 | 2014-04-03 14:48:15 -0700 | [diff] [blame] | 1969 | vmf.page = page; |
Matthew Wilcox | 2e4cdab | 2015-02-16 15:58:50 -0800 | [diff] [blame] | 1970 | vmf.cow_page = NULL; |
Kirill A. Shutemov | fb09a46 | 2014-04-03 14:48:15 -0700 | [diff] [blame] | 1971 | |
| 1972 | ret = vma->vm_ops->page_mkwrite(vma, &vmf); |
| 1973 | if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) |
| 1974 | return ret; |
| 1975 | if (unlikely(!(ret & VM_FAULT_LOCKED))) { |
| 1976 | lock_page(page); |
| 1977 | if (!page->mapping) { |
| 1978 | unlock_page(page); |
| 1979 | return 0; /* retry */ |
| 1980 | } |
| 1981 | ret |= VM_FAULT_LOCKED; |
| 1982 | } else |
| 1983 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
| 1984 | return ret; |
| 1985 | } |
| 1986 | |
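/*
 * Illustrative sketch (not part of this file): a minimal ->page_mkwrite()
 * implementation that satisfies the protocol above by returning with the
 * page locked.  The filesystem and function names are hypothetical, and
 * real implementations usually also dirty the page and take freeze
 * protection:
 *
 *	static int myfs_page_mkwrite(struct vm_area_struct *vma,
 *				     struct vm_fault *vmf)
 *	{
 *		struct page *page = vmf->page;
 *		struct inode *inode = file_inode(vma->vm_file);
 *
 *		lock_page(page);
 *		if (page->mapping != inode->i_mapping) {
 *			unlock_page(page);
 *			return VM_FAULT_NOPAGE;
 *		}
 *		wait_for_stable_page(page);
 *		return VM_FAULT_LOCKED;
 *	}
 */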
| 1987 | /* |
Shachar Raindel | 4e047f8 | 2015-04-14 15:46:25 -0700 | [diff] [blame] | 1988 | * Handle write page faults for pages that can be reused in the current vma |
| 1989 | * |
| 1990 |  * This can happen either because the mapping has the VM_SHARED flag set, |
| 1991 |  * or because we hold the last remaining reference to the page. In either |
| 1992 |  * case, all we need to do here is mark the page as writable and update |
| 1993 | * any related book-keeping. |
| 1994 | */ |
| 1995 | static inline int wp_page_reuse(struct mm_struct *mm, |
| 1996 | struct vm_area_struct *vma, unsigned long address, |
| 1997 | pte_t *page_table, spinlock_t *ptl, pte_t orig_pte, |
| 1998 | struct page *page, int page_mkwrite, |
| 1999 | int dirty_shared) |
| 2000 | __releases(ptl) |
| 2001 | { |
| 2002 | pte_t entry; |
| 2003 | /* |
| 2004 | 	 * Clear the page's cpupid information as the existing |
| 2005 | * information potentially belongs to a now completely |
| 2006 | * unrelated process. |
| 2007 | */ |
| 2008 | if (page) |
| 2009 | page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1); |
| 2010 | |
| 2011 | flush_cache_page(vma, address, pte_pfn(orig_pte)); |
| 2012 | entry = pte_mkyoung(orig_pte); |
| 2013 | entry = maybe_mkwrite(pte_mkdirty(entry), vma); |
| 2014 | if (ptep_set_access_flags(vma, address, page_table, entry, 1)) |
| 2015 | update_mmu_cache(vma, address, page_table); |
| 2016 | pte_unmap_unlock(page_table, ptl); |
| 2017 | |
| 2018 | if (dirty_shared) { |
| 2019 | struct address_space *mapping; |
| 2020 | int dirtied; |
| 2021 | |
| 2022 | if (!page_mkwrite) |
| 2023 | lock_page(page); |
| 2024 | |
| 2025 | dirtied = set_page_dirty(page); |
| 2026 | VM_BUG_ON_PAGE(PageAnon(page), page); |
| 2027 | mapping = page->mapping; |
| 2028 | unlock_page(page); |
| 2029 | page_cache_release(page); |
| 2030 | |
| 2031 | if ((dirtied || page_mkwrite) && mapping) { |
| 2032 | /* |
| 2033 | * Some device drivers do not set page.mapping |
| 2034 | * but still dirty their pages |
| 2035 | */ |
| 2036 | balance_dirty_pages_ratelimited(mapping); |
| 2037 | } |
| 2038 | |
| 2039 | if (!page_mkwrite) |
| 2040 | file_update_time(vma->vm_file); |
| 2041 | } |
| 2042 | |
| 2043 | return VM_FAULT_WRITE; |
| 2044 | } |
| 2045 | |
| 2046 | /* |
Shachar Raindel | 2f38ab2 | 2015-04-14 15:46:32 -0700 | [diff] [blame] | 2047 | * Handle the case of a page which we actually need to copy to a new page. |
| 2048 | * |
| 2049 | * Called with mmap_sem locked and the old page referenced, but |
| 2050 | * without the ptl held. |
| 2051 | * |
| 2052 | * High level logic flow: |
| 2053 | * |
| 2054 | * - Allocate a page, copy the content of the old page to the new one. |
| 2055 |  * - Handle bookkeeping and accounting - cgroups, mmu-notifiers, etc. |
| 2056 | * - Take the PTL. If the pte changed, bail out and release the allocated page |
| 2057 | * - If the pte is still the way we remember it, update the page table and all |
| 2058 | * relevant references. This includes dropping the reference the page-table |
| 2059 | * held to the old page, as well as updating the rmap. |
| 2060 | * - In any case, unlock the PTL and drop the reference we took to the old page. |
| 2061 | */ |
| 2062 | static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma, |
| 2063 | unsigned long address, pte_t *page_table, pmd_t *pmd, |
| 2064 | pte_t orig_pte, struct page *old_page) |
| 2065 | { |
| 2066 | struct page *new_page = NULL; |
| 2067 | spinlock_t *ptl = NULL; |
| 2068 | pte_t entry; |
| 2069 | int page_copied = 0; |
| 2070 | const unsigned long mmun_start = address & PAGE_MASK; /* For mmu_notifiers */ |
| 2071 | const unsigned long mmun_end = mmun_start + PAGE_SIZE; /* For mmu_notifiers */ |
| 2072 | struct mem_cgroup *memcg; |
| 2073 | |
| 2074 | if (unlikely(anon_vma_prepare(vma))) |
| 2075 | goto oom; |
| 2076 | |
| 2077 | if (is_zero_pfn(pte_pfn(orig_pte))) { |
| 2078 | new_page = alloc_zeroed_user_highpage_movable(vma, address); |
| 2079 | if (!new_page) |
| 2080 | goto oom; |
| 2081 | } else { |
| 2082 | new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); |
| 2083 | if (!new_page) |
| 2084 | goto oom; |
| 2085 | cow_user_page(new_page, old_page, address, vma); |
| 2086 | } |
Shachar Raindel | 2f38ab2 | 2015-04-14 15:46:32 -0700 | [diff] [blame] | 2087 | |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 2088 | if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false)) |
Shachar Raindel | 2f38ab2 | 2015-04-14 15:46:32 -0700 | [diff] [blame] | 2089 | goto oom_free_new; |
| 2090 | |
Mel Gorman | eb3c24f | 2015-06-24 16:57:27 -0700 | [diff] [blame] | 2091 | __SetPageUptodate(new_page); |
| 2092 | |
Shachar Raindel | 2f38ab2 | 2015-04-14 15:46:32 -0700 | [diff] [blame] | 2093 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); |
| 2094 | |
| 2095 | /* |
| 2096 | * Re-check the pte - we dropped the lock |
| 2097 | */ |
| 2098 | page_table = pte_offset_map_lock(mm, pmd, address, &ptl); |
| 2099 | if (likely(pte_same(*page_table, orig_pte))) { |
| 2100 | if (old_page) { |
| 2101 | if (!PageAnon(old_page)) { |
Jerome Marchand | eca56ff | 2016-01-14 15:19:26 -0800 | [diff] [blame] | 2102 | dec_mm_counter_fast(mm, |
| 2103 | mm_counter_file(old_page)); |
Shachar Raindel | 2f38ab2 | 2015-04-14 15:46:32 -0700 | [diff] [blame] | 2104 | inc_mm_counter_fast(mm, MM_ANONPAGES); |
| 2105 | } |
| 2106 | } else { |
| 2107 | inc_mm_counter_fast(mm, MM_ANONPAGES); |
| 2108 | } |
| 2109 | flush_cache_page(vma, address, pte_pfn(orig_pte)); |
| 2110 | entry = mk_pte(new_page, vma->vm_page_prot); |
| 2111 | entry = maybe_mkwrite(pte_mkdirty(entry), vma); |
| 2112 | /* |
| 2113 | * Clear the pte entry and flush it first, before updating the |
| 2114 | * pte with the new entry. This will avoid a race condition |
| 2115 | * seen in the presence of one thread doing SMC and another |
| 2116 | * thread doing COW. |
| 2117 | */ |
| 2118 | ptep_clear_flush_notify(vma, address, page_table); |
Kirill A. Shutemov | d281ee6 | 2016-01-15 16:52:16 -0800 | [diff] [blame] | 2119 | page_add_new_anon_rmap(new_page, vma, address, false); |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 2120 | mem_cgroup_commit_charge(new_page, memcg, false, false); |
Shachar Raindel | 2f38ab2 | 2015-04-14 15:46:32 -0700 | [diff] [blame] | 2121 | lru_cache_add_active_or_unevictable(new_page, vma); |
| 2122 | /* |
| 2123 | * We call the notify macro here because, when using secondary |
| 2124 | * mmu page tables (such as kvm shadow page tables), we want the |
| 2125 | * new page to be mapped directly into the secondary page table. |
| 2126 | */ |
| 2127 | set_pte_at_notify(mm, address, page_table, entry); |
| 2128 | update_mmu_cache(vma, address, page_table); |
| 2129 | if (old_page) { |
| 2130 | /* |
| 2131 | * Only after switching the pte to the new page may |
| 2132 | * we remove the mapcount here. Otherwise another |
| 2133 | * process may come and find the rmap count decremented |
| 2134 | * before the pte is switched to the new page, and |
| 2135 | * "reuse" the old page writing into it while our pte |
| 2136 | * here still points into it and can be read by other |
| 2137 | * threads. |
| 2138 | * |
| 2139 | * The critical issue is to order this |
| 2140 | 			 * page_remove_rmap with the ptep_clear_flush above. |
| 2141 | * Those stores are ordered by (if nothing else,) |
| 2142 | * the barrier present in the atomic_add_negative |
| 2143 | * in page_remove_rmap. |
| 2144 | * |
| 2145 | * Then the TLB flush in ptep_clear_flush ensures that |
| 2146 | * no process can access the old page before the |
| 2147 | * decremented mapcount is visible. And the old page |
| 2148 | * cannot be reused until after the decremented |
| 2149 | * mapcount is visible. So transitively, TLBs to |
| 2150 | * old page will be flushed before it can be reused. |
| 2151 | */ |
Kirill A. Shutemov | d281ee6 | 2016-01-15 16:52:16 -0800 | [diff] [blame] | 2152 | page_remove_rmap(old_page, false); |
Shachar Raindel | 2f38ab2 | 2015-04-14 15:46:32 -0700 | [diff] [blame] | 2153 | } |
| 2154 | |
| 2155 | /* Free the old page.. */ |
| 2156 | new_page = old_page; |
| 2157 | page_copied = 1; |
| 2158 | } else { |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 2159 | mem_cgroup_cancel_charge(new_page, memcg, false); |
Shachar Raindel | 2f38ab2 | 2015-04-14 15:46:32 -0700 | [diff] [blame] | 2160 | } |
| 2161 | |
| 2162 | if (new_page) |
| 2163 | page_cache_release(new_page); |
| 2164 | |
| 2165 | pte_unmap_unlock(page_table, ptl); |
| 2166 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); |
Kirill A. Shutemov | e90309c | 2016-01-15 16:54:33 -0800 | [diff] [blame] | 2167 | if (old_page) { |
Shachar Raindel | 2f38ab2 | 2015-04-14 15:46:32 -0700 | [diff] [blame] | 2168 | /* |
| 2169 | * Don't let another task, with possibly unlocked vma, |
| 2170 | * keep the mlocked page. |
| 2171 | */ |
| 2172 | if (page_copied && (vma->vm_flags & VM_LOCKED)) { |
| 2173 | lock_page(old_page); /* LRU manipulation */ |
Kirill A. Shutemov | e90309c | 2016-01-15 16:54:33 -0800 | [diff] [blame] | 2174 | if (PageMlocked(old_page)) |
| 2175 | munlock_vma_page(old_page); |
Shachar Raindel | 2f38ab2 | 2015-04-14 15:46:32 -0700 | [diff] [blame] | 2176 | unlock_page(old_page); |
| 2177 | } |
| 2178 | page_cache_release(old_page); |
| 2179 | } |
| 2180 | return page_copied ? VM_FAULT_WRITE : 0; |
| 2181 | oom_free_new: |
| 2182 | page_cache_release(new_page); |
| 2183 | oom: |
| 2184 | if (old_page) |
| 2185 | page_cache_release(old_page); |
| 2186 | return VM_FAULT_OOM; |
| 2187 | } |
| 2188 | |
Boaz Harrosh | dd90618 | 2015-04-15 16:15:11 -0700 | [diff] [blame] | 2189 | /* |
| 2190 |  * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP pages in a |
| 2191 |  * VM_SHARED mapping. |
| 2192 | */ |
| 2193 | static int wp_pfn_shared(struct mm_struct *mm, |
| 2194 | struct vm_area_struct *vma, unsigned long address, |
| 2195 | pte_t *page_table, spinlock_t *ptl, pte_t orig_pte, |
| 2196 | pmd_t *pmd) |
| 2197 | { |
| 2198 | if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { |
| 2199 | struct vm_fault vmf = { |
| 2200 | .page = NULL, |
| 2201 | .pgoff = linear_page_index(vma, address), |
| 2202 | .virtual_address = (void __user *)(address & PAGE_MASK), |
| 2203 | .flags = FAULT_FLAG_WRITE | FAULT_FLAG_MKWRITE, |
| 2204 | }; |
| 2205 | int ret; |
| 2206 | |
| 2207 | pte_unmap_unlock(page_table, ptl); |
| 2208 | ret = vma->vm_ops->pfn_mkwrite(vma, &vmf); |
| 2209 | if (ret & VM_FAULT_ERROR) |
| 2210 | return ret; |
| 2211 | page_table = pte_offset_map_lock(mm, pmd, address, &ptl); |
| 2212 | /* |
| 2213 | * We might have raced with another page fault while we |
| 2214 | * released the pte_offset_map_lock. |
| 2215 | */ |
| 2216 | if (!pte_same(*page_table, orig_pte)) { |
| 2217 | pte_unmap_unlock(page_table, ptl); |
| 2218 | return 0; |
| 2219 | } |
| 2220 | } |
| 2221 | return wp_page_reuse(mm, vma, address, page_table, ptl, orig_pte, |
| 2222 | NULL, 0, 0); |
| 2223 | } |
| 2224 | |
Shachar Raindel | 93e478d | 2015-04-14 15:46:35 -0700 | [diff] [blame] | 2225 | static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma, |
| 2226 | unsigned long address, pte_t *page_table, |
| 2227 | pmd_t *pmd, spinlock_t *ptl, pte_t orig_pte, |
| 2228 | struct page *old_page) |
| 2229 | __releases(ptl) |
| 2230 | { |
| 2231 | int page_mkwrite = 0; |
| 2232 | |
| 2233 | page_cache_get(old_page); |
| 2234 | |
| 2235 | /* |
| 2236 | 	 * Only catch write-faults on shared writable pages; |
| 2237 | * read-only shared pages can get COWed by |
| 2238 | * get_user_pages(.write=1, .force=1). |
| 2239 | */ |
| 2240 | if (vma->vm_ops && vma->vm_ops->page_mkwrite) { |
| 2241 | int tmp; |
| 2242 | |
| 2243 | pte_unmap_unlock(page_table, ptl); |
| 2244 | tmp = do_page_mkwrite(vma, old_page, address); |
| 2245 | if (unlikely(!tmp || (tmp & |
| 2246 | (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { |
| 2247 | page_cache_release(old_page); |
| 2248 | return tmp; |
| 2249 | } |
| 2250 | /* |
| 2251 | * Since we dropped the lock we need to revalidate |
| 2252 | * the PTE as someone else may have changed it. If |
| 2253 | * they did, we just return, as we can count on the |
| 2254 | * MMU to tell us if they didn't also make it writable. |
| 2255 | */ |
| 2256 | page_table = pte_offset_map_lock(mm, pmd, address, |
| 2257 | &ptl); |
| 2258 | if (!pte_same(*page_table, orig_pte)) { |
| 2259 | unlock_page(old_page); |
| 2260 | pte_unmap_unlock(page_table, ptl); |
| 2261 | page_cache_release(old_page); |
| 2262 | return 0; |
| 2263 | } |
| 2264 | page_mkwrite = 1; |
| 2265 | } |
| 2266 | |
| 2267 | return wp_page_reuse(mm, vma, address, page_table, ptl, |
| 2268 | orig_pte, old_page, page_mkwrite, 1); |
| 2269 | } |
| 2270 | |
Shachar Raindel | 2f38ab2 | 2015-04-14 15:46:32 -0700 | [diff] [blame] | 2271 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2272 | * This routine handles present pages, when users try to write |
| 2273 | * to a shared page. It is done by copying the page to a new address |
| 2274 | * and decrementing the shared-page counter for the old page. |
| 2275 | * |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2276 | * Note that this routine assumes that the protection checks have been |
| 2277 | * done by the caller (the low-level page fault routine in most cases). |
| 2278 | * Thus we can safely just mark it writable once we've done any necessary |
| 2279 | * COW. |
| 2280 | * |
| 2281 | * We also mark the page dirty at this point even though the page will |
| 2282 | * change only once the write actually happens. This avoids a few races, |
| 2283 | * and potentially makes it more efficient. |
| 2284 | * |
Hugh Dickins | 8f4e210 | 2005-10-29 18:16:26 -0700 | [diff] [blame] | 2285 | * We enter with non-exclusive mmap_sem (to exclude vma changes, |
| 2286 | * but allow concurrent faults), with pte both mapped and locked. |
| 2287 | * We return with mmap_sem still held, but pte unmapped and unlocked. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2288 | */ |
Hugh Dickins | 65500d2 | 2005-10-29 18:15:59 -0700 | [diff] [blame] | 2289 | static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, |
| 2290 | unsigned long address, pte_t *page_table, pmd_t *pmd, |
Hugh Dickins | 8f4e210 | 2005-10-29 18:16:26 -0700 | [diff] [blame] | 2291 | spinlock_t *ptl, pte_t orig_pte) |
Namhyung Kim | e6219ec | 2010-10-26 14:22:00 -0700 | [diff] [blame] | 2292 | __releases(ptl) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2293 | { |
Shachar Raindel | 2f38ab2 | 2015-04-14 15:46:32 -0700 | [diff] [blame] | 2294 | struct page *old_page; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2295 | |
Linus Torvalds | 6aab341 | 2005-11-28 14:34:23 -0800 | [diff] [blame] | 2296 | old_page = vm_normal_page(vma, address, orig_pte); |
Peter Zijlstra | 251b97f | 2008-07-04 09:59:24 -0700 | [diff] [blame] | 2297 | if (!old_page) { |
| 2298 | /* |
Peter Feiner | 64e4550 | 2014-10-13 15:55:46 -0700 | [diff] [blame] | 2299 | * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a |
| 2300 | * VM_PFNMAP VMA. |
Peter Zijlstra | 251b97f | 2008-07-04 09:59:24 -0700 | [diff] [blame] | 2301 | * |
| 2302 | * We should not cow pages in a shared writeable mapping. |
Boaz Harrosh | dd90618 | 2015-04-15 16:15:11 -0700 | [diff] [blame] | 2303 | * Just mark the pages writable and/or call ops->pfn_mkwrite. |
Peter Zijlstra | 251b97f | 2008-07-04 09:59:24 -0700 | [diff] [blame] | 2304 | */ |
| 2305 | if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == |
| 2306 | (VM_WRITE|VM_SHARED)) |
Boaz Harrosh | dd90618 | 2015-04-15 16:15:11 -0700 | [diff] [blame] | 2307 | return wp_pfn_shared(mm, vma, address, page_table, ptl, |
| 2308 | orig_pte, pmd); |
Shachar Raindel | 2f38ab2 | 2015-04-14 15:46:32 -0700 | [diff] [blame] | 2309 | |
| 2310 | pte_unmap_unlock(page_table, ptl); |
| 2311 | return wp_page_copy(mm, vma, address, page_table, pmd, |
| 2312 | orig_pte, old_page); |
Peter Zijlstra | 251b97f | 2008-07-04 09:59:24 -0700 | [diff] [blame] | 2313 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2314 | |
Peter Zijlstra | d08b385 | 2006-09-25 23:30:57 -0700 | [diff] [blame] | 2315 | /* |
Peter Zijlstra | ee6a645 | 2006-09-25 23:31:00 -0700 | [diff] [blame] | 2316 | 	 * Take out anonymous pages first; anonymous shared vmas are |
| 2317 | * not dirty accountable. |
Peter Zijlstra | d08b385 | 2006-09-25 23:30:57 -0700 | [diff] [blame] | 2318 | */ |
Hugh Dickins | 9a84089 | 2009-09-21 17:02:01 -0700 | [diff] [blame] | 2319 | if (PageAnon(old_page) && !PageKsm(old_page)) { |
Hugh Dickins | ab967d8 | 2009-01-06 14:39:33 -0800 | [diff] [blame] | 2320 | if (!trylock_page(old_page)) { |
| 2321 | page_cache_get(old_page); |
| 2322 | pte_unmap_unlock(page_table, ptl); |
| 2323 | lock_page(old_page); |
| 2324 | page_table = pte_offset_map_lock(mm, pmd, address, |
| 2325 | &ptl); |
| 2326 | if (!pte_same(*page_table, orig_pte)) { |
| 2327 | unlock_page(old_page); |
Shachar Raindel | 2876680 | 2015-04-14 15:46:29 -0700 | [diff] [blame] | 2328 | pte_unmap_unlock(page_table, ptl); |
| 2329 | page_cache_release(old_page); |
| 2330 | return 0; |
Hugh Dickins | ab967d8 | 2009-01-06 14:39:33 -0800 | [diff] [blame] | 2331 | } |
| 2332 | page_cache_release(old_page); |
Peter Zijlstra | ee6a645 | 2006-09-25 23:31:00 -0700 | [diff] [blame] | 2333 | } |
Michel Lespinasse | b009c02 | 2011-01-13 15:46:07 -0800 | [diff] [blame] | 2334 | if (reuse_swap_page(old_page)) { |
Rik van Riel | c44b674 | 2010-03-05 13:42:09 -0800 | [diff] [blame] | 2335 | /* |
| 2336 | * The page is all ours. Move it to our anon_vma so |
| 2337 | * the rmap code will not search our parent or siblings. |
| 2338 | * Protected against the rmap code by the page lock. |
| 2339 | */ |
| 2340 | page_move_anon_rmap(old_page, vma, address); |
Michel Lespinasse | b009c02 | 2011-01-13 15:46:07 -0800 | [diff] [blame] | 2341 | unlock_page(old_page); |
Shachar Raindel | 4e047f8 | 2015-04-14 15:46:25 -0700 | [diff] [blame] | 2342 | return wp_page_reuse(mm, vma, address, page_table, ptl, |
| 2343 | orig_pte, old_page, 0, 0); |
Michel Lespinasse | b009c02 | 2011-01-13 15:46:07 -0800 | [diff] [blame] | 2344 | } |
Hugh Dickins | ab967d8 | 2009-01-06 14:39:33 -0800 | [diff] [blame] | 2345 | unlock_page(old_page); |
Peter Zijlstra | ee6a645 | 2006-09-25 23:31:00 -0700 | [diff] [blame] | 2346 | } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == |
Peter Zijlstra | d08b385 | 2006-09-25 23:30:57 -0700 | [diff] [blame] | 2347 | (VM_WRITE|VM_SHARED))) { |
Shachar Raindel | 93e478d | 2015-04-14 15:46:35 -0700 | [diff] [blame] | 2348 | return wp_page_shared(mm, vma, address, page_table, pmd, |
| 2349 | ptl, orig_pte, old_page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2350 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2351 | |
| 2352 | /* |
| 2353 | * Ok, we need to copy. Oh, well.. |
| 2354 | */ |
Nick Piggin | b581003 | 2005-10-29 18:16:12 -0700 | [diff] [blame] | 2355 | page_cache_get(old_page); |
Shachar Raindel | 2876680 | 2015-04-14 15:46:29 -0700 | [diff] [blame] | 2356 | |
Hugh Dickins | 8f4e210 | 2005-10-29 18:16:26 -0700 | [diff] [blame] | 2357 | pte_unmap_unlock(page_table, ptl); |
Shachar Raindel | 2f38ab2 | 2015-04-14 15:46:32 -0700 | [diff] [blame] | 2358 | return wp_page_copy(mm, vma, address, page_table, pmd, |
| 2359 | orig_pte, old_page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2360 | } |
| 2361 | |
Peter Zijlstra | 97a8941 | 2011-05-24 17:12:04 -0700 | [diff] [blame] | 2362 | static void unmap_mapping_range_vma(struct vm_area_struct *vma, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2363 | unsigned long start_addr, unsigned long end_addr, |
| 2364 | struct zap_details *details) |
| 2365 | { |
Al Viro | f5cc4ee | 2012-03-05 14:14:20 -0500 | [diff] [blame] | 2366 | zap_page_range_single(vma, start_addr, end_addr - start_addr, details); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2367 | } |
| 2368 | |
Michel Lespinasse | 6b2dbba | 2012-10-08 16:31:25 -0700 | [diff] [blame] | 2369 | static inline void unmap_mapping_range_tree(struct rb_root *root, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2370 | struct zap_details *details) |
| 2371 | { |
| 2372 | struct vm_area_struct *vma; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2373 | pgoff_t vba, vea, zba, zea; |
| 2374 | |
Michel Lespinasse | 6b2dbba | 2012-10-08 16:31:25 -0700 | [diff] [blame] | 2375 | vma_interval_tree_foreach(vma, root, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2376 | details->first_index, details->last_index) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2377 | |
| 2378 | vba = vma->vm_pgoff; |
Libin | d6e9321 | 2013-07-03 15:01:26 -0700 | [diff] [blame] | 2379 | vea = vba + vma_pages(vma) - 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2380 | /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */ |
| 2381 | zba = details->first_index; |
| 2382 | if (zba < vba) |
| 2383 | zba = vba; |
| 2384 | zea = details->last_index; |
| 2385 | if (zea > vea) |
| 2386 | zea = vea; |
| 2387 | |
Peter Zijlstra | 97a8941 | 2011-05-24 17:12:04 -0700 | [diff] [blame] | 2388 | unmap_mapping_range_vma(vma, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2389 | ((zba - vba) << PAGE_SHIFT) + vma->vm_start, |
| 2390 | ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, |
Peter Zijlstra | 97a8941 | 2011-05-24 17:12:04 -0700 | [diff] [blame] | 2391 | details); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2392 | } |
| 2393 | } |
| 2394 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2395 | /** |
Kirill A. Shutemov | 8a5f14a | 2015-02-10 14:09:49 -0800 | [diff] [blame] | 2396 | * unmap_mapping_range - unmap the portion of all mmaps in the specified |
| 2397 | * address_space corresponding to the specified page range in the underlying |
| 2398 | * file. |
| 2399 | * |
Martin Waitz | 3d41088 | 2005-06-23 22:05:21 -0700 | [diff] [blame] | 2400 | * @mapping: the address space containing mmaps to be unmapped. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2401 | * @holebegin: byte in first page to unmap, relative to the start of |
| 2402 | * the underlying file. This will be rounded down to a PAGE_SIZE |
npiggin@suse.de | 25d9e2d | 2009-08-21 02:35:05 +1000 | [diff] [blame] | 2403 | * boundary. Note that this is different from truncate_pagecache(), which |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2404 | * must keep the partial page. In contrast, we must get rid of |
| 2405 | * partial pages. |
| 2406 | * @holelen: size of prospective hole in bytes. This will be rounded |
| 2407 | * up to a PAGE_SIZE boundary. A holelen of zero truncates to the |
| 2408 | * end of the file. |
| 2409 | * @even_cows: 1 when truncating a file, unmap even private COWed pages; |
| 2410 | * but 0 when invalidating pagecache, don't throw away private data. |
| 2411 | */ |
| 2412 | void unmap_mapping_range(struct address_space *mapping, |
| 2413 | loff_t const holebegin, loff_t const holelen, int even_cows) |
| 2414 | { |
| 2415 | struct zap_details details; |
| 2416 | pgoff_t hba = holebegin >> PAGE_SHIFT; |
| 2417 | pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; |
| 2418 | |
| 2419 | /* Check for overflow. */ |
| 2420 | if (sizeof(holelen) > sizeof(hlen)) { |
| 2421 | long long holeend = |
| 2422 | (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; |
| 2423 | if (holeend & ~(long long)ULONG_MAX) |
| 2424 | hlen = ULONG_MAX - hba + 1; |
| 2425 | } |
| 2426 | |
| 2427 | details.check_mapping = even_cows? NULL: mapping; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2428 | details.first_index = hba; |
| 2429 | details.last_index = hba + hlen - 1; |
| 2430 | if (details.last_index < details.first_index) |
| 2431 | details.last_index = ULONG_MAX; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2432 | |
Ross Zwisler | 0f90cc6 | 2015-10-15 15:28:32 -0700 | [diff] [blame] | 2433 | |
| 2434 | /* DAX uses i_mmap_lock to serialise file truncate vs page fault */ |
Kirill A. Shutemov | 46c043e | 2015-09-08 14:59:42 -0700 | [diff] [blame] | 2435 | i_mmap_lock_write(mapping); |
Michel Lespinasse | 6b2dbba | 2012-10-08 16:31:25 -0700 | [diff] [blame] | 2436 | if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap))) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2437 | unmap_mapping_range_tree(&mapping->i_mmap, &details); |
Kirill A. Shutemov | 46c043e | 2015-09-08 14:59:42 -0700 | [diff] [blame] | 2438 | i_mmap_unlock_write(mapping); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2439 | } |
| 2440 | EXPORT_SYMBOL(unmap_mapping_range); |
| 2441 | |
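/*
 * Illustrative sketch (not part of this file): a filesystem shrinking an
 * inode to 'newsize' typically unmaps the stale tail of every mapping
 * before (and again after) dropping the page cache, which is roughly what
 * truncate_pagecache() does:
 *
 *	loff_t holebegin = round_up(newsize, PAGE_SIZE);
 *
 *	unmap_mapping_range(mapping, holebegin, 0, 1);
 *	truncate_inode_pages(mapping, newsize);
 *	unmap_mapping_range(mapping, holebegin, 0, 1);
 */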
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2442 | /* |
Hugh Dickins | 8f4e210 | 2005-10-29 18:16:26 -0700 | [diff] [blame] | 2443 | * We enter with non-exclusive mmap_sem (to exclude vma changes, |
| 2444 | * but allow concurrent faults), and pte mapped but not yet locked. |
Paul Cassella | 9a95f3c | 2014-08-06 16:07:24 -0700 | [diff] [blame] | 2445 | * We return with pte unmapped and unlocked. |
| 2446 | * |
| 2447 | * We return with the mmap_sem locked or unlocked in the same cases |
| 2448 | * as does filemap_fault(). |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2449 | */ |
Hugh Dickins | 65500d2 | 2005-10-29 18:15:59 -0700 | [diff] [blame] | 2450 | static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, |
| 2451 | unsigned long address, pte_t *page_table, pmd_t *pmd, |
Linus Torvalds | 30c9f3a | 2009-04-10 08:43:11 -0700 | [diff] [blame] | 2452 | unsigned int flags, pte_t orig_pte) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2453 | { |
Hugh Dickins | 8f4e210 | 2005-10-29 18:16:26 -0700 | [diff] [blame] | 2454 | spinlock_t *ptl; |
Hugh Dickins | 56f3180 | 2013-02-22 16:36:10 -0800 | [diff] [blame] | 2455 | struct page *page, *swapcache; |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 2456 | struct mem_cgroup *memcg; |
Hugh Dickins | 65500d2 | 2005-10-29 18:15:59 -0700 | [diff] [blame] | 2457 | swp_entry_t entry; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2458 | pte_t pte; |
Michel Lespinasse | d065bd8 | 2010-10-26 14:21:57 -0700 | [diff] [blame] | 2459 | int locked; |
Rik van Riel | ad8c2ee | 2010-08-09 17:19:48 -0700 | [diff] [blame] | 2460 | int exclusive = 0; |
Nick Piggin | 83c5407 | 2007-07-19 01:47:05 -0700 | [diff] [blame] | 2461 | int ret = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2462 | |
Hugh Dickins | 4c21e2f | 2005-10-29 18:16:40 -0700 | [diff] [blame] | 2463 | if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) |
Hugh Dickins | 8f4e210 | 2005-10-29 18:16:26 -0700 | [diff] [blame] | 2464 | goto out; |
Hugh Dickins | 65500d2 | 2005-10-29 18:15:59 -0700 | [diff] [blame] | 2465 | |
| 2466 | entry = pte_to_swp_entry(orig_pte); |
Andi Kleen | d1737fd | 2009-09-16 11:50:06 +0200 | [diff] [blame] | 2467 | if (unlikely(non_swap_entry(entry))) { |
| 2468 | if (is_migration_entry(entry)) { |
| 2469 | migration_entry_wait(mm, pmd, address); |
| 2470 | } else if (is_hwpoison_entry(entry)) { |
| 2471 | ret = VM_FAULT_HWPOISON; |
| 2472 | } else { |
| 2473 | print_bad_pte(vma, address, orig_pte, NULL); |
Hugh Dickins | d99be1a | 2009-12-14 17:59:04 -0800 | [diff] [blame] | 2474 | ret = VM_FAULT_SIGBUS; |
Andi Kleen | d1737fd | 2009-09-16 11:50:06 +0200 | [diff] [blame] | 2475 | } |
Christoph Lameter | 0697212 | 2006-06-23 02:03:35 -0700 | [diff] [blame] | 2476 | goto out; |
| 2477 | } |
Shailabh Nagar | 0ff9224 | 2006-07-14 00:24:37 -0700 | [diff] [blame] | 2478 | delayacct_set_flag(DELAYACCT_PF_SWAPIN); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2479 | page = lookup_swap_cache(entry); |
| 2480 | if (!page) { |
Hugh Dickins | 02098fe | 2008-02-04 22:28:42 -0800 | [diff] [blame] | 2481 | page = swapin_readahead(entry, |
| 2482 | GFP_HIGHUSER_MOVABLE, vma, address); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2483 | if (!page) { |
| 2484 | /* |
Hugh Dickins | 8f4e210 | 2005-10-29 18:16:26 -0700 | [diff] [blame] | 2485 | * Back out if somebody else faulted in this pte |
| 2486 | * while we released the pte lock. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2487 | */ |
Hugh Dickins | 8f4e210 | 2005-10-29 18:16:26 -0700 | [diff] [blame] | 2488 | page_table = pte_offset_map_lock(mm, pmd, address, &ptl); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2489 | if (likely(pte_same(*page_table, orig_pte))) |
| 2490 | ret = VM_FAULT_OOM; |
Shailabh Nagar | 0ff9224 | 2006-07-14 00:24:37 -0700 | [diff] [blame] | 2491 | delayacct_clear_flag(DELAYACCT_PF_SWAPIN); |
Hugh Dickins | 65500d2 | 2005-10-29 18:15:59 -0700 | [diff] [blame] | 2492 | goto unlock; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2493 | } |
| 2494 | |
| 2495 | /* Had to read the page from swap area: Major fault */ |
| 2496 | ret = VM_FAULT_MAJOR; |
Christoph Lameter | f8891e5 | 2006-06-30 01:55:45 -0700 | [diff] [blame] | 2497 | count_vm_event(PGMAJFAULT); |
Ying Han | 456f998 | 2011-05-26 16:25:38 -0700 | [diff] [blame] | 2498 | mem_cgroup_count_vm_event(mm, PGMAJFAULT); |
Andi Kleen | d1737fd | 2009-09-16 11:50:06 +0200 | [diff] [blame] | 2499 | } else if (PageHWPoison(page)) { |
Wu Fengguang | 71f7252 | 2009-12-16 12:19:58 +0100 | [diff] [blame] | 2500 | /* |
| 2501 | * hwpoisoned dirty swapcache pages are kept for killing |
| 2502 | * owner processes (which may be unknown at hwpoison time) |
| 2503 | */ |
Andi Kleen | d1737fd | 2009-09-16 11:50:06 +0200 | [diff] [blame] | 2504 | ret = VM_FAULT_HWPOISON; |
| 2505 | delayacct_clear_flag(DELAYACCT_PF_SWAPIN); |
Hugh Dickins | 56f3180 | 2013-02-22 16:36:10 -0800 | [diff] [blame] | 2506 | swapcache = page; |
Andi Kleen | 4779cb3 | 2009-10-14 01:51:41 +0200 | [diff] [blame] | 2507 | goto out_release; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2508 | } |
| 2509 | |
Hugh Dickins | 56f3180 | 2013-02-22 16:36:10 -0800 | [diff] [blame] | 2510 | swapcache = page; |
Michel Lespinasse | d065bd8 | 2010-10-26 14:21:57 -0700 | [diff] [blame] | 2511 | locked = lock_page_or_retry(page, mm, flags); |
Rik van Riel | e709ffd | 2012-05-29 15:06:18 -0700 | [diff] [blame] | 2512 | |
Balbir Singh | 20a1022 | 2007-11-14 17:00:33 -0800 | [diff] [blame] | 2513 | delayacct_clear_flag(DELAYACCT_PF_SWAPIN); |
Michel Lespinasse | d065bd8 | 2010-10-26 14:21:57 -0700 | [diff] [blame] | 2514 | if (!locked) { |
| 2515 | ret |= VM_FAULT_RETRY; |
| 2516 | goto out_release; |
| 2517 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2518 | |
Andrea Arcangeli | 4969c11 | 2010-09-09 16:37:52 -0700 | [diff] [blame] | 2519 | /* |
Hugh Dickins | 31c4a3d | 2010-09-19 19:40:22 -0700 | [diff] [blame] | 2520 | * Make sure try_to_free_swap or reuse_swap_page or swapoff did not |
| 2521 | * release the swapcache from under us. The page pin, and pte_same |
| 2522 | * test below, are not enough to exclude that. Even if it is still |
| 2523 | * swapcache, we need to check that the page's swap has not changed. |
Andrea Arcangeli | 4969c11 | 2010-09-09 16:37:52 -0700 | [diff] [blame] | 2524 | */ |
Hugh Dickins | 31c4a3d | 2010-09-19 19:40:22 -0700 | [diff] [blame] | 2525 | if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val)) |
Andrea Arcangeli | 4969c11 | 2010-09-09 16:37:52 -0700 | [diff] [blame] | 2526 | goto out_page; |
| 2527 | |
Hugh Dickins | cbf86cf | 2013-02-22 16:35:08 -0800 | [diff] [blame] | 2528 | page = ksm_might_need_to_copy(page, vma, address); |
| 2529 | if (unlikely(!page)) { |
| 2530 | ret = VM_FAULT_OOM; |
| 2531 | page = swapcache; |
Hugh Dickins | cbf86cf | 2013-02-22 16:35:08 -0800 | [diff] [blame] | 2532 | goto out_page; |
Hugh Dickins | 5ad6468 | 2009-12-14 17:59:24 -0800 | [diff] [blame] | 2533 | } |
| 2534 | |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 2535 | if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false)) { |
KAMEZAWA Hiroyuki | 073e587 | 2008-10-18 20:28:08 -0700 | [diff] [blame] | 2536 | ret = VM_FAULT_OOM; |
Johannes Weiner | bc43f75 | 2009-04-30 15:08:08 -0700 | [diff] [blame] | 2537 | goto out_page; |
KAMEZAWA Hiroyuki | 073e587 | 2008-10-18 20:28:08 -0700 | [diff] [blame] | 2538 | } |
| 2539 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2540 | /* |
Hugh Dickins | 8f4e210 | 2005-10-29 18:16:26 -0700 | [diff] [blame] | 2541 | * Back out if somebody else already faulted in this pte. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2542 | */ |
Hugh Dickins | 8f4e210 | 2005-10-29 18:16:26 -0700 | [diff] [blame] | 2543 | page_table = pte_offset_map_lock(mm, pmd, address, &ptl); |
Hugh Dickins | 9e9bef0 | 2005-10-29 18:16:15 -0700 | [diff] [blame] | 2544 | if (unlikely(!pte_same(*page_table, orig_pte))) |
Kirill Korotaev | b810748 | 2005-05-16 21:53:50 -0700 | [diff] [blame] | 2545 | goto out_nomap; |
Kirill Korotaev | b810748 | 2005-05-16 21:53:50 -0700 | [diff] [blame] | 2546 | |
| 2547 | if (unlikely(!PageUptodate(page))) { |
| 2548 | ret = VM_FAULT_SIGBUS; |
| 2549 | goto out_nomap; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2550 | } |
| 2551 | |
KAMEZAWA Hiroyuki | 8c7c6e34 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2552 | /* |
| 2553 | * The page isn't present yet, go ahead with the fault. |
| 2554 | * |
| 2555 | * Be careful about the sequence of operations here. |
| 2556 | * To get its accounting right, reuse_swap_page() must be called |
| 2557 | * while the page is counted on swap but not yet in mapcount i.e. |
| 2558 | * before page_add_anon_rmap() and swap_free(); try_to_free_swap() |
| 2559 | * must be called after the swap_free(), or it will never succeed. |
KAMEZAWA Hiroyuki | 8c7c6e34 | 2009-01-07 18:08:00 -0800 | [diff] [blame] | 2560 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2561 | |
KAMEZAWA Hiroyuki | 34e5523 | 2010-03-05 13:41:40 -0800 | [diff] [blame] | 2562 | inc_mm_counter_fast(mm, MM_ANONPAGES); |
KAMEZAWA Hiroyuki | b084d43 | 2010-03-05 13:41:42 -0800 | [diff] [blame] | 2563 | dec_mm_counter_fast(mm, MM_SWAPENTS); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2564 | pte = mk_pte(page, vma->vm_page_prot); |
Linus Torvalds | 30c9f3a | 2009-04-10 08:43:11 -0700 | [diff] [blame] | 2565 | if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2566 | pte = maybe_mkwrite(pte_mkdirty(pte), vma); |
Linus Torvalds | 30c9f3a | 2009-04-10 08:43:11 -0700 | [diff] [blame] | 2567 | flags &= ~FAULT_FLAG_WRITE; |
Andrea Arcangeli | 9a5b489 | 2010-08-09 17:19:49 -0700 | [diff] [blame] | 2568 | ret |= VM_FAULT_WRITE; |
Kirill A. Shutemov | d281ee6 | 2016-01-15 16:52:16 -0800 | [diff] [blame] | 2569 | exclusive = RMAP_EXCLUSIVE; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2570 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2571 | flush_icache_page(vma, page); |
Cyrill Gorcunov | 179ef71 | 2013-08-13 16:00:49 -0700 | [diff] [blame] | 2572 | if (pte_swp_soft_dirty(orig_pte)) |
| 2573 | pte = pte_mksoft_dirty(pte); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2574 | set_pte_at(mm, address, page_table, pte); |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 2575 | if (page == swapcache) { |
Johannes Weiner | af34770 | 2013-02-22 16:32:20 -0800 | [diff] [blame] | 2576 | do_page_add_anon_rmap(page, vma, address, exclusive); |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 2577 | mem_cgroup_commit_charge(page, memcg, true, false); |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 2578 | } else { /* ksm created a completely new copy */ |
Kirill A. Shutemov | d281ee6 | 2016-01-15 16:52:16 -0800 | [diff] [blame] | 2579 | page_add_new_anon_rmap(page, vma, address, false); |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 2580 | mem_cgroup_commit_charge(page, memcg, false, false); |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 2581 | lru_cache_add_active_or_unevictable(page, vma); |
| 2582 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2583 | |
Hugh Dickins | c475a8a | 2005-06-21 17:15:12 -0700 | [diff] [blame] | 2584 | swap_free(entry); |
Vladimir Davydov | 5ccc5ab | 2016-01-20 15:03:10 -0800 | [diff] [blame] | 2585 | if (mem_cgroup_swap_full(page) || |
| 2586 | (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) |
Hugh Dickins | a2c43ee | 2009-01-06 14:39:36 -0800 | [diff] [blame] | 2587 | try_to_free_swap(page); |
Hugh Dickins | c475a8a | 2005-06-21 17:15:12 -0700 | [diff] [blame] | 2588 | unlock_page(page); |
Hugh Dickins | 56f3180 | 2013-02-22 16:36:10 -0800 | [diff] [blame] | 2589 | if (page != swapcache) { |
Andrea Arcangeli | 4969c11 | 2010-09-09 16:37:52 -0700 | [diff] [blame] | 2590 | /* |
| 2591 | 		 * Hold the lock to prevent the swap entry from being reused |
| 2592 | * until we take the PT lock for the pte_same() check |
| 2593 | * (to avoid false positives from pte_same). For |
| 2594 | * further safety release the lock after the swap_free |
| 2595 | * so that the swap count won't change under a |
| 2596 | * parallel locked swapcache. |
| 2597 | */ |
| 2598 | unlock_page(swapcache); |
| 2599 | page_cache_release(swapcache); |
| 2600 | } |
Hugh Dickins | c475a8a | 2005-06-21 17:15:12 -0700 | [diff] [blame] | 2601 | |
Linus Torvalds | 30c9f3a | 2009-04-10 08:43:11 -0700 | [diff] [blame] | 2602 | if (flags & FAULT_FLAG_WRITE) { |
Hugh Dickins | 61469f1 | 2008-03-04 14:29:04 -0800 | [diff] [blame] | 2603 | ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); |
| 2604 | if (ret & VM_FAULT_ERROR) |
| 2605 | ret &= VM_FAULT_ERROR; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2606 | goto out; |
| 2607 | } |
| 2608 | |
| 2609 | /* No need to invalidate - it was non-present before */ |
Russell King | 4b3073e | 2009-12-18 16:40:18 +0000 | [diff] [blame] | 2610 | update_mmu_cache(vma, address, page_table); |
Hugh Dickins | 65500d2 | 2005-10-29 18:15:59 -0700 | [diff] [blame] | 2611 | unlock: |
Hugh Dickins | 8f4e210 | 2005-10-29 18:16:26 -0700 | [diff] [blame] | 2612 | pte_unmap_unlock(page_table, ptl); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2613 | out: |
| 2614 | return ret; |
Kirill Korotaev | b810748 | 2005-05-16 21:53:50 -0700 | [diff] [blame] | 2615 | out_nomap: |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 2616 | mem_cgroup_cancel_charge(page, memcg, false); |
Hugh Dickins | 8f4e210 | 2005-10-29 18:16:26 -0700 | [diff] [blame] | 2617 | pte_unmap_unlock(page_table, ptl); |
Johannes Weiner | bc43f75 | 2009-04-30 15:08:08 -0700 | [diff] [blame] | 2618 | out_page: |
Kirill Korotaev | b810748 | 2005-05-16 21:53:50 -0700 | [diff] [blame] | 2619 | unlock_page(page); |
Andi Kleen | 4779cb3 | 2009-10-14 01:51:41 +0200 | [diff] [blame] | 2620 | out_release: |
Kirill Korotaev | b810748 | 2005-05-16 21:53:50 -0700 | [diff] [blame] | 2621 | page_cache_release(page); |
Hugh Dickins | 56f3180 | 2013-02-22 16:36:10 -0800 | [diff] [blame] | 2622 | if (page != swapcache) { |
Andrea Arcangeli | 4969c11 | 2010-09-09 16:37:52 -0700 | [diff] [blame] | 2623 | unlock_page(swapcache); |
| 2624 | page_cache_release(swapcache); |
| 2625 | } |
Hugh Dickins | 65500d2 | 2005-10-29 18:15:59 -0700 | [diff] [blame] | 2626 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2627 | } |
| 2628 | |
| 2629 | /* |
Luck, Tony | 8ca3eb0 | 2010-08-24 11:44:18 -0700 | [diff] [blame] | 2630 | * This is like a special single-page "expand_{down|up}wards()", |
| 2631 | * except we must first make sure that 'address{-|+}PAGE_SIZE' |
Linus Torvalds | 320b2b8 | 2010-08-12 17:54:33 -0700 | [diff] [blame] | 2632 | * doesn't hit another vma. |
Linus Torvalds | 320b2b8 | 2010-08-12 17:54:33 -0700 | [diff] [blame] | 2633 | */ |
| 2634 | static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) |
| 2635 | { |
| 2636 | address &= PAGE_MASK; |
| 2637 | if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { |
Linus Torvalds | 0e8e50e | 2010-08-20 16:49:40 -0700 | [diff] [blame] | 2638 | struct vm_area_struct *prev = vma->vm_prev; |
Linus Torvalds | 320b2b8 | 2010-08-12 17:54:33 -0700 | [diff] [blame] | 2639 | |
Linus Torvalds | 0e8e50e | 2010-08-20 16:49:40 -0700 | [diff] [blame] | 2640 | /* |
| 2641 | * Is there a mapping abutting this one below? |
| 2642 | * |
| 2643 | * That's only ok if it's the same stack mapping |
| 2644 | * that has gotten split.. |
| 2645 | */ |
| 2646 | if (prev && prev->vm_end == address) |
| 2647 | return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; |
| 2648 | |
Linus Torvalds | fee7e49 | 2015-01-06 13:00:05 -0800 | [diff] [blame] | 2649 | return expand_downwards(vma, address - PAGE_SIZE); |
Linus Torvalds | 320b2b8 | 2010-08-12 17:54:33 -0700 | [diff] [blame] | 2650 | } |
Luck, Tony | 8ca3eb0 | 2010-08-24 11:44:18 -0700 | [diff] [blame] | 2651 | if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { |
| 2652 | struct vm_area_struct *next = vma->vm_next; |
| 2653 | |
| 2654 | /* As VM_GROWSDOWN but s/below/above/ */ |
| 2655 | if (next && next->vm_start == address + PAGE_SIZE) |
| 2656 | return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM; |
| 2657 | |
Linus Torvalds | fee7e49 | 2015-01-06 13:00:05 -0800 | [diff] [blame] | 2658 | return expand_upwards(vma, address + PAGE_SIZE); |
Luck, Tony | 8ca3eb0 | 2010-08-24 11:44:18 -0700 | [diff] [blame] | 2659 | } |
Linus Torvalds | 320b2b8 | 2010-08-12 17:54:33 -0700 | [diff] [blame] | 2660 | return 0; |
| 2661 | } |
| 2662 | |
| 2663 | /* |
Hugh Dickins | 8f4e210 | 2005-10-29 18:16:26 -0700 | [diff] [blame] | 2664 | * We enter with non-exclusive mmap_sem (to exclude vma changes, |
| 2665 | * but allow concurrent faults), and pte mapped but not yet locked. |
| 2666 | * We return with mmap_sem still held, but pte unmapped and unlocked. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2667 | */ |
Hugh Dickins | 65500d2 | 2005-10-29 18:15:59 -0700 | [diff] [blame] | 2668 | static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, |
| 2669 | unsigned long address, pte_t *page_table, pmd_t *pmd, |
Linus Torvalds | 30c9f3a | 2009-04-10 08:43:11 -0700 | [diff] [blame] | 2670 | unsigned int flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2671 | { |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 2672 | struct mem_cgroup *memcg; |
Hugh Dickins | 8f4e210 | 2005-10-29 18:16:26 -0700 | [diff] [blame] | 2673 | struct page *page; |
| 2674 | spinlock_t *ptl; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2675 | pte_t entry; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2676 | |
Linus Torvalds | 11ac552 | 2010-08-14 11:44:56 -0700 | [diff] [blame] | 2677 | pte_unmap(page_table); |
Linus Torvalds | 320b2b8 | 2010-08-12 17:54:33 -0700 | [diff] [blame] | 2678 | |
Kirill A. Shutemov | 6b7339f | 2015-07-06 23:18:37 +0300 | [diff] [blame] | 2679 | /* File mapping without ->vm_ops ? */ |
| 2680 | if (vma->vm_flags & VM_SHARED) |
| 2681 | return VM_FAULT_SIGBUS; |
| 2682 | |
Linus Torvalds | 11ac552 | 2010-08-14 11:44:56 -0700 | [diff] [blame] | 2683 | /* Check if we need to add a guard page to the stack */ |
| 2684 | if (check_stack_guard_page(vma, address) < 0) |
Linus Torvalds | 9c145c56 | 2015-01-29 11:15:17 -0800 | [diff] [blame] | 2685 | return VM_FAULT_SIGSEGV; |
Linus Torvalds | 11ac552 | 2010-08-14 11:44:56 -0700 | [diff] [blame] | 2686 | |
| 2687 | /* Use the zero-page for reads */ |
Dominik Dingel | 593befa | 2014-10-23 12:07:44 +0200 | [diff] [blame] | 2688 | if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) { |
Hugh Dickins | 62eede6 | 2009-09-21 17:03:34 -0700 | [diff] [blame] | 2689 | entry = pte_mkspecial(pfn_pte(my_zero_pfn(address), |
| 2690 | vma->vm_page_prot)); |
Linus Torvalds | 11ac552 | 2010-08-14 11:44:56 -0700 | [diff] [blame] | 2691 | page_table = pte_offset_map_lock(mm, pmd, address, &ptl); |
Hugh Dickins | a13ea5b | 2009-09-21 17:03:30 -0700 | [diff] [blame] | 2692 | if (!pte_none(*page_table)) |
| 2693 | goto unlock; |
Andrea Arcangeli | 6b251fc | 2015-09-04 15:46:20 -0700 | [diff] [blame] | 2694 | /* Deliver the page fault to userland, check inside PT lock */ |
| 2695 | if (userfaultfd_missing(vma)) { |
| 2696 | pte_unmap_unlock(page_table, ptl); |
| 2697 | return handle_userfault(vma, address, flags, |
| 2698 | VM_UFFD_MISSING); |
| 2699 | } |
Hugh Dickins | a13ea5b | 2009-09-21 17:03:30 -0700 | [diff] [blame] | 2700 | goto setpte; |
| 2701 | } |
| 2702 | |
Nick Piggin | 557ed1f | 2007-10-16 01:24:40 -0700 | [diff] [blame] | 2703 | /* Allocate our own private page. */ |
Nick Piggin | 557ed1f | 2007-10-16 01:24:40 -0700 | [diff] [blame] | 2704 | if (unlikely(anon_vma_prepare(vma))) |
| 2705 | goto oom; |
| 2706 | page = alloc_zeroed_user_highpage_movable(vma, address); |
| 2707 | if (!page) |
| 2708 | goto oom; |
Mel Gorman | eb3c24f | 2015-06-24 16:57:27 -0700 | [diff] [blame] | 2709 | |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 2710 | if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false)) |
Mel Gorman | eb3c24f | 2015-06-24 16:57:27 -0700 | [diff] [blame] | 2711 | goto oom_free_page; |
| 2712 | |
Minchan Kim | 52f3762 | 2013-04-29 15:08:15 -0700 | [diff] [blame] | 2713 | /* |
| 2714 | * The memory barrier inside __SetPageUptodate makes sure that |
| 2715 | 	 * preceding stores to the page contents become visible before |
| 2716 | * the set_pte_at() write. |
| 2717 | */ |
Nick Piggin | 0ed361d | 2008-02-04 22:29:34 -0800 | [diff] [blame] | 2718 | __SetPageUptodate(page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2719 | |
Nick Piggin | 557ed1f | 2007-10-16 01:24:40 -0700 | [diff] [blame] | 2720 | entry = mk_pte(page, vma->vm_page_prot); |
Hugh Dickins | 1ac0cb5 | 2009-09-21 17:03:29 -0700 | [diff] [blame] | 2721 | if (vma->vm_flags & VM_WRITE) |
| 2722 | entry = pte_mkwrite(pte_mkdirty(entry)); |
Hugh Dickins | 8f4e210 | 2005-10-29 18:16:26 -0700 | [diff] [blame] | 2723 | |
Nick Piggin | 557ed1f | 2007-10-16 01:24:40 -0700 | [diff] [blame] | 2724 | page_table = pte_offset_map_lock(mm, pmd, address, &ptl); |
Andrea Arcangeli | 1c2fb7a | 2009-09-21 17:02:22 -0700 | [diff] [blame] | 2725 | if (!pte_none(*page_table)) |
Nick Piggin | 557ed1f | 2007-10-16 01:24:40 -0700 | [diff] [blame] | 2726 | goto release; |
Hugh Dickins | 9ba6929 | 2009-09-21 17:02:20 -0700 | [diff] [blame] | 2727 | |
Andrea Arcangeli | 6b251fc | 2015-09-04 15:46:20 -0700 | [diff] [blame] | 2728 | /* Deliver the page fault to userland, check inside PT lock */ |
| 2729 | if (userfaultfd_missing(vma)) { |
| 2730 | pte_unmap_unlock(page_table, ptl); |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 2731 | mem_cgroup_cancel_charge(page, memcg, false); |
Andrea Arcangeli | 6b251fc | 2015-09-04 15:46:20 -0700 | [diff] [blame] | 2732 | page_cache_release(page); |
| 2733 | return handle_userfault(vma, address, flags, |
| 2734 | VM_UFFD_MISSING); |
| 2735 | } |
| 2736 | |
KAMEZAWA Hiroyuki | 34e5523 | 2010-03-05 13:41:40 -0800 | [diff] [blame] | 2737 | inc_mm_counter_fast(mm, MM_ANONPAGES); |
Kirill A. Shutemov | d281ee6 | 2016-01-15 16:52:16 -0800 | [diff] [blame] | 2738 | page_add_new_anon_rmap(page, vma, address, false); |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 2739 | mem_cgroup_commit_charge(page, memcg, false, false); |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 2740 | lru_cache_add_active_or_unevictable(page, vma); |
Hugh Dickins | a13ea5b | 2009-09-21 17:03:30 -0700 | [diff] [blame] | 2741 | setpte: |
Hugh Dickins | 65500d2 | 2005-10-29 18:15:59 -0700 | [diff] [blame] | 2742 | set_pte_at(mm, address, page_table, entry); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2743 | |
| 2744 | /* No need to invalidate - it was non-present before */ |
Russell King | 4b3073e | 2009-12-18 16:40:18 +0000 | [diff] [blame] | 2745 | update_mmu_cache(vma, address, page_table); |
Hugh Dickins | 65500d2 | 2005-10-29 18:15:59 -0700 | [diff] [blame] | 2746 | unlock: |
Hugh Dickins | 8f4e210 | 2005-10-29 18:16:26 -0700 | [diff] [blame] | 2747 | pte_unmap_unlock(page_table, ptl); |
Nick Piggin | 83c5407 | 2007-07-19 01:47:05 -0700 | [diff] [blame] | 2748 | return 0; |
Hugh Dickins | 8f4e210 | 2005-10-29 18:16:26 -0700 | [diff] [blame] | 2749 | release: |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 2750 | mem_cgroup_cancel_charge(page, memcg, false); |
Hugh Dickins | 8f4e210 | 2005-10-29 18:16:26 -0700 | [diff] [blame] | 2751 | page_cache_release(page); |
| 2752 | goto unlock; |
Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 2753 | oom_free_page: |
Hugh Dickins | 6dbf6d3 | 2008-03-04 14:29:04 -0800 | [diff] [blame] | 2754 | page_cache_release(page); |
Hugh Dickins | 65500d2 | 2005-10-29 18:15:59 -0700 | [diff] [blame] | 2755 | oom: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2756 | return VM_FAULT_OOM; |
| 2757 | } |
| 2758 | |
Paul Cassella | 9a95f3c | 2014-08-06 16:07:24 -0700 | [diff] [blame] | 2759 | /* |
| 2760 | * The mmap_sem must have been held on entry, and may have been |
| 2761 | * released depending on flags and vma->vm_ops->fault() return value. |
| 2762 | * See filemap_fault() and __lock_page_retry(). |
| 2763 | */ |
Kirill A. Shutemov | 7eae74a | 2014-04-03 14:48:10 -0700 | [diff] [blame] | 2764 | static int __do_fault(struct vm_area_struct *vma, unsigned long address, |
Matthew Wilcox | 2e4cdab | 2015-02-16 15:58:50 -0800 | [diff] [blame] | 2765 | pgoff_t pgoff, unsigned int flags, |
| 2766 | struct page *cow_page, struct page **page) |
Kirill A. Shutemov | 7eae74a | 2014-04-03 14:48:10 -0700 | [diff] [blame] | 2767 | { |
| 2768 | struct vm_fault vmf; |
| 2769 | int ret; |
| 2770 | |
| 2771 | vmf.virtual_address = (void __user *)(address & PAGE_MASK); |
| 2772 | vmf.pgoff = pgoff; |
| 2773 | vmf.flags = flags; |
| 2774 | vmf.page = NULL; |
Michal Hocko | c20cd45 | 2016-01-14 15:20:12 -0800 | [diff] [blame] | 2775 | vmf.gfp_mask = __get_fault_gfp_mask(vma); |
Matthew Wilcox | 2e4cdab | 2015-02-16 15:58:50 -0800 | [diff] [blame] | 2776 | vmf.cow_page = cow_page; |
Kirill A. Shutemov | 7eae74a | 2014-04-03 14:48:10 -0700 | [diff] [blame] | 2777 | |
| 2778 | ret = vma->vm_ops->fault(vma, &vmf); |
| 2779 | if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) |
| 2780 | return ret; |
Matthew Wilcox | 2e4cdab | 2015-02-16 15:58:50 -0800 | [diff] [blame] | 2781 | if (!vmf.page) |
| 2782 | goto out; |
Kirill A. Shutemov | 7eae74a | 2014-04-03 14:48:10 -0700 | [diff] [blame] | 2783 | |
| 2784 | if (unlikely(PageHWPoison(vmf.page))) { |
| 2785 | if (ret & VM_FAULT_LOCKED) |
| 2786 | unlock_page(vmf.page); |
| 2787 | page_cache_release(vmf.page); |
| 2788 | return VM_FAULT_HWPOISON; |
| 2789 | } |
| 2790 | |
| 2791 | if (unlikely(!(ret & VM_FAULT_LOCKED))) |
| 2792 | lock_page(vmf.page); |
| 2793 | else |
| 2794 | VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page); |
| 2795 | |
Matthew Wilcox | 2e4cdab | 2015-02-16 15:58:50 -0800 | [diff] [blame] | 2796 | out: |
Kirill A. Shutemov | 7eae74a | 2014-04-03 14:48:10 -0700 | [diff] [blame] | 2797 | *page = vmf.page; |
| 2798 | return ret; |
| 2799 | } |
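/*
 * Illustrative sketch, not part of this file: the minimal shape of a
 * vm_ops->fault() implementation that satisfies the contract __do_fault()
 * relies on above -- either an error/NOPAGE/RETRY code, or a page handed
 * back in vmf->page, preferably locked and flagged with VM_FAULT_LOCKED.
 * The name example_lookup_page() is a hypothetical stand-in for a driver's
 * own page lookup; everything else is taken from the code above.
 */
static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page;

	page = example_lookup_page(vma->vm_file, vmf->pgoff);
	if (!page)
		return VM_FAULT_SIGBUS;		/* nothing backs this offset */

	lock_page(page);			/* return the page locked ... */
	vmf->page = page;
	return VM_FAULT_LOCKED;			/* ... and tell __do_fault() so */
}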
| 2800 | |
Kirill A. Shutemov | 8c6e50b | 2014-04-07 15:37:18 -0700 | [diff] [blame] | 2801 | /** |
| 2802 | * do_set_pte - set up a new PTE entry for the given page and add the reverse page mapping. |
| 2803 | * |
| 2804 | * @vma: virtual memory area |
| 2805 | * @address: user virtual address |
| 2806 | * @page: page to map |
| 2807 | * @pte: pointer to target page table entry |
| 2808 | * @write: true if the new entry is writable |
| 2809 | * @anon: true if it's an anonymous page |
| 2810 | * |
| 2811 | * Caller must hold page table lock relevant for @pte. |
| 2812 | * |
| 2813 | * Target users are page handler itself and implementations of |
| 2814 | * vm_ops->map_pages. |
| 2815 | */ |
| 2816 | void do_set_pte(struct vm_area_struct *vma, unsigned long address, |
Kirill A. Shutemov | 3bb9779 | 2014-04-03 14:48:16 -0700 | [diff] [blame] | 2817 | struct page *page, pte_t *pte, bool write, bool anon) |
| 2818 | { |
| 2819 | pte_t entry; |
| 2820 | |
| 2821 | flush_icache_page(vma, page); |
| 2822 | entry = mk_pte(page, vma->vm_page_prot); |
| 2823 | if (write) |
| 2824 | entry = maybe_mkwrite(pte_mkdirty(entry), vma); |
Kirill A. Shutemov | 3bb9779 | 2014-04-03 14:48:16 -0700 | [diff] [blame] | 2825 | if (anon) { |
| 2826 | inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); |
Kirill A. Shutemov | d281ee6 | 2016-01-15 16:52:16 -0800 | [diff] [blame] | 2827 | page_add_new_anon_rmap(page, vma, address, false); |
Kirill A. Shutemov | 3bb9779 | 2014-04-03 14:48:16 -0700 | [diff] [blame] | 2828 | } else { |
Jerome Marchand | eca56ff | 2016-01-14 15:19:26 -0800 | [diff] [blame] | 2829 | inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page)); |
Kirill A. Shutemov | 3bb9779 | 2014-04-03 14:48:16 -0700 | [diff] [blame] | 2830 | page_add_file_rmap(page); |
| 2831 | } |
| 2832 | set_pte_at(vma->vm_mm, address, pte, entry); |
| 2833 | |
| 2834 | /* no need to invalidate: a not-present page won't be cached */ |
| 2835 | update_mmu_cache(vma, address, pte); |
| 2836 | } |
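/*
 * Illustrative sketch, not part of this file: the rough shape of the other
 * intended caller of do_set_pte(), a vm_ops->map_pages() implementation
 * (filemap_map_pages() in mm/filemap.c is the real one).  The lookup helper
 * example_find_get_page() is hypothetical, and the reference-counting and
 * locking subtleties of the real code are deliberately omitted.
 */
static void example_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long addr = (unsigned long)vmf->virtual_address;
	pgoff_t pgoff = vmf->pgoff;
	pte_t *pte = vmf->pte;

	/* The caller (do_fault_around()) holds the page table lock. */
	for (; pgoff <= vmf->max_pgoff; pgoff++, addr += PAGE_SIZE, pte++) {
		struct page *page = example_find_get_page(vma->vm_file, pgoff);

		if (!page)
			continue;
		if (PageUptodate(page) && !PageLocked(page) && pte_none(*pte))
			do_set_pte(vma, addr, page, pte, false, false);
		else
			put_page(page);	/* not ready to map: skip it */
	}
}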
| 2837 | |
Kirill A. Shutemov | 3a91053 | 2014-08-06 16:08:07 -0700 | [diff] [blame] | 2838 | static unsigned long fault_around_bytes __read_mostly = |
| 2839 | rounddown_pow_of_two(65536); |
Kirill A. Shutemov | a9b0f86 | 2014-06-04 16:10:54 -0700 | [diff] [blame] | 2840 | |
Kirill A. Shutemov | 1592eef | 2014-04-07 15:37:22 -0700 | [diff] [blame] | 2841 | #ifdef CONFIG_DEBUG_FS |
Kirill A. Shutemov | a9b0f86 | 2014-06-04 16:10:54 -0700 | [diff] [blame] | 2842 | static int fault_around_bytes_get(void *data, u64 *val) |
Kirill A. Shutemov | 1592eef | 2014-04-07 15:37:22 -0700 | [diff] [blame] | 2843 | { |
Kirill A. Shutemov | a9b0f86 | 2014-06-04 16:10:54 -0700 | [diff] [blame] | 2844 | *val = fault_around_bytes; |
Kirill A. Shutemov | 1592eef | 2014-04-07 15:37:22 -0700 | [diff] [blame] | 2845 | return 0; |
| 2846 | } |
| 2847 | |
Andrey Ryabinin | b4903d6 | 2014-07-30 16:08:35 -0700 | [diff] [blame] | 2848 | /* |
| 2849 | * fault_around_pages() and fault_around_mask() expect fault_around_bytes |
| 2850 | * rounded down to the nearest page order. It's what do_fault_around() expects to |
| 2851 | * see. |
| 2852 | */ |
Kirill A. Shutemov | a9b0f86 | 2014-06-04 16:10:54 -0700 | [diff] [blame] | 2853 | static int fault_around_bytes_set(void *data, u64 val) |
Kirill A. Shutemov | 1592eef | 2014-04-07 15:37:22 -0700 | [diff] [blame] | 2854 | { |
Kirill A. Shutemov | a9b0f86 | 2014-06-04 16:10:54 -0700 | [diff] [blame] | 2855 | if (val / PAGE_SIZE > PTRS_PER_PTE) |
Kirill A. Shutemov | 1592eef | 2014-04-07 15:37:22 -0700 | [diff] [blame] | 2856 | return -EINVAL; |
Andrey Ryabinin | b4903d6 | 2014-07-30 16:08:35 -0700 | [diff] [blame] | 2857 | if (val > PAGE_SIZE) |
| 2858 | fault_around_bytes = rounddown_pow_of_two(val); |
| 2859 | else |
| 2860 | fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */ |
Kirill A. Shutemov | 1592eef | 2014-04-07 15:37:22 -0700 | [diff] [blame] | 2861 | return 0; |
| 2862 | } |
Kirill A. Shutemov | a9b0f86 | 2014-06-04 16:10:54 -0700 | [diff] [blame] | 2863 | DEFINE_SIMPLE_ATTRIBUTE(fault_around_bytes_fops, |
| 2864 | fault_around_bytes_get, fault_around_bytes_set, "%llu\n"); |
Kirill A. Shutemov | 1592eef | 2014-04-07 15:37:22 -0700 | [diff] [blame] | 2865 | |
| 2866 | static int __init fault_around_debugfs(void) |
| 2867 | { |
| 2868 | void *ret; |
| 2869 | |
Kirill A. Shutemov | a9b0f86 | 2014-06-04 16:10:54 -0700 | [diff] [blame] | 2870 | ret = debugfs_create_file("fault_around_bytes", 0644, NULL, NULL, |
| 2871 | &fault_around_bytes_fops); |
Kirill A. Shutemov | 1592eef | 2014-04-07 15:37:22 -0700 | [diff] [blame] | 2872 | if (!ret) |
Kirill A. Shutemov | a9b0f86 | 2014-06-04 16:10:54 -0700 | [diff] [blame] | 2873 | pr_warn("Failed to create fault_around_bytes in debugfs"); |
Kirill A. Shutemov | 1592eef | 2014-04-07 15:37:22 -0700 | [diff] [blame] | 2874 | return 0; |
| 2875 | } |
| 2876 | late_initcall(fault_around_debugfs); |
Kirill A. Shutemov | 1592eef | 2014-04-07 15:37:22 -0700 | [diff] [blame] | 2877 | #endif |
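/*
 * Worked example of the rounding in fault_around_bytes_set(), assuming
 * 4K pages, PTRS_PER_PTE == 512 and debugfs mounted at /sys/kernel/debug
 * (all three are common defaults, not guarantees made by this file):
 *
 *   echo 60000   > /sys/kernel/debug/fault_around_bytes
 *       -> rounddown_pow_of_two(60000) = 32768, i.e. 8 pages per fault
 *   echo 100     > /sys/kernel/debug/fault_around_bytes
 *       -> clamped up to PAGE_SIZE (4096), i.e. fault-around disabled,
 *          since do_read_fault() only faults around for more than 1 page
 *   echo 4194304 > /sys/kernel/debug/fault_around_bytes
 *       -> rejected with -EINVAL: 4M / 4K = 1024 > PTRS_PER_PTE
 */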
Kirill A. Shutemov | 8c6e50b | 2014-04-07 15:37:18 -0700 | [diff] [blame] | 2878 | |
Kirill A. Shutemov | 1fdb412 | 2014-06-04 16:10:55 -0700 | [diff] [blame] | 2879 | /* |
| 2880 | * do_fault_around() tries to map a few pages around the fault address. The hope |
| 2881 | * is that the pages will be needed soon and this will lower the number of |
| 2882 | * faults to handle. |
| 2883 | * |
| 2884 | * It uses vm_ops->map_pages() to map the pages, which skips any page that is |
| 2885 | * not ready to be mapped: not up-to-date, locked, etc. |
| 2886 | * |
| 2887 | * This function is called with the page table lock taken. In the split ptlock |
| 2888 | * case the page table lock protects only those entries which belong to |
| 2889 | * the page table corresponding to the fault address. |
| 2890 | * |
| 2891 | * This function doesn't cross the VMA boundaries, in order to call map_pages() |
| 2892 | * only once. |
| 2893 | * |
| 2894 | * fault_around_pages() defines how many pages we'll try to map. |
| 2895 | * do_fault_around() expects it to return a power of two less than or equal to |
| 2896 | * PTRS_PER_PTE. |
| 2897 | * |
| 2898 | * The virtual address of the area that we map is naturally aligned to the |
| 2899 | * fault_around_pages() value (and therefore to page order). This way it's |
| 2900 | * easier to guarantee that we don't cross page table boundaries. |
| 2901 | */ |
Kirill A. Shutemov | 8c6e50b | 2014-04-07 15:37:18 -0700 | [diff] [blame] | 2902 | static void do_fault_around(struct vm_area_struct *vma, unsigned long address, |
| 2903 | pte_t *pte, pgoff_t pgoff, unsigned int flags) |
| 2904 | { |
Kirill A. Shutemov | aecd6f4 | 2014-08-06 16:08:05 -0700 | [diff] [blame] | 2905 | unsigned long start_addr, nr_pages, mask; |
Kirill A. Shutemov | 8c6e50b | 2014-04-07 15:37:18 -0700 | [diff] [blame] | 2906 | pgoff_t max_pgoff; |
| 2907 | struct vm_fault vmf; |
| 2908 | int off; |
| 2909 | |
Jason Low | 4db0c3c | 2015-04-15 16:14:08 -0700 | [diff] [blame] | 2910 | nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT; |
Kirill A. Shutemov | aecd6f4 | 2014-08-06 16:08:05 -0700 | [diff] [blame] | 2911 | mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK; |
| 2912 | |
| 2913 | start_addr = max(address & mask, vma->vm_start); |
Kirill A. Shutemov | 8c6e50b | 2014-04-07 15:37:18 -0700 | [diff] [blame] | 2914 | off = ((address - start_addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); |
| 2915 | pte -= off; |
| 2916 | pgoff -= off; |
| 2917 | |
| 2918 | /* |
| 2919 | * max_pgoff is either the end of the page table, the end of the vma, |
Kirill A. Shutemov | 850e9c6 | 2014-06-04 16:10:45 -0700 | [diff] [blame] | 2920 | * or fault_around_pages() pages from pgoff, whichever is nearest. |
Kirill A. Shutemov | 8c6e50b | 2014-04-07 15:37:18 -0700 | [diff] [blame] | 2921 | */ |
| 2922 | max_pgoff = pgoff - ((start_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) + |
| 2923 | PTRS_PER_PTE - 1; |
| 2924 | max_pgoff = min3(max_pgoff, vma_pages(vma) + vma->vm_pgoff - 1, |
Kirill A. Shutemov | aecd6f4 | 2014-08-06 16:08:05 -0700 | [diff] [blame] | 2925 | pgoff + nr_pages - 1); |
Kirill A. Shutemov | 8c6e50b | 2014-04-07 15:37:18 -0700 | [diff] [blame] | 2926 | |
| 2927 | /* Check if it makes any sense to call ->map_pages */ |
| 2928 | while (!pte_none(*pte)) { |
| 2929 | if (++pgoff > max_pgoff) |
| 2930 | return; |
| 2931 | start_addr += PAGE_SIZE; |
| 2932 | if (start_addr >= vma->vm_end) |
| 2933 | return; |
| 2934 | pte++; |
| 2935 | } |
| 2936 | |
| 2937 | vmf.virtual_address = (void __user *) start_addr; |
| 2938 | vmf.pte = pte; |
| 2939 | vmf.pgoff = pgoff; |
| 2940 | vmf.max_pgoff = max_pgoff; |
| 2941 | vmf.flags = flags; |
Michal Hocko | c20cd45 | 2016-01-14 15:20:12 -0800 | [diff] [blame] | 2942 | vmf.gfp_mask = __get_fault_gfp_mask(vma); |
Kirill A. Shutemov | 8c6e50b | 2014-04-07 15:37:18 -0700 | [diff] [blame] | 2943 | vma->vm_ops->map_pages(vma, &vmf); |
| 2944 | } |
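/*
 * Worked example of the arithmetic above, assuming 4K pages,
 * PTRS_PER_PTE == 512, the default fault_around_bytes of 64K, a VMA
 * covering 0x400000-0x500000 with vm_pgoff == 0, and no populated PTEs
 * in the window (all of these are illustrative assumptions):
 *
 *   fault address          = 0x432a10  ->  faulting pgoff = 50
 *   nr_pages               = 65536 >> 12 = 16
 *   mask                   = ~0xffff, so address & mask = 0x430000
 *   start_addr             = max(0x430000, vm_start) = 0x430000
 *   off                    = ((0x432a10 - 0x430000) >> 12) & 511 = 2
 *   pgoff after "-= off"   = 48
 *   max_pgoff (page table) = 48 - 48 + 511 = 511
 *   max_pgoff (clamped)    = min3(511, 255, 48 + 16 - 1) = 63
 *
 * so ->map_pages() is asked to populate pgoff 48..63, i.e. the 64K of
 * virtual addresses 0x430000-0x43ffff around the fault, without crossing
 * either the VMA or the page table that covers the faulting address.
 */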
| 2945 | |
Kirill A. Shutemov | e655fb2 | 2014-04-03 14:48:11 -0700 | [diff] [blame] | 2946 | static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma, |
| 2947 | unsigned long address, pmd_t *pmd, |
| 2948 | pgoff_t pgoff, unsigned int flags, pte_t orig_pte) |
| 2949 | { |
| 2950 | struct page *fault_page; |
| 2951 | spinlock_t *ptl; |
Kirill A. Shutemov | 3bb9779 | 2014-04-03 14:48:16 -0700 | [diff] [blame] | 2952 | pte_t *pte; |
Kirill A. Shutemov | 8c6e50b | 2014-04-07 15:37:18 -0700 | [diff] [blame] | 2953 | int ret = 0; |
| 2954 | |
| 2955 | /* |
| 2956 | * Let's call ->map_pages() first and use ->fault() as fallback |
| 2957 | * if the page at that offset is not ready to be mapped (cold cache or |
| 2958 | * something). |
| 2959 | */ |
Kirill A. Shutemov | 9b4bdd2 | 2015-02-10 14:09:51 -0800 | [diff] [blame] | 2960 | if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) { |
Kirill A. Shutemov | 8c6e50b | 2014-04-07 15:37:18 -0700 | [diff] [blame] | 2961 | pte = pte_offset_map_lock(mm, pmd, address, &ptl); |
| 2962 | do_fault_around(vma, address, pte, pgoff, flags); |
| 2963 | if (!pte_same(*pte, orig_pte)) |
| 2964 | goto unlock_out; |
| 2965 | pte_unmap_unlock(pte, ptl); |
| 2966 | } |
Kirill A. Shutemov | e655fb2 | 2014-04-03 14:48:11 -0700 | [diff] [blame] | 2967 | |
Matthew Wilcox | 2e4cdab | 2015-02-16 15:58:50 -0800 | [diff] [blame] | 2968 | ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page); |
Kirill A. Shutemov | e655fb2 | 2014-04-03 14:48:11 -0700 | [diff] [blame] | 2969 | if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) |
| 2970 | return ret; |
| 2971 | |
| 2972 | pte = pte_offset_map_lock(mm, pmd, address, &ptl); |
| 2973 | if (unlikely(!pte_same(*pte, orig_pte))) { |
| 2974 | pte_unmap_unlock(pte, ptl); |
| 2975 | unlock_page(fault_page); |
| 2976 | page_cache_release(fault_page); |
| 2977 | return ret; |
| 2978 | } |
Kirill A. Shutemov | 3bb9779 | 2014-04-03 14:48:16 -0700 | [diff] [blame] | 2979 | do_set_pte(vma, address, fault_page, pte, false, false); |
Kirill A. Shutemov | e655fb2 | 2014-04-03 14:48:11 -0700 | [diff] [blame] | 2980 | unlock_page(fault_page); |
Kirill A. Shutemov | 8c6e50b | 2014-04-07 15:37:18 -0700 | [diff] [blame] | 2981 | unlock_out: |
| 2982 | pte_unmap_unlock(pte, ptl); |
Kirill A. Shutemov | e655fb2 | 2014-04-03 14:48:11 -0700 | [diff] [blame] | 2983 | return ret; |
| 2984 | } |
| 2985 | |
Kirill A. Shutemov | ec47c3b | 2014-04-03 14:48:12 -0700 | [diff] [blame] | 2986 | static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma, |
| 2987 | unsigned long address, pmd_t *pmd, |
| 2988 | pgoff_t pgoff, unsigned int flags, pte_t orig_pte) |
| 2989 | { |
| 2990 | struct page *fault_page, *new_page; |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 2991 | struct mem_cgroup *memcg; |
Kirill A. Shutemov | ec47c3b | 2014-04-03 14:48:12 -0700 | [diff] [blame] | 2992 | spinlock_t *ptl; |
Kirill A. Shutemov | 3bb9779 | 2014-04-03 14:48:16 -0700 | [diff] [blame] | 2993 | pte_t *pte; |
Kirill A. Shutemov | ec47c3b | 2014-04-03 14:48:12 -0700 | [diff] [blame] | 2994 | int ret; |
| 2995 | |
| 2996 | if (unlikely(anon_vma_prepare(vma))) |
| 2997 | return VM_FAULT_OOM; |
| 2998 | |
| 2999 | new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); |
| 3000 | if (!new_page) |
| 3001 | return VM_FAULT_OOM; |
| 3002 | |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 3003 | if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false)) { |
Kirill A. Shutemov | ec47c3b | 2014-04-03 14:48:12 -0700 | [diff] [blame] | 3004 | page_cache_release(new_page); |
| 3005 | return VM_FAULT_OOM; |
| 3006 | } |
| 3007 | |
Matthew Wilcox | 2e4cdab | 2015-02-16 15:58:50 -0800 | [diff] [blame] | 3008 | ret = __do_fault(vma, address, pgoff, flags, new_page, &fault_page); |
Kirill A. Shutemov | ec47c3b | 2014-04-03 14:48:12 -0700 | [diff] [blame] | 3009 | if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) |
| 3010 | goto uncharge_out; |
| 3011 | |
Matthew Wilcox | 2e4cdab | 2015-02-16 15:58:50 -0800 | [diff] [blame] | 3012 | if (fault_page) |
| 3013 | copy_user_highpage(new_page, fault_page, address, vma); |
Kirill A. Shutemov | ec47c3b | 2014-04-03 14:48:12 -0700 | [diff] [blame] | 3014 | __SetPageUptodate(new_page); |
| 3015 | |
| 3016 | pte = pte_offset_map_lock(mm, pmd, address, &ptl); |
| 3017 | if (unlikely(!pte_same(*pte, orig_pte))) { |
| 3018 | pte_unmap_unlock(pte, ptl); |
Matthew Wilcox | 2e4cdab | 2015-02-16 15:58:50 -0800 | [diff] [blame] | 3019 | if (fault_page) { |
| 3020 | unlock_page(fault_page); |
| 3021 | page_cache_release(fault_page); |
| 3022 | } else { |
| 3023 | /* |
| 3024 | * The fault handler has no page to lock, so it holds |
Yigal Korman | 0df9d41 | 2015-11-16 14:09:15 +0200 | [diff] [blame] | 3025 | * i_mmap_lock for read to protect against truncate. |
Matthew Wilcox | 2e4cdab | 2015-02-16 15:58:50 -0800 | [diff] [blame] | 3026 | */ |
Yigal Korman | 0df9d41 | 2015-11-16 14:09:15 +0200 | [diff] [blame] | 3027 | i_mmap_unlock_read(vma->vm_file->f_mapping); |
Matthew Wilcox | 2e4cdab | 2015-02-16 15:58:50 -0800 | [diff] [blame] | 3028 | } |
Kirill A. Shutemov | ec47c3b | 2014-04-03 14:48:12 -0700 | [diff] [blame] | 3029 | goto uncharge_out; |
| 3030 | } |
Kirill A. Shutemov | 3bb9779 | 2014-04-03 14:48:16 -0700 | [diff] [blame] | 3031 | do_set_pte(vma, address, new_page, pte, true, true); |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 3032 | mem_cgroup_commit_charge(new_page, memcg, false, false); |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 3033 | lru_cache_add_active_or_unevictable(new_page, vma); |
Kirill A. Shutemov | ec47c3b | 2014-04-03 14:48:12 -0700 | [diff] [blame] | 3034 | pte_unmap_unlock(pte, ptl); |
Matthew Wilcox | 2e4cdab | 2015-02-16 15:58:50 -0800 | [diff] [blame] | 3035 | if (fault_page) { |
| 3036 | unlock_page(fault_page); |
| 3037 | page_cache_release(fault_page); |
| 3038 | } else { |
| 3039 | /* |
| 3040 | * The fault handler has no page to lock, so it holds |
Yigal Korman | 0df9d41 | 2015-11-16 14:09:15 +0200 | [diff] [blame] | 3041 | * i_mmap_lock for read to protect against truncate. |
Matthew Wilcox | 2e4cdab | 2015-02-16 15:58:50 -0800 | [diff] [blame] | 3042 | */ |
Yigal Korman | 0df9d41 | 2015-11-16 14:09:15 +0200 | [diff] [blame] | 3043 | i_mmap_unlock_read(vma->vm_file->f_mapping); |
Matthew Wilcox | 2e4cdab | 2015-02-16 15:58:50 -0800 | [diff] [blame] | 3044 | } |
Kirill A. Shutemov | ec47c3b | 2014-04-03 14:48:12 -0700 | [diff] [blame] | 3045 | return ret; |
| 3046 | uncharge_out: |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 3047 | mem_cgroup_cancel_charge(new_page, memcg, false); |
Kirill A. Shutemov | ec47c3b | 2014-04-03 14:48:12 -0700 | [diff] [blame] | 3048 | page_cache_release(new_page); |
| 3049 | return ret; |
| 3050 | } |
| 3051 | |
Kirill A. Shutemov | f0c6d4d | 2014-04-03 14:48:13 -0700 | [diff] [blame] | 3052 | static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma, |
Hugh Dickins | 16abfa0 | 2007-10-04 16:56:06 +0100 | [diff] [blame] | 3053 | unsigned long address, pmd_t *pmd, |
Nick Piggin | 54cb882 | 2007-07-19 01:46:59 -0700 | [diff] [blame] | 3054 | pgoff_t pgoff, unsigned int flags, pte_t orig_pte) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3055 | { |
Kirill A. Shutemov | f0c6d4d | 2014-04-03 14:48:13 -0700 | [diff] [blame] | 3056 | struct page *fault_page; |
| 3057 | struct address_space *mapping; |
Hugh Dickins | 8f4e210 | 2005-10-29 18:16:26 -0700 | [diff] [blame] | 3058 | spinlock_t *ptl; |
Kirill A. Shutemov | 3bb9779 | 2014-04-03 14:48:16 -0700 | [diff] [blame] | 3059 | pte_t *pte; |
Kirill A. Shutemov | f0c6d4d | 2014-04-03 14:48:13 -0700 | [diff] [blame] | 3060 | int dirtied = 0; |
Kirill A. Shutemov | f0c6d4d | 2014-04-03 14:48:13 -0700 | [diff] [blame] | 3061 | int ret, tmp; |
KAMEZAWA Hiroyuki | 1d65f86 | 2011-07-25 17:12:27 -0700 | [diff] [blame] | 3062 | |
Matthew Wilcox | 2e4cdab | 2015-02-16 15:58:50 -0800 | [diff] [blame] | 3063 | ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page); |
Kirill A. Shutemov | 7eae74a | 2014-04-03 14:48:10 -0700 | [diff] [blame] | 3064 | if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) |
Kirill A. Shutemov | f0c6d4d | 2014-04-03 14:48:13 -0700 | [diff] [blame] | 3065 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3066 | |
| 3067 | /* |
Kirill A. Shutemov | f0c6d4d | 2014-04-03 14:48:13 -0700 | [diff] [blame] | 3068 | * Check if the backing address space wants to know that the page is |
| 3069 | * about to become writable |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3070 | */ |
Kirill A. Shutemov | fb09a46 | 2014-04-03 14:48:15 -0700 | [diff] [blame] | 3071 | if (vma->vm_ops->page_mkwrite) { |
| 3072 | unlock_page(fault_page); |
| 3073 | tmp = do_page_mkwrite(vma, fault_page, address); |
| 3074 | if (unlikely(!tmp || |
| 3075 | (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { |
| 3076 | page_cache_release(fault_page); |
| 3077 | return tmp; |
| 3078 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3079 | } |
| 3080 | |
Kirill A. Shutemov | f0c6d4d | 2014-04-03 14:48:13 -0700 | [diff] [blame] | 3081 | pte = pte_offset_map_lock(mm, pmd, address, &ptl); |
| 3082 | if (unlikely(!pte_same(*pte, orig_pte))) { |
| 3083 | pte_unmap_unlock(pte, ptl); |
| 3084 | unlock_page(fault_page); |
| 3085 | page_cache_release(fault_page); |
| 3086 | return ret; |
Peter Zijlstra | d08b385 | 2006-09-25 23:30:57 -0700 | [diff] [blame] | 3087 | } |
Kirill A. Shutemov | 3bb9779 | 2014-04-03 14:48:16 -0700 | [diff] [blame] | 3088 | do_set_pte(vma, address, fault_page, pte, true, false); |
Kirill A. Shutemov | f0c6d4d | 2014-04-03 14:48:13 -0700 | [diff] [blame] | 3089 | pte_unmap_unlock(pte, ptl); |
| 3090 | |
| 3091 | if (set_page_dirty(fault_page)) |
| 3092 | dirtied = 1; |
Andrew Morton | d82fa87 | 2014-12-18 16:17:29 -0800 | [diff] [blame] | 3093 | /* |
| 3094 | * Take a local copy of the address_space - page.mapping may be zeroed |
| 3095 | * by truncate after unlock_page(). The address_space itself remains |
| 3096 | * pinned by vma->vm_file's reference. We rely on unlock_page()'s |
| 3097 | * release semantics to prevent the compiler from undoing this copying. |
| 3098 | */ |
Kirill A. Shutemov | 1c290f6 | 2016-01-15 16:52:07 -0800 | [diff] [blame] | 3099 | mapping = page_rmapping(fault_page); |
Kirill A. Shutemov | f0c6d4d | 2014-04-03 14:48:13 -0700 | [diff] [blame] | 3100 | unlock_page(fault_page); |
| 3101 | if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) { |
| 3102 | /* |
| 3103 | * Some device drivers do not set page.mapping but still |
| 3104 | * dirty their pages |
| 3105 | */ |
| 3106 | balance_dirty_pages_ratelimited(mapping); |
KAMEZAWA Hiroyuki | 1d65f86 | 2011-07-25 17:12:27 -0700 | [diff] [blame] | 3107 | } |
Kirill A. Shutemov | f0c6d4d | 2014-04-03 14:48:13 -0700 | [diff] [blame] | 3108 | |
Johannes Weiner | 74ec675 | 2015-02-10 14:11:28 -0800 | [diff] [blame] | 3109 | if (!vma->vm_ops->page_mkwrite) |
Kirill A. Shutemov | f0c6d4d | 2014-04-03 14:48:13 -0700 | [diff] [blame] | 3110 | file_update_time(vma->vm_file); |
| 3111 | |
KAMEZAWA Hiroyuki | 1d65f86 | 2011-07-25 17:12:27 -0700 | [diff] [blame] | 3112 | return ret; |
Nick Piggin | 54cb882 | 2007-07-19 01:46:59 -0700 | [diff] [blame] | 3113 | } |
Nick Piggin | d00806b | 2007-07-19 01:46:57 -0700 | [diff] [blame] | 3114 | |
Paul Cassella | 9a95f3c | 2014-08-06 16:07:24 -0700 | [diff] [blame] | 3115 | /* |
| 3116 | * We enter with non-exclusive mmap_sem (to exclude vma changes, |
| 3117 | * but allow concurrent faults). |
| 3118 | * The mmap_sem may have been released depending on flags and our |
| 3119 | * return value. See filemap_fault() and __lock_page_or_retry(). |
| 3120 | */ |
Kirill A. Shutemov | 9b4bdd2 | 2015-02-10 14:09:51 -0800 | [diff] [blame] | 3121 | static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma, |
Nick Piggin | 54cb882 | 2007-07-19 01:46:59 -0700 | [diff] [blame] | 3122 | unsigned long address, pte_t *page_table, pmd_t *pmd, |
Linus Torvalds | 30c9f3a | 2009-04-10 08:43:11 -0700 | [diff] [blame] | 3123 | unsigned int flags, pte_t orig_pte) |
Nick Piggin | 54cb882 | 2007-07-19 01:46:59 -0700 | [diff] [blame] | 3124 | { |
| 3125 | pgoff_t pgoff = (((address & PAGE_MASK) |
Dean Nelson | 0da7e01 | 2007-10-16 01:24:45 -0700 | [diff] [blame] | 3126 | - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; |
Nick Piggin | 54cb882 | 2007-07-19 01:46:59 -0700 | [diff] [blame] | 3127 | |
Hugh Dickins | 16abfa0 | 2007-10-04 16:56:06 +0100 | [diff] [blame] | 3128 | pte_unmap(page_table); |
Kirill A. Shutemov | 6b7339f | 2015-07-06 23:18:37 +0300 | [diff] [blame] | 3129 | /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */ |
| 3130 | if (!vma->vm_ops->fault) |
| 3131 | return VM_FAULT_SIGBUS; |
Kirill A. Shutemov | e655fb2 | 2014-04-03 14:48:11 -0700 | [diff] [blame] | 3132 | if (!(flags & FAULT_FLAG_WRITE)) |
| 3133 | return do_read_fault(mm, vma, address, pmd, pgoff, flags, |
| 3134 | orig_pte); |
Kirill A. Shutemov | ec47c3b | 2014-04-03 14:48:12 -0700 | [diff] [blame] | 3135 | if (!(vma->vm_flags & VM_SHARED)) |
| 3136 | return do_cow_fault(mm, vma, address, pmd, pgoff, flags, |
| 3137 | orig_pte); |
Kirill A. Shutemov | f0c6d4d | 2014-04-03 14:48:13 -0700 | [diff] [blame] | 3138 | return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); |
Nick Piggin | 54cb882 | 2007-07-19 01:46:59 -0700 | [diff] [blame] | 3139 | } |
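/*
 * Illustrative sketch, not part of this file: the kind of vm_operations_struct
 * that exercises all three paths above.  generic_file_vm_ops in mm/filemap.c
 * is the real-world instance; the three callbacks named here are assumed from
 * that file rather than defined in this one.
 */
static const struct vm_operations_struct example_file_vm_ops = {
	.fault		= filemap_fault,	/* without this, do_fault() returns VM_FAULT_SIGBUS */
	.map_pages	= filemap_map_pages,	/* lets do_read_fault() fault around */
	.page_mkwrite	= filemap_page_mkwrite,	/* consulted by do_shared_fault() */
};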
| 3140 | |
Rashika Kheria | b19a993 | 2014-04-03 14:48:02 -0700 | [diff] [blame] | 3141 | static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, |
Rik van Riel | 04bb2f9 | 2013-10-07 11:29:36 +0100 | [diff] [blame] | 3142 | unsigned long addr, int page_nid, |
| 3143 | int *flags) |
Mel Gorman | 9532fec | 2012-11-15 01:24:32 +0000 | [diff] [blame] | 3144 | { |
| 3145 | get_page(page); |
| 3146 | |
| 3147 | count_vm_numa_event(NUMA_HINT_FAULTS); |
Rik van Riel | 04bb2f9 | 2013-10-07 11:29:36 +0100 | [diff] [blame] | 3148 | if (page_nid == numa_node_id()) { |
Mel Gorman | 9532fec | 2012-11-15 01:24:32 +0000 | [diff] [blame] | 3149 | count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); |
Rik van Riel | 04bb2f9 | 2013-10-07 11:29:36 +0100 | [diff] [blame] | 3150 | *flags |= TNF_FAULT_LOCAL; |
| 3151 | } |
Mel Gorman | 9532fec | 2012-11-15 01:24:32 +0000 | [diff] [blame] | 3152 | |
| 3153 | return mpol_misplaced(page, vma, addr); |
| 3154 | } |
| 3155 | |
Rashika Kheria | b19a993 | 2014-04-03 14:48:02 -0700 | [diff] [blame] | 3156 | static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 3157 | unsigned long addr, pte_t pte, pte_t *ptep, pmd_t *pmd) |
| 3158 | { |
Mel Gorman | 4daae3b | 2012-11-02 11:33:45 +0000 | [diff] [blame] | 3159 | struct page *page = NULL; |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 3160 | spinlock_t *ptl; |
Mel Gorman | 8191acb | 2013-10-07 11:28:45 +0100 | [diff] [blame] | 3161 | int page_nid = -1; |
Peter Zijlstra | 9057289 | 2013-10-07 11:29:20 +0100 | [diff] [blame] | 3162 | int last_cpupid; |
Peter Zijlstra | cbee9f8 | 2012-10-25 14:16:43 +0200 | [diff] [blame] | 3163 | int target_nid; |
Mel Gorman | b8593bf | 2012-11-21 01:18:23 +0000 | [diff] [blame] | 3164 | bool migrated = false; |
Mel Gorman | b191f9b | 2015-03-25 15:55:40 -0700 | [diff] [blame] | 3165 | bool was_writable = pte_write(pte); |
Peter Zijlstra | 6688cc0 | 2013-10-07 11:29:24 +0100 | [diff] [blame] | 3166 | int flags = 0; |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 3167 | |
Mel Gorman | c0e7cad | 2015-02-12 14:58:41 -0800 | [diff] [blame] | 3168 | /* A PROT_NONE fault should not end up here */ |
| 3169 | BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))); |
| 3170 | |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 3171 | /* |
| 3172 | * The "pte" at this point cannot be used safely without |
| 3173 | * validation through pte_unmap_same(). It's of NUMA type but |
| 3174 | * the pfn may be screwed if the read is non-atomic. |
| 3175 | * |
Mel Gorman | 4d94246 | 2015-02-12 14:58:28 -0800 | [diff] [blame] | 3176 | * We can safely just do a "set_pte_at()", because the old |
| 3177 | * page table entry is not accessible, so there would be no |
| 3178 | * concurrent hardware modifications to the PTE. |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 3179 | */ |
| 3180 | ptl = pte_lockptr(mm, pmd); |
| 3181 | spin_lock(ptl); |
Mel Gorman | 4daae3b | 2012-11-02 11:33:45 +0000 | [diff] [blame] | 3182 | if (unlikely(!pte_same(*ptep, pte))) { |
| 3183 | pte_unmap_unlock(ptep, ptl); |
| 3184 | goto out; |
| 3185 | } |
| 3186 | |
Mel Gorman | 4d94246 | 2015-02-12 14:58:28 -0800 | [diff] [blame] | 3187 | /* Make it present again */ |
| 3188 | pte = pte_modify(pte, vma->vm_page_prot); |
| 3189 | pte = pte_mkyoung(pte); |
Mel Gorman | b191f9b | 2015-03-25 15:55:40 -0700 | [diff] [blame] | 3190 | if (was_writable) |
| 3191 | pte = pte_mkwrite(pte); |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 3192 | set_pte_at(mm, addr, ptep, pte); |
| 3193 | update_mmu_cache(vma, addr, ptep); |
| 3194 | |
| 3195 | page = vm_normal_page(vma, addr, pte); |
| 3196 | if (!page) { |
| 3197 | pte_unmap_unlock(ptep, ptl); |
| 3198 | return 0; |
| 3199 | } |
| 3200 | |
Kirill A. Shutemov | e81c480 | 2016-01-15 16:53:49 -0800 | [diff] [blame] | 3201 | /* TODO: handle PTE-mapped THP */ |
| 3202 | if (PageCompound(page)) { |
| 3203 | pte_unmap_unlock(ptep, ptl); |
| 3204 | return 0; |
| 3205 | } |
| 3206 | |
Peter Zijlstra | 6688cc0 | 2013-10-07 11:29:24 +0100 | [diff] [blame] | 3207 | /* |
Mel Gorman | bea66fb | 2015-03-25 15:55:37 -0700 | [diff] [blame] | 3208 | * Avoid grouping on RO pages in general. RO pages shouldn't hurt as |
| 3209 | * much anyway since they can be in shared cache state. This misses |
| 3210 | * the case where a mapping is writable but the process never writes |
| 3211 | * to it but pte_write gets cleared during protection updates and |
| 3212 | * pte_dirty has unpredictable behaviour between PTE scan updates, |
| 3213 | * background writeback, dirty balancing and application behaviour. |
Peter Zijlstra | 6688cc0 | 2013-10-07 11:29:24 +0100 | [diff] [blame] | 3214 | */ |
Mel Gorman | bea66fb | 2015-03-25 15:55:37 -0700 | [diff] [blame] | 3215 | if (!(vma->vm_flags & VM_WRITE)) |
Peter Zijlstra | 6688cc0 | 2013-10-07 11:29:24 +0100 | [diff] [blame] | 3216 | flags |= TNF_NO_GROUP; |
| 3217 | |
Rik van Riel | dabe1d9 | 2013-10-07 11:29:34 +0100 | [diff] [blame] | 3218 | /* |
| 3219 | * Flag if the page is shared between multiple address spaces. This |
| 3220 | * is later used when determining whether to group tasks together |
| 3221 | */ |
| 3222 | if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED)) |
| 3223 | flags |= TNF_SHARED; |
| 3224 | |
Peter Zijlstra | 9057289 | 2013-10-07 11:29:20 +0100 | [diff] [blame] | 3225 | last_cpupid = page_cpupid_last(page); |
Mel Gorman | 8191acb | 2013-10-07 11:28:45 +0100 | [diff] [blame] | 3226 | page_nid = page_to_nid(page); |
Rik van Riel | 04bb2f9 | 2013-10-07 11:29:36 +0100 | [diff] [blame] | 3227 | target_nid = numa_migrate_prep(page, vma, addr, page_nid, &flags); |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 3228 | pte_unmap_unlock(ptep, ptl); |
Mel Gorman | 4daae3b | 2012-11-02 11:33:45 +0000 | [diff] [blame] | 3229 | if (target_nid == -1) { |
Mel Gorman | 4daae3b | 2012-11-02 11:33:45 +0000 | [diff] [blame] | 3230 | put_page(page); |
| 3231 | goto out; |
| 3232 | } |
| 3233 | |
| 3234 | /* Migrate to the requested node */ |
Mel Gorman | 1bc115d | 2013-10-07 11:29:05 +0100 | [diff] [blame] | 3235 | migrated = migrate_misplaced_page(page, vma, target_nid); |
Peter Zijlstra | 6688cc0 | 2013-10-07 11:29:24 +0100 | [diff] [blame] | 3236 | if (migrated) { |
Mel Gorman | 8191acb | 2013-10-07 11:28:45 +0100 | [diff] [blame] | 3237 | page_nid = target_nid; |
Peter Zijlstra | 6688cc0 | 2013-10-07 11:29:24 +0100 | [diff] [blame] | 3238 | flags |= TNF_MIGRATED; |
Mel Gorman | 074c238 | 2015-03-25 15:55:42 -0700 | [diff] [blame] | 3239 | } else |
| 3240 | flags |= TNF_MIGRATE_FAIL; |
Mel Gorman | 4daae3b | 2012-11-02 11:33:45 +0000 | [diff] [blame] | 3241 | |
| 3242 | out: |
Mel Gorman | 8191acb | 2013-10-07 11:28:45 +0100 | [diff] [blame] | 3243 | if (page_nid != -1) |
Peter Zijlstra | 6688cc0 | 2013-10-07 11:29:24 +0100 | [diff] [blame] | 3244 | task_numa_fault(last_cpupid, page_nid, 1, flags); |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 3245 | return 0; |
| 3246 | } |
| 3247 | |
Matthew Wilcox | b96375f | 2015-09-08 14:58:48 -0700 | [diff] [blame] | 3248 | static int create_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma, |
| 3249 | unsigned long address, pmd_t *pmd, unsigned int flags) |
| 3250 | { |
Kirill A. Shutemov | fb6dd5f | 2015-09-09 15:39:35 -0700 | [diff] [blame] | 3251 | if (vma_is_anonymous(vma)) |
Matthew Wilcox | b96375f | 2015-09-08 14:58:48 -0700 | [diff] [blame] | 3252 | return do_huge_pmd_anonymous_page(mm, vma, address, pmd, flags); |
| 3253 | if (vma->vm_ops->pmd_fault) |
| 3254 | return vma->vm_ops->pmd_fault(vma, address, pmd, flags); |
| 3255 | return VM_FAULT_FALLBACK; |
| 3256 | } |
| 3257 | |
| 3258 | static int wp_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma, |
| 3259 | unsigned long address, pmd_t *pmd, pmd_t orig_pmd, |
| 3260 | unsigned int flags) |
| 3261 | { |
Kirill A. Shutemov | fb6dd5f | 2015-09-09 15:39:35 -0700 | [diff] [blame] | 3262 | if (vma_is_anonymous(vma)) |
Matthew Wilcox | b96375f | 2015-09-08 14:58:48 -0700 | [diff] [blame] | 3263 | return do_huge_pmd_wp_page(mm, vma, address, pmd, orig_pmd); |
| 3264 | if (vma->vm_ops->pmd_fault) |
| 3265 | return vma->vm_ops->pmd_fault(vma, address, pmd, flags); |
| 3266 | return VM_FAULT_FALLBACK; |
| 3267 | } |
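/*
 * Illustrative sketch, not part of this file: how a non-anonymous mapping
 * opts in to the huge-pmd paths above by providing vm_ops->pmd_fault().
 * DAX filesystems do this via dax_pmd_fault(); the example below simply
 * declines, which makes create_huge_pmd()/wp_huge_pmd() fall back to the
 * normal PTE-sized fault handling.
 */
static int example_pmd_fault(struct vm_area_struct *vma, unsigned long address,
			     pmd_t *pmd, unsigned int flags)
{
	/* Could not (or chose not to) build a huge mapping here. */
	return VM_FAULT_FALLBACK;
}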
| 3268 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3269 | /* |
| 3270 | * These routines also need to handle stuff like marking pages dirty |
| 3271 | * and/or accessed for architectures that don't do it in hardware (most |
| 3272 | * RISC architectures). The early dirtying is also good on the i386. |
| 3273 | * |
| 3274 | * There is also a hook called "update_mmu_cache()" that architectures |
| 3275 | * with external mmu caches can use to update those (ie the Sparc or |
| 3276 | * PowerPC hashed page tables that act as extended TLBs). |
| 3277 | * |
Hugh Dickins | c74df32 | 2005-10-29 18:16:23 -0700 | [diff] [blame] | 3278 | * We enter with non-exclusive mmap_sem (to exclude vma changes, |
| 3279 | * but allow concurrent faults), and pte mapped but not yet locked. |
Paul Cassella | 9a95f3c | 2014-08-06 16:07:24 -0700 | [diff] [blame] | 3280 | * We return with pte unmapped and unlocked. |
| 3281 | * |
| 3282 | * The mmap_sem may have been released depending on flags and our |
| 3283 | * return value. See filemap_fault() and __lock_page_or_retry(). |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3284 | */ |
Kirill A. Shutemov | c029255 | 2013-09-12 15:14:05 -0700 | [diff] [blame] | 3285 | static int handle_pte_fault(struct mm_struct *mm, |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 3286 | struct vm_area_struct *vma, unsigned long address, |
| 3287 | pte_t *pte, pmd_t *pmd, unsigned int flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3288 | { |
| 3289 | pte_t entry; |
Hugh Dickins | 8f4e210 | 2005-10-29 18:16:26 -0700 | [diff] [blame] | 3290 | spinlock_t *ptl; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3291 | |
Christian Borntraeger | e37c698 | 2014-12-07 21:41:33 +0100 | [diff] [blame] | 3292 | /* |
| 3293 | * some architectures can have ptes larger than the word size, |
| 3294 | * e.g. ppc44x-defconfig has CONFIG_PTE_64BIT=y and CONFIG_32BIT=y, |
| 3295 | * so READ_ONCE or ACCESS_ONCE cannot guarantee atomic accesses. |
| 3296 | * The code below just needs a consistent view for the ifs and |
| 3297 | * we later double check anyway with the ptl lock held. So here |
| 3298 | * a barrier will do. |
| 3299 | */ |
| 3300 | entry = *pte; |
| 3301 | barrier(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3302 | if (!pte_present(entry)) { |
Hugh Dickins | 65500d2 | 2005-10-29 18:15:59 -0700 | [diff] [blame] | 3303 | if (pte_none(entry)) { |
Oleg Nesterov | b533062 | 2015-09-08 14:58:28 -0700 | [diff] [blame] | 3304 | if (vma_is_anonymous(vma)) |
| 3305 | return do_anonymous_page(mm, vma, address, |
| 3306 | pte, pmd, flags); |
| 3307 | else |
Kirill A. Shutemov | 6b7339f | 2015-07-06 23:18:37 +0300 | [diff] [blame] | 3308 | return do_fault(mm, vma, address, pte, pmd, |
| 3309 | flags, entry); |
Hugh Dickins | 65500d2 | 2005-10-29 18:15:59 -0700 | [diff] [blame] | 3310 | } |
Hugh Dickins | 65500d2 | 2005-10-29 18:15:59 -0700 | [diff] [blame] | 3311 | return do_swap_page(mm, vma, address, |
Linus Torvalds | 30c9f3a | 2009-04-10 08:43:11 -0700 | [diff] [blame] | 3312 | pte, pmd, flags, entry); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3313 | } |
| 3314 | |
Mel Gorman | 8a0516e | 2015-02-12 14:58:22 -0800 | [diff] [blame] | 3315 | if (pte_protnone(entry)) |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 3316 | return do_numa_page(mm, vma, address, entry, pte, pmd); |
| 3317 | |
Hugh Dickins | 4c21e2f | 2005-10-29 18:16:40 -0700 | [diff] [blame] | 3318 | ptl = pte_lockptr(mm, pmd); |
Hugh Dickins | 8f4e210 | 2005-10-29 18:16:26 -0700 | [diff] [blame] | 3319 | spin_lock(ptl); |
| 3320 | if (unlikely(!pte_same(*pte, entry))) |
| 3321 | goto unlock; |
Linus Torvalds | 30c9f3a | 2009-04-10 08:43:11 -0700 | [diff] [blame] | 3322 | if (flags & FAULT_FLAG_WRITE) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3323 | if (!pte_write(entry)) |
Hugh Dickins | 8f4e210 | 2005-10-29 18:16:26 -0700 | [diff] [blame] | 3324 | return do_wp_page(mm, vma, address, |
| 3325 | pte, pmd, ptl, entry); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3326 | entry = pte_mkdirty(entry); |
| 3327 | } |
| 3328 | entry = pte_mkyoung(entry); |
Linus Torvalds | 30c9f3a | 2009-04-10 08:43:11 -0700 | [diff] [blame] | 3329 | if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) { |
Russell King | 4b3073e | 2009-12-18 16:40:18 +0000 | [diff] [blame] | 3330 | update_mmu_cache(vma, address, pte); |
Andrea Arcangeli | 1a44e14 | 2005-10-29 18:16:48 -0700 | [diff] [blame] | 3331 | } else { |
| 3332 | /* |
| 3333 | * This is needed only for protection faults but the arch code |
| 3334 | * is not yet telling us if this is a protection fault or not. |
| 3335 | * This still avoids useless tlb flushes for .text page faults |
| 3336 | * with threads. |
| 3337 | */ |
Linus Torvalds | 30c9f3a | 2009-04-10 08:43:11 -0700 | [diff] [blame] | 3338 | if (flags & FAULT_FLAG_WRITE) |
Shaohua Li | 61c7732 | 2010-08-16 09:16:55 +0800 | [diff] [blame] | 3339 | flush_tlb_fix_spurious_fault(vma, address); |
Andrea Arcangeli | 1a44e14 | 2005-10-29 18:16:48 -0700 | [diff] [blame] | 3340 | } |
Hugh Dickins | 8f4e210 | 2005-10-29 18:16:26 -0700 | [diff] [blame] | 3341 | unlock: |
| 3342 | pte_unmap_unlock(pte, ptl); |
Nick Piggin | 83c5407 | 2007-07-19 01:47:05 -0700 | [diff] [blame] | 3343 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3344 | } |
| 3345 | |
| 3346 | /* |
| 3347 | * By the time we get here, we already hold the mm semaphore |
Paul Cassella | 9a95f3c | 2014-08-06 16:07:24 -0700 | [diff] [blame] | 3348 | * |
| 3349 | * The mmap_sem may have been released depending on flags and our |
| 3350 | * return value. See filemap_fault() and __lock_page_or_retry(). |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3351 | */ |
Johannes Weiner | 519e524 | 2013-09-12 15:13:42 -0700 | [diff] [blame] | 3352 | static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, |
| 3353 | unsigned long address, unsigned int flags) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3354 | { |
| 3355 | pgd_t *pgd; |
| 3356 | pud_t *pud; |
| 3357 | pmd_t *pmd; |
| 3358 | pte_t *pte; |
| 3359 | |
Hugh Dickins | ac9b9c6 | 2005-10-20 16:24:28 +0100 | [diff] [blame] | 3360 | if (unlikely(is_vm_hugetlb_page(vma))) |
Linus Torvalds | 30c9f3a | 2009-04-10 08:43:11 -0700 | [diff] [blame] | 3361 | return hugetlb_fault(mm, vma, address, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3362 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3363 | pgd = pgd_offset(mm, address); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3364 | pud = pud_alloc(mm, pgd, address); |
| 3365 | if (!pud) |
Hugh Dickins | c74df32 | 2005-10-29 18:16:23 -0700 | [diff] [blame] | 3366 | return VM_FAULT_OOM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3367 | pmd = pmd_alloc(mm, pud, address); |
| 3368 | if (!pmd) |
Hugh Dickins | c74df32 | 2005-10-29 18:16:23 -0700 | [diff] [blame] | 3369 | return VM_FAULT_OOM; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 3370 | if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) { |
Matthew Wilcox | b96375f | 2015-09-08 14:58:48 -0700 | [diff] [blame] | 3371 | int ret = create_huge_pmd(mm, vma, address, pmd, flags); |
Kirill A. Shutemov | c029255 | 2013-09-12 15:14:05 -0700 | [diff] [blame] | 3372 | if (!(ret & VM_FAULT_FALLBACK)) |
| 3373 | return ret; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 3374 | } else { |
| 3375 | pmd_t orig_pmd = *pmd; |
David Rientjes | 1f1d06c | 2012-05-29 15:06:23 -0700 | [diff] [blame] | 3376 | int ret; |
| 3377 | |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 3378 | barrier(); |
Dan Williams | 5c7fb56 | 2016-01-15 16:56:52 -0800 | [diff] [blame] | 3379 | if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) { |
Will Deacon | a1dd450 | 2012-12-11 16:01:27 -0800 | [diff] [blame] | 3380 | unsigned int dirty = flags & FAULT_FLAG_WRITE; |
| 3381 | |
Mel Gorman | 8a0516e | 2015-02-12 14:58:22 -0800 | [diff] [blame] | 3382 | if (pmd_protnone(orig_pmd)) |
Mel Gorman | 4daae3b | 2012-11-02 11:33:45 +0000 | [diff] [blame] | 3383 | return do_huge_pmd_numa_page(mm, vma, address, |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 3384 | orig_pmd, pmd); |
| 3385 | |
Linus Torvalds | 3d59eeb | 2012-12-16 14:33:25 -0800 | [diff] [blame] | 3386 | if (dirty && !pmd_write(orig_pmd)) { |
Matthew Wilcox | b96375f | 2015-09-08 14:58:48 -0700 | [diff] [blame] | 3387 | ret = wp_huge_pmd(mm, vma, address, pmd, |
| 3388 | orig_pmd, flags); |
Kirill A. Shutemov | 9845cbb | 2014-02-25 15:01:42 -0800 | [diff] [blame] | 3389 | if (!(ret & VM_FAULT_FALLBACK)) |
| 3390 | return ret; |
Will Deacon | a1dd450 | 2012-12-11 16:01:27 -0800 | [diff] [blame] | 3391 | } else { |
| 3392 | huge_pmd_set_accessed(mm, vma, address, pmd, |
| 3393 | orig_pmd, dirty); |
Kirill A. Shutemov | 9845cbb | 2014-02-25 15:01:42 -0800 | [diff] [blame] | 3394 | return 0; |
David Rientjes | 1f1d06c | 2012-05-29 15:06:23 -0700 | [diff] [blame] | 3395 | } |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 3396 | } |
| 3397 | } |
| 3398 | |
| 3399 | /* |
| 3400 | * Use __pte_alloc instead of pte_alloc_map, because we can't |
| 3401 | * run pte_offset_map on the pmd, as a huge pmd could |
| 3402 | * materialize from under us in a different thread. |
| 3403 | */ |
Mel Gorman | 4fd0177 | 2011-10-12 21:06:51 +0200 | [diff] [blame] | 3404 | if (unlikely(pmd_none(*pmd)) && |
| 3405 | unlikely(__pte_alloc(mm, vma, pmd, address))) |
Hugh Dickins | c74df32 | 2005-10-29 18:16:23 -0700 | [diff] [blame] | 3406 | return VM_FAULT_OOM; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 3407 | /* if a huge pmd materialized from under us, just retry later */ |
Dan Williams | 5c7fb56 | 2016-01-15 16:56:52 -0800 | [diff] [blame] | 3408 | if (unlikely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd))) |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 3409 | return 0; |
| 3410 | /* |
| 3411 | * A regular pmd is established and it can't morph into a huge pmd |
| 3412 | * from under us anymore at this point because we hold the mmap_sem |
| 3413 | * read mode and khugepaged takes it in write mode. So now it's |
| 3414 | * safe to run pte_offset_map(). |
| 3415 | */ |
| 3416 | pte = pte_offset_map(pmd, address); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3417 | |
Linus Torvalds | 30c9f3a | 2009-04-10 08:43:11 -0700 | [diff] [blame] | 3418 | return handle_pte_fault(mm, vma, address, pte, pmd, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3419 | } |
| 3420 | |
Paul Cassella | 9a95f3c | 2014-08-06 16:07:24 -0700 | [diff] [blame] | 3421 | /* |
| 3422 | * By the time we get here, we already hold the mm semaphore |
| 3423 | * |
| 3424 | * The mmap_sem may have been released depending on flags and our |
| 3425 | * return value. See filemap_fault() and __lock_page_or_retry(). |
| 3426 | */ |
Johannes Weiner | 519e524 | 2013-09-12 15:13:42 -0700 | [diff] [blame] | 3427 | int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, |
| 3428 | unsigned long address, unsigned int flags) |
| 3429 | { |
| 3430 | int ret; |
| 3431 | |
| 3432 | __set_current_state(TASK_RUNNING); |
| 3433 | |
| 3434 | count_vm_event(PGFAULT); |
| 3435 | mem_cgroup_count_vm_event(mm, PGFAULT); |
| 3436 | |
| 3437 | /* do counter updates before entering really critical section. */ |
| 3438 | check_sync_rss_stat(current); |
| 3439 | |
| 3440 | /* |
| 3441 | * Enable the memcg OOM handling for faults triggered in user |
| 3442 | * space. Kernel faults are handled more gracefully. |
| 3443 | */ |
| 3444 | if (flags & FAULT_FLAG_USER) |
Johannes Weiner | 4942642 | 2013-10-16 13:46:59 -0700 | [diff] [blame] | 3445 | mem_cgroup_oom_enable(); |
Johannes Weiner | 519e524 | 2013-09-12 15:13:42 -0700 | [diff] [blame] | 3446 | |
| 3447 | ret = __handle_mm_fault(mm, vma, address, flags); |
| 3448 | |
Johannes Weiner | 4942642 | 2013-10-16 13:46:59 -0700 | [diff] [blame] | 3449 | if (flags & FAULT_FLAG_USER) { |
| 3450 | mem_cgroup_oom_disable(); |
| 3451 | /* |
| 3452 | * The task may have entered a memcg OOM situation but |
| 3453 | * if the allocation error was handled gracefully (no |
| 3454 | * VM_FAULT_OOM), there is no need to kill anything. |
| 3455 | * Just clean up the OOM state peacefully. |
| 3456 | */ |
| 3457 | if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM)) |
| 3458 | mem_cgroup_oom_synchronize(false); |
| 3459 | } |
Johannes Weiner | 3812c8c | 2013-09-12 15:13:44 -0700 | [diff] [blame] | 3460 | |
Johannes Weiner | 519e524 | 2013-09-12 15:13:42 -0700 | [diff] [blame] | 3461 | return ret; |
| 3462 | } |
Jesse Barnes | e1d6d01 | 2014-12-12 16:55:27 -0800 | [diff] [blame] | 3463 | EXPORT_SYMBOL_GPL(handle_mm_fault); |
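/*
 * Illustrative sketch, not part of this file: the usual calling pattern
 * from an architecture's page-fault handler (see arch/<arch>/mm/fault.c).
 * Stack expansion, access checks, signal delivery and the full
 * VM_FAULT_RETRY handling are deliberately left out.
 */
static void example_arch_fault(struct mm_struct *mm, unsigned long address,
			       bool write, bool user)
{
	struct vm_area_struct *vma;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	int fault;

	if (user)
		flags |= FAULT_FLAG_USER;	/* enables memcg OOM handling above */
	if (write)
		flags |= FAULT_FLAG_WRITE;

	down_read(&mm->mmap_sem);		/* handle_mm_fault() wants mmap_sem held */
	vma = find_vma(mm, address);
	if (!vma || vma->vm_start > address) {
		up_read(&mm->mmap_sem);		/* real handlers deliver SIGSEGV here */
		return;
	}

	fault = handle_mm_fault(mm, vma, address, flags);
	if (fault & VM_FAULT_RETRY)
		return;				/* mmap_sem was dropped for us; retry */

	up_read(&mm->mmap_sem);			/* real handlers also check VM_FAULT_ERROR */
}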
Johannes Weiner | 519e524 | 2013-09-12 15:13:42 -0700 | [diff] [blame] | 3464 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3465 | #ifndef __PAGETABLE_PUD_FOLDED |
| 3466 | /* |
| 3467 | * Allocate page upper directory. |
Hugh Dickins | 872fec1 | 2005-10-29 18:16:21 -0700 | [diff] [blame] | 3468 | * We've already handled the fast-path in-line. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3469 | */ |
Hugh Dickins | 1bb3630 | 2005-10-29 18:16:22 -0700 | [diff] [blame] | 3470 | int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3471 | { |
Hugh Dickins | c74df32 | 2005-10-29 18:16:23 -0700 | [diff] [blame] | 3472 | pud_t *new = pud_alloc_one(mm, address); |
| 3473 | if (!new) |
Hugh Dickins | 1bb3630 | 2005-10-29 18:16:22 -0700 | [diff] [blame] | 3474 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3475 | |
Nick Piggin | 362a61a | 2008-05-14 06:37:36 +0200 | [diff] [blame] | 3476 | smp_wmb(); /* See comment in __pte_alloc */ |
| 3477 | |
Hugh Dickins | 872fec1 | 2005-10-29 18:16:21 -0700 | [diff] [blame] | 3478 | spin_lock(&mm->page_table_lock); |
Hugh Dickins | 1bb3630 | 2005-10-29 18:16:22 -0700 | [diff] [blame] | 3479 | if (pgd_present(*pgd)) /* Another has populated it */ |
Benjamin Herrenschmidt | 5e54197 | 2008-02-04 22:29:14 -0800 | [diff] [blame] | 3480 | pud_free(mm, new); |
Hugh Dickins | 1bb3630 | 2005-10-29 18:16:22 -0700 | [diff] [blame] | 3481 | else |
| 3482 | pgd_populate(mm, pgd, new); |
Hugh Dickins | c74df32 | 2005-10-29 18:16:23 -0700 | [diff] [blame] | 3483 | spin_unlock(&mm->page_table_lock); |
Hugh Dickins | 1bb3630 | 2005-10-29 18:16:22 -0700 | [diff] [blame] | 3484 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3485 | } |
| 3486 | #endif /* __PAGETABLE_PUD_FOLDED */ |
| 3487 | |
| 3488 | #ifndef __PAGETABLE_PMD_FOLDED |
| 3489 | /* |
| 3490 | * Allocate page middle directory. |
Hugh Dickins | 872fec1 | 2005-10-29 18:16:21 -0700 | [diff] [blame] | 3491 | * We've already handled the fast-path in-line. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3492 | */ |
Hugh Dickins | 1bb3630 | 2005-10-29 18:16:22 -0700 | [diff] [blame] | 3493 | int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3494 | { |
Hugh Dickins | c74df32 | 2005-10-29 18:16:23 -0700 | [diff] [blame] | 3495 | pmd_t *new = pmd_alloc_one(mm, address); |
| 3496 | if (!new) |
Hugh Dickins | 1bb3630 | 2005-10-29 18:16:22 -0700 | [diff] [blame] | 3497 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3498 | |
Nick Piggin | 362a61a | 2008-05-14 06:37:36 +0200 | [diff] [blame] | 3499 | smp_wmb(); /* See comment in __pte_alloc */ |
| 3500 | |
Hugh Dickins | 872fec1 | 2005-10-29 18:16:21 -0700 | [diff] [blame] | 3501 | spin_lock(&mm->page_table_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3502 | #ifndef __ARCH_HAS_4LEVEL_HACK |
Kirill A. Shutemov | dc6c9a3 | 2015-02-11 15:26:50 -0800 | [diff] [blame] | 3503 | if (!pud_present(*pud)) { |
| 3504 | mm_inc_nr_pmds(mm); |
Hugh Dickins | 1bb3630 | 2005-10-29 18:16:22 -0700 | [diff] [blame] | 3505 | pud_populate(mm, pud, new); |
Kirill A. Shutemov | dc6c9a3 | 2015-02-11 15:26:50 -0800 | [diff] [blame] | 3506 | } else /* Another has populated it */ |
Benjamin Herrenschmidt | 5e54197 | 2008-02-04 22:29:14 -0800 | [diff] [blame] | 3507 | pmd_free(mm, new); |
Kirill A. Shutemov | dc6c9a3 | 2015-02-11 15:26:50 -0800 | [diff] [blame] | 3508 | #else |
| 3509 | if (!pgd_present(*pud)) { |
| 3510 | mm_inc_nr_pmds(mm); |
Hugh Dickins | 1bb3630 | 2005-10-29 18:16:22 -0700 | [diff] [blame] | 3511 | pgd_populate(mm, pud, new); |
Kirill A. Shutemov | dc6c9a3 | 2015-02-11 15:26:50 -0800 | [diff] [blame] | 3512 | } else /* Another has populated it */ |
| 3513 | pmd_free(mm, new); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3514 | #endif /* __ARCH_HAS_4LEVEL_HACK */ |
Hugh Dickins | c74df32 | 2005-10-29 18:16:23 -0700 | [diff] [blame] | 3515 | spin_unlock(&mm->page_table_lock); |
Hugh Dickins | 1bb3630 | 2005-10-29 18:16:22 -0700 | [diff] [blame] | 3516 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3517 | } |
| 3518 | #endif /* __PAGETABLE_PMD_FOLDED */ |
| 3519 | |
Namhyung Kim | 1b36ba8 | 2010-10-26 14:22:00 -0700 | [diff] [blame] | 3520 | static int __follow_pte(struct mm_struct *mm, unsigned long address, |
Johannes Weiner | f8ad0f49 | 2009-06-16 15:32:33 -0700 | [diff] [blame] | 3521 | pte_t **ptepp, spinlock_t **ptlp) |
| 3522 | { |
| 3523 | pgd_t *pgd; |
| 3524 | pud_t *pud; |
| 3525 | pmd_t *pmd; |
| 3526 | pte_t *ptep; |
| 3527 | |
| 3528 | pgd = pgd_offset(mm, address); |
| 3529 | if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) |
| 3530 | goto out; |
| 3531 | |
| 3532 | pud = pud_offset(pgd, address); |
| 3533 | if (pud_none(*pud) || unlikely(pud_bad(*pud))) |
| 3534 | goto out; |
| 3535 | |
| 3536 | pmd = pmd_offset(pud, address); |
Andrea Arcangeli | f66055ab | 2011-01-13 15:46:54 -0800 | [diff] [blame] | 3537 | VM_BUG_ON(pmd_trans_huge(*pmd)); |
Johannes Weiner | f8ad0f49 | 2009-06-16 15:32:33 -0700 | [diff] [blame] | 3538 | if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) |
| 3539 | goto out; |
| 3540 | |
| 3541 | /* We cannot handle huge page PFN maps. Luckily they don't exist. */ |
| 3542 | if (pmd_huge(*pmd)) |
| 3543 | goto out; |
| 3544 | |
| 3545 | ptep = pte_offset_map_lock(mm, pmd, address, ptlp); |
| 3546 | if (!ptep) |
| 3547 | goto out; |
| 3548 | if (!pte_present(*ptep)) |
| 3549 | goto unlock; |
| 3550 | *ptepp = ptep; |
| 3551 | return 0; |
| 3552 | unlock: |
| 3553 | pte_unmap_unlock(ptep, *ptlp); |
| 3554 | out: |
| 3555 | return -EINVAL; |
| 3556 | } |
| 3557 | |
Namhyung Kim | 1b36ba8 | 2010-10-26 14:22:00 -0700 | [diff] [blame] | 3558 | static inline int follow_pte(struct mm_struct *mm, unsigned long address, |
| 3559 | pte_t **ptepp, spinlock_t **ptlp) |
| 3560 | { |
| 3561 | int res; |
| 3562 | |
| 3563 | /* (void) is needed to make gcc happy */ |
| 3564 | (void) __cond_lock(*ptlp, |
| 3565 | !(res = __follow_pte(mm, address, ptepp, ptlp))); |
| 3566 | return res; |
| 3567 | } |
| 3568 | |
Johannes Weiner | 3b6748e | 2009-06-16 15:32:35 -0700 | [diff] [blame] | 3569 | /** |
| 3570 | * follow_pfn - look up PFN at a user virtual address |
| 3571 | * @vma: memory mapping |
| 3572 | * @address: user virtual address |
| 3573 | * @pfn: location to store found PFN |
| 3574 | * |
| 3575 | * Only IO mappings and raw PFN mappings are allowed. |
| 3576 | * |
| 3577 | * Returns zero and the pfn at @pfn on success, -ve otherwise. |
| 3578 | */ |
| 3579 | int follow_pfn(struct vm_area_struct *vma, unsigned long address, |
| 3580 | unsigned long *pfn) |
| 3581 | { |
| 3582 | int ret = -EINVAL; |
| 3583 | spinlock_t *ptl; |
| 3584 | pte_t *ptep; |
| 3585 | |
| 3586 | if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) |
| 3587 | return ret; |
| 3588 | |
| 3589 | ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); |
| 3590 | if (ret) |
| 3591 | return ret; |
| 3592 | *pfn = pte_pfn(*ptep); |
| 3593 | pte_unmap_unlock(ptep, ptl); |
| 3594 | return 0; |
| 3595 | } |
| 3596 | EXPORT_SYMBOL(follow_pfn); |
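/*
 * Illustrative sketch, not part of this file: a driver resolving the PFN
 * behind a VM_PFNMAP/VM_IO mapping it set up earlier.  The function name is
 * hypothetical; only the follow_pfn() interface above is assumed, and the
 * caller is expected to hold mmap_sem while the vma is used.
 */
static int example_report_pfn(struct vm_area_struct *vma, unsigned long uaddr)
{
	unsigned long pfn;
	int ret;

	ret = follow_pfn(vma, uaddr, &pfn);	/* -EINVAL unless VM_IO | VM_PFNMAP */
	if (ret)
		return ret;

	pr_info("address %#lx is backed by pfn %#lx\n", uaddr, pfn);
	return 0;
}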
| 3597 | |
Rik van Riel | 28b2ee2 | 2008-07-23 21:27:05 -0700 | [diff] [blame] | 3598 | #ifdef CONFIG_HAVE_IOREMAP_PROT |
venkatesh.pallipadi@intel.com | d87fe66 | 2008-12-19 13:47:27 -0800 | [diff] [blame] | 3599 | int follow_phys(struct vm_area_struct *vma, |
| 3600 | unsigned long address, unsigned int flags, |
| 3601 | unsigned long *prot, resource_size_t *phys) |
Rik van Riel | 28b2ee2 | 2008-07-23 21:27:05 -0700 | [diff] [blame] | 3602 | { |
Johannes Weiner | 03668a4 | 2009-06-16 15:32:34 -0700 | [diff] [blame] | 3603 | int ret = -EINVAL; |
Rik van Riel | 28b2ee2 | 2008-07-23 21:27:05 -0700 | [diff] [blame] | 3604 | pte_t *ptep, pte; |
| 3605 | spinlock_t *ptl; |
Rik van Riel | 28b2ee2 | 2008-07-23 21:27:05 -0700 | [diff] [blame] | 3606 | |
venkatesh.pallipadi@intel.com | d87fe66 | 2008-12-19 13:47:27 -0800 | [diff] [blame] | 3607 | if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) |
| 3608 | goto out; |
Rik van Riel | 28b2ee2 | 2008-07-23 21:27:05 -0700 | [diff] [blame] | 3609 | |
Johannes Weiner | 03668a4 | 2009-06-16 15:32:34 -0700 | [diff] [blame] | 3610 | if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) |
venkatesh.pallipadi@intel.com | d87fe66 | 2008-12-19 13:47:27 -0800 | [diff] [blame] | 3611 | goto out; |
Rik van Riel | 28b2ee2 | 2008-07-23 21:27:05 -0700 | [diff] [blame] | 3612 | pte = *ptep; |
Johannes Weiner | 03668a4 | 2009-06-16 15:32:34 -0700 | [diff] [blame] | 3613 | |
Rik van Riel | 28b2ee2 | 2008-07-23 21:27:05 -0700 | [diff] [blame] | 3614 | if ((flags & FOLL_WRITE) && !pte_write(pte)) |
| 3615 | goto unlock; |
Rik van Riel | 28b2ee2 | 2008-07-23 21:27:05 -0700 | [diff] [blame] | 3616 | |
| 3617 | *prot = pgprot_val(pte_pgprot(pte)); |
Johannes Weiner | 03668a4 | 2009-06-16 15:32:34 -0700 | [diff] [blame] | 3618 | *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT; |
Rik van Riel | 28b2ee2 | 2008-07-23 21:27:05 -0700 | [diff] [blame] | 3619 | |
Johannes Weiner | 03668a4 | 2009-06-16 15:32:34 -0700 | [diff] [blame] | 3620 | ret = 0; |
Rik van Riel | 28b2ee2 | 2008-07-23 21:27:05 -0700 | [diff] [blame] | 3621 | unlock: |
| 3622 | pte_unmap_unlock(ptep, ptl); |
| 3623 | out: |
venkatesh.pallipadi@intel.com | d87fe66 | 2008-12-19 13:47:27 -0800 | [diff] [blame] | 3624 | return ret; |
Rik van Riel | 28b2ee2 | 2008-07-23 21:27:05 -0700 | [diff] [blame] | 3625 | } |
| 3626 | |
| 3627 | int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, |
| 3628 | void *buf, int len, int write) |
| 3629 | { |
| 3630 | resource_size_t phys_addr; |
| 3631 | unsigned long prot = 0; |
KOSAKI Motohiro | 2bc7273 | 2009-01-06 14:39:43 -0800 | [diff] [blame] | 3632 | void __iomem *maddr; |
Rik van Riel | 28b2ee2 | 2008-07-23 21:27:05 -0700 | [diff] [blame] | 3633 | int offset = addr & (PAGE_SIZE-1); |
| 3634 | |
venkatesh.pallipadi@intel.com | d87fe66 | 2008-12-19 13:47:27 -0800 | [diff] [blame] | 3635 | if (follow_phys(vma, addr, write, &prot, &phys_addr)) |
Rik van Riel | 28b2ee2 | 2008-07-23 21:27:05 -0700 | [diff] [blame] | 3636 | return -EINVAL; |
| 3637 | |
Grazvydas Ignotas | 9cb12d7 | 2015-02-12 15:00:19 -0800 | [diff] [blame] | 3638 | maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot); |
Rik van Riel | 28b2ee2 | 2008-07-23 21:27:05 -0700 | [diff] [blame] | 3639 | if (write) |
| 3640 | memcpy_toio(maddr + offset, buf, len); |
| 3641 | else |
| 3642 | memcpy_fromio(buf, maddr + offset, len); |
| 3643 | iounmap(maddr); |
| 3644 | |
| 3645 | return len; |
| 3646 | } |
Uwe Kleine-König | 5a73633 | 2013-08-07 13:02:52 +0200 | [diff] [blame] | 3647 | EXPORT_SYMBOL_GPL(generic_access_phys); |
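
/*
 * Illustrative sketch, not part of this file: a driver that maps device
 * memory with VM_IO/VM_PFNMAP would typically wire generic_access_phys()
 * up as its ->access() handler, so ptrace() and /proc/<pid>/mem can read
 * and write the mapping.  The vm_operations_struct below is hypothetical.
 */
static const struct vm_operations_struct example_phys_mapping_vm_ops = {
	.access = generic_access_phys,
};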
Rik van Riel | 28b2ee2 | 2008-07-23 21:27:05 -0700 | [diff] [blame] | 3648 | #endif |
| 3649 | |
David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 3650 | /* |
Stephen Wilson | 206cb63 | 2011-03-13 15:49:19 -0400 | [diff] [blame] | 3651 | * Access another process' address space as given in @mm. If @tsk is |
| 3652 | * non-NULL, it is used for page fault accounting. |
David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 3653 | */ |
Stephen Wilson | 206cb63 | 2011-03-13 15:49:19 -0400 | [diff] [blame] | 3654 | static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, |
| 3655 | unsigned long addr, void *buf, int len, int write) |
David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 3656 | { |
David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 3657 | struct vm_area_struct *vma; |
David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 3658 | void *old_buf = buf; |
| 3659 | |
David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 3660 | down_read(&mm->mmap_sem); |
Simon Arlott | 183ff22 | 2007-10-20 01:27:18 +0200 | [diff] [blame] | 3661 | /* ignore errors, just check how much was successfully transferred */ |
David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 3662 | while (len) { |
| 3663 | int bytes, ret, offset; |
| 3664 | void *maddr; |
Rik van Riel | 28b2ee2 | 2008-07-23 21:27:05 -0700 | [diff] [blame] | 3665 | struct page *page = NULL; |
David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 3666 | |
| 3667 | ret = get_user_pages(tsk, mm, addr, 1, |
| 3668 | write, 1, &page, &vma); |
Rik van Riel | 28b2ee2 | 2008-07-23 21:27:05 -0700 | [diff] [blame] | 3669 | if (ret <= 0) { |
Rik van Riel | dbffcd0 | 2014-08-06 16:08:12 -0700 | [diff] [blame] | 3670 | #ifndef CONFIG_HAVE_IOREMAP_PROT |
| 3671 | break; |
| 3672 | #else |
Rik van Riel | 28b2ee2 | 2008-07-23 21:27:05 -0700 | [diff] [blame] | 3673 | /* |
| 3674 | * Check if this is a VM_IO | VM_PFNMAP VMA, which |
| 3675 | * we can access using slightly different code. |
| 3676 | */ |
Rik van Riel | 28b2ee2 | 2008-07-23 21:27:05 -0700 | [diff] [blame] | 3677 | vma = find_vma(mm, addr); |
Michael Ellerman | fe936df | 2011-04-14 15:22:10 -0700 | [diff] [blame] | 3678 | if (!vma || vma->vm_start > addr) |
Rik van Riel | 28b2ee2 | 2008-07-23 21:27:05 -0700 | [diff] [blame] | 3679 | break; |
| 3680 | if (vma->vm_ops && vma->vm_ops->access) |
| 3681 | ret = vma->vm_ops->access(vma, addr, buf, |
| 3682 | len, write); |
| 3683 | if (ret <= 0) |
Rik van Riel | 28b2ee2 | 2008-07-23 21:27:05 -0700 | [diff] [blame] | 3684 | break; |
| 3685 | bytes = ret; |
Rik van Riel | dbffcd0 | 2014-08-06 16:08:12 -0700 | [diff] [blame] | 3686 | #endif |
David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 3687 | } else { |
Rik van Riel | 28b2ee2 | 2008-07-23 21:27:05 -0700 | [diff] [blame] | 3688 | bytes = len; |
| 3689 | offset = addr & (PAGE_SIZE-1); |
| 3690 | if (bytes > PAGE_SIZE-offset) |
| 3691 | bytes = PAGE_SIZE-offset; |
| 3692 | |
| 3693 | maddr = kmap(page); |
| 3694 | if (write) { |
| 3695 | copy_to_user_page(vma, page, addr, |
| 3696 | maddr + offset, buf, bytes); |
| 3697 | set_page_dirty_lock(page); |
| 3698 | } else { |
| 3699 | copy_from_user_page(vma, page, addr, |
| 3700 | buf, maddr + offset, bytes); |
| 3701 | } |
| 3702 | kunmap(page); |
| 3703 | page_cache_release(page); |
David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 3704 | } |
David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 3705 | len -= bytes; |
| 3706 | buf += bytes; |
| 3707 | addr += bytes; |
| 3708 | } |
| 3709 | up_read(&mm->mmap_sem); |
David Howells | 0ec76a1 | 2006-09-27 01:50:15 -0700 | [diff] [blame] | 3710 | |
| 3711 | return buf - old_buf; |
| 3712 | } |
Andi Kleen | 0325291 | 2008-01-30 13:33:18 +0100 | [diff] [blame] | 3713 | |
Stephen Wilson | 5ddd36b | 2011-03-13 15:49:20 -0400 | [diff] [blame] | 3714 | /** |
Randy Dunlap | ae91dbf | 2011-03-26 13:27:01 -0700 | [diff] [blame] | 3715 | * access_remote_vm - access another process' address space |
Stephen Wilson | 5ddd36b | 2011-03-13 15:49:20 -0400 | [diff] [blame] | 3716 | * @mm: the mm_struct of the target address space |
| 3717 | * @addr: start address to access |
| 3718 | * @buf: source or destination buffer |
| 3719 | * @len: number of bytes to transfer |
| 3720 | * @write: whether the access is a write |
| 3721 | * |
| 3722 | * The caller must hold a reference on @mm. |
 *
 * Return: number of bytes copied into or out of @buf.
| 3723 | */ |
| 3724 | int access_remote_vm(struct mm_struct *mm, unsigned long addr, |
| 3725 | void *buf, int len, int write) |
| 3726 | { |
| 3727 | return __access_remote_vm(NULL, mm, addr, buf, len, write); |
| 3728 | } |
| 3729 | |
Andi Kleen | 0325291 | 2008-01-30 13:33:18 +0100 | [diff] [blame] | 3730 | /* |
Stephen Wilson | 206cb63 | 2011-03-13 15:49:19 -0400 | [diff] [blame] | 3731 | * Access another process' address space. |
| 3732 | * Source/target buffer must be in kernel space. |
| 3733 | * Do not walk the page tables directly; use get_user_pages(). |
| 3734 | */ |
| 3735 | int access_process_vm(struct task_struct *tsk, unsigned long addr, |
| 3736 | void *buf, int len, int write) |
| 3737 | { |
| 3738 | struct mm_struct *mm; |
| 3739 | int ret; |
| 3740 | |
| 3741 | mm = get_task_mm(tsk); |
| 3742 | if (!mm) |
| 3743 | return 0; |
| 3744 | |
| 3745 | ret = __access_remote_vm(tsk, mm, addr, buf, len, write); |
| 3746 | mmput(mm); |
| 3747 | |
| 3748 | return ret; |
| 3749 | } |
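
/*
 * Illustrative sketch, not part of this file: a hypothetical ptrace-style
 * helper reads a chunk of another task's memory into a kernel buffer.
 * access_process_vm() returns the number of bytes actually transferred,
 * which may be short (or 0) if the range is not fully mapped.
 */
static inline int example_peek_remote(struct task_struct *tsk,
				      unsigned long addr,
				      void *kbuf, int len)
{
	return access_process_vm(tsk, addr, kbuf, len, 0 /* read */);
}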
| 3750 | |
Andi Kleen | 0325291 | 2008-01-30 13:33:18 +0100 | [diff] [blame] | 3751 | /* |
| 3752 | * Print the name of a VMA. |
| 3753 | */ |
| 3754 | void print_vma_addr(char *prefix, unsigned long ip) |
| 3755 | { |
| 3756 | struct mm_struct *mm = current->mm; |
| 3757 | struct vm_area_struct *vma; |
| 3758 | |
Ingo Molnar | e8bff74 | 2008-02-13 20:21:06 +0100 | [diff] [blame] | 3759 | /* |
| 3760 | * Do not print if we are in an atomic |
| 3761 | * context (on exception stacks, etc.): |
| 3762 | */ |
| 3763 | if (preempt_count()) |
| 3764 | return; |
| 3765 | |
Andi Kleen | 0325291 | 2008-01-30 13:33:18 +0100 | [diff] [blame] | 3766 | down_read(&mm->mmap_sem); |
| 3767 | vma = find_vma(mm, ip); |
| 3768 | if (vma && vma->vm_file) { |
| 3769 | struct file *f = vma->vm_file; |
| 3770 | char *buf = (char *)__get_free_page(GFP_KERNEL); |
| 3771 | if (buf) { |
Andy Shevchenko | 2fbc57c | 2012-12-17 16:01:23 -0800 | [diff] [blame] | 3772 | char *p; |
Andi Kleen | 0325291 | 2008-01-30 13:33:18 +0100 | [diff] [blame] | 3773 | |
Miklos Szeredi | 9bf39ab | 2015-06-19 10:29:13 +0200 | [diff] [blame] | 3774 | p = file_path(f, buf, PAGE_SIZE); |
Andi Kleen | 0325291 | 2008-01-30 13:33:18 +0100 | [diff] [blame] | 3775 | if (IS_ERR(p)) |
| 3776 | p = "?"; |
Andy Shevchenko | 2fbc57c | 2012-12-17 16:01:23 -0800 | [diff] [blame] | 3777 | printk("%s%s[%lx+%lx]", prefix, kbasename(p), |
Andi Kleen | 0325291 | 2008-01-30 13:33:18 +0100 | [diff] [blame] | 3778 | vma->vm_start, |
| 3779 | vma->vm_end - vma->vm_start); |
| 3780 | free_page((unsigned long)buf); |
| 3781 | } |
| 3782 | } |
Jeff Liu | 51a07e5 | 2012-07-31 16:43:18 -0700 | [diff] [blame] | 3783 | up_read(&mm->mmap_sem); |
Andi Kleen | 0325291 | 2008-01-30 13:33:18 +0100 | [diff] [blame] | 3784 | } |
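
/*
 * Illustrative sketch, not part of this file: architecture fault handlers
 * typically call print_vma_addr() when logging a user-space crash, so the
 * message ends with something like " in libfoo.so[7f12a4c00000+2000]".
 * The helper below is hypothetical.
 */
static inline void example_log_fault_location(unsigned long ip)
{
	print_vma_addr(" in ", ip);	/* silently returns in atomic context */
}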
Nick Piggin | 3ee1afa | 2008-09-10 13:37:17 +0200 | [diff] [blame] | 3785 | |
Michael S. Tsirkin | 662bbcb | 2013-05-26 17:32:23 +0300 | [diff] [blame] | 3786 | #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP) |
David Hildenbrand | 9ec2353 | 2015-05-11 17:52:07 +0200 | [diff] [blame] | 3787 | void __might_fault(const char *file, int line) |
Nick Piggin | 3ee1afa | 2008-09-10 13:37:17 +0200 | [diff] [blame] | 3788 | { |
Peter Zijlstra | 95156f0 | 2009-01-12 13:02:11 +0100 | [diff] [blame] | 3789 | /* |
| 3790 | * Some code (nfs/sunrpc) uses socket ops on kernel memory while |
| 3791 | * holding the mmap_sem. This is safe because kernel memory doesn't |
| 3792 | * get paged out, so we'll never actually fault, and the |
| 3793 | * annotations below would only generate false positives. |
| 3794 | */ |
| 3795 | if (segment_eq(get_fs(), KERNEL_DS)) |
| 3796 | return; |
David Hildenbrand | 9ec2353 | 2015-05-11 17:52:07 +0200 | [diff] [blame] | 3797 | if (pagefault_disabled()) |
Michael S. Tsirkin | 662bbcb | 2013-05-26 17:32:23 +0300 | [diff] [blame] | 3798 | return; |
David Hildenbrand | 9ec2353 | 2015-05-11 17:52:07 +0200 | [diff] [blame] | 3799 | __might_sleep(file, line, 0); |
| 3800 | #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) |
Michael S. Tsirkin | 662bbcb | 2013-05-26 17:32:23 +0300 | [diff] [blame] | 3801 | if (current->mm) |
Nick Piggin | 3ee1afa | 2008-09-10 13:37:17 +0200 | [diff] [blame] | 3802 | might_lock_read(¤t->mm->mmap_sem); |
David Hildenbrand | 9ec2353 | 2015-05-11 17:52:07 +0200 | [diff] [blame] | 3803 | #endif |
Nick Piggin | 3ee1afa | 2008-09-10 13:37:17 +0200 | [diff] [blame] | 3804 | } |
David Hildenbrand | 9ec2353 | 2015-05-11 17:52:07 +0200 | [diff] [blame] | 3805 | EXPORT_SYMBOL(__might_fault); |
Nick Piggin | 3ee1afa | 2008-09-10 13:37:17 +0200 | [diff] [blame] | 3806 | #endif |
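
/*
 * Illustrative sketch, not part of this file: primitives that may touch
 * user memory annotate themselves with might_fault(), which expands to
 * __might_fault(__FILE__, __LINE__) under the configs above, so lockdep
 * and DEBUG_ATOMIC_SLEEP can flag user access from atomic context.  The
 * helper below is hypothetical; real copy_from_user() already contains
 * this annotation.
 */
static inline unsigned long example_copy_in(void *to,
					    const void __user *from,
					    unsigned long n)
{
	might_fault();
	return copy_from_user(to, from, n);
}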
Andrea Arcangeli | 47ad847 | 2011-01-13 15:46:47 -0800 | [diff] [blame] | 3807 | |
| 3808 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS) |
| 3809 | static void clear_gigantic_page(struct page *page, |
| 3810 | unsigned long addr, |
| 3811 | unsigned int pages_per_huge_page) |
| 3812 | { |
| 3813 | int i; |
| 3814 | struct page *p = page; |
| 3815 | |
| 3816 | might_sleep(); |
| 3817 | for (i = 0; i < pages_per_huge_page; |
| 3818 | i++, p = mem_map_next(p, page, i)) { |
| 3819 | cond_resched(); |
| 3820 | clear_user_highpage(p, addr + i * PAGE_SIZE); |
| 3821 | } |
| 3822 | } |
| 3823 | void clear_huge_page(struct page *page, |
| 3824 | unsigned long addr, unsigned int pages_per_huge_page) |
| 3825 | { |
| 3826 | int i; |
| 3827 | |
| 3828 | if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) { |
| 3829 | clear_gigantic_page(page, addr, pages_per_huge_page); |
| 3830 | return; |
| 3831 | } |
| 3832 | |
| 3833 | might_sleep(); |
| 3834 | for (i = 0; i < pages_per_huge_page; i++) { |
| 3835 | cond_resched(); |
| 3836 | clear_user_highpage(page + i, addr + i * PAGE_SIZE); |
| 3837 | } |
| 3838 | } |
| 3839 | |
| 3840 | static void copy_user_gigantic_page(struct page *dst, struct page *src, |
| 3841 | unsigned long addr, |
| 3842 | struct vm_area_struct *vma, |
| 3843 | unsigned int pages_per_huge_page) |
| 3844 | { |
| 3845 | int i; |
| 3846 | struct page *dst_base = dst; |
| 3847 | struct page *src_base = src; |
| 3848 | |
| 3849 | for (i = 0; i < pages_per_huge_page; ) { |
| 3850 | cond_resched(); |
| 3851 | copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma); |
| 3852 | |
| 3853 | i++; |
| 3854 | dst = mem_map_next(dst, dst_base, i); |
| 3855 | src = mem_map_next(src, src_base, i); |
| 3856 | } |
| 3857 | } |
| 3858 | |
| 3859 | void copy_user_huge_page(struct page *dst, struct page *src, |
| 3860 | unsigned long addr, struct vm_area_struct *vma, |
| 3861 | unsigned int pages_per_huge_page) |
| 3862 | { |
| 3863 | int i; |
| 3864 | |
| 3865 | if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) { |
| 3866 | copy_user_gigantic_page(dst, src, addr, vma, |
| 3867 | pages_per_huge_page); |
| 3868 | return; |
| 3869 | } |
| 3870 | |
| 3871 | might_sleep(); |
| 3872 | for (i = 0; i < pages_per_huge_page; i++) { |
| 3873 | cond_resched(); |
| 3874 | copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma); |
| 3875 | } |
| 3876 | } |
| 3877 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ |
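
/*
 * Illustrative sketch, not part of this file: huge page fault paths use
 * the helpers above on freshly allocated huge pages.  A hypothetical
 * no-page handler would zero the new page before mapping it, passing the
 * faulting address so any cache flushing targets the right user range.
 */
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
static inline void example_prepare_new_huge_page(struct page *page,
						 unsigned long haddr,
						 unsigned int nr_pages)
{
	clear_huge_page(page, haddr, nr_pages);
}
#endif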
Kirill A. Shutemov | 49076ec | 2013-11-14 14:31:51 -0800 | [diff] [blame] | 3878 | |
Olof Johansson | 40b64ac | 2013-12-20 14:28:05 -0800 | [diff] [blame] | 3879 | #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS |
Kirill A. Shutemov | b35f181 | 2014-01-21 15:49:07 -0800 | [diff] [blame] | 3880 | |
| 3881 | static struct kmem_cache *page_ptl_cachep; |
| 3882 | |
| 3883 | void __init ptlock_cache_init(void) |
| 3884 | { |
| 3885 | page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0, |
| 3886 | SLAB_PANIC, NULL); |
| 3887 | } |
| 3888 | |
Peter Zijlstra | 539edb5 | 2013-11-14 14:31:52 -0800 | [diff] [blame] | 3889 | bool ptlock_alloc(struct page *page) |
Kirill A. Shutemov | 49076ec | 2013-11-14 14:31:51 -0800 | [diff] [blame] | 3890 | { |
| 3891 | spinlock_t *ptl; |
| 3892 | |
Kirill A. Shutemov | b35f181 | 2014-01-21 15:49:07 -0800 | [diff] [blame] | 3893 | ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL); |
Kirill A. Shutemov | 49076ec | 2013-11-14 14:31:51 -0800 | [diff] [blame] | 3894 | if (!ptl) |
| 3895 | return false; |
Peter Zijlstra | 539edb5 | 2013-11-14 14:31:52 -0800 | [diff] [blame] | 3896 | page->ptl = ptl; |
Kirill A. Shutemov | 49076ec | 2013-11-14 14:31:51 -0800 | [diff] [blame] | 3897 | return true; |
| 3898 | } |
| 3899 | |
Peter Zijlstra | 539edb5 | 2013-11-14 14:31:52 -0800 | [diff] [blame] | 3900 | void ptlock_free(struct page *page) |
Kirill A. Shutemov | 49076ec | 2013-11-14 14:31:51 -0800 | [diff] [blame] | 3901 | { |
Kirill A. Shutemov | b35f181 | 2014-01-21 15:49:07 -0800 | [diff] [blame] | 3902 | kmem_cache_free(page_ptl_cachep, page->ptl); |
Kirill A. Shutemov | 49076ec | 2013-11-14 14:31:51 -0800 | [diff] [blame] | 3903 | } |
| 3904 | #endif |