/*
 * Page fault handler for SH with an MMU.
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2003 - 2012  Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 *  Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <asm/io_trapped.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

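/*
 * Give any registered kprobes handler a chance to claim a kernel-mode
 * fault before normal handling runs.  Preemption is disabled around
 * the check so the per-CPU kprobe state can't change under us.
 */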
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

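/*
 * Fill out the siginfo for a fault at 'address' and deliver the signal
 * to the given task, forced so that it cannot be blocked or ignored.
 */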
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo	= si_signo;
	info.si_errno	= 0;
	info.si_code	= si_code;
	info.si_addr	= (void __user *)address;

	force_sig_info(si_signo, &info, tsk);
}

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
static void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (mm) {
		pgd = mm->pgd;
	} else {
		pgd = get_TTB();

		if (unlikely(!pgd))
			pgd = swapper_pg_dir;
	}

	printk(KERN_ALERT "pgd = %p\n", pgd);
	pgd += pgd_index(addr);
	printk(KERN_ALERT "[%08lx] *pgd=%0*Lx", addr,
	       (u32)(sizeof(*pgd) * 2), (u64)pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			printk("(bad)");
			break;
		}

		pud = pud_offset(pgd, addr);
		if (PTRS_PER_PUD != 1)
			printk(", *pud=%0*Lx", (u32)(sizeof(*pud) * 2),
			       (u64)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			printk("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			printk(", *pmd=%0*Lx", (u32)(sizeof(*pmd) * 2),
			       (u64)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			printk("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_kernel(pmd, addr);
		printk(", *pte=%0*Lx", (u32)(sizeof(*pte) * 2),
		       (u64)pte_val(*pte));
	} while (0);

	printk("\n");
}

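/*
 * Copy the kernel mapping covering 'address' from the init_mm
 * reference page table into 'pgd'.  Returns the kernel pmd if a
 * missing entry was filled in from the reference table, or NULL if
 * the fault cannot be explained by a stale top-level entry (either
 * the reference table has no mapping, or the tables were already
 * in sync).
 */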
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else {
		/*
		 * The page tables are fully synchronised so there must
		 * be another reason for the fault. Return NULL here to
		 * signal that we have not taken care of the fault.
		 */
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
		return NULL;
	}

	return pmd_k;
}

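/*
 * When CONFIG_SH_STORE_QUEUES is enabled the store queue mapping space
 * needs to be demand-faulted as well, so the upper bound checked by
 * vmalloc_fault() is widened from VMALLOC_END up to P3_ADDR_MAX.
 */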
#ifdef CONFIG_SH_STORE_QUEUES
#define __FAULT_ADDR_LIMIT	P3_ADDR_MAX
#else
#define __FAULT_ADDR_LIMIT	VMALLOC_END
#endif

/*
 * Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd_k;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc/module/P3 area: */
	if (!(address >= VMALLOC_START && address < __FAULT_ADDR_LIMIT))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_k = get_TTB();
	pmd_k = vmalloc_sync_one(pgd_k, address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

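/*
 * Print the oops banner for an unhandled kernel fault, including the
 * faulting PC and a dump of the page table walk for the address.
 */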
static void
show_fault_oops(struct pt_regs *regs, unsigned long address)
{
	if (!oops_may_print())
		return;

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %08lx\n", address);
	printk(KERN_ALERT "PC:");
	printk_address(regs->pc, 1);

	show_pte(NULL, address);
}

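/*
 * Fault in kernel context with no user recourse: try the exception
 * fixup tables and trapped I/O emulation first, and oops if neither
 * path claims the fault.
 */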
static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address)
{
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	if (handle_trapped_io(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	show_fault_oops(regs, address);

	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}

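/*
 * Fault against an unmapped or inaccessible address, called with
 * mmap_sem already released (or never taken).  User-mode accesses
 * simply get a SIGSEGV; kernel-mode accesses fall through to
 * no_context().
 */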
static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		force_sig_info_fault(SIGSEGV, si_code, address, tsk);

		return;
	}

	no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

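/*
 * handle_mm_fault() returned VM_FAULT_SIGBUS: the access hit a valid
 * vma but could not be serviced.  Kernel faults go through the
 * exception fixup path; user faults get a SIGBUS.
 */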
static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!user_mode(regs))
		no_context(regs, error_code, address);

	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}

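/*
 * Pick apart an error or retry result from handle_mm_fault().
 * Returns 1 if the fault has been fully dealt with here, 0 if the
 * caller should continue on the success path.
 */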
static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, unsigned int fault)
{
	/*
	 * Pagefault was interrupted by SIGKILL. We have no reason to
	 * continue pagefault.
	 */
	if (fatal_signal_pending(current)) {
		if (!(fault & VM_FAULT_RETRY))
			up_read(&current->mm->mmap_sem);
		if (!user_mode(regs))
			no_context(regs, error_code, address);
		return 1;
	}

	if (!(fault & VM_FAULT_ERROR))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!user_mode(regs)) {
			up_read(&current->mm->mmap_sem);
			no_context(regs, error_code, address);
			return 1;
		}
		up_read(&current->mm->mmap_sem);

		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & VM_FAULT_SIGBUS)
			do_sigbus(regs, error_code, address);
		else if (fault & VM_FAULT_SIGSEGV)
			bad_area(regs, error_code, address);
		else
			BUG();
	}

	return 1;
}

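/*
 * Check the faulting access against the vma's permissions: writes
 * need VM_WRITE, instruction fetches (ITLB misses) need VM_EXEC, and
 * any other access needs at least one of read/write/exec.
 */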
static inline int access_error(int error_code, struct vm_area_struct *vma)
{
	if (error_code & FAULT_CODE_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* ITLB miss on NX page */
	if (unlikely((error_code & FAULT_CODE_ITLB) &&
		     !(vma->vm_flags & VM_EXEC)))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}

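/* Anything at or above TASK_SIZE is treated as a kernel-space fault. */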
static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE;
}

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long error_code,
					unsigned long address)
{
	unsigned long vec;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	tsk = current;
	mm = tsk->mm;
	vec = lookup_exception_vector();

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (vmalloc_fault(address) >= 0)
			return;
		if (notify_page_fault(regs, vec))
			return;

		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	if (unlikely(notify_page_fault(regs, vec)))
		return;

	/* Only enable interrupts if they were on before the fault */
	if ((regs->sr & SR_IMASK) != SR_IMASK)
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * with pagefaults disabled then we must not take the fault:
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

retry:
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	if (unlikely(access_error(error_code, vma))) {
		bad_area_access_error(regs, error_code, address);
		return;
	}

	set_thread_fault_code(error_code);

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (error_code & FAULT_CODE_WRITE)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
		if (mm_fault_error(regs, error_code, address, fault))
			return;

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
}