/*
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/magic.h>		/* STACK_END_MAGIC		*/
#include <linux/sched.h>		/* test_thread_flag(), ...	*/
#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
#include <linux/module.h>		/* search_exception_table	*/
#include <linux/bootmem.h>		/* max_low_pfn			*/
#include <linux/kprobes.h>		/* __kprobes, ...		*/
#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
#include <linux/perf_event.h>		/* perf_sw_event		*/
#include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
#include <linux/prefetch.h>		/* prefetchw			*/

#include <asm/traps.h>			/* dotraplinkage, ...		*/
#include <asm/pgalloc.h>		/* pgd_*(), ...			*/
#include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/

/*
 * Page fault error code bits:
 *
 *   bit 0 ==	0: no page found	1: protection fault
 *   bit 1 ==	0: read access		1: write access
 *   bit 2 ==	0: kernel-mode access	1: user-mode access
 *   bit 3 ==				1: use of reserved bit detected
 *   bit 4 ==				1: fault was an instruction fetch
 */
enum x86_pf_error_code {

	PF_PROT		=		1 << 0,
	PF_WRITE	=		1 << 1,
	PF_USER		=		1 << 2,
	PF_RSVD		=		1 << 3,
	PF_INSTR	=		1 << 4,
};
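
/*
 * Example decoding: a user-mode store to an unmapped page arrives
 * with error_code == (PF_USER | PF_WRITE) == 0x6. PF_PROT stays
 * clear because no page was present at all, not because the access
 * violated the protection bits of an existing mapping.
 */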

/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
 */
static inline int __kprobes
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, addr) == 1)
			return -1;
	return 0;
}

static inline int __kprobes notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode_vm(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
		      unsigned char opcode, int *prefetch)
{
	unsigned char instr_hi = opcode & 0xf0;
	unsigned char instr_lo = opcode & 0x0f;

	switch (instr_hi) {
	case 0x20:
	case 0x30:
		/*
		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
		 * In 64-bit long mode the CPU signals an invalid-opcode
		 * exception if some of these prefixes are present, so
		 * 64-bit code never gets here anyway.
		 */
		return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
	case 0x40:
		/*
		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes.
		 * We need to figure out under what instruction mode the
		 * instruction was issued. We could check the LDT for lm,
		 * but for now it's good enough to assume that long mode
		 * only uses well-known segments or the kernel.
		 */
		return (!user_mode(regs)) || (regs->cs == __USER_CS);
#endif
	case 0x60:
		/* 0x64 thru 0x67 are valid prefixes in all modes. */
		return (instr_lo & 0xC) == 0x4;
	case 0xF0:
		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
		return !instr_lo || (instr_lo>>1) == 1;
	case 0x00:
		/* Prefetch instruction is 0x0F0D or 0x0F18 */
		if (probe_kernel_address(instr, opcode))
			return 0;

		*prefetch = (instr_lo == 0xF) &&
			    (opcode == 0x0D || opcode == 0x18);
		return 0;
	default:
		return 0;
	}
}
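
/*
 * Worked example: "prefetchnta (%rax)" encodes as 0f 18 00. The scan
 * in is_prefetch() below reads the 0x0F byte, lands in the 0x00 case
 * above (instr_lo == 0xF), probes the following byte, and finds 0x18,
 * so the fault is flagged as a prefetch and quietly ignored.
 */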

static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
	unsigned char *max_instr;
	unsigned char *instr;
	int prefetch = 0;

	/*
	 * If it was an exec (instruction fetch) fault on an NX page, then
	 * do not ignore the fault:
	 */
	if (error_code & PF_INSTR)
		return 0;

	instr = (void *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
		return 0;

	while (instr < max_instr) {
		unsigned char opcode;

		if (probe_kernel_address(instr, opcode))
			break;

		instr++;

		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
			break;
	}
	return prefetch;
}

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk, int fault)
{
	unsigned lsb = 0;
	siginfo_t info;

	info.si_signo	= si_signo;
	info.si_errno	= 0;
	info.si_code	= si_code;
	info.si_addr	= (void __user *)address;
	if (fault & VM_FAULT_HWPOISON_LARGE)
		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
	if (fault & VM_FAULT_HWPOISON)
		lsb = PAGE_SHIFT;
	info.si_addr_lsb = lsb;

	force_sig_info(si_signo, &info, tsk);
}
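
/*
 * Note: si_addr_lsb tells the signal handler how coarse the reported
 * address is: PAGE_SHIFT for a poisoned 4K page, or the huge-page
 * shift when a hardware-poisoned huge page is involved.
 */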

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */
	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

	return pmd_k;
}

void vmalloc_sync_all(void)
{
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	for (address = VMALLOC_START & PMD_MASK;
	     address >= TASK_SIZE && address < FIXADDR_TOP;
	     address += PMD_SIZE) {
		struct page *page;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			spinlock_t *pgt_lock;
			pmd_t *ret;

			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;

			spin_lock(pgt_lock);
			ret = vmalloc_sync_one(page_address(page), address);
			spin_unlock(pgt_lock);

			if (!ret)
				break;
		}
		spin_unlock(&pgd_lock);
	}
}

/*
 * 32-bit:
 *
 *   Handle a fault on the vmalloc or module mapping area
 */
static noinline __kprobes int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	WARN_ON_ONCE(in_nmi());

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

/*
 * Did it hit the DOS screen memory VA from vm86 mode?
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
	unsigned long bit;

	if (!v8086_mode(regs))
		return;

	bit = (address - 0xA0000) >> PAGE_SHIFT;
	if (bit < 32)
		tsk->thread.screen_bitmap |= 1 << bit;
}
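
/*
 * Each bit in screen_bitmap covers one 4K page starting at 0xA0000,
 * so the 32 bits span the 128 KB legacy video window 0xA0000-0xBFFFF
 * that DOS programs poke at from vm86 mode.
 */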

static bool low_pfn(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(address)];
	pmd_t *pmd;
	pte_t *pte;

#ifdef CONFIG_X86_PAE
	printk("*pdpt = %016Lx ", pgd_val(*pgd));
	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
		goto out;
#endif
	pmd = pmd_offset(pud_offset(pgd, address), address);
	printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already:
	 */
	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
	printk("\n");
}

#else /* CONFIG_X86_64: */

void vmalloc_sync_all(void)
{
	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
}

/*
 * 64-bit:
 *
 *   Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static noinline __kprobes int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	WARN_ON_ONCE(in_nmi());

	/*
	 * Copy kernel mappings over when needed. This can also
	 * happen within a race in page table update. In the latter
	 * case just flush:
	 */
	pgd = pgd_offset(current->active_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;

	if (pgd_none(*pgd))
		set_pgd(pgd, *pgd_ref);
	else
		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

	/*
	 * Below here mismatches are bugs because these lower tables
	 * are shared:
	 */

	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;

	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
		BUG();

	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;

	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();

	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;

	pte = pte_offset_kernel(pmd, address);

	/*
	 * Don't use pte_page here, because the mappings can point
	 * outside mem_map, and the NUMA hash lookup cannot handle
	 * that:
	 */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();

	return 0;
}

static const char errata93_warning[] =
KERN_ERR
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";

/*
 * No vm86 mode in 64-bit mode:
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
	pgd_t *pgd = base + pgd_index(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (bad_address(pgd))
		goto bad;

	printk("PGD %lx ", pgd_val(*pgd));

	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (bad_address(pud))
		goto bad;

	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd))
		goto bad;

	printk("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte))
		goto bad;

	printk("PTE %lx", pte_val(*pte));
out:
	printk("\n");
	return;
bad:
	printk("BAD\n");
}

#endif /* CONFIG_X86_64 */

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64-bit RIP register on C stepping K8.
 *
 * A lot of BIOSes that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32 bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		printk_once(errata93_warning);
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}

/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return. Any code
 * segment in the LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
		return 1;
#endif
	return 0;
}

static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	unsigned long nr;

	/*
	 * Pentium F0 0F C7 C8 bug workaround:
	 */
	if (boot_cpu_data.f00f_bug) {
		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return 1;
		}
	}
#endif
	return 0;
}
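
/*
 * Background: the f00f workaround elsewhere in the kernel maps the
 * IDT through a read-only alias, so the lockup that the invalid
 * "lock cmpxchg8b %eax" sequence (F0 0F C7 C8) would otherwise cause
 * becomes a page fault on the IDT itself. Descriptors are 8 bytes,
 * so nr == 6 means the CPU was fetching the invalid-opcode gate, and
 * we hand the fault to do_invalid_op() instead.
 */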

static const char nx_warning[] = KERN_CRIT
"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";

static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	if (!oops_may_print())
		return;

	if (error_code & PF_INSTR) {
		unsigned int level;

		pte_t *pte = lookup_address(address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			printk(nx_warning, current_uid());
	}

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %p\n", (void *) address);
	printk(KERN_ALERT "IP:");
	printk_address(regs->ip, 1);

	dump_pagetable(address);
}

static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
	    unsigned long address)
{
	struct task_struct *tsk;
	unsigned long flags;
	int sig;

	flags = oops_begin();
	tsk = current;
	sig = SIGKILL;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       tsk->comm, address);
	dump_pagetable(address);

	tsk->thread.cr2		= address;
	tsk->thread.trap_no	= 14;
	tsk->thread.error_code	= error_code;

	if (__die("Bad pagetable", regs, error_code))
		sig = 0;

	oops_end(flags, regs, sig);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address)
{
	struct task_struct *tsk = current;
	unsigned long *stackend;
	unsigned long flags;
	int sig;

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * 32-bit:
	 *
	 *   Valid to do another page fault here, because if this fault
	 *   had been triggered by is_prefetch fixup_exception would have
	 *   handled it.
	 *
	 * 64-bit:
	 *
	 *   Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	if (is_errata93(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice:
	 */
	flags = oops_begin();

	show_fault_oops(regs, error_code, address);

	stackend = end_of_stack(tsk);
	if (tsk != &init_task && *stackend != STACK_END_MAGIC)
		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

	tsk->thread.cr2		= address;
	tsk->thread.trap_no	= 14;
	tsk->thread.error_code	= error_code;

	sig = SIGKILL;
	if (__die("Oops", regs, error_code))
		sig = 0;

	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_EMERG "CR2: %016lx\n", address);

	oops_end(flags, regs, sig);
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
		tsk->comm, task_pid_nr(tsk), address,
		(void *)regs->ip, (void *)regs->sp, error_code);

	print_vma_addr(KERN_CONT " in ", regs->ip);

	printk(KERN_CONT "\n");
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space:
		 */
		if (is_prefetch(regs, error_code, address))
			return;

		if (is_errata100(regs, address))
			return;

		if (unlikely(show_unhandled_signals))
			show_signal_msg(regs, error_code, address, tsk);

		/* Kernel addresses are always protection faults: */
		tsk->thread.cr2		= address;
		tsk->thread.error_code	= error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no	= 14;

		force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);

		return;
	}

	if (is_f00f_bug(regs, address))
		return;

	no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */
static void
out_of_memory(struct pt_regs *regs, unsigned long error_code,
	      unsigned long address)
{
	/*
	 * We ran out of memory, call the OOM killer, and return to userspace
	 * (which will retry the fault, or kill us if we got oom-killed):
	 */
	up_read(&current->mm->mmap_sem);

	pagefault_out_of_memory();
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
	  unsigned int fault)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code = BUS_ADRERR;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!(error_code & PF_USER)) {
		no_context(regs, error_code, address);
		return;
	}

	/* User-space => ok to do another page fault: */
	if (is_prefetch(regs, error_code, address))
		return;

	tsk->thread.cr2		= address;
	tsk->thread.error_code	= error_code;
	tsk->thread.trap_no	= 14;

#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		printk(KERN_ERR
	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			tsk->comm, tsk->pid, address);
		code = BUS_MCEERR_AR;
	}
#endif
	force_sig_info_fault(SIGBUS, code, address, tsk, fault);
}

static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, unsigned int fault)
{
	/*
	 * The page fault was interrupted by SIGKILL; there is no
	 * reason to continue handling it.
	 */
	if (fatal_signal_pending(current)) {
		if (!(fault & VM_FAULT_RETRY))
			up_read(&current->mm->mmap_sem);
		if (!(error_code & PF_USER))
			no_context(regs, error_code, address);
		return 1;
	}
	if (!(fault & VM_FAULT_ERROR))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!(error_code & PF_USER)) {
			up_read(&current->mm->mmap_sem);
			no_context(regs, error_code, address);
			return 1;
		}

		out_of_memory(regs, error_code, address);
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			do_sigbus(regs, error_code, address, fault);
		else
			BUG();
	}
	return 1;
}

static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & PF_WRITE) && !pte_write(*pte))
		return 0;

	if ((error_code & PF_INSTR) && !pte_exec(*pte))
		return 0;

	return 1;
}
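
/*
 * For example, a write fault (PF_WRITE set) against a PTE that is
 * already writable is spurious: the TLB simply held a stale read-only
 * copy of the entry, and the retried access will pick up the current
 * one from the page tables.
 */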

/*
 * Handle a spurious fault caused by a stale TLB entry.
 *
 * This allows us to lazily refresh the TLB when increasing the
 * permissions of a kernel page (RO -> RW or NX -> X). Doing it
 * eagerly is very expensive since that implies doing a full
 * cross-processor TLB flush, even if no stale TLB entries exist
 * on other processors.
 *
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 */
static noinline __kprobes int
spurious_fault(unsigned long error_code, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	/* Reserved-bit violation or user access to kernel space? */
	if (error_code & (PF_USER | PF_RSVD))
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return 0;

	if (pud_large(*pud))
		return spurious_fault_check(error_code, (pte_t *) pud);

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return spurious_fault_check(error_code, (pte_t *) pmd);

	/*
	 * Note: don't use pte_present() here, since it returns true
	 * if the _PAGE_PROTNONE bit is set. However, this aliases the
	 * _PAGE_GLOBAL bit, which for kernel pages gives false positives
	 * when CONFIG_DEBUG_PAGEALLOC is used.
	 */
	pte = pte_offset_kernel(pmd, address);
	if (!(pte_flags(*pte) & _PAGE_PRESENT))
		return 0;

	ret = spurious_fault_check(error_code, pte);
	if (!ret)
		return 0;

	/*
	 * Make sure we have permissions in PMD.
	 * If not, then there's a bug in the page tables:
	 */
	ret = spurious_fault_check(error_code, (pte_t *) pmd);
	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");

	return ret;
}

int show_unhandled_signals = 1;

static inline int
access_error(unsigned long error_code, struct vm_area_struct *vma)
{
	if (error_code & PF_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* read, present: */
	if (unlikely(error_code & PF_PROT))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}
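
/*
 * Example: a store into a page mmap()ed with PROT_READ only arrives
 * with PF_WRITE set and no VM_WRITE on the vma, so access_error()
 * returns 1 and the task ends up with SIGSEGV, si_code SEGV_ACCERR.
 */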

static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE_MAX;
}
| 968 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 969 | /* |
| 970 | * This routine handles page faults. It determines the address, |
| 971 | * and the problem, and then passes it off to one of the appropriate |
| 972 | * routines. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 973 | */ |
Ingo Molnar | c3731c6 | 2009-02-20 23:22:34 +0100 | [diff] [blame] | 974 | dotraplinkage void __kprobes |
| 975 | do_page_fault(struct pt_regs *regs, unsigned long error_code) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 976 | { |
Harvey Harrison | 33cb524 | 2008-01-30 13:32:19 +0100 | [diff] [blame] | 977 | struct vm_area_struct *vma; |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 978 | struct task_struct *tsk; |
| 979 | unsigned long address; |
| 980 | struct mm_struct *mm; |
Harvey Harrison | f8c2ee2 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 981 | int fault; |
Michel Lespinasse | d065bd8 | 2010-10-26 14:21:57 -0700 | [diff] [blame] | 982 | int write = error_code & PF_WRITE; |
KOSAKI Motohiro | 37b23e0 | 2011-05-24 17:11:30 -0700 | [diff] [blame] | 983 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | |
Michel Lespinasse | d065bd8 | 2010-10-26 14:21:57 -0700 | [diff] [blame] | 984 | (write ? FAULT_FLAG_WRITE : 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 985 | |
Arjan van de Ven | a9ba9a3 | 2006-03-25 16:30:10 +0100 | [diff] [blame] | 986 | tsk = current; |
| 987 | mm = tsk->mm; |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 988 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 989 | /* Get the faulting address: */ |
Glauber de Oliveira Costa | f51c945 | 2007-07-22 11:12:29 +0200 | [diff] [blame] | 990 | address = read_cr2(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 991 | |
Vegard Nossum | f856129 | 2008-04-04 00:53:23 +0200 | [diff] [blame] | 992 | /* |
| 993 | * Detect and handle instructions that would cause a page fault for |
| 994 | * both a tracked kernel page and a userspace page. |
| 995 | */ |
| 996 | if (kmemcheck_active(regs)) |
| 997 | kmemcheck_hide(regs); |
Ingo Molnar | 5dfaf90 | 2009-06-16 10:23:32 +0200 | [diff] [blame] | 998 | prefetchw(&mm->mmap_sem); |
Vegard Nossum | f856129 | 2008-04-04 00:53:23 +0200 | [diff] [blame] | 999 | |
Pekka Paalanen | 0fd0e3d | 2008-05-12 21:20:57 +0200 | [diff] [blame] | 1000 | if (unlikely(kmmio_fault(regs, address))) |
Pekka Paalanen | 8606978 | 2008-05-12 21:20:56 +0200 | [diff] [blame] | 1001 | return; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1002 | |
| 1003 | /* |
| 1004 | * We fault-in kernel-space virtual memory on-demand. The |
| 1005 | * 'reference' page table is init_mm.pgd. |
| 1006 | * |
| 1007 | * NOTE! We MUST NOT take any locks for this case. We may |
| 1008 | * be in an interrupt or a critical region, and should |
| 1009 | * only copy the information from the master page table, |
| 1010 | * nothing more. |
| 1011 | * |
| 1012 |  * This verifies that the fault happens in kernel space, i.e.
| 1013 |  * fault_in_kernel_space(address), and that it was not a user-mode,
Jan Beulich | 8b1bde9 | 2006-01-11 22:42:23 +0100 | [diff] [blame] | 1014 |  * protection, or reserved-bit fault: (error_code & (PF_USER | PF_PROT | PF_RSVD)) == 0.
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1015 | */ |
Hiroshi Shimamoto | 0973a06 | 2009-02-04 15:24:09 -0800 | [diff] [blame] | 1016 | if (unlikely(fault_in_kernel_space(address))) { |
Vegard Nossum | f856129 | 2008-04-04 00:53:23 +0200 | [diff] [blame] | 1017 | if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) { |
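| | 			/*
| | 			 * vmalloc_fault() fills in the missing page-table
| | 			 * entries for this address from the init_mm
| | 			 * reference tables; >= 0 means it resolved the
| | 			 * fault:
| | 			 */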
| 1018 | if (vmalloc_fault(address) >= 0) |
| 1019 | return; |
| 1020 | |
| 1021 | if (kmemcheck_fault(regs, address, error_code)) |
| 1022 | return; |
| 1023 | } |
Jeremy Fitzhardinge | 5b727a3 | 2008-01-30 13:34:11 +0100 | [diff] [blame] | 1024 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1025 | 		/*
| | 		 * Can handle a stale RO->RW TLB: x86 may skip the TLB
| | 		 * flush when permissions are only relaxed, so this CPU
| | 		 * can still hold a stale read-only entry for a page
| | 		 * that has since been made writable:
| | 		 */
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 1026 | if (spurious_fault(error_code, address)) |
Jeremy Fitzhardinge | 5b727a3 | 2008-01-30 13:34:11 +0100 | [diff] [blame] | 1027 | return; |
| 1028 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1029 | /* kprobes don't want to hook the spurious faults: */ |
Masami Hiramatsu | 9be260a | 2009-02-05 17:12:39 -0500 | [diff] [blame] | 1030 | if (notify_page_fault(regs)) |
| 1031 | return; |
Harvey Harrison | f8c2ee2 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1032 | /* |
| 1033 |  * Don't take the mm semaphore here. If we fix up a prefetch
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1034 | * fault we could otherwise deadlock: |
Harvey Harrison | f8c2ee2 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1035 | */ |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 1036 | bad_area_nosemaphore(regs, error_code, address); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1037 | |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 1038 | return; |
Harvey Harrison | f8c2ee2 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1039 | } |
| 1040 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1041 | /* kprobes don't want to hook the spurious faults: */ |
Ingo Molnar | f8a6b2b | 2009-02-13 09:44:22 +0100 | [diff] [blame] | 1042 | if (unlikely(notify_page_fault(regs))) |
Masami Hiramatsu | 9be260a | 2009-02-05 17:12:39 -0500 | [diff] [blame] | 1043 | return; |
Harvey Harrison | f8c2ee2 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1044 | /* |
Linus Torvalds | 891cffb | 2008-10-12 13:16:12 -0700 | [diff] [blame] | 1045 |  * It's safe to allow IRQs after cr2 has been saved (a nested fault
| 1046 |  * would clobber it) and the vmalloc fault has been handled.
| 1047 |  *
| 1048 |  * User-mode registers count as a user access even if the actual
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1049 |  * fault was caused by a kernel bug or a CPU erratum:
Harvey Harrison | f8c2ee2 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1050 | */ |
Linus Torvalds | 891cffb | 2008-10-12 13:16:12 -0700 | [diff] [blame] | 1051 | if (user_mode_vm(regs)) { |
| 1052 | local_irq_enable(); |
| 1053 | error_code |= PF_USER; |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1054 | } else { |
| 1055 | if (regs->flags & X86_EFLAGS_IF) |
| 1056 | local_irq_enable(); |
| 1057 | } |
Jan Beulich | 8c914cb | 2006-03-25 16:29:40 +0100 | [diff] [blame] | 1058 | |
Andi Kleen | 66c5815 | 2006-01-11 22:44:09 +0100 | [diff] [blame] | 1059 | if (unlikely(error_code & PF_RSVD)) |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 1060 | pgtable_bad(regs, error_code, address); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1061 | |
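| | 	/* Count this fault for the 'page-faults' software perf event: */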
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 1062 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); |
Peter Zijlstra | 7dd1fcc | 2009-03-13 12:21:33 +0100 | [diff] [blame] | 1063 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1064 | /* |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1065 |  * If we're in an interrupt, have no user context (a kernel thread),
| 1066 |  * or are running in an atomic region, then we must not take the
| |  * fault: handle_mm_fault() may sleep.
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1067 | */ |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 1068 | if (unlikely(in_atomic() || !mm)) { |
| 1069 | bad_area_nosemaphore(regs, error_code, address); |
| 1070 | return; |
| 1071 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1072 | |
Ingo Molnar | 3a1dfe6 | 2008-10-13 17:49:02 +0200 | [diff] [blame] | 1073 | /* |
| 1074 | * When running in the kernel we expect faults to occur only to |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1075 | * addresses in user space. All other faults represent errors in |
| 1076 | * the kernel and should generate an OOPS. Unfortunately, in the |
| 1077 |  * case of an erroneous fault occurring in a code path that already
| 1078 |  * holds mmap_sem, we will deadlock attempting to validate the fault
| 1079 |  * against the address space. Luckily the kernel only validly
| 1080 |  * references user space from well-defined areas of code, which are
| 1081 | * listed in the exceptions table. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1082 | * |
| 1083 | * As the vast majority of faults will be valid we will only perform |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1084 | * the source reference check when there is a possibility of a |
| 1085 | * deadlock. Attempt to lock the address space, if we cannot we then |
| 1086 | * validate the source. If this is invalid we can skip the address |
| 1087 | * space check, thus avoiding the deadlock: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1088 | */ |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 1089 | if (unlikely(!down_read_trylock(&mm->mmap_sem))) { |
Andi Kleen | 66c5815 | 2006-01-11 22:44:09 +0100 | [diff] [blame] | 1090 | if ((error_code & PF_USER) == 0 && |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 1091 | !search_exception_tables(regs->ip)) { |
| 1092 | bad_area_nosemaphore(regs, error_code, address); |
| 1093 | return; |
| 1094 | } |
Michel Lespinasse | d065bd8 | 2010-10-26 14:21:57 -0700 | [diff] [blame] | 1095 | retry: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1096 | down_read(&mm->mmap_sem); |
Peter Zijlstra | 0100607 | 2009-01-29 16:02:12 +0100 | [diff] [blame] | 1097 | } else { |
| 1098 | /* |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1099 | * The above down_read_trylock() might have succeeded in |
| 1100 | * which case we'll have missed the might_sleep() from |
| 1101 | * down_read(): |
Peter Zijlstra | 0100607 | 2009-01-29 16:02:12 +0100 | [diff] [blame] | 1102 | */ |
| 1103 | might_sleep(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1104 | } |
| 1105 | |
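| | 	/*
| | 	 * find_vma() returns the first vma with vm_end > address; if
| | 	 * the address lies below vma->vm_start we are in a hole, which
| | 	 * is valid only when the vma is a downward-growing stack:
| | 	 */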
| 1106 | vma = find_vma(mm, address); |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 1107 | if (unlikely(!vma)) { |
| 1108 | bad_area(regs, error_code, address); |
| 1109 | return; |
| 1110 | } |
| 1111 | if (likely(vma->vm_start <= address)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1112 | goto good_area; |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 1113 | if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) { |
| 1114 | bad_area(regs, error_code, address); |
| 1115 | return; |
| 1116 | } |
Harvey Harrison | 33cb524 | 2008-01-30 13:32:19 +0100 | [diff] [blame] | 1117 | if (error_code & PF_USER) { |
Harvey Harrison | 6f4d368 | 2008-01-30 13:33:13 +0100 | [diff] [blame] | 1118 | /* |
| 1119 | * Accessing the stack below %sp is always a bug. |
| 1120 | * The large cushion allows instructions like enter |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1121 | * and pusha to work. ("enter $65535, $31" pushes |
Harvey Harrison | 6f4d368 | 2008-01-30 13:33:13 +0100 | [diff] [blame] | 1122 | * 32 pointers and then decrements %sp by 65535.) |
Chuck Ebbert | 03fdc2c | 2006-06-26 13:59:50 +0200 | [diff] [blame] | 1123 | */ |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 1124 | if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) { |
| 1125 | bad_area(regs, error_code, address); |
| 1126 | return; |
| 1127 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1128 | } |
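| | 	/*
| | 	 * Grow the stack vma downwards so that it covers the faulting
| | 	 * address; expand_stack() fails if that would exceed the stack
| | 	 * rlimit:
| | 	 */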
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 1129 | if (unlikely(expand_stack(vma, address))) { |
| 1130 | bad_area(regs, error_code, address); |
| 1131 | return; |
| 1132 | } |
| 1133 | |
| 1134 | /* |
| 1135 | * Ok, we have a good vm_area for this memory access, so |
| 1136 |  * we can handle it.
| 1137 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1138 | good_area: |
Michel Lespinasse | 68da336 | 2010-10-26 14:21:58 -0700 | [diff] [blame] | 1139 | if (unlikely(access_error(error_code, vma))) { |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 1140 | bad_area_access_error(regs, error_code, address); |
| 1141 | return; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1142 | } |
| 1143 | |
| 1144 | /* |
| 1145 | * If for any reason at all we couldn't handle the fault, |
| 1146 | * make sure we exit gracefully rather than endlessly redo |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1147 | * the fault: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1148 | */ |
Michel Lespinasse | d065bd8 | 2010-10-26 14:21:57 -0700 | [diff] [blame] | 1149 | fault = handle_mm_fault(mm, vma, address, flags); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1150 | |
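| | 	/*
| | 	 * mm_fault_error() handles the OOM and SIGBUS cases, and a
| | 	 * fatal signal received while we slept in a killable wait; it
| | 	 * returns nonzero once the fault is fully dealt with. A plain
| | 	 * VM_FAULT_RETRY falls through to the retry logic below:
| | 	 */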
KOSAKI Motohiro | b80ef10 | 2011-05-26 17:12:12 +0900 | [diff] [blame] | 1151 | if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) { |
| 1152 | if (mm_fault_error(regs, error_code, address, fault)) |
| 1153 | return; |
KOSAKI Motohiro | 37b23e0 | 2011-05-24 17:11:30 -0700 | [diff] [blame] | 1154 | } |
| 1155 | |
| 1156 | /* |
Michel Lespinasse | d065bd8 | 2010-10-26 14:21:57 -0700 | [diff] [blame] | 1157 | * Major/minor page fault accounting is only done on the |
| 1158 | * initial attempt. If we go through a retry, it is extremely |
| 1159 | * likely that the page will be found in page cache at that point. |
| 1160 | */ |
| 1161 | if (flags & FAULT_FLAG_ALLOW_RETRY) { |
| 1162 | if (fault & VM_FAULT_MAJOR) { |
| 1163 | tsk->maj_flt++; |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 1164 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, |
Michel Lespinasse | d065bd8 | 2010-10-26 14:21:57 -0700 | [diff] [blame] | 1165 | regs, address); |
| 1166 | } else { |
| 1167 | tsk->min_flt++; |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 1168 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, |
Michel Lespinasse | d065bd8 | 2010-10-26 14:21:57 -0700 | [diff] [blame] | 1169 | regs, address); |
| 1170 | } |
| 1171 | if (fault & VM_FAULT_RETRY) { |
| 1172 | 			/*
| | 			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
| 1173 | 			 * of starvation.
| | 			 */
| 1174 | flags &= ~FAULT_FLAG_ALLOW_RETRY; |
| 1175 | goto retry; |
| 1176 | } |
Peter Zijlstra | ac17dc8 | 2009-03-13 12:21:34 +0100 | [diff] [blame] | 1177 | } |
Harvey Harrison | d729ab3 | 2008-01-30 13:33:23 +0100 | [diff] [blame] | 1178 | |
Ingo Molnar | 8c938f9 | 2009-02-20 22:12:18 +0100 | [diff] [blame] | 1179 | check_v8086_mode(regs, address, tsk); |
| 1180 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1181 | up_read(&mm->mmap_sem); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1182 | } |
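| |
| | /*
| |  * A minimal user-space sketch (not part of fault.c; assumes a
| |  * Linux/glibc environment) showing the min_flt accounting above from
| |  * the other side: the first write to each fresh anonymous page takes
| |  * a minor fault, visible via getrusage().
| |  */
| | #include <stdio.h>
| | #include <string.h>
| | #include <sys/mman.h>
| | #include <sys/resource.h>
| |
| | int main(void)
| | {
| | 	struct rusage before, after;
| | 	size_t len = 64 * 4096;		/* 64 pages */
| | 	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
| | 		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
| |
| | 	if (p == MAP_FAILED)
| | 		return 1;
| |
| | 	getrusage(RUSAGE_SELF, &before);
| | 	memset(p, 0xff, len);		/* fault in every page */
| | 	getrusage(RUSAGE_SELF, &after);
| |
| | 	/* Expect roughly one minor fault per 4 KiB page: */
| | 	printf("minor faults taken: %ld\n",
| | 	       after.ru_minflt - before.ru_minflt);
| |
| | 	munmap(p, len);
| | 	return 0;
| | }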