Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2 | * Copyright (C) 1995 Linus Torvalds |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 3 | * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5 | #include <linux/interrupt.h> |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 6 | #include <linux/mmiotrace.h> |
| 7 | #include <linux/bootmem.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 8 | #include <linux/compiler.h> |
Harvey Harrison | c61e211 | 2008-01-30 13:34:11 +0100 | [diff] [blame] | 9 | #include <linux/highmem.h> |
Prasanna S Panchamukhi | 0f2fbdc | 2005-09-06 15:19:28 -0700 | [diff] [blame] | 10 | #include <linux/kprobes.h> |
Andi Kleen | ab2bf0c | 2006-12-07 02:14:06 +0100 | [diff] [blame] | 11 | #include <linux/uaccess.h> |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 12 | #include <linux/vmalloc.h> |
| 13 | #include <linux/vt_kern.h> |
| 14 | #include <linux/signal.h> |
| 15 | #include <linux/kernel.h> |
| 16 | #include <linux/ptrace.h> |
| 17 | #include <linux/string.h> |
| 18 | #include <linux/module.h> |
Christoph Hellwig | 1eeb66a | 2007-05-08 00:27:03 -0700 | [diff] [blame] | 19 | #include <linux/kdebug.h> |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 20 | #include <linux/errno.h> |
Eric Sandeen | 7c9f886 | 2008-04-22 16:38:23 -0500 | [diff] [blame] | 21 | #include <linux/magic.h> |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 22 | #include <linux/sched.h> |
| 23 | #include <linux/types.h> |
| 24 | #include <linux/init.h> |
| 25 | #include <linux/mman.h> |
| 26 | #include <linux/tty.h> |
| 27 | #include <linux/smp.h> |
| 28 | #include <linux/mm.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 29 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 30 | #include <asm-generic/sections.h> |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 31 | |
| 32 | #include <asm/tlbflush.h> |
| 33 | #include <asm/pgalloc.h> |
| 34 | #include <asm/segment.h> |
| 35 | #include <asm/system.h> |
| 36 | #include <asm/proto.h> |
Jaswinder Singh | 70ef564 | 2008-07-23 17:36:37 +0530 | [diff] [blame] | 37 | #include <asm/traps.h> |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 38 | #include <asm/desc.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 39 | |
Harvey Harrison | 33cb524 | 2008-01-30 13:32:19 +0100 | [diff] [blame] | 40 | /* |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 41 | * Page fault error code bits: |
| 42 | * |
| 43 | * bit 0 == 0: no page found 1: protection fault |
| 44 | * bit 1 == 0: read access 1: write access |
| 45 | * bit 2 == 0: kernel-mode access 1: user-mode access |
| 46 | * bit 3 == 1: use of reserved bit detected |
| 47 | * bit 4 == 1: fault was an instruction fetch |
Harvey Harrison | 33cb524 | 2008-01-30 13:32:19 +0100 | [diff] [blame] | 48 | */ |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 49 | enum x86_pf_error_code { |
| 50 | |
| 51 | PF_PROT = 1 << 0, |
| 52 | PF_WRITE = 1 << 1, |
| 53 | PF_USER = 1 << 2, |
| 54 | PF_RSVD = 1 << 3, |
| 55 | PF_INSTR = 1 << 4, |
| 56 | }; |
Andi Kleen | 66c5815 | 2006-01-11 22:44:09 +0100 | [diff] [blame] | 57 | |
Ingo Molnar | b814d41 | 2009-02-20 22:32:10 +0100 | [diff] [blame^] | 58 | /* |
 | 59 |  * Returns 0 if mmiotrace is disabled, or if the fault was not handled by mmiotrace: |
| 60 | */ |
Pekka Paalanen | 0fd0e3d | 2008-05-12 21:20:57 +0200 | [diff] [blame] | 61 | static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr) |
Pekka Paalanen | 8606978 | 2008-05-12 21:20:56 +0200 | [diff] [blame] | 62 | { |
Pekka Paalanen | 0fd0e3d | 2008-05-12 21:20:57 +0200 | [diff] [blame] | 63 | if (unlikely(is_kmmio_active())) |
| 64 | if (kmmio_handler(regs, addr) == 1) |
| 65 | return -1; |
Pekka Paalanen | 0fd0e3d | 2008-05-12 21:20:57 +0200 | [diff] [blame] | 66 | return 0; |
Pekka Paalanen | 8606978 | 2008-05-12 21:20:56 +0200 | [diff] [blame] | 67 | } |
| 68 | |
Christoph Hellwig | 74a0b57 | 2007-10-16 01:24:07 -0700 | [diff] [blame] | 69 | static inline int notify_page_fault(struct pt_regs *regs) |
Anil S Keshavamurthy | 1bd858a | 2006-06-26 00:25:25 -0700 | [diff] [blame] | 70 | { |
Harvey Harrison | 33cb524 | 2008-01-30 13:32:19 +0100 | [diff] [blame] | 71 | #ifdef CONFIG_KPROBES |
Christoph Hellwig | 74a0b57 | 2007-10-16 01:24:07 -0700 | [diff] [blame] | 72 | int ret = 0; |
Anil S Keshavamurthy | 1bd858a | 2006-06-26 00:25:25 -0700 | [diff] [blame] | 73 | |
Christoph Hellwig | 74a0b57 | 2007-10-16 01:24:07 -0700 | [diff] [blame] | 74 | /* kprobe_running() needs smp_processor_id() */ |
Harvey Harrison | f8c2ee2 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 75 | if (!user_mode_vm(regs)) { |
Christoph Hellwig | 74a0b57 | 2007-10-16 01:24:07 -0700 | [diff] [blame] | 76 | preempt_disable(); |
| 77 | if (kprobe_running() && kprobe_fault_handler(regs, 14)) |
| 78 | ret = 1; |
| 79 | preempt_enable(); |
| 80 | } |
Anil S Keshavamurthy | 1bd858a | 2006-06-26 00:25:25 -0700 | [diff] [blame] | 81 | |
Christoph Hellwig | 74a0b57 | 2007-10-16 01:24:07 -0700 | [diff] [blame] | 82 | return ret; |
Christoph Hellwig | 74a0b57 | 2007-10-16 01:24:07 -0700 | [diff] [blame] | 83 | #else |
Christoph Hellwig | 74a0b57 | 2007-10-16 01:24:07 -0700 | [diff] [blame] | 84 | return 0; |
Christoph Hellwig | 74a0b57 | 2007-10-16 01:24:07 -0700 | [diff] [blame] | 85 | #endif |
Harvey Harrison | 33cb524 | 2008-01-30 13:32:19 +0100 | [diff] [blame] | 86 | } |
Anil S Keshavamurthy | 1bd858a | 2006-06-26 00:25:25 -0700 | [diff] [blame] | 87 | |
Harvey Harrison | 1dc85be | 2008-01-30 13:32:35 +0100 | [diff] [blame] | 88 | /* |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 89 | * Prefetch quirks: |
Harvey Harrison | 1dc85be | 2008-01-30 13:32:35 +0100 | [diff] [blame] | 90 | * |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 91 | * 32-bit mode: |
Harvey Harrison | 1dc85be | 2008-01-30 13:32:35 +0100 | [diff] [blame] | 92 | * |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 93 | * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch. |
| 94 | * Check that here and ignore it. |
| 95 | * |
| 96 | * 64-bit mode: |
| 97 | * |
| 98 | * Sometimes the CPU reports invalid exceptions on prefetch. |
| 99 | * Check that here and ignore it. |
| 100 | * |
| 101 | * Opcode checker based on code by Richard Brunner. |
Harvey Harrison | 1dc85be | 2008-01-30 13:32:35 +0100 | [diff] [blame] | 102 | */ |
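 | | /* |
 | |  * Helper for is_prefetch() (a descriptive note added here): returns non-zero |
 | |  * while @opcode is a prefix byte worth skipping; sets *prefetch when the |
 | |  * two-byte prefetch opcode (0x0F 0x0D or 0x0F 0x18) is found: |
 | |  */ |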
Ingo Molnar | 107a036 | 2009-02-20 20:37:05 +0100 | [diff] [blame] | 103 | static inline int |
| 104 | check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr, |
| 105 | unsigned char opcode, int *prefetch) |
| 106 | { |
| 107 | unsigned char instr_hi = opcode & 0xf0; |
| 108 | unsigned char instr_lo = opcode & 0x0f; |
| 109 | |
| 110 | switch (instr_hi) { |
| 111 | case 0x20: |
| 112 | case 0x30: |
| 113 | /* |
| 114 | * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. |
| 115 | * In X86_64 long mode, the CPU will signal invalid |
 | 116 |  * opcode if some of these prefixes are present, so |
| 117 | * X86_64 will never get here anyway |
| 118 | */ |
| 119 | return ((instr_lo & 7) == 0x6); |
| 120 | #ifdef CONFIG_X86_64 |
| 121 | case 0x40: |
| 122 | /* |
| 123 | * In AMD64 long mode 0x40..0x4F are valid REX prefixes |
| 124 | * Need to figure out under what instruction mode the |
| 125 | * instruction was issued. Could check the LDT for lm, |
| 126 | * but for now it's good enough to assume that long |
| 127 | * mode only uses well known segments or kernel. |
| 128 | */ |
| 129 | return (!user_mode(regs)) || (regs->cs == __USER_CS); |
| 130 | #endif |
| 131 | case 0x60: |
| 132 | /* 0x64 thru 0x67 are valid prefixes in all modes. */ |
| 133 | return (instr_lo & 0xC) == 0x4; |
| 134 | case 0xF0: |
| 135 | /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */ |
| 136 | return !instr_lo || (instr_lo>>1) == 1; |
| 137 | case 0x00: |
| 138 | /* Prefetch instruction is 0x0F0D or 0x0F18 */ |
| 139 | if (probe_kernel_address(instr, opcode)) |
| 140 | return 0; |
| 141 | |
| 142 | *prefetch = (instr_lo == 0xF) && |
| 143 | (opcode == 0x0D || opcode == 0x18); |
| 144 | return 0; |
| 145 | default: |
| 146 | return 0; |
| 147 | } |
| 148 | } |
| 149 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 150 | static int |
| 151 | is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) |
Harvey Harrison | 33cb524 | 2008-01-30 13:32:19 +0100 | [diff] [blame] | 152 | { |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 153 | unsigned char *max_instr; |
Andi Kleen | ab2bf0c | 2006-12-07 02:14:06 +0100 | [diff] [blame] | 154 | unsigned char *instr; |
Harvey Harrison | 33cb524 | 2008-01-30 13:32:19 +0100 | [diff] [blame] | 155 | int prefetch = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 156 | |
Ingo Molnar | 3085354 | 2008-03-27 21:29:09 +0100 | [diff] [blame] | 157 | /* |
 | 158 |  * If it was an exec (instruction fetch) fault on an NX page, then |
| 159 | * do not ignore the fault: |
| 160 | */ |
Andi Kleen | 66c5815 | 2006-01-11 22:44:09 +0100 | [diff] [blame] | 161 | if (error_code & PF_INSTR) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 162 | return 0; |
Harvey Harrison | 1dc85be | 2008-01-30 13:32:35 +0100 | [diff] [blame] | 163 | |
Ingo Molnar | 107a036 | 2009-02-20 20:37:05 +0100 | [diff] [blame] | 164 | instr = (void *)convert_ip_to_linear(current, regs); |
Andi Kleen | f1290ec | 2005-04-16 15:24:59 -0700 | [diff] [blame] | 165 | max_instr = instr + 15; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 166 | |
Vincent Hanquez | 76381fe | 2005-06-23 00:08:46 -0700 | [diff] [blame] | 167 | if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 168 | return 0; |
| 169 | |
Ingo Molnar | 107a036 | 2009-02-20 20:37:05 +0100 | [diff] [blame] | 170 | while (instr < max_instr) { |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 171 | unsigned char opcode; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 172 | |
Andi Kleen | ab2bf0c | 2006-12-07 02:14:06 +0100 | [diff] [blame] | 173 | if (probe_kernel_address(instr, opcode)) |
Harvey Harrison | 33cb524 | 2008-01-30 13:32:19 +0100 | [diff] [blame] | 174 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 175 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 176 | instr++; |
| 177 | |
Ingo Molnar | 107a036 | 2009-02-20 20:37:05 +0100 | [diff] [blame] | 178 | if (!check_prefetch_opcode(regs, instr, opcode, &prefetch)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 179 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 180 | } |
| 181 | return prefetch; |
| 182 | } |
| 183 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 184 | static void |
| 185 | force_sig_info_fault(int si_signo, int si_code, unsigned long address, |
| 186 | struct task_struct *tsk) |
Harvey Harrison | c4aba4a | 2008-01-30 13:32:35 +0100 | [diff] [blame] | 187 | { |
| 188 | siginfo_t info; |
| 189 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 190 | info.si_signo = si_signo; |
| 191 | info.si_errno = 0; |
| 192 | info.si_code = si_code; |
| 193 | info.si_addr = (void __user *)address; |
| 194 | |
Harvey Harrison | c4aba4a | 2008-01-30 13:32:35 +0100 | [diff] [blame] | 195 | force_sig_info(si_signo, &info, tsk); |
| 196 | } |
| 197 | |
Harvey Harrison | 1156e09 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 198 | #ifdef CONFIG_X86_64 |
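 | | /* Returns non-zero if the page-table entry at @p cannot be safely read: */ |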
Harvey Harrison | 33cb524 | 2008-01-30 13:32:19 +0100 | [diff] [blame] | 199 | static int bad_address(void *p) |
| 200 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 201 | unsigned long dummy; |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 202 | |
Andi Kleen | ab2bf0c | 2006-12-07 02:14:06 +0100 | [diff] [blame] | 203 | return probe_kernel_address((unsigned long *)p, dummy); |
Harvey Harrison | 33cb524 | 2008-01-30 13:32:19 +0100 | [diff] [blame] | 204 | } |
Harvey Harrison | 1156e09 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 205 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 206 | |
Adrian Bunk | cae30f8 | 2008-02-13 23:31:31 +0200 | [diff] [blame] | 207 | static void dump_pagetable(unsigned long address) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 208 | { |
Harvey Harrison | 1156e09 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 209 | #ifdef CONFIG_X86_32 |
| 210 | __typeof__(pte_val(__pte(0))) page; |
| 211 | |
| 212 | page = read_cr3(); |
| 213 | page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT]; |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 214 | |
Harvey Harrison | 1156e09 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 215 | #ifdef CONFIG_X86_PAE |
| 216 | printk("*pdpt = %016Lx ", page); |
| 217 | if ((page >> PAGE_SHIFT) < max_low_pfn |
| 218 | && page & _PAGE_PRESENT) { |
| 219 | page &= PAGE_MASK; |
| 220 | page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT) |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 221 | & (PTRS_PER_PMD - 1)]; |
Harvey Harrison | 1156e09 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 222 | printk(KERN_CONT "*pde = %016Lx ", page); |
| 223 | page &= ~_PAGE_NX; |
| 224 | } |
| 225 | #else |
| 226 | printk("*pde = %08lx ", page); |
| 227 | #endif |
| 228 | |
| 229 | /* |
| 230 | * We must not directly access the pte in the highpte |
| 231 | * case if the page table is located in highmem. |
| 232 | * And let's rather not kmap-atomic the pte, just in case |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 233 | * it's allocated already: |
Harvey Harrison | 1156e09 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 234 | */ |
| 235 | if ((page >> PAGE_SHIFT) < max_low_pfn |
| 236 | && (page & _PAGE_PRESENT) |
| 237 | && !(page & _PAGE_PSE)) { |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 238 | |
Harvey Harrison | 1156e09 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 239 | page &= PAGE_MASK; |
| 240 | page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT) |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 241 | & (PTRS_PER_PTE - 1)]; |
Harvey Harrison | 1156e09 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 242 | printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page); |
| 243 | } |
| 244 | |
| 245 | printk("\n"); |
| 246 | #else /* CONFIG_X86_64 */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 247 | pgd_t *pgd; |
| 248 | pud_t *pud; |
| 249 | pmd_t *pmd; |
| 250 | pte_t *pte; |
| 251 | |
Glauber de Oliveira Costa | f51c945 | 2007-07-22 11:12:29 +0200 | [diff] [blame] | 252 | pgd = (pgd_t *)read_cr3(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 253 | |
Harvey Harrison | 33cb524 | 2008-01-30 13:32:19 +0100 | [diff] [blame] | 254 | pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 255 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 256 | pgd += pgd_index(address); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 257 | if (bad_address(pgd)) |
| 258 | goto bad; |
| 259 | |
Jan Beulich | d646bce | 2006-02-03 21:51:47 +0100 | [diff] [blame] | 260 | printk("PGD %lx ", pgd_val(*pgd)); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 261 | |
| 262 | if (!pgd_present(*pgd)) |
| 263 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 264 | |
Andi Kleen | d2ae5b5 | 2006-06-26 13:57:56 +0200 | [diff] [blame] | 265 | pud = pud_offset(pgd, address); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 266 | if (bad_address(pud)) |
| 267 | goto bad; |
| 268 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 269 | printk("PUD %lx ", pud_val(*pud)); |
Andi Kleen | b536022 | 2008-02-04 16:48:09 +0100 | [diff] [blame] | 270 | if (!pud_present(*pud) || pud_large(*pud)) |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 271 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 272 | |
| 273 | pmd = pmd_offset(pud, address); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 274 | if (bad_address(pmd)) |
| 275 | goto bad; |
| 276 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 277 | printk("PMD %lx ", pmd_val(*pmd)); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 278 | if (!pmd_present(*pmd) || pmd_large(*pmd)) |
| 279 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 280 | |
| 281 | pte = pte_offset_kernel(pmd, address); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 282 | if (bad_address(pte)) |
| 283 | goto bad; |
| 284 | |
Harvey Harrison | 33cb524 | 2008-01-30 13:32:19 +0100 | [diff] [blame] | 285 | printk("PTE %lx", pte_val(*pte)); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 286 | out: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 287 | printk("\n"); |
| 288 | return; |
| 289 | bad: |
| 290 | printk("BAD\n"); |
Harvey Harrison | 1156e09 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 291 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 292 | } |
| 293 | |
Harvey Harrison | 1156e09 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 294 | #ifdef CONFIG_X86_32 |
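 | | /* |
 | |  * Copy the kernel pmd entry covering @address from the init_mm |
 | |  * reference page table into @pgd; returns the reference pmd, or |
 | |  * NULL if it is not present in the reference page table: |
 | |  */ |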
| 295 | static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) |
| 296 | { |
| 297 | unsigned index = pgd_index(address); |
| 298 | pgd_t *pgd_k; |
| 299 | pud_t *pud, *pud_k; |
| 300 | pmd_t *pmd, *pmd_k; |
| 301 | |
| 302 | pgd += index; |
| 303 | pgd_k = init_mm.pgd + index; |
| 304 | |
| 305 | if (!pgd_present(*pgd_k)) |
| 306 | return NULL; |
| 307 | |
| 308 | /* |
| 309 | * set_pgd(pgd, *pgd_k); here would be useless on PAE |
| 310 | * and redundant with the set_pmd() on non-PAE. As would |
| 311 | * set_pud. |
| 312 | */ |
Harvey Harrison | 1156e09 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 313 | pud = pud_offset(pgd, address); |
| 314 | pud_k = pud_offset(pgd_k, address); |
| 315 | if (!pud_present(*pud_k)) |
| 316 | return NULL; |
| 317 | |
| 318 | pmd = pmd_offset(pud, address); |
| 319 | pmd_k = pmd_offset(pud_k, address); |
| 320 | if (!pmd_present(*pmd_k)) |
| 321 | return NULL; |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 322 | |
Harvey Harrison | 1156e09 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 323 | if (!pmd_present(*pmd)) { |
| 324 | set_pmd(pmd, *pmd_k); |
| 325 | arch_flush_lazy_mmu_mode(); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 326 | } else { |
Harvey Harrison | 1156e09 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 327 | BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 328 | } |
| 329 | |
Harvey Harrison | 1156e09 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 330 | return pmd_k; |
| 331 | } |
Harvey Harrison | 1156e09 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 332 | |
Ingo Molnar | 8c938f9 | 2009-02-20 22:12:18 +0100 | [diff] [blame] | 333 | /* |
| 334 | * Did it hit the DOS screen memory VA from vm86 mode? |
| 335 | */ |
| 336 | static inline void |
| 337 | check_v8086_mode(struct pt_regs *regs, unsigned long address, |
| 338 | struct task_struct *tsk) |
| 339 | { |
| 340 | unsigned long bit; |
| 341 | |
| 342 | if (!v8086_mode(regs)) |
| 343 | return; |
| 344 | |
| 345 | bit = (address - 0xA0000) >> PAGE_SHIFT; |
| 346 | if (bit < 32) |
| 347 | tsk->thread.screen_bitmap |= 1 << bit; |
| 348 | } |
| 349 | |
| 350 | #else /* CONFIG_X86_64: */ |
| 351 | |
Harvey Harrison | 33cb524 | 2008-01-30 13:32:19 +0100 | [diff] [blame] | 352 | static const char errata93_warning[] = |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 353 | KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n" |
| 354 | KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n" |
| 355 | KERN_ERR "******* Please consider a BIOS update.\n" |
| 356 | KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n"; |
Ingo Molnar | 8c938f9 | 2009-02-20 22:12:18 +0100 | [diff] [blame] | 357 | |
| 358 | /* |
| 359 | * No vm86 mode in 64-bit mode: |
| 360 | */ |
| 361 | static inline void |
| 362 | check_v8086_mode(struct pt_regs *regs, unsigned long address, |
| 363 | struct task_struct *tsk) |
| 364 | { |
| 365 | } |
| 366 | |
Harvey Harrison | fdfe8aa | 2008-01-30 13:33:13 +0100 | [diff] [blame] | 367 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 368 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 369 | /* |
| 370 | * Workaround for K8 erratum #93 & buggy BIOS. |
| 371 | * |
| 372 | * BIOS SMM functions are required to use a specific workaround |
| 373 | * to avoid corruption of the 64bit RIP register on C stepping K8. |
| 374 | * |
| 375 | * A lot of BIOS that didn't get tested properly miss this. |
| 376 | * |
| 377 | * The OS sees this as a page fault with the upper 32bits of RIP cleared. |
| 378 | * Try to work around it here. |
| 379 | * |
| 380 | * Note we only handle faults in kernel here. |
| 381 | * Does nothing on 32-bit. |
Harvey Harrison | fdfe8aa | 2008-01-30 13:33:13 +0100 | [diff] [blame] | 382 | */ |
Harvey Harrison | 33cb524 | 2008-01-30 13:32:19 +0100 | [diff] [blame] | 383 | static int is_errata93(struct pt_regs *regs, unsigned long address) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 384 | { |
Harvey Harrison | fdfe8aa | 2008-01-30 13:33:13 +0100 | [diff] [blame] | 385 | #ifdef CONFIG_X86_64 |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 386 | static int once; |
| 387 | |
H. Peter Anvin | 65ea5b0 | 2008-01-30 13:30:56 +0100 | [diff] [blame] | 388 | if (address != regs->ip) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 389 | return 0; |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 390 | |
Harvey Harrison | 33cb524 | 2008-01-30 13:32:19 +0100 | [diff] [blame] | 391 | if ((address >> 32) != 0) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 392 | return 0; |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 393 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 394 | address |= 0xffffffffUL << 32; |
Harvey Harrison | 33cb524 | 2008-01-30 13:32:19 +0100 | [diff] [blame] | 395 | if ((address >= (u64)_stext && address <= (u64)_etext) || |
| 396 | (address >= MODULES_VADDR && address <= MODULES_END)) { |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 397 | if (!once) { |
Harvey Harrison | 33cb524 | 2008-01-30 13:32:19 +0100 | [diff] [blame] | 398 | printk(errata93_warning); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 399 | once = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 400 | } |
H. Peter Anvin | 65ea5b0 | 2008-01-30 13:30:56 +0100 | [diff] [blame] | 401 | regs->ip = address; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 402 | return 1; |
| 403 | } |
Harvey Harrison | fdfe8aa | 2008-01-30 13:33:13 +0100 | [diff] [blame] | 404 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 405 | return 0; |
Harvey Harrison | 33cb524 | 2008-01-30 13:32:19 +0100 | [diff] [blame] | 406 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 407 | |
Harvey Harrison | 35f3266 | 2008-01-30 13:34:09 +0100 | [diff] [blame] | 408 | /* |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 409 |  * Work around K8 erratum #100: K8 in compat mode occasionally jumps |
| 410 | * to illegal addresses >4GB. |
| 411 | * |
| 412 | * We catch this in the page fault handler because these addresses |
| 413 | * are not reachable. Just detect this case and return. Any code |
Harvey Harrison | 35f3266 | 2008-01-30 13:34:09 +0100 | [diff] [blame] | 414 | * segment in LDT is compatibility mode. |
| 415 | */ |
| 416 | static int is_errata100(struct pt_regs *regs, unsigned long address) |
| 417 | { |
| 418 | #ifdef CONFIG_X86_64 |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 419 | if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32)) |
Harvey Harrison | 35f3266 | 2008-01-30 13:34:09 +0100 | [diff] [blame] | 420 | return 1; |
| 421 | #endif |
| 422 | return 0; |
| 423 | } |
| 424 | |
Harvey Harrison | 29caf2f | 2008-01-30 13:34:09 +0100 | [diff] [blame] | 425 | static int is_f00f_bug(struct pt_regs *regs, unsigned long address) |
| 426 | { |
| 427 | #ifdef CONFIG_X86_F00F_BUG |
| 428 | unsigned long nr; |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 429 | |
Harvey Harrison | 29caf2f | 2008-01-30 13:34:09 +0100 | [diff] [blame] | 430 | /* |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 431 | * Pentium F0 0F C7 C8 bug workaround: |
Harvey Harrison | 29caf2f | 2008-01-30 13:34:09 +0100 | [diff] [blame] | 432 | */ |
| 433 | if (boot_cpu_data.f00f_bug) { |
| 434 | nr = (address - idt_descr.address) >> 3; |
| 435 | |
| 436 | if (nr == 6) { |
| 437 | do_invalid_op(regs, 0); |
| 438 | return 1; |
| 439 | } |
| 440 | } |
| 441 | #endif |
| 442 | return 0; |
| 443 | } |
| 444 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 445 | static void |
| 446 | show_fault_oops(struct pt_regs *regs, unsigned long error_code, |
| 447 | unsigned long address) |
Harvey Harrison | b3279c7 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 448 | { |
Harvey Harrison | 1156e09 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 449 | #ifdef CONFIG_X86_32 |
| 450 | if (!oops_may_print()) |
| 451 | return; |
Harvey Harrison | fd40d6e | 2008-01-30 13:34:11 +0100 | [diff] [blame] | 452 | #endif |
Harvey Harrison | 1156e09 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 453 | |
| 454 | #ifdef CONFIG_X86_PAE |
| 455 | if (error_code & PF_INSTR) { |
Harvey Harrison | 93809be | 2008-02-01 17:49:43 +0100 | [diff] [blame] | 456 | unsigned int level; |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 457 | |
Harvey Harrison | 1156e09 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 458 | pte_t *pte = lookup_address(address, &level); |
| 459 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 460 | if (pte && pte_present(*pte) && !pte_exec(*pte)) { |
Harvey Harrison | 1156e09 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 461 | printk(KERN_CRIT "kernel tried to execute " |
| 462 | "NX-protected page - exploit attempt? " |
David Howells | 350b4da | 2008-11-14 10:38:40 +1100 | [diff] [blame] | 463 | "(uid: %d)\n", current_uid()); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 464 | } |
Harvey Harrison | 1156e09 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 465 | } |
| 466 | #endif |
Harvey Harrison | fd40d6e | 2008-01-30 13:34:11 +0100 | [diff] [blame] | 467 | |
Harvey Harrison | 1156e09 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 468 | printk(KERN_ALERT "BUG: unable to handle kernel "); |
| 469 | if (address < PAGE_SIZE) |
| 470 | printk(KERN_CONT "NULL pointer dereference"); |
| 471 | else |
| 472 | printk(KERN_CONT "paging request"); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 473 | |
Vegard Nossum | f294a8c | 2008-07-01 15:38:13 +0200 | [diff] [blame] | 474 | printk(KERN_CONT " at %p\n", (void *) address); |
Harvey Harrison | 19f0dda | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 475 | printk(KERN_ALERT "IP:"); |
Harvey Harrison | b3279c7 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 476 | printk_address(regs->ip, 1); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 477 | |
Harvey Harrison | b3279c7 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 478 | dump_pagetable(address); |
| 479 | } |
| 480 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 481 | static noinline void |
| 482 | pgtable_bad(struct pt_regs *regs, unsigned long error_code, |
| 483 | unsigned long address) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 484 | { |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 485 | struct task_struct *tsk; |
| 486 | unsigned long flags; |
| 487 | int sig; |
| 488 | |
| 489 | flags = oops_begin(); |
| 490 | tsk = current; |
| 491 | sig = SIGKILL; |
Jan Beulich | 1209140 | 2005-09-12 18:49:24 +0200 | [diff] [blame] | 492 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 493 | printk(KERN_ALERT "%s: Corrupted page table at address %lx\n", |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 494 | tsk->comm, address); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 495 | dump_pagetable(address); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 496 | |
| 497 | tsk->thread.cr2 = address; |
| 498 | tsk->thread.trap_no = 14; |
| 499 | tsk->thread.error_code = error_code; |
| 500 | |
Jan Beulich | 22f5991 | 2008-01-30 13:31:23 +0100 | [diff] [blame] | 501 | if (__die("Bad pagetable", regs, error_code)) |
Alexander van Heukelum | 874d93d | 2008-10-22 12:00:09 +0200 | [diff] [blame] | 502 | sig = 0; |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 503 | |
Alexander van Heukelum | 874d93d | 2008-10-22 12:00:09 +0200 | [diff] [blame] | 504 | oops_end(flags, regs, sig); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 505 | } |
| 506 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 507 | static noinline void |
| 508 | no_context(struct pt_regs *regs, unsigned long error_code, |
| 509 | unsigned long address) |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 510 | { |
| 511 | struct task_struct *tsk = current; |
Ingo Molnar | 1980307 | 2009-01-21 10:39:51 +0100 | [diff] [blame] | 512 | unsigned long *stackend; |
| 513 | |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 514 | #ifdef CONFIG_X86_64 |
| 515 | unsigned long flags; |
| 516 | int sig; |
| 517 | #endif |
| 518 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 519 | /* Are we prepared to handle this kernel fault? */ |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 520 | if (fixup_exception(regs)) |
| 521 | return; |
| 522 | |
| 523 | /* |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 524 | * 32-bit: |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 525 | * |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 526 | * Valid to do another page fault here, because if this fault |
 | 527 |  * had been triggered by is_prefetch, fixup_exception would have |
| 528 | * handled it. |
| 529 | * |
| 530 | * 64-bit: |
| 531 | * |
| 532 | * Hall of shame of CPU/BIOS bugs. |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 533 | */ |
| 534 | if (is_prefetch(regs, error_code, address)) |
| 535 | return; |
| 536 | |
| 537 | if (is_errata93(regs, address)) |
| 538 | return; |
| 539 | |
| 540 | /* |
| 541 | * Oops. The kernel tried to access some bad page. We'll have to |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 542 | * terminate things with extreme prejudice: |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 543 | */ |
| 544 | #ifdef CONFIG_X86_32 |
| 545 | bust_spinlocks(1); |
| 546 | #else |
| 547 | flags = oops_begin(); |
| 548 | #endif |
| 549 | |
| 550 | show_fault_oops(regs, error_code, address); |
| 551 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 552 | stackend = end_of_stack(tsk); |
Ingo Molnar | 1980307 | 2009-01-21 10:39:51 +0100 | [diff] [blame] | 553 | if (*stackend != STACK_END_MAGIC) |
| 554 | printk(KERN_ALERT "Thread overran stack, or stack corrupted\n"); |
| 555 | |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 556 | tsk->thread.cr2 = address; |
| 557 | tsk->thread.trap_no = 14; |
| 558 | tsk->thread.error_code = error_code; |
| 559 | |
| 560 | #ifdef CONFIG_X86_32 |
| 561 | die("Oops", regs, error_code); |
| 562 | bust_spinlocks(0); |
| 563 | do_exit(SIGKILL); |
| 564 | #else |
| 565 | sig = SIGKILL; |
| 566 | if (__die("Oops", regs, error_code)) |
| 567 | sig = 0; |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 568 | |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 569 | /* Executive summary in case the body of the oops scrolled away */ |
| 570 | printk(KERN_EMERG "CR2: %016lx\n", address); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 571 | |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 572 | oops_end(flags, regs, sig); |
| 573 | #endif |
| 574 | } |
| 575 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 576 | /* |
| 577 | * Print out info about fatal segfaults, if the show_unhandled_signals |
| 578 | * sysctl is set: |
| 579 | */ |
| 580 | static inline void |
| 581 | show_signal_msg(struct pt_regs *regs, unsigned long error_code, |
| 582 | unsigned long address, struct task_struct *tsk) |
| 583 | { |
| 584 | if (!unhandled_signal(tsk, SIGSEGV)) |
| 585 | return; |
| 586 | |
| 587 | if (!printk_ratelimit()) |
| 588 | return; |
| 589 | |
| 590 | printk(KERN_CONT "%s%s[%d]: segfault at %lx ip %p sp %p error %lx", |
| 591 | task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG, |
| 592 | tsk->comm, task_pid_nr(tsk), address, |
| 593 | (void *)regs->ip, (void *)regs->sp, error_code); |
| 594 | |
| 595 | print_vma_addr(KERN_CONT " in ", regs->ip); |
| 596 | |
| 597 | printk(KERN_CONT "\n"); |
| 598 | } |
| 599 | |
| 600 | static void |
| 601 | __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, |
| 602 | unsigned long address, int si_code) |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 603 | { |
| 604 | struct task_struct *tsk = current; |
| 605 | |
| 606 | /* User mode accesses just cause a SIGSEGV */ |
| 607 | if (error_code & PF_USER) { |
| 608 | /* |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 609 | * It's possible to have interrupts off here: |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 610 | */ |
| 611 | local_irq_enable(); |
| 612 | |
| 613 | /* |
| 614 | * Valid to do another page fault here because this one came |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 615 | * from user space: |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 616 | */ |
| 617 | if (is_prefetch(regs, error_code, address)) |
| 618 | return; |
| 619 | |
| 620 | if (is_errata100(regs, address)) |
| 621 | return; |
| 622 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 623 | if (unlikely(show_unhandled_signals)) |
| 624 | show_signal_msg(regs, error_code, address, tsk); |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 625 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 626 | /* Kernel addresses are always protection faults: */ |
| 627 | tsk->thread.cr2 = address; |
| 628 | tsk->thread.error_code = error_code | (address >= TASK_SIZE); |
| 629 | tsk->thread.trap_no = 14; |
| 630 | |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 631 | force_sig_info_fault(SIGSEGV, si_code, address, tsk); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 632 | |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 633 | return; |
| 634 | } |
| 635 | |
| 636 | if (is_f00f_bug(regs, address)) |
| 637 | return; |
| 638 | |
| 639 | no_context(regs, error_code, address); |
| 640 | } |
| 641 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 642 | static noinline void |
| 643 | bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, |
| 644 | unsigned long address) |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 645 | { |
| 646 | __bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR); |
| 647 | } |
| 648 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 649 | static void |
| 650 | __bad_area(struct pt_regs *regs, unsigned long error_code, |
| 651 | unsigned long address, int si_code) |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 652 | { |
| 653 | struct mm_struct *mm = current->mm; |
| 654 | |
| 655 | /* |
| 656 | * Something tried to access memory that isn't in our memory map.. |
| 657 | * Fix it, but check if it's kernel or user first.. |
| 658 | */ |
| 659 | up_read(&mm->mmap_sem); |
| 660 | |
| 661 | __bad_area_nosemaphore(regs, error_code, address, si_code); |
| 662 | } |
| 663 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 664 | static noinline void |
| 665 | bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address) |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 666 | { |
| 667 | __bad_area(regs, error_code, address, SEGV_MAPERR); |
| 668 | } |
| 669 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 670 | static noinline void |
| 671 | bad_area_access_error(struct pt_regs *regs, unsigned long error_code, |
| 672 | unsigned long address) |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 673 | { |
| 674 | __bad_area(regs, error_code, address, SEGV_ACCERR); |
| 675 | } |
| 676 | |
| 677 | /* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */ |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 678 | static void |
| 679 | out_of_memory(struct pt_regs *regs, unsigned long error_code, |
| 680 | unsigned long address) |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 681 | { |
| 682 | /* |
 | 683 |  * We ran out of memory, call the OOM killer, and return to userspace |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 684 | * (which will retry the fault, or kill us if we got oom-killed): |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 685 | */ |
| 686 | up_read(¤t->mm->mmap_sem); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 687 | |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 688 | pagefault_out_of_memory(); |
| 689 | } |
| 690 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 691 | static void |
| 692 | do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address) |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 693 | { |
| 694 | struct task_struct *tsk = current; |
| 695 | struct mm_struct *mm = tsk->mm; |
| 696 | |
| 697 | up_read(&mm->mmap_sem); |
| 698 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 699 | /* Kernel mode? Handle exceptions or die: */ |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 700 | if (!(error_code & PF_USER)) |
| 701 | no_context(regs, error_code, address); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 702 | |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 703 | #ifdef CONFIG_X86_32 |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 704 | /* User space => ok to do another page fault: */ |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 705 | if (is_prefetch(regs, error_code, address)) |
| 706 | return; |
| 707 | #endif |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 708 | |
| 709 | tsk->thread.cr2 = address; |
| 710 | tsk->thread.error_code = error_code; |
| 711 | tsk->thread.trap_no = 14; |
| 712 | |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 713 | force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk); |
| 714 | } |
| 715 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 716 | static noinline void |
| 717 | mm_fault_error(struct pt_regs *regs, unsigned long error_code, |
| 718 | unsigned long address, unsigned int fault) |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 719 | { |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 720 | if (fault & VM_FAULT_OOM) { |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 721 | out_of_memory(regs, error_code, address); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 722 | } else { |
| 723 | if (fault & VM_FAULT_SIGBUS) |
| 724 | do_sigbus(regs, error_code, address); |
| 725 | else |
| 726 | BUG(); |
| 727 | } |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 728 | } |
| 729 | |
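 | | /* Returns 1 if the entry's permissions already allow the faulting access (write or instruction fetch), 0 otherwise: */ |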
Thomas Gleixner | d8b57bb | 2008-02-06 22:39:43 +0100 | [diff] [blame] | 730 | static int spurious_fault_check(unsigned long error_code, pte_t *pte) |
| 731 | { |
| 732 | if ((error_code & PF_WRITE) && !pte_write(*pte)) |
| 733 | return 0; |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 734 | |
Thomas Gleixner | d8b57bb | 2008-02-06 22:39:43 +0100 | [diff] [blame] | 735 | if ((error_code & PF_INSTR) && !pte_exec(*pte)) |
| 736 | return 0; |
| 737 | |
| 738 | return 1; |
| 739 | } |
| 740 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 741 | /* |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 742 | * Handle a spurious fault caused by a stale TLB entry. |
| 743 | * |
| 744 | * This allows us to lazily refresh the TLB when increasing the |
| 745 | * permissions of a kernel page (RO -> RW or NX -> X). Doing it |
| 746 | * eagerly is very expensive since that implies doing a full |
| 747 | * cross-processor TLB flush, even if no stale TLB entries exist |
| 748 | * on other processors. |
| 749 | * |
Jeremy Fitzhardinge | 5b727a3 | 2008-01-30 13:34:11 +0100 | [diff] [blame] | 750 | * There are no security implications to leaving a stale TLB when |
| 751 | * increasing the permissions on a page. |
| 752 | */ |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 753 | static noinline int |
| 754 | spurious_fault(unsigned long error_code, unsigned long address) |
Jeremy Fitzhardinge | 5b727a3 | 2008-01-30 13:34:11 +0100 | [diff] [blame] | 755 | { |
| 756 | pgd_t *pgd; |
| 757 | pud_t *pud; |
| 758 | pmd_t *pmd; |
| 759 | pte_t *pte; |
Steven Rostedt | 3c3e569 | 2009-02-19 11:46:36 -0500 | [diff] [blame] | 760 | int ret; |
Jeremy Fitzhardinge | 5b727a3 | 2008-01-30 13:34:11 +0100 | [diff] [blame] | 761 | |
| 762 | /* Reserved-bit violation or user access to kernel space? */ |
| 763 | if (error_code & (PF_USER | PF_RSVD)) |
| 764 | return 0; |
| 765 | |
| 766 | pgd = init_mm.pgd + pgd_index(address); |
| 767 | if (!pgd_present(*pgd)) |
| 768 | return 0; |
| 769 | |
| 770 | pud = pud_offset(pgd, address); |
| 771 | if (!pud_present(*pud)) |
| 772 | return 0; |
| 773 | |
Thomas Gleixner | d8b57bb | 2008-02-06 22:39:43 +0100 | [diff] [blame] | 774 | if (pud_large(*pud)) |
| 775 | return spurious_fault_check(error_code, (pte_t *) pud); |
| 776 | |
Jeremy Fitzhardinge | 5b727a3 | 2008-01-30 13:34:11 +0100 | [diff] [blame] | 777 | pmd = pmd_offset(pud, address); |
| 778 | if (!pmd_present(*pmd)) |
| 779 | return 0; |
| 780 | |
Thomas Gleixner | d8b57bb | 2008-02-06 22:39:43 +0100 | [diff] [blame] | 781 | if (pmd_large(*pmd)) |
| 782 | return spurious_fault_check(error_code, (pte_t *) pmd); |
| 783 | |
Jeremy Fitzhardinge | 5b727a3 | 2008-01-30 13:34:11 +0100 | [diff] [blame] | 784 | pte = pte_offset_kernel(pmd, address); |
| 785 | if (!pte_present(*pte)) |
| 786 | return 0; |
| 787 | |
Steven Rostedt | 3c3e569 | 2009-02-19 11:46:36 -0500 | [diff] [blame] | 788 | ret = spurious_fault_check(error_code, pte); |
| 789 | if (!ret) |
| 790 | return 0; |
| 791 | |
| 792 | /* |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 793 | * Make sure we have permissions in PMD. |
| 794 | * If not, then there's a bug in the page tables: |
Steven Rostedt | 3c3e569 | 2009-02-19 11:46:36 -0500 | [diff] [blame] | 795 | */ |
| 796 | ret = spurious_fault_check(error_code, (pte_t *) pmd); |
| 797 | WARN_ONCE(!ret, "PMD has incorrect permission bits\n"); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 798 | |
Steven Rostedt | 3c3e569 | 2009-02-19 11:46:36 -0500 | [diff] [blame] | 799 | return ret; |
Jeremy Fitzhardinge | 5b727a3 | 2008-01-30 13:34:11 +0100 | [diff] [blame] | 800 | } |
| 801 | |
| 802 | /* |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 803 | * 32-bit: |
Harvey Harrison | f8c2ee2 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 804 | * |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 805 | * Handle a fault on the vmalloc or module mapping area |
| 806 | * |
| 807 | * 64-bit: |
| 808 | * |
| 809 | * Handle a fault on the vmalloc area |
Andi Kleen | 3b9ba4d | 2005-05-16 21:53:31 -0700 | [diff] [blame] | 810 | * |
| 811 | * This assumes no large pages in there. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 812 | */ |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 813 | static noinline int vmalloc_fault(unsigned long address) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 814 | { |
Harvey Harrison | fdfe8aa | 2008-01-30 13:33:13 +0100 | [diff] [blame] | 815 | #ifdef CONFIG_X86_32 |
| 816 | unsigned long pgd_paddr; |
| 817 | pmd_t *pmd_k; |
| 818 | pte_t *pte_k; |
Henry Nestler | b29c701 | 2008-05-12 15:44:39 +0200 | [diff] [blame] | 819 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 820 | /* Make sure we are in vmalloc area: */ |
Henry Nestler | b29c701 | 2008-05-12 15:44:39 +0200 | [diff] [blame] | 821 | if (!(address >= VMALLOC_START && address < VMALLOC_END)) |
| 822 | return -1; |
| 823 | |
Harvey Harrison | fdfe8aa | 2008-01-30 13:33:13 +0100 | [diff] [blame] | 824 | /* |
| 825 | * Synchronize this task's top level page-table |
| 826 | * with the 'reference' page table. |
| 827 | * |
| 828 | * Do _not_ use "current" here. We might be inside |
| 829 | * an interrupt in the middle of a task switch.. |
| 830 | */ |
| 831 | pgd_paddr = read_cr3(); |
| 832 | pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); |
| 833 | if (!pmd_k) |
| 834 | return -1; |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 835 | |
Harvey Harrison | fdfe8aa | 2008-01-30 13:33:13 +0100 | [diff] [blame] | 836 | pte_k = pte_offset_kernel(pmd_k, address); |
| 837 | if (!pte_present(*pte_k)) |
| 838 | return -1; |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 839 | |
Harvey Harrison | fdfe8aa | 2008-01-30 13:33:13 +0100 | [diff] [blame] | 840 | return 0; |
| 841 | #else |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 842 | pgd_t *pgd, *pgd_ref; |
| 843 | pud_t *pud, *pud_ref; |
| 844 | pmd_t *pmd, *pmd_ref; |
| 845 | pte_t *pte, *pte_ref; |
| 846 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 847 | /* Make sure we are in vmalloc area: */ |
Harvey Harrison | cf89ec9 | 2008-02-04 16:47:56 +0100 | [diff] [blame] | 848 | if (!(address >= VMALLOC_START && address < VMALLOC_END)) |
| 849 | return -1; |
| 850 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 851 | /* |
| 852 | * Copy kernel mappings over when needed. This can also |
 | 853 |  * happen within a race in page table update. In the latter |
| 854 | * case just flush: |
| 855 | */ |
Andi Kleen | f313e12 | 2009-01-09 12:17:43 -0800 | [diff] [blame] | 856 | pgd = pgd_offset(current->active_mm, address); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 857 | pgd_ref = pgd_offset_k(address); |
| 858 | if (pgd_none(*pgd_ref)) |
| 859 | return -1; |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 860 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 861 | if (pgd_none(*pgd)) |
| 862 | set_pgd(pgd, *pgd_ref); |
Jan Beulich | 8c914cb | 2006-03-25 16:29:40 +0100 | [diff] [blame] | 863 | else |
Dave McCracken | 46a82b2 | 2006-09-25 23:31:48 -0700 | [diff] [blame] | 864 | BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 865 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 866 | /* |
| 867 | * Below here mismatches are bugs because these lower tables |
| 868 | * are shared: |
| 869 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 870 | |
| 871 | pud = pud_offset(pgd, address); |
| 872 | pud_ref = pud_offset(pgd_ref, address); |
| 873 | if (pud_none(*pud_ref)) |
| 874 | return -1; |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 875 | |
Dave McCracken | 46a82b2 | 2006-09-25 23:31:48 -0700 | [diff] [blame] | 876 | if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 877 | BUG(); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 878 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 879 | pmd = pmd_offset(pud, address); |
| 880 | pmd_ref = pmd_offset(pud_ref, address); |
| 881 | if (pmd_none(*pmd_ref)) |
| 882 | return -1; |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 883 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 884 | if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref)) |
| 885 | BUG(); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 886 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 887 | pte_ref = pte_offset_kernel(pmd_ref, address); |
| 888 | if (!pte_present(*pte_ref)) |
| 889 | return -1; |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 890 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 891 | pte = pte_offset_kernel(pmd, address); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 892 | |
| 893 | /* |
| 894 | * Don't use pte_page here, because the mappings can point |
| 895 | * outside mem_map, and the NUMA hash lookup cannot handle |
| 896 | * that: |
| 897 | */ |
Andi Kleen | 3b9ba4d | 2005-05-16 21:53:31 -0700 | [diff] [blame] | 898 | if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 899 | BUG(); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 900 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 901 | return 0; |
Harvey Harrison | fdfe8aa | 2008-01-30 13:33:13 +0100 | [diff] [blame] | 902 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 903 | } |
| 904 | |
Masoud Asgharifard Sharbiani | abd4f75 | 2007-07-22 11:12:28 +0200 | [diff] [blame] | 905 | int show_unhandled_signals = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 906 | |
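 | | /* Returns 1 if the faulting access is not permitted by the vma's protection flags: */ |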
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 907 | static inline int |
| 908 | access_error(unsigned long error_code, int write, struct vm_area_struct *vma) |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 909 | { |
| 910 | if (write) { |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 911 | /* write, present and write, not present: */ |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 912 | if (unlikely(!(vma->vm_flags & VM_WRITE))) |
| 913 | return 1; |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 914 | return 0; |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 915 | } |
| 916 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 917 | /* read, present: */ |
| 918 | if (unlikely(error_code & PF_PROT)) |
| 919 | return 1; |
| 920 | |
| 921 | /* read, not present: */ |
| 922 | if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))) |
| 923 | return 1; |
| 924 | |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 925 | return 0; |
| 926 | } |
| 927 | |
Hiroshi Shimamoto | 0973a06 | 2009-02-04 15:24:09 -0800 | [diff] [blame] | 928 | static int fault_in_kernel_space(unsigned long address) |
| 929 | { |
| 930 | #ifdef CONFIG_X86_32 |
| 931 | return address >= TASK_SIZE; |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 932 | #else |
Hiroshi Shimamoto | 0973a06 | 2009-02-04 15:24:09 -0800 | [diff] [blame] | 933 | return address >= TASK_SIZE64; |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 934 | #endif |
Hiroshi Shimamoto | 0973a06 | 2009-02-04 15:24:09 -0800 | [diff] [blame] | 935 | } |
| 936 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 937 | /* |
| 938 | * This routine handles page faults. It determines the address, |
| 939 | * and the problem, and then passes it off to one of the appropriate |
| 940 | * routines. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 941 | */ |
Harvey Harrison | f8c2ee2 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 942 | #ifdef CONFIG_X86_64 |
| 943 | asmlinkage |
| 944 | #endif |
| 945 | void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 946 | { |
Harvey Harrison | 33cb524 | 2008-01-30 13:32:19 +0100 | [diff] [blame] | 947 | struct vm_area_struct *vma; |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 948 | struct task_struct *tsk; |
| 949 | unsigned long address; |
| 950 | struct mm_struct *mm; |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 951 | int write; |
Harvey Harrison | f8c2ee2 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 952 | int fault; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 953 | |
Arjan van de Ven | a9ba9a3 | 2006-03-25 16:30:10 +0100 | [diff] [blame] | 954 | tsk = current; |
| 955 | mm = tsk->mm; |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 956 | |
Arjan van de Ven | a9ba9a3 | 2006-03-25 16:30:10 +0100 | [diff] [blame] | 957 | prefetchw(&mm->mmap_sem); |
| 958 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 959 | /* Get the faulting address: */ |
Glauber de Oliveira Costa | f51c945 | 2007-07-22 11:12:29 +0200 | [diff] [blame] | 960 | address = read_cr2(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 961 | |
Pekka Paalanen | 0fd0e3d | 2008-05-12 21:20:57 +0200 | [diff] [blame] | 962 | if (unlikely(kmmio_fault(regs, address))) |
Pekka Paalanen | 8606978 | 2008-05-12 21:20:56 +0200 | [diff] [blame] | 963 | return; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 964 | |
| 965 | /* |
| 966 | * We fault-in kernel-space virtual memory on-demand. The |
| 967 | * 'reference' page table is init_mm.pgd. |
| 968 | * |
| 969 | * NOTE! We MUST NOT take any locks for this case. We may |
| 970 | * be in an interrupt or a critical region, and should |
| 971 | * only copy the information from the master page table, |
| 972 | * nothing more. |
| 973 | * |
| 974 | * This verifies that the fault happens in kernel space |
| 975 | * (error_code & 4) == 0, and that the fault was not a |
Jan Beulich | 8b1bde9 | 2006-01-11 22:42:23 +0100 | [diff] [blame] | 976 | * protection error (error_code & 9) == 0. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 977 | */ |
Hiroshi Shimamoto | 0973a06 | 2009-02-04 15:24:09 -0800 | [diff] [blame] | 978 | if (unlikely(fault_in_kernel_space(address))) { |
Harvey Harrison | f8c2ee2 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 979 | if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) && |
| 980 | vmalloc_fault(address) >= 0) |
| 981 | return; |
Jeremy Fitzhardinge | 5b727a3 | 2008-01-30 13:34:11 +0100 | [diff] [blame] | 982 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 983 | /* Can handle a stale RO->RW TLB: */ |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 984 | if (spurious_fault(error_code, address)) |
Jeremy Fitzhardinge | 5b727a3 | 2008-01-30 13:34:11 +0100 | [diff] [blame] | 985 | return; |
| 986 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 987 | /* kprobes don't want to hook the spurious faults: */ |
Masami Hiramatsu | 9be260a | 2009-02-05 17:12:39 -0500 | [diff] [blame] | 988 | if (notify_page_fault(regs)) |
| 989 | return; |
Harvey Harrison | f8c2ee2 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 990 | /* |
| 991 | * Don't take the mm semaphore here. If we fix up a prefetch |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 992 | * fault we could otherwise deadlock: |
Harvey Harrison | f8c2ee2 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 993 | */ |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 994 | bad_area_nosemaphore(regs, error_code, address); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 995 | |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 996 | return; |
Harvey Harrison | f8c2ee2 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 997 | } |
| 998 | |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 999 | /* kprobes don't want to hook the spurious faults: */ |
Ingo Molnar | f8a6b2b | 2009-02-13 09:44:22 +0100 | [diff] [blame] | 1000 | if (unlikely(notify_page_fault(regs))) |
Masami Hiramatsu | 9be260a | 2009-02-05 17:12:39 -0500 | [diff] [blame] | 1001 | return; |
Harvey Harrison | f8c2ee2 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1002 | /* |
Linus Torvalds | 891cffb | 2008-10-12 13:16:12 -0700 | [diff] [blame] | 1003 | * It's safe to allow IRQs after cr2 has been saved and the |
| 1004 | * vmalloc fault has been handled. |
| 1005 | * |
| 1006 | * User-mode registers count as a user access even for any |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1007 | * potential system fault or CPU buglet: |
Harvey Harrison | f8c2ee2 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1008 | */ |
Linus Torvalds | 891cffb | 2008-10-12 13:16:12 -0700 | [diff] [blame] | 1009 | if (user_mode_vm(regs)) { |
| 1010 | local_irq_enable(); |
| 1011 | error_code |= PF_USER; |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1012 | } else { |
| 1013 | if (regs->flags & X86_EFLAGS_IF) |
| 1014 | local_irq_enable(); |
| 1015 | } |
Jan Beulich | 8c914cb | 2006-03-25 16:29:40 +0100 | [diff] [blame] | 1016 | |
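	/* A fault with the reserved bit set means the page tables are corrupted: */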
Andi Kleen | 66c5815 | 2006-01-11 22:44:09 +0100 | [diff] [blame] | 1017 | if (unlikely(error_code & PF_RSVD)) |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 1018 | pgtable_bad(regs, error_code, address); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1019 | |
| 1020 | /* |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1021 | * If we're in an interrupt, have no user context, or are running |
| 1022 | * in an atomic region, then we must not take the fault: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1023 | */ |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 1024 | if (unlikely(in_atomic() || !mm)) { |
| 1025 | bad_area_nosemaphore(regs, error_code, address); |
| 1026 | return; |
| 1027 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1028 | |
Ingo Molnar | 3a1dfe6 | 2008-10-13 17:49:02 +0200 | [diff] [blame] | 1029 | /* |
| 1030 | * When running in the kernel we expect faults to occur only to |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1031 | * addresses in user space. All other faults represent errors in |
| 1032 | * the kernel and should generate an OOPS. Unfortunately, in the |
| 1033 | * case of an erroneous fault occurring in a code path which already |
| 1034 | * holds mmap_sem we will deadlock attempting to validate the fault |
| 1035 | * against the address space. Luckily the kernel only validly |
| 1036 | * references user space from well-defined areas of code, which are |
| 1037 | * listed in the exception tables. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1038 | * |
| 1039 | * As the vast majority of faults will be valid we will only perform |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1040 | * the source reference check when there is a possibility of a |
| 1041 | * deadlock. Attempt to lock the address space, if we cannot we then |
| 1042 | * validate the source. If this is invalid we can skip the address |
| 1043 | * space check, thus avoiding the deadlock: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1044 | */ |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 1045 | if (unlikely(!down_read_trylock(&mm->mmap_sem))) { |
Andi Kleen | 66c5815 | 2006-01-11 22:44:09 +0100 | [diff] [blame] | 1046 | if ((error_code & PF_USER) == 0 && |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 1047 | !search_exception_tables(regs->ip)) { |
| 1048 | bad_area_nosemaphore(regs, error_code, address); |
| 1049 | return; |
| 1050 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1051 | down_read(&mm->mmap_sem); |
Peter Zijlstra | 0100607 | 2009-01-29 16:02:12 +0100 | [diff] [blame] | 1052 | } else { |
| 1053 | /* |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1054 | * The above down_read_trylock() might have succeeded, in |
| 1055 | * which case we'll have missed the might_sleep() from |
| 1056 | * down_read(): |
Peter Zijlstra | 0100607 | 2009-01-29 16:02:12 +0100 | [diff] [blame] | 1057 | */ |
| 1058 | might_sleep(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1059 | } |
| 1060 | |
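	/* Find the first VMA that ends above the faulting address, if any: */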
| 1061 | vma = find_vma(mm, address); |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 1062 | if (unlikely(!vma)) { |
| 1063 | bad_area(regs, error_code, address); |
| 1064 | return; |
| 1065 | } |
| 1066 | if (likely(vma->vm_start <= address)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1067 | goto good_area; |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 1068 | if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) { |
| 1069 | bad_area(regs, error_code, address); |
| 1070 | return; |
| 1071 | } |
Harvey Harrison | 33cb524 | 2008-01-30 13:32:19 +0100 | [diff] [blame] | 1072 | if (error_code & PF_USER) { |
Harvey Harrison | 6f4d368 | 2008-01-30 13:33:13 +0100 | [diff] [blame] | 1073 | /* |
| 1074 | * Accessing the stack below %sp is always a bug. |
| 1075 | * The large cushion allows instructions like enter |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1076 | * and pusha to work. ("enter $65535, $31" pushes |
Harvey Harrison | 6f4d368 | 2008-01-30 13:33:13 +0100 | [diff] [blame] | 1077 | * 32 pointers and then decrements %sp by 65535.) |
Chuck Ebbert | 03fdc2c | 2006-06-26 13:59:50 +0200 | [diff] [blame] | 1078 | */ |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 1079 | if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) { |
| 1080 | bad_area(regs, error_code, address); |
| 1081 | return; |
| 1082 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1083 | } |
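	/* Grow the stack VMA downwards so that it covers the faulting address: */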
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 1084 | if (unlikely(expand_stack(vma, address))) { |
| 1085 | bad_area(regs, error_code, address); |
| 1086 | return; |
| 1087 | } |
| 1088 | |
| 1089 | /* |
| 1090 | * Ok, we have a good vm_area for this memory access, so |
| 1091 | * we can handle it. |
| 1092 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1093 | good_area: |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 1094 | write = error_code & PF_WRITE; |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1095 | |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 1096 | if (unlikely(access_error(error_code, write, vma))) { |
| 1097 | bad_area_access_error(regs, error_code, address); |
| 1098 | return; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1099 | } |
| 1100 | |
| 1101 | /* |
| 1102 | * If for any reason at all we couldn't handle the fault, |
| 1103 | * make sure we exit gracefully rather than endlessly redo |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1104 | * the fault: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1105 | */ |
Nick Piggin | 83c5407 | 2007-07-19 01:47:05 -0700 | [diff] [blame] | 1106 | fault = handle_mm_fault(mm, vma, address, write); |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1107 | |
Nick Piggin | 83c5407 | 2007-07-19 01:47:05 -0700 | [diff] [blame] | 1108 | if (unlikely(fault & VM_FAULT_ERROR)) { |
Nick Piggin | 92181f1 | 2009-01-20 04:24:26 +0100 | [diff] [blame] | 1109 | mm_fault_error(regs, error_code, address, fault); |
| 1110 | return; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1111 | } |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1112 | |
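	/* Account the fault: major faults needed I/O (e.g. a swap-in), minor faults did not: */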
Nick Piggin | 83c5407 | 2007-07-19 01:47:05 -0700 | [diff] [blame] | 1113 | if (fault & VM_FAULT_MAJOR) |
| 1114 | tsk->maj_flt++; |
| 1115 | else |
| 1116 | tsk->min_flt++; |
Harvey Harrison | d729ab3 | 2008-01-30 13:33:23 +0100 | [diff] [blame] | 1117 | |
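	/*
	 * In vm86 mode (32-bit only) a fault on the DOS screen memory
	 * marks the page in the task's screen_bitmap; a no-op on 64-bit:
	 */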
Ingo Molnar | 8c938f9 | 2009-02-20 22:12:18 +0100 | [diff] [blame] | 1118 | check_v8086_mode(regs, address, tsk); |
| 1119 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1120 | up_read(&mm->mmap_sem); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1121 | } |
Andi Kleen | 9e43e1b | 2005-11-05 17:25:54 +0100 | [diff] [blame] | 1122 | |
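/*
 * pgd_lock protects pgd_list, the list of page-global directories that
 * vmalloc_sync_all() below walks:
 */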
Jan Beulich | 8c914cb | 2006-03-25 16:29:40 +0100 | [diff] [blame] | 1123 | DEFINE_SPINLOCK(pgd_lock); |
Christoph Lameter | 2bff738 | 2007-05-02 19:27:10 +0200 | [diff] [blame] | 1124 | LIST_HEAD(pgd_list); |
Jan Beulich | 8c914cb | 2006-03-25 16:29:40 +0100 | [diff] [blame] | 1125 | |
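/*
 * Propagate the kernel's vmalloc-area mappings from the reference page
 * tables (init_mm.pgd) into every pgd on pgd_list:
 */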
| 1126 | void vmalloc_sync_all(void) |
| 1127 | { |
Harvey Harrison | 1156e09 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1128 | unsigned long address; |
| 1129 | |
Jan Beulich | cc643d4 | 2008-08-29 12:53:45 +0100 | [diff] [blame] | 1130 | #ifdef CONFIG_X86_32 |
Harvey Harrison | 1156e09 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1131 | if (SHARED_KERNEL_PMD) |
| 1132 | return; |
| 1133 | |
Jan Beulich | cc643d4 | 2008-08-29 12:53:45 +0100 | [diff] [blame] | 1134 | for (address = VMALLOC_START & PMD_MASK; |
| 1135 | address >= TASK_SIZE && address < FIXADDR_TOP; |
| 1136 | address += PMD_SIZE) { |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1137 | |
Jeremy Fitzhardinge | 67350a5 | 2008-06-25 00:19:11 -0400 | [diff] [blame] | 1138 | unsigned long flags; |
| 1139 | struct page *page; |
Harvey Harrison | 1156e09 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1140 | |
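		/*
		 * Sync the pmd covering this vmalloc address into every pgd
		 * on the list; stop early if the reference page tables have
		 * no entry for this address:
		 */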
Jeremy Fitzhardinge | 67350a5 | 2008-06-25 00:19:11 -0400 | [diff] [blame] | 1141 | spin_lock_irqsave(&pgd_lock, flags); |
| 1142 | list_for_each_entry(page, &pgd_list, lru) { |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1143 | if (!vmalloc_sync_one(page_address(page), address)) |
Jeremy Fitzhardinge | 67350a5 | 2008-06-25 00:19:11 -0400 | [diff] [blame] | 1144 | break; |
Harvey Harrison | 1156e09 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1145 | } |
Jeremy Fitzhardinge | 67350a5 | 2008-06-25 00:19:11 -0400 | [diff] [blame] | 1146 | spin_unlock_irqrestore(&pgd_lock, flags); |
Harvey Harrison | 1156e09 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1147 | } |
| 1148 | #else /* CONFIG_X86_64 */ |
Jan Beulich | cc643d4 | 2008-08-29 12:53:45 +0100 | [diff] [blame] | 1149 | for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END; |
| 1150 | address += PGDIR_SIZE) { |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1151 | |
Jeremy Fitzhardinge | 67350a5 | 2008-06-25 00:19:11 -0400 | [diff] [blame] | 1152 | const pgd_t *pgd_ref = pgd_offset_k(address); |
| 1153 | unsigned long flags; |
| 1154 | struct page *page; |
Jan Beulich | 8c914cb | 2006-03-25 16:29:40 +0100 | [diff] [blame] | 1155 | |
Jeremy Fitzhardinge | 67350a5 | 2008-06-25 00:19:11 -0400 | [diff] [blame] | 1156 | if (pgd_none(*pgd_ref)) |
| 1157 | continue; |
Ingo Molnar | 2d4a716 | 2009-02-20 19:56:40 +0100 | [diff] [blame] | 1158 | |
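		/*
		 * Copy the reference pgd entry into every pgd that does not
		 * have it yet; an entry that already exists must match the
		 * reference entry:
		 */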
Jeremy Fitzhardinge | 67350a5 | 2008-06-25 00:19:11 -0400 | [diff] [blame] | 1159 | spin_lock_irqsave(&pgd_lock, flags); |
| 1160 | list_for_each_entry(page, &pgd_list, lru) { |
| 1161 | pgd_t *pgd; |
| 1162 | pgd = (pgd_t *)page_address(page) + pgd_index(address); |
| 1163 | if (pgd_none(*pgd)) |
| 1164 | set_pgd(pgd, *pgd_ref); |
| 1165 | else |
| 1166 | BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); |
Jan Beulich | 8c914cb | 2006-03-25 16:29:40 +0100 | [diff] [blame] | 1167 | } |
Jeremy Fitzhardinge | 67350a5 | 2008-06-25 00:19:11 -0400 | [diff] [blame] | 1168 | spin_unlock_irqrestore(&pgd_lock, flags); |
Jan Beulich | 8c914cb | 2006-03-25 16:29:40 +0100 | [diff] [blame] | 1169 | } |
Harvey Harrison | 1156e09 | 2008-01-30 13:34:10 +0100 | [diff] [blame] | 1170 | #endif |
Jan Beulich | 8c914cb | 2006-03-25 16:29:40 +0100 | [diff] [blame] | 1171 | } |