/*
 *  linux/arch/x86-64/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/kprobes.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/kdebug.h>
#include <asm-generic/sections.h>

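/*
 * Called around an oops: entering sets oops_in_progress so console locks can
 * be busted, leaving unblanks the screen and pokes klogd so the message
 * actually reaches the logs.
 */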
void bust_spinlocks(int yes)
{
	int loglevel_save = console_loglevel;
	if (yes) {
		oops_in_progress = 1;
	} else {
#ifdef CONFIG_VT
		unblank_screen();
#endif
		oops_in_progress = 0;
		/*
		 * OK, the message is on the console.  Now we call printk()
		 * without oops_in_progress set so that printk will give klogd
		 * a poke.  Hold onto your hats...
		 */
		console_loglevel = 15;		/* NMI oopser may have shut the console up */
		printk(" ");
		console_loglevel = loglevel_save;
	}
}

/* Sometimes the CPU reports invalid exceptions on prefetch.
   Check for that here and ignore it.
   Opcode checker based on code by Richard Brunner. */
static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
				unsigned long error_code)
{
	unsigned char *instr;
	int scan_more = 1;
	int prefetch = 0;
	unsigned char *max_instr;

	/* If it was an exec fault, ignore it. */
	if (error_code & (1<<4))
		return 0;

	instr = (unsigned char *)convert_rip_to_linear(current, regs);
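	/* x86 instructions are at most 15 bytes long. */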
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
		return 0;

	while (scan_more && instr < max_instr) {
		unsigned char opcode;
		unsigned char instr_hi;
		unsigned char instr_lo;

		if (__get_user(opcode, instr))
			break;

		instr_hi = opcode & 0xf0;
		instr_lo = opcode & 0x0f;
		instr++;

		switch (instr_hi) {
		case 0x20:
		case 0x30:
			/* Values 0x26,0x2E,0x36,0x3E are valid x86
			   prefixes.  In long mode, the CPU will signal
			   invalid opcode if some of these prefixes are
			   present so we will never get here anyway */
			scan_more = ((instr_lo & 7) == 0x6);
			break;

		case 0x40:
			/* In AMD64 long mode, 0x40 to 0x4F are valid REX prefixes.
			   Need to figure out under what instruction mode the
			   instruction was issued ... */
			/* Could check the LDT for lm, but for now it's good
			   enough to assume that long mode only uses well known
			   segments or kernel. */
			scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
			break;

		case 0x60:
			/* 0x64 thru 0x67 are valid prefixes in all modes. */
			scan_more = (instr_lo & 0xC) == 0x4;
			break;
		case 0xF0:
			/* 0xF0, 0xF2, and 0xF3 are valid prefixes in all modes. */
			scan_more = !instr_lo || (instr_lo>>1) == 1;
			break;
		case 0x00:
			/* Prefetch instruction is 0x0F0D or 0x0F18 */
			scan_more = 0;
			if (__get_user(opcode, instr))
				break;
			prefetch = (instr_lo == 0xF) &&
				(opcode == 0x0D || opcode == 0x18);
			break;
		default:
			scan_more = 0;
			break;
		}
	}
	return prefetch;
}

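/* Probe whether the page-table entry at @p can be read without faulting. */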
static int bad_address(void *p)
{
	unsigned long dummy;
	return __get_user(dummy, (unsigned long *)p);
}

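/* Walk the page tables for @address starting from CR3 and print each level. */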
void dump_pagetable(unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	asm("movq %%cr3,%0" : "=r" (pgd));

	pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
	pgd += pgd_index(address);
	if (bad_address(pgd)) goto bad;
	printk("PGD %lx ", pgd_val(*pgd));
	if (!pgd_present(*pgd)) goto ret;

	pud = __pud_offset_k((pud_t *)pgd_page(*pgd), address);
	if (bad_address(pud)) goto bad;
	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud)) goto ret;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd)) goto bad;
	printk("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd)) goto ret;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte)) goto bad;
	printk("PTE %lx", pte_val(*pte));
ret:
	printk("\n");
	return;
bad:
	printk("BAD\n");
}

static const char errata93_warning[] =
KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
KERN_ERR "******* Please consider a BIOS update.\n"
KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";

/* Workaround for K8 erratum #93 & buggy BIOS.
   BIOS SMM functions are required to use a specific workaround
   to avoid corruption of the 64bit RIP register on C stepping K8.
   A lot of BIOSes that didn't get tested properly miss this.
   The OS sees this as a page fault with the upper 32 bits of RIP cleared.
   Try to work around it here.
   Note that we only handle faults in the kernel here. */

static int is_errata93(struct pt_regs *regs, unsigned long address)
{
	static int warned;
	if (address != regs->rip)
		return 0;
	if ((address >> 32) != 0)
		return 0;
	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		if (!warned) {
			printk(errata93_warning);
			warned = 1;
		}
		regs->rip = address;
		return 1;
	}
	return 0;
}

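/* Decide whether a signal would go unhandled: init never handles them, a
   ptracer counts as a handler, and otherwise the signal is unhandled when
   its disposition is still SIG_IGN or SIG_DFL. */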
int unhandled_signal(struct task_struct *tsk, int sig)
{
	if (tsk->pid == 1)
		return 1;
	if (tsk->ptrace & PT_PTRACED)
		return 0;
	return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
		(tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
}

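/* A reserved bit was set in a page-table entry (error_code bit 3): report the
   corrupted page table and kill the offending task. */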
static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
				 unsigned long error_code)
{
	unsigned long flags = oops_begin();
	struct task_struct *tsk;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       current->comm, address);
	dump_pagetable(address);
	tsk = current;
	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;
	__die("Bad pagetable", regs, error_code);
	oops_end(flags);
	do_exit(SIGKILL);
}

/*
 * Handle a fault on the vmalloc or module mapping area
 *
 * This assumes no large pages in there.
 */
static int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Copy kernel mappings over when needed. This can also
	   happen as the result of a race in a page table update;
	   in the latter case just flush. */

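	/* Use the current mm's pgd if there is one; kernel threads fall
	   back to init_mm. */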
	pgd = pgd_offset(current->mm ?: &init_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;
	if (pgd_none(*pgd))
		set_pgd(pgd, *pgd_ref);

	/* Below here mismatches are bugs because these lower tables
	   are shared */

	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;
	if (pud_none(*pud) || pud_page(*pud) != pud_page(*pud_ref))
		BUG();
	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;
	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();
	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;
	pte = pte_offset_kernel(pmd, address);
	/* Don't use pte_page here, because the mappings can point
	   outside mem_map, and the NUMA hash lookup cannot handle
	   that. */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();
	__flush_tlb_all();
	return 0;
}

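/* Debug tunables: page_fault_trace logs every fault (see the pagefaulttrace
   boot option below), exception_trace logs unhandled user segfaults. */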
int page_fault_trace = 0;
int exception_trace = 1;

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * error_code:
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *	bit 2 == 0 means kernel, 1 means user-mode
 *	bit 3 == 1 means use of reserved bit detected
 *	bit 4 == 1 means fault was an instruction fetch
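 *
 * For example, error_code == 6 (write | user, page not present) is a
 * user-mode write to a not-yet-mapped page.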
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long error_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long address;
	const struct exception_table_entry *fixup;
	int write;
	unsigned long flags;
	siginfo_t info;

	/* Get the faulting address from CR2. */
	__asm__("movq %%cr2,%0":"=r" (address));
	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
					SIGSEGV) == NOTIFY_STOP)
		return;

	if (likely(regs->eflags & X86_EFLAGS_IF))
		local_irq_enable();

	if (unlikely(page_fault_trace))
		printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
		       regs->rip, regs->rsp, regs->cs, regs->ss, address, error_code);

	tsk = current;
	mm = tsk->mm;
	info.si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 9) == 0.
	 */
	if (unlikely(address >= TASK_SIZE64)) {
		if (!(error_code & 0xd) &&
		      ((address >= VMALLOC_START && address < VMALLOC_END) ||
		       (address >= MODULES_VADDR && address < MODULES_END))) {
			if (vmalloc_fault(address) < 0)
				goto bad_area_nosemaphore;
			return;
		}
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		goto bad_area_nosemaphore;
	}

	if (unlikely(error_code & (1 << 3)))
		pgtable_bad(address, regs, error_code);

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (unlikely(in_atomic() || !mm))
		goto bad_area_nosemaphore;

 again:
	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well-defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space; if we cannot, we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((error_code & 4) == 0 &&
		    !search_exception_tables(regs->rip))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (error_code & 4) {
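		/* The x86-64 ABI allows user code to use up to 128 bytes
		   below %rsp (the red zone) without moving the stack
		   pointer, so accesses in that window are legitimate. */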
		// XXX: align red zone size with ABI
		if (address + 128 < regs->rsp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	info.si_code = SEGV_ACCERR;
	write = 0;
	switch (error_code & 3) {
		default:	/* 3: write, present */
			/* fall through */
		case 2:		/* write, not present */
			if (!(vma->vm_flags & VM_WRITE))
				goto bad_area;
			write++;
			break;
		case 1:		/* read, present */
			goto bad_area;
		case 0:		/* read, not present */
			if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
				goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	switch (handle_mm_fault(mm, vma, address, write)) {
	case VM_FAULT_MINOR:
		tsk->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		tsk->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto do_sigbus;
	default:
		goto out_of_memory;
	}

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (error_code & 4) {
		if (is_prefetch(regs, address, error_code))
			return;

		/* Work around K8 erratum #100: K8 in compat mode
		   occasionally jumps to illegal addresses >4GB.  We
		   catch this here in the page fault handler because
		   these addresses are not reachable.  Just detect this
		   case and return.  Any code segment in the LDT is
		   compatibility mode. */
		if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
		    (address >> 32))
			return;

		if (exception_trace && unhandled_signal(tsk, SIGSEGV)) {
			printk(
		       "%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n",
					tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
					tsk->comm, tsk->pid, address, regs->rip,
					regs->rsp, error_code);
		}

		tsk->thread.cr2 = address;
		/* Kernel addresses are always protection faults */
		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no = 14;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void __user *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:

	/* Are we prepared to handle this kernel fault?  */
	fixup = search_exception_tables(regs->rip);
	if (fixup) {
		regs->rip = fixup->fixup;
		return;
	}

	/*
	 * Hall of shame of CPU/BIOS bugs.
	 */

	if (is_prefetch(regs, address, error_code))
		return;

	if (is_errata93(regs, address))
		return;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

	flags = oops_begin();

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at %016lx RIP: \n" KERN_ALERT, address);
	printk_address(regs->rip);
	printk("\n");
	dump_pagetable(address);
	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;
	__die("Oops", regs, error_code);
	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_EMERG "CR2: %016lx\n", address);
	oops_end(flags);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
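	/* Never OOM-kill init; back off and retry the fault instead. */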
	if (current->pid == 1) {
		yield();
		goto again;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (error_code & 4)
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!(error_code & 4))
		goto no_context;

	tsk->thread.cr2 = address;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 14;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGBUS, &info, tsk);
	return;
}

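/* The "pagefaulttrace" boot option turns on logging of every page fault. */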
static int __init enable_pagefaulttrace(char *str)
{
	page_fault_trace = 1;
	return 0;
}
__setup("pagefaulttrace", enable_pagefaulttrace);