/*
 * Page fault handler for SH with an MMU.
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2003 - 2012 Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 * Copyright (C) 1995 Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/kdebug.h>
#include <asm/io_trapped.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

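/*
 * Hook for kprobes: if a kernel-mode fault occurs while a kprobe is
 * active (e.g. while single-stepping a probed instruction), give the
 * kprobe fault handler first crack at it. Returns 1 if kprobes
 * consumed the fault.
 */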
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;

	force_sig_info(si_signo, &info, tsk);
}

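/*
 * Example of the dump produced by show_pte() below, on a configuration
 * where the pud and pmd levels are folded (PTRS_PER_PUD == PTRS_PER_PMD
 * == 1); the values are purely illustrative:
 *
 *	pgd = 8c4e8000
 *	[29551000] *pgd=2c4f1067, *pte=2c4f0025
 */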
/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
static void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (mm)
		pgd = mm->pgd;
	else
		pgd = get_TTB();

	printk(KERN_ALERT "pgd = %p\n", pgd);
	pgd += pgd_index(addr);
	printk(KERN_ALERT "[%08lx] *pgd=%0*Lx", addr,
	       sizeof(*pgd) * 2, (u64)pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			printk("(bad)");
			break;
		}

		pud = pud_offset(pgd, addr);
		if (PTRS_PER_PUD != 1)
			printk(", *pud=%0*Lx", sizeof(*pud) * 2,
			       (u64)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			printk("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			printk(", *pmd=%0*Lx", sizeof(*pmd) * 2,
			       (u64)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			printk("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_kernel(pmd, addr);
		printk(", *pte=%0*Lx", sizeof(*pte) * 2, (u64)pte_val(*pte));
	} while (0);

	printk("\n");
}

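/*
 * Sync the kernel mapping for 'address' from the reference page table
 * (init_mm.pgd) into 'pgd'. Entries covering the vmalloc area are
 * created in init_mm only and propagated to other page tables lazily,
 * at fault time. Returns the pmd entry from the reference table, or
 * NULL if the fault cannot be resolved by synchronisation alone.
 */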
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else {
		/*
		 * The page tables are fully synchronised so there must
		 * be another reason for the fault. Return NULL here to
		 * signal that we have not taken care of the fault.
		 */
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
		return NULL;
	}

	return pmd_k;
}

/*
 * Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd_k;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc/module/P3 area: */
	if (!(address >= P3SEG && address < P3_ADDR_MAX))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch.
	 */
	pgd_k = get_TTB();
	pmd_k = vmalloc_sync_one(pgd_k, address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

static void
show_fault_oops(struct pt_regs *regs, unsigned long address)
{
	if (!oops_may_print())
		return;

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %08lx\n", address);
	printk(KERN_ALERT "PC:");
	printk_address(regs->pc, 1);

	show_pte(NULL, address);
}

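/*
 * Kernel-mode faults with no user context to blame: first see whether
 * an exception table fixup (e.g. for a faulting uaccess routine) or
 * trapped I/O emulation can recover the fault, and only then oops.
 */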
static noinline void
no_context(struct pt_regs *regs, unsigned long writeaccess,
	   unsigned long address)
{
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	if (handle_trapped_io(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	show_fault_oops(regs, address);

	die("Oops", regs, writeaccess);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}

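/*
 * Fault against an address with no usable mapping, with mmap_sem not
 * held: deliver SIGSEGV for user-mode accesses, otherwise hand off to
 * no_context() as a kernel fault.
 */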
static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long writeaccess,
		       unsigned long address, int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		force_sig_info_fault(SIGSEGV, si_code, address, tsk);

		return;
	}

	no_context(regs, writeaccess, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long writeaccess,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, writeaccess, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long writeaccess,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, writeaccess, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long writeaccess, unsigned long address)
{
	__bad_area(regs, writeaccess, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long writeaccess,
		      unsigned long address)
{
	__bad_area(regs, writeaccess, address, SEGV_ACCERR);
}

static void out_of_memory(void)
{
	/*
	 * We ran out of memory, call the OOM killer, and return to
	 * userspace (which will retry the fault, or kill us if we
	 * got oom-killed):
	 */
	up_read(&current->mm->mmap_sem);

	pagefault_out_of_memory();
}

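/*
 * Bus error (e.g. an access beyond the end of a mapped file): drop
 * mmap_sem and raise SIGBUS, or treat it as a kernel fault if there
 * is no user context to signal.
 */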
static void
do_sigbus(struct pt_regs *regs, unsigned long writeaccess, unsigned long address)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!user_mode(regs))
		no_context(regs, writeaccess, address);

	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}

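/*
 * Handle the error codes from handle_mm_fault(): a fault interrupted
 * by a fatal signal, VM_FAULT_OOM, or VM_FAULT_SIGBUS. Returns 1 if
 * the fault was fully handled here, 0 if the caller should carry on.
 */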
static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long writeaccess,
	       unsigned long address, unsigned int fault)
{
	/*
	 * The page fault was interrupted by SIGKILL; there is no
	 * reason to continue handling it.
	 */
	if (fatal_signal_pending(current)) {
		if (!(fault & VM_FAULT_RETRY))
			up_read(&current->mm->mmap_sem);
		if (!user_mode(regs))
			no_context(regs, writeaccess, address);
		return 1;
	}

	if (!(fault & VM_FAULT_ERROR))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!user_mode(regs)) {
			up_read(&current->mm->mmap_sem);
			no_context(regs, writeaccess, address);
			return 1;
		}

		out_of_memory();
	} else {
		if (fault & VM_FAULT_SIGBUS)
			do_sigbus(regs, writeaccess, address);
		else
			BUG();
	}

	return 1;
}

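/*
 * Check the access type against the vma protections: returns 1 if the
 * vma does not permit the attempted access, 0 if it does.
 */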
static inline int access_error(int write, struct vm_area_struct *vma)
{
	if (write) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}

static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE;
}

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long writeaccess,
					unsigned long address)
{
	unsigned long vec;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int fault;
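	/*
	 * FAULT_FLAG_ALLOW_RETRY lets handle_mm_fault() drop mmap_sem
	 * while waiting on page I/O and report VM_FAULT_RETRY instead;
	 * FAULT_FLAG_KILLABLE allows that wait to be interrupted by a
	 * fatal signal.
	 */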
	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
			      (writeaccess ? FAULT_FLAG_WRITE : 0));

	tsk = current;
	mm = tsk->mm;
	vec = lookup_exception_vector();

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (vmalloc_fault(address) >= 0)
			return;
		if (notify_page_fault(regs, vec))
			return;

		bad_area_nosemaphore(regs, writeaccess, address);
		return;
	}

	if (unlikely(notify_page_fault(regs, vec)))
		return;

	/* Only enable interrupts if they were on before the fault */
	if ((regs->sr & SR_IMASK) != SR_IMASK)
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault:
	 */
	if (unlikely(in_atomic() || !mm)) {
		bad_area_nosemaphore(regs, writeaccess, address);
		return;
	}

retry:
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, writeaccess, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, writeaccess, address);
		return;
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, writeaccess, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	if (unlikely(access_error(writeaccess, vma))) {
		bad_area_access_error(regs, writeaccess, address);
		return;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
		if (mm_fault_error(regs, writeaccess, address, fault))
			return;

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
}

/*
 * Called with interrupts disabled. This is the fast-path TLB miss
 * handler: it walks the page tables for 'address' and, if a suitable
 * present entry is found, loads it into the TLB. A non-zero return
 * means the miss could not be resolved here and the full page fault
 * path (do_page_fault()) must be taken.
 */
asmlinkage int __kprobes
handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
	       unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	/*
	 * We don't take page faults for P1, P2, and parts of P4; these
	 * are always mapped, whether it be due to legacy behaviour in
	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
	 */
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);
	} else {
		if (unlikely(address >= TASK_SIZE || !current->mm))
			return 1;

		pgd = pgd_offset(current->mm, address);
	}

	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return 1;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;
	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		return 1;
	if (unlikely(writeaccess && !pte_write(entry)))
		return 1;

	if (writeaccess)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);

	set_pte(pte, entry);

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
	/*
	 * SH-4 does not set MMUCR.RC to the corresponding TLB entry in
	 * the case of an initial page write exception, so we need to
	 * flush it in order to avoid potential TLB entry duplication.
	 */
	if (writeaccess == 2)
		local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

	update_mmu_cache(NULL, address, pte);

	return 0;
}