/*
 * linux/arch/arm/mm/fault.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Modifications for ARM processor (c) 1995-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>

#include <asm/exception.h>
#include <asm/pgtable.h>
#include <asm/system_misc.h>
#include <asm/system_info.h>
#include <asm/tlbflush.h>

#include "fault.h"

#ifdef CONFIG_MMU

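/*
 * If a kprobe is active, give its fault handler first refusal on a
 * kernel-mode fault; returns non-zero if the kprobe handled it.
 */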
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
	int ret = 0;

	if (!user_mode(regs)) {
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, fsr))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
	return 0;
}
#endif

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	printk(KERN_ALERT "pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	printk(KERN_ALERT "[%08lx] *pgd=%08llx",
			addr, (long long)pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			printk("(bad)");
			break;
		}

		pud = pud_offset(pgd, addr);
		if (PTRS_PER_PUD != 1)
			printk(", *pud=%08llx", (long long)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			printk("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			printk(", *pmd=%08llx", (long long)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			printk("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_map(pmd, addr);
		printk(", *pte=%08llx", (long long)pte_val(*pte));
#ifndef CONFIG_ARM_LPAE
		printk(", *ppte=%08llx",
		       (long long)pte_val(pte[PTE_HWTABLE_PTRS]));
#endif
		pte_unmap(pte);
	} while(0);

	printk("\n");
}
#else					/* CONFIG_MMU */
void show_pte(struct mm_struct *mm, unsigned long addr)
{ }
#endif					/* CONFIG_MMU */

/*
 * Oops.  The kernel tried to access some page that wasn't present.
 */
static void
__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		  struct pt_regs *regs)
{
	/*
	 * Are we prepared to handle this kernel fault?
	 */
	if (fixup_exception(regs))
		return;

	/*
	 * No handler, we'll have to terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	printk(KERN_ALERT
		"Unable to handle kernel %s at virtual address %08lx\n",
		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
		"paging request", addr);

	show_pte(mm, addr);
	die("Oops", regs, fsr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}

/*
 * Something tried to access memory that isn't in our memory map..
 * User mode accesses just cause a SIGSEGV
 */
static void
__do_user_fault(struct task_struct *tsk, unsigned long addr,
		unsigned int fsr, unsigned int sig, int code,
		struct pt_regs *regs)
{
	struct siginfo si;

#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_SEGV) {
		printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
		       tsk->comm, sig, addr, fsr);
		show_pte(tsk->mm, addr);
		show_regs(regs);
	}
#endif

	tsk->thread.address = addr;
	tsk->thread.error_code = fsr;
	tsk->thread.trap_no = 14;
	si.si_signo = sig;
	si.si_errno = 0;
	si.si_code = code;
	si.si_addr = (void __user *)addr;
	force_sig_info(sig, &si, tsk);
}

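/*
 * Deliver a fault for an access that hit no valid mapping or failed its
 * permission check: user mode gets a SIGSEGV, kernel mode is sent down
 * the exception-fixup/oops path.
 */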
void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->active_mm;

	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */
	if (user_mode(regs))
		__do_user_fault(tsk, addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
	else
		__do_kernel_fault(mm, addr, fsr, regs);
}

#ifdef CONFIG_MMU
#define VM_FAULT_BADMAP		0x010000
#define VM_FAULT_BADACCESS	0x020000

/*
 * Check that the permissions on the VMA allow for the fault which occurred.
 * If we encountered a write fault, we must have write permission, otherwise
 * we allow any permission.
 */
static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
{
	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;

	if (fsr & FSR_WRITE)
		mask = VM_WRITE;
	if (fsr & FSR_LNX_PF)
		mask = VM_EXEC;

	return vma->vm_flags & mask ? false : true;
}

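/*
 * Look up the VMA covering 'addr' and, if the access type is permitted,
 * hand the fault to the generic handle_mm_fault().  Returns a VM_FAULT_*
 * code, or VM_FAULT_BADMAP/VM_FAULT_BADACCESS for an unmapped or
 * disallowed access.
 */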
static int __kprobes
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		unsigned int flags, struct task_struct *tsk)
{
	struct vm_area_struct *vma;
	int fault;

	vma = find_vma(mm, addr);
	fault = VM_FAULT_BADMAP;
	if (unlikely(!vma))
		goto out;
	if (unlikely(vma->vm_start > addr))
		goto check_stack;

	/*
	 * Ok, we have a good vm_area for this
	 * memory access, so we can handle it.
	 */
good_area:
	if (access_error(fsr, vma)) {
		fault = VM_FAULT_BADACCESS;
		goto out;
	}

	return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);

check_stack:
	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
		goto good_area;
out:
	return fault;
}

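/*
 * Top-level handler for a data abort that looks like a page fault: take
 * mmap_sem, call __do_page_fault() (retrying once if the core VM asks us
 * to), and turn the result into a signal or a kernel fault as needed.
 */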
static int __kprobes
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	int fault, sig, code;
	int write = fsr & FSR_WRITE;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
				(write ? FAULT_FLAG_WRITE : 0);

	if (notify_page_fault(regs, fsr))
		return 0;

	tsk = current;
	mm = tsk->mm;

	/* Enable interrupts if they were enabled in the parent context. */
	if (interrupts_enabled(regs))
		local_irq_enable();

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	/*
	 * As per x86, we may deadlock here.  However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
			goto no_context;
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case, we'll have missed the might_sleep() from
		 * down_read()
		 */
		might_sleep();
#ifdef CONFIG_DEBUG_VM
		if (!user_mode(regs) &&
		    !search_exception_tables(regs->ARM_pc))
			goto no_context;
#endif
	}

	fault = __do_page_fault(mm, addr, fsr, flags, tsk);

	/* If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_sem because
	 * it would already be released in __lock_page_or_retry in
	 * mm/filemap.c. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return 0;

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
					regs, addr);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
					regs, addr);
		}
		if (fault & VM_FAULT_RETRY) {
			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);

	/*
	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
	 */
	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed)
		 */
		pagefault_out_of_memory();
		return 0;
	}

	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_SIGBUS) {
		/*
		 * We had some memory, but were unable to
		 * successfully fix up this page fault.
		 */
		sig = SIGBUS;
		code = BUS_ADRERR;
	} else {
		/*
		 * Something tried to access memory that
		 * isn't in our memory map..
		 */
		sig = SIGSEGV;
		code = fault == VM_FAULT_BADACCESS ?
			SEGV_ACCERR : SEGV_MAPERR;
	}

	__do_user_fault(tsk, addr, fsr, sig, code, regs);
	return 0;

no_context:
	__do_kernel_fault(mm, addr, fsr, regs);
	return 0;
}
#else					/* CONFIG_MMU */
static int
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	return 0;
}
#endif					/* CONFIG_MMU */

/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain
 * a valid entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are
 * probably faulting in the vmalloc() area.
 *
 * If the init_task's first level page tables contain the relevant
 * entry, we copy it to this task.  If not, we send the process
 * a signal, fixup the exception, or oops the kernel.
 *
 * NOTE! We MUST NOT take any locks for this case. We may be in an
 * interrupt or a critical region, and should only copy the information
 * from the master page table, nothing more.
 */
#ifdef CONFIG_MMU
static int __kprobes
do_translation_fault(unsigned long addr, unsigned int fsr,
		     struct pt_regs *regs)
{
	unsigned int index;
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	if (addr < TASK_SIZE)
		return do_page_fault(addr, fsr, regs);

	if (user_mode(regs))
		goto bad_area;

	index = pgd_index(addr);

	/*
	 * FIXME: CP15 C1 is write only on ARMv3 architectures.
	 */
	pgd = cpu_get_pgd() + index;
	pgd_k = init_mm.pgd + index;

	if (pgd_none(*pgd_k))
		goto bad_area;
	if (!pgd_present(*pgd))
		set_pgd(pgd, *pgd_k);

	pud = pud_offset(pgd, addr);
	pud_k = pud_offset(pgd_k, addr);

	if (pud_none(*pud_k))
		goto bad_area;
	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);

#ifdef CONFIG_ARM_LPAE
	/*
	 * Only one hardware entry per PMD with LPAE.
	 */
	index = 0;
#else
	/*
	 * On ARM one Linux PGD entry contains two hardware entries (see page
	 * tables layout in pgtable.h). We normally guarantee that we always
	 * fill both L1 entries. But create_mapping() doesn't follow the rule.
	 * It can create individual L1 entries, so here we have to call
	 * pmd_none() on the entry that actually corresponds to the address,
	 * not just on the first entry of the pair.
	 */
	index = (addr >> SECTION_SHIFT) & 1;
#endif
	if (pmd_none(pmd_k[index]))
		goto bad_area;

	copy_pmd(pmd, pmd_k);
	return 0;

bad_area:
	do_bad_area(addr, fsr, regs);
	return 0;
}
#else					/* CONFIG_MMU */
static int
do_translation_fault(unsigned long addr, unsigned int fsr,
		     struct pt_regs *regs)
{
	return 0;
}
#endif					/* CONFIG_MMU */

/*
 * Some section permission faults need to be handled gracefully.
 * They can happen due to a __{get,put}_user during an oops.
 */
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	do_bad_area(addr, fsr, regs);
	return 0;
}

/*
 * This abort handler always returns "fault".
 */
static int
do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	return 1;
}

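/*
 * One entry per fault status value: the handler to call, and the signal
 * to raise if the handler cannot resolve the fault.
 */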
struct fsr_info {
	int	(*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
	int	sig;
	int	code;
	const char *name;
};

/* FSR definition */
#ifdef CONFIG_ARM_LPAE
#include "fsr-3level.c"
#else
#include "fsr-2level.c"
#endif

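/*
 * Hook in a handler for a given data abort fault status code.  Used by
 * CPU and platform support code to override the default fsr_info entry.
 */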
void __init
hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
		int sig, int code, const char *name)
{
	if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
		BUG();

	fsr_info[nr].fn = fn;
	fsr_info[nr].sig = sig;
	fsr_info[nr].code = code;
	fsr_info[nr].name = name;
}

/*
 * Dispatch a data abort to the relevant handler.
 */
asmlinkage void __exception
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
	struct siginfo info;

	if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
		return;

	printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
		inf->name, fsr, addr);

	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code = inf->code;
	info.si_addr = (void __user *)addr;
	arm_notify_die("", regs, &info, fsr, 0);
}

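/*
 * Hook in a handler for a given prefetch abort (instruction fault) status
 * code, the IFSR counterpart of hook_fault_code().
 */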
void __init
hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
		 int sig, int code, const char *name)
{
	if (nr < 0 || nr >= ARRAY_SIZE(ifsr_info))
		BUG();

	ifsr_info[nr].fn = fn;
	ifsr_info[nr].sig = sig;
	ifsr_info[nr].code = code;
	ifsr_info[nr].name = name;
}

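/*
 * Dispatch a prefetch abort to the relevant handler.
 */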
asmlinkage void __exception
do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
{
	const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
	struct siginfo info;

	if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
		return;

	printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
		inf->name, ifsr, addr);

	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code = inf->code;
	info.si_addr = (void __user *)addr;
	arm_notify_die("", regs, &info, ifsr, 0);
}

#ifndef CONFIG_ARM_LPAE
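/*
 * Override the default handling of fault codes whose meaning depends on
 * the CPU architecture version, which is only known at runtime.
 */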
static int __init exceptions_init(void)
{
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		hook_fault_code(4, do_translation_fault, SIGSEGV, SEGV_MAPERR,
				"I-cache maintenance fault");
	}

	if (cpu_architecture() >= CPU_ARCH_ARMv7) {
		/*
		 * TODO: Access flag faults introduced in ARMv6K.
		 * Runtime check for 'K' extension is needed
		 */
		hook_fault_code(3, do_bad, SIGSEGV, SEGV_MAPERR,
				"section access flag fault");
		hook_fault_code(6, do_bad, SIGSEGV, SEGV_MAPERR,
				"section access flag fault");
	}

	return 0;
}

arch_initcall(exceptions_init);
#endif