/*
 * Page fault handler for SH with an MMU.
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2003 - 2009  Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 *  Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <asm/io_trapped.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

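/*
 * A rough sketch of the conventional 32-bit SH address map assumed
 * throughout this file (see the relevant CPU manual for the
 * authoritative layout):
 *
 *   U0/P0: 0x00000000 - 0x7fffffff  user space, TLB mapped
 *   P1:    0x80000000 - 0x9fffffff  kernel, identity mapped, cacheable
 *   P2:    0xa0000000 - 0xbfffffff  kernel, identity mapped, uncached
 *   P3:    0xc0000000 - 0xdfffffff  kernel, TLB mapped (vmalloc area)
 *   P4:    0xe0000000 - 0xffffffff  control registers, store queues
 *
 * Only the TLB-mapped regions can take a page fault; the rest are
 * either identity mapped or handled by the PMB in 32-bit mode.
 */
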
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

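/*
 * Synchronize one entry's worth of kernel mappings: copy the pgd/pud/pmd
 * covering 'address' from the reference page table (init_mm.pgd) into the
 * supplied page table, and hand back the matching kernel pmd. Returns
 * NULL when the fault cannot be resolved by synchronization alone.
 */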
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else {
		/*
		 * The page tables are fully synchronised so there must
		 * be another reason for the fault. Return NULL here to
		 * signal that we have not taken care of the fault.
		 */
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
		return NULL;
	}

	return pmd_k;
}

/*
 * Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd_k;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc/module/P3 area: */
	if (!(address >= VMALLOC_START && address < P3_ADDR_MAX))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_k = get_TTB();
	pmd_k = vmalloc_sync_one(pgd_k, address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

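/*
 * User mappings only ever live below TASK_SIZE; anything at or above
 * it belongs to the kernel.
 */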
static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE;
}

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long writeaccess,
					unsigned long address)
{
	unsigned long vec;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int si_code;
	int fault;
	siginfo_t info;

	tsk = current;
	mm = tsk->mm;
	si_code = SEGV_MAPERR;
	vec = lookup_exception_vector();

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (vmalloc_fault(address) >= 0)
			return;
		if (notify_page_fault(regs, vec))
			return;

		goto bad_area_nosemaphore;
	}

	if (unlikely(notify_page_fault(regs, vec)))
		return;

	/* Only enable interrupts if they were on before the fault */
	if ((regs->sr & SR_IMASK) != SR_IMASK)
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in an atomic region then we must not take the fault:
	 */
	if (in_atomic() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	si_code = SEGV_ACCERR;
	if (writeaccess) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR) {
		tsk->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
			      regs, address);
	} else {
		tsk->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
			      regs, address);
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	if (user_mode(regs)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = si_code;
		info.si_addr = (void *) address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

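	/*
	 * Check whether this was an access to a trapped I/O window that
	 * the SH I/O trapping support (asm/io_trapped.h) knows how to
	 * emulate, rather than a genuinely bad kernel access.
	 */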
	if (handle_trapped_io(regs, address))
		return;
/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 *
 */

	bust_spinlocks(1);

	if (oops_may_print()) {
		unsigned long page;

		if (address < PAGE_SIZE)
			printk(KERN_ALERT "Unable to handle kernel NULL "
			       "pointer dereference");
		else
			printk(KERN_ALERT "Unable to handle kernel paging "
			       "request");
		printk(" at virtual address %08lx\n", address);
		printk(KERN_ALERT "pc = %08lx\n", regs->pc);
		page = (unsigned long)get_TTB();
		if (page) {
			page = ((__typeof__(page) *)page)[address >> PGDIR_SHIFT];
			printk(KERN_ALERT "*pde = %08lx\n", page);
			if (page & _PAGE_PRESENT) {
				page &= PAGE_MASK;
				address &= 0x003ff000;
				page = ((__typeof__(page) *)
						__va(page))[address >>
							    PAGE_SHIFT];
				printk(KERN_ALERT "*pte = %08lx\n", page);
			}
		}
	}

	die("Oops", regs, writeaccess);
	bust_spinlocks(0);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}

/*
 * Software TLB miss fast path: walk the current page tables for the
 * faulting address and load the PTE directly, returning non-zero to
 * punt the fault to the full do_page_fault() slow path when no usable
 * translation exists.
 *
 * Called with interrupts disabled.
 */
asmlinkage int __kprobes
handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
	       unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	/*
	 * We don't take page faults for P1, P2, and parts of P4, these
	 * are always mapped, whether it be due to legacy behaviour in
	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
	 */
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);
	} else {
		if (unlikely(address >= TASK_SIZE || !current->mm))
			return 1;

		pgd = pgd_offset(current->mm, address);
	}

	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return 1;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;
	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		return 1;
	if (unlikely(writeaccess && !pte_write(entry)))
		return 1;

	if (writeaccess)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);

	set_pte(pte, entry);

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
	/*
	 * SH-4 does not set MMUCR.RC to the corresponding TLB entry in
	 * the case of an initial page write exception, so we need to
	 * flush it in order to avoid potential TLB entry duplication.
	 */
	if (writeaccess == 2)
		local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

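	/*
	 * No vma is available down here, so NULL is passed; the SH
	 * implementation of update_mmu_cache() is expected to cope
	 * without one when refilling the TLB.
	 */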
	update_mmu_cache(NULL, address, pte);

	return 0;
}