/*
 * Page fault handler for SH with an MMU.
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2003 - 2007  Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 *  Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/kgdb.h>

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long writeaccess,
					unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int si_code;
	int fault;
	siginfo_t info;

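	/* We arrive with IRQs off; tell lockdep before re-enabling them. */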
	trace_hardirqs_on();
	local_irq_enable();

#ifdef CONFIG_SH_KGDB
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif

	tsk = current;
	mm = tsk->mm;
	si_code = SEGV_MAPERR;

	if (unlikely(address >= TASK_SIZE)) {
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;

		pgd = get_TTB() + offset;
		pgd_k = swapper_pg_dir + offset;

		/* This will never happen with the folded page table. */
		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			set_pgd(pgd, *pgd_k);
			return;
		}

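		/*
		 * Mirror the kernel's pud/pmd entries one level at a time;
		 * the leaf PTEs already live in the kernel reference tables,
		 * so copying the upper-level entries is all that is needed.
		 */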
		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (pud_present(*pud) || !pud_present(*pud_k))
			goto bad_area_nosemaphore;
		set_pud(pud, *pud_k);

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
		set_pmd(pmd, *pmd_k);

		return;
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	if (writeaccess) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
	fault = handle_mm_fault(mm, vma, address, writeaccess);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
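	/* Account the fault: major if it needed I/O, minor otherwise. */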
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	if (user_mode(regs)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = si_code;
		info.si_addr = (void *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */
	bust_spinlocks(1);

	if (oops_may_print()) {
		__typeof__(pte_val(__pte(0))) page;

		if (address < PAGE_SIZE)
			printk(KERN_ALERT "Unable to handle kernel NULL "
			       "pointer dereference");
		else
			printk(KERN_ALERT "Unable to handle kernel paging "
			       "request");
		printk(" at virtual address %08lx\n", address);
		printk(KERN_ALERT "pc = %08lx\n", regs->pc);
		page = (unsigned long)get_TTB();
		if (page) {
			page = ((__typeof__(page) *)__va(page))[address >>
								PGDIR_SHIFT];
			printk(KERN_ALERT "*pde = %08lx\n", page);
			if (page & _PAGE_PRESENT) {
				page &= PAGE_MASK;
				address &= 0x003ff000;
				page = ((__typeof__(page) *)
					__va(page))[address >> PAGE_SHIFT];
				printk(KERN_ALERT "*pte = %08lx\n", page);
			}
		}
	}

	die("Oops", regs, writeaccess);
	bust_spinlocks(0);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
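	/* Never OOM-kill init; just yield and retry the fault. */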
	if (is_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}

#ifdef CONFIG_SH_STORE_QUEUES
/*
 * This is a special case for the SH-4 store queues, as pages for this
 * space still need to be faulted in before it's possible to flush the
 * store queue cache for writeout to the remapped region.
 */
#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
#else
#define P3_ADDR_MAX		P4SEG
#endif

/*
 * Called with interrupts disabled.
 */
asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
					 unsigned long writeaccess,
					 unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	struct mm_struct *mm = current->mm;
	spinlock_t *ptl = NULL;
	int ret = 1;
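	/*
	 * A zero return means this fast path handled the fault; non-zero
	 * sends it on to the full do_page_fault() slow path above.
	 */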

#ifdef CONFIG_SH_KGDB
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif

	/*
	 * We don't take page faults for P1, P2, and parts of P4, these
	 * are always mapped, whether it be due to legacy behaviour in
	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
	 */
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);
		mm = NULL;
	} else {
		if (unlikely(address >= TASK_SIZE || !mm))
			return 1;

		pgd = pgd_offset(mm, address);
	}

	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return 1;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;

	if (mm)
		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	else
		pte = pte_offset_kernel(pmd, address);

	entry = *pte;
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		goto unlock;
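	/* Leave write-protect faults (e.g. copy-on-write) to the slow path. */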
	if (unlikely(writeaccess && !pte_write(entry)))
		goto unlock;

	if (writeaccess)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);

#ifdef CONFIG_CPU_SH4
	/*
	 * The ITLB is not affected by the "ldtlb" instruction,
	 * so we need to flush the entry ourselves.
	 */
	local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

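	/* Write the updated PTE back and preload it into the TLB. */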
	set_pte(pte, entry);
	update_mmu_cache(NULL, address, entry);
	ret = 0;
unlock:
	if (mm)
		pte_unmap_unlock(pte, ptl);
	return ret;
}