/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/uaccess.h>

extern int die(char *, struct pt_regs *, long);

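/*
 * If a fault is taken in kernel mode while a kprobe is active, give the
 * kprobe fault handler a chance to fix it up before treating it as an
 * ordinary page fault.  User-mode faults are never handed to kprobes.
 */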
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

	if (!user_mode(regs)) {
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	return 0;
}
#endif

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}

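/*
 * Local copies of the bit positions of VM_READ/VM_WRITE/VM_EXEC; the
 * build-time check inside ia64_do_page_fault() verifies they stay in
 * sync with <linux/mm.h>.
 */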
# define VM_READ_BIT	0
# define VM_WRITE_BIT	1
# define VM_EXEC_BIT	2

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	struct siginfo si;
	unsigned long mask;
	int fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

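	/*
	 * Translate the fault's ISR bits into the access rights the vma
	 * must grant: the X bit requires VM_EXEC, the W bit VM_WRITE.
	 */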
	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

	/* mmap_sem is performance critical. */
	prefetchw(&mm->mmap_sem);

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	/*
	 * If the fault is in region 5 and we are in the kernel, we may already
	 * hold the mmap_sem (the pfn_valid macro is called during mmap).  There
	 * is no vma for region 5 addresses anyway, so skip taking the semaphore
	 * and go directly to the exception handling code.
	 */

	if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
		goto bad_area_no_up;
#endif

	/*
	 * Handle kprobes on user-space access instructions.
	 */
	if (notify_page_fault(regs, TRAP_BRKPT))
		return;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (mask & VM_WRITE)
		flags |= FAULT_FLAG_WRITE;
retry:
	down_read(&mm->mmap_sem);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma && !prev_vma)
		goto bad_area;

	/*
	 * find_vma_prev() returns the vma such that address < vma->vm_end,
	 * or NULL if there is none.
	 *
	 * We may find no vma and yet the fault may still be valid: the last
	 * vm area could be the register backing store that needs to expand
	 * upwards; in that case vma is NULL but prev_vma is non-NULL.
	 */
	if (( !vma && prev_vma ) || (address < vma->vm_start) )
		goto check_expansion;

  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif

	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
		goto bad_area;

	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		if (fault & VM_FAULT_OOM) {
			goto out_of_memory;
		} else if (fault & VM_FAULT_SIGBUS) {
			signal = SIGBUS;
			goto bad_area;
		}
		BUG();
	}

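	/*
	 * Fault accounting happens only on the first attempt: once we
	 * retry, FAULT_FLAG_ALLOW_RETRY is cleared and FAULT_FLAG_TRIED is
	 * set, so handle_mm_fault() will block rather than return
	 * VM_FAULT_RETRY a second time.
	 */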
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

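	/*
	 * No vma covers the address.  It may still be a legitimate stack
	 * access: either the memory stack growing downwards into the gap
	 * below `vma', or the ia64 register backing store growing upwards
	 * past the end of `prev_vma'.
	 */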
  check_expansion:
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!vma)
			goto bad_area;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		/*
		 * Since the register backing store is accessed sequentially,
		 * we disallow growing it by more than a page at a time.
		 */
		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
			goto bad_area;
		if (expand_upwards(vma, address))
			goto bad_area;
	}
	goto good_area;

  bad_area:
	up_read(&mm->mmap_sem);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s; lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
	if (user_mode(regs)) {
		si.si_signo = signal;
		si.si_errno = 0;
		si.si_code = code;
		si.si_addr = (void __user *) address;
		si.si_isr = isr;
		si.si_flags = __ISR_VALID;
		force_sig_info(signal, &si, current);
		return;
	}

  no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s; lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	/*
	 * Since we have no vmas for region 5, we might get here even if the address
	 * is valid, due to the VHPT walker inserting a non-present translation that
	 * becomes stale.  If that happens, the non-present fault handler already
	 * purged the stale translation, which fixed the problem.  So, we check to
	 * see if the translation is valid, and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;

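	/*
	 * If the faulting instruction has an entry in the exception table
	 * (e.g. in the user-copy routines), branch to its fixup handler
	 * instead of oopsing.
	 */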
	if (ia64_done_with_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to terminate things
	 * with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	if (die("Oops", regs, isr))
		regs = NULL;
	bust_spinlocks(0);
	if (regs)
		do_exit(SIGKILL);
	return;

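	/*
	 * For user-mode faults, defer to pagefault_out_of_memory(), which
	 * lets the OOM killer pick a victim; kernel-mode faults fall back
	 * to the no_context path above.
	 */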
  out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
}