/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>

extern int die(char *, struct pt_regs *, long);

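/*
 * Give kprobes a chance to handle a fault taken in kernel mode on an
 * instruction it has probed.  Returns non-zero when the registered
 * kprobe fault handler dealt with the fault and no further processing
 * is needed.
 */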
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

	if (!user_mode(regs)) {
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	return 0;
}
#endif

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

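	/*
	 * Walk the page table top down (pgd -> pud -> pmd -> pte); a
	 * missing or bad entry at any level means no translation exists,
	 * so the page cannot be present.
	 */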
	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	struct siginfo si;
	unsigned long mask;
	int fault;

	/* mmap_sem is performance critical.... */
	prefetchw(&mm->mmap_sem);

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	/*
	 * If fault is in region 5 and we are in the kernel, we may already
	 * have the mmap_sem (pfn_valid macro is called during mmap). There
	 * is no vma for region 5 addresses anyway, so skip getting the
	 * semaphore and go directly to the exception handling code.
	 */

	if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
		goto bad_area_no_up;
#endif

	/*
	 * This is to handle kprobes on user-space access instructions.
	 */
	if (notify_page_fault(regs, TRAP_BRKPT))
		return;

	down_read(&mm->mmap_sem);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma && !prev_vma)
		goto bad_area;

	/*
	 * find_vma_prev() returns vma such that address < vma->vm_end or NULL
	 *
	 * May find no vma, but it could be that the last vm area is the
	 * register backing store that needs to expand upwards; in that
	 * case vma will be NULL, but prev_vma will be non-NULL.
	 */
	if ((!vma && prev_vma) || (address < vma->vm_start))
		goto check_expansion;

  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif

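	/*
	 * A fault on a read access (ISR.r set) is an error only when the
	 * vma permits neither reading nor writing.
	 */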
	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
		goto bad_area;

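	/*
	 * Build the VM_* mask describing the faulting access: ISR.x maps
	 * to VM_EXEC and ISR.w to VM_WRITE.  The mask also determines
	 * whether handle_mm_fault() below is told this was a write.
	 */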
	mask = (  (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		if (fault & VM_FAULT_OOM) {
			goto out_of_memory;
		} else if (fault & VM_FAULT_SIGBUS) {
			signal = SIGBUS;
			goto bad_area;
		}
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;
	up_read(&mm->mmap_sem);
	return;

  check_expansion:
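	/*
	 * The address fell outside every vma.  Either it belongs to an
	 * ordinary stack vma that grows downwards, or it lies just above
	 * the register backing store, which on ia64 grows upwards.
	 */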
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!vma)
			goto bad_area;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		/*
		 * Since the register backing store is accessed sequentially,
		 * we disallow growing it by more than a page at a time.
		 */
		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
			goto bad_area;
		if (expand_upwards(vma, address))
			goto bad_area;
	}
	goto good_area;

  bad_area:
	up_read(&mm->mmap_sem);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s; lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
	if (user_mode(regs)) {
		si.si_signo = signal;
		si.si_errno = 0;
		si.si_code = code;
		si.si_addr = (void __user *) address;
		si.si_isr = isr;
		si.si_flags = __ISR_VALID;
		force_sig_info(signal, &si, current);
		return;
	}

  no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s; lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	/*
	 * Since we have no vmas for region 5, we might get here even if the address is
	 * valid, due to the VHPT walker inserting a non-present translation that becomes
	 * stale. If that happens, the non-present fault handler already purged the stale
	 * translation, which fixed the problem. So, we check to see if the translation is
	 * valid, and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;

	if (ia64_done_with_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to terminate things
	 * with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
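	/*
	 * die() returns non-zero when a registered die-notifier (e.g. a
	 * kernel debugger) handled the fault; in that case clear regs so
	 * we skip killing the current task below.
	 */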
	if (die("Oops", regs, isr))
		regs = NULL;
	bust_spinlocks(0);
	if (regs)
		do_exit(SIGKILL);
	return;

  out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
}