/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/kdebug.h>

extern void die (char *, struct pt_regs *, long);

#ifdef CONFIG_KPROBES
ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);

/* Hook to register for page fault notifications */
int register_page_fault_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
}

int unregister_page_fault_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
}

static inline int notify_page_fault(enum die_val val, const char *str,
                        struct pt_regs *regs, long err, int trap, int sig)
{
        struct die_args args = {
                .regs = regs,
                .str = str,
                .err = err,
                .trapnr = trap,
                .signr = sig
        };
        return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
}
#else
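/*
 * Without CONFIG_KPROBES there is nothing to notify, so the hook reduces
 * to a stub that simply reports NOTIFY_DONE.
 */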
static inline int notify_page_fault(enum die_val val, const char *str,
                        struct pt_regs *regs, long err, int trap, int sig)
{
        return NOTIFY_DONE;
}
#endif

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;

        pgd = pgd_offset_k(address);
        if (pgd_none(*pgd) || pgd_bad(*pgd))
                return 0;

        pud = pud_offset(pgd, address);
        if (pud_none(*pud) || pud_bad(*pud))
                return 0;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return 0;

        ptep = pte_offset_kernel(pmd, address);
        if (!ptep)
                return 0;

        pte = *ptep;
        return pte_present(pte);
}

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
        int signal = SIGSEGV, code = SEGV_MAPERR;
        struct vm_area_struct *vma, *prev_vma;
        struct mm_struct *mm = current->mm;
        struct siginfo si;
        unsigned long mask;

        /* mmap_sem is performance critical.... */
        prefetchw(&mm->mmap_sem);

        /*
         * If we're in an interrupt or have no user context, we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
        /*
         * If fault is in region 5 and we are in the kernel, we may already
         * have the mmap_sem (pfn_valid macro is called during mmap).  There
         * is no vma for region 5 addr's anyway, so skip getting the semaphore
         * and go directly to the exception handling code.
         */

        if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
                goto bad_area_no_up;
#endif

        /*
         * This is to handle the kprobes on user space access instructions
         */
        if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, code, TRAP_BRKPT,
                                SIGSEGV) == NOTIFY_STOP)
                return;

        down_read(&mm->mmap_sem);

        vma = find_vma_prev(mm, address, &prev_vma);
        if (!vma)
                goto bad_area;

        /* find_vma_prev() returns vma such that address < vma->vm_end or NULL */
        if (address < vma->vm_start)
                goto check_expansion;

good_area:
        code = SEGV_ACCERR;

        /* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#       define VM_READ_BIT      0
#       define VM_WRITE_BIT     1
#       define VM_EXEC_BIT      2

#       if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
            || (1 << VM_EXEC_BIT) != VM_EXEC)
#               error File is out of sync with <linux/mm.h>.  Please update.
#       endif

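        /*
         * If the fault was a read (ISR.r set) on a mapping that permits
         * neither reading nor writing, it is an access error.
         */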
        if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
                goto bad_area;

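        /*
         * Build the set of permissions this access requires (execute and/or
         * write) from the ISR bits, using the VM_*_BIT positions verified
         * above so the result can be compared directly against vma->vm_flags.
         */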
        mask = (  (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
                | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

        if ((vma->vm_flags & mask) != mask)
                goto bad_area;

survive:
        /*
         * If for any reason at all we couldn't handle the fault, make
         * sure we exit gracefully rather than endlessly redo the
         * fault.
         */
        switch (handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0)) {
        case VM_FAULT_MINOR:
                ++current->min_flt;
                break;
        case VM_FAULT_MAJOR:
                ++current->maj_flt;
                break;
        case VM_FAULT_SIGBUS:
                /*
                 * We ran out of memory, or some other thing happened
                 * to us that made us unable to handle the page fault
                 * gracefully.
                 */
                signal = SIGBUS;
                goto bad_area;
        case VM_FAULT_OOM:
                goto out_of_memory;
        default:
                BUG();
        }
        up_read(&mm->mmap_sem);
        return;

check_expansion:
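        /*
         * The address falls between two vmas.  Either the vma above it grows
         * downward (an ordinary stack), or the vma below it grows upward
         * (the register backing store) and the address is just past its
         * current end.
         */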
        if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto bad_area;
                if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
                    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
                        goto bad_area;
                if (expand_stack(vma, address))
                        goto bad_area;
        } else {
                vma = prev_vma;
                if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
                    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
                        goto bad_area;
                /*
                 * Since the register backing store is accessed sequentially,
                 * we disallow growing it by more than a page at a time.
                 */
                if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
                        goto bad_area;
                if (expand_upwards(vma, address))
                        goto bad_area;
        }
        goto good_area;

bad_area:
        up_read(&mm->mmap_sem);
#ifdef CONFIG_VIRTUAL_MEM_MAP
bad_area_no_up:
#endif
        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
        {
                /*
                 * This fault was due to a speculative load or lfetch.fault, set the "ed"
                 * bit in the psr to ensure forward progress.  (Target register will get a
                 * NaT for ld.s, lfetch will be canceled.)
                 */
                ia64_psr(regs)->ed = 1;
                return;
        }
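        /*
         * For a user-mode fault, deliver the signal (SIGSEGV or SIGBUS)
         * together with the faulting address and the ISR value so the task
         * can examine them in its signal handler.
         */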
        if (user_mode(regs)) {
                si.si_signo = signal;
                si.si_errno = 0;
                si.si_code = code;
                si.si_addr = (void __user *) address;
                si.si_isr = isr;
                si.si_flags = __ISR_VALID;
                force_sig_info(signal, &si, current);
                return;
        }

no_context:
        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
        {
                /*
                 * This fault was due to a speculative load or lfetch.fault, set the "ed"
                 * bit in the psr to ensure forward progress.  (Target register will get a
                 * NaT for ld.s, lfetch will be canceled.)
                 */
                ia64_psr(regs)->ed = 1;
                return;
        }

        /*
         * Since we have no vma's for region 5, we might get here even if the address is
         * valid, due to the VHPT walker inserting a non present translation that becomes
         * stale.  If that happens, the non present fault handler already purged the stale
         * translation, which fixed the problem.  So, we check to see if the translation is
         * valid, and return if it is.
         */
        if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
                return;

        if (ia64_done_with_exception(regs))
                return;

        /*
         * Oops.  The kernel tried to access some bad page.  We'll have to terminate things
         * with extreme prejudice.
         */
        bust_spinlocks(1);

        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
        else
                printk(KERN_ALERT "Unable to handle kernel paging request at "
                       "virtual address %016lx\n", address);
        die("Oops", regs, isr);
        bust_spinlocks(0);
        do_exit(SIGKILL);
        return;

out_of_memory:
        up_read(&mm->mmap_sem);
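        /*
         * Never give up on init: let it yield so other tasks can free memory,
         * then retry the fault.  Any other task is killed (user mode) or
         * falls through to the kernel-fault path (kernel mode).
         */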
        if (is_init(current)) {
                yield();
                down_read(&mm->mmap_sem);
                goto survive;
        }
        printk(KERN_CRIT "VM: killing process %s\n", current->comm);
        if (user_mode(regs))
                do_exit(SIGKILL);
        goto no_context;
}