/*
 * OpenRISC fault.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sched.h>

#include <asm/uaccess.h>
#include <asm/siginfo.h>
#include <asm/signal.h>

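/* TLB_OFFSET() maps a virtual address to its TLB set; the masking trick
 * assumes NUM_TLB_ENTRIES (64 on the default OR1200-style MMU) is a
 * power of two.
 */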
#define NUM_TLB_ENTRIES 64
#define TLB_OFFSET(add) (((add) >> PAGE_SHIFT) & (NUM_TLB_ENTRIES-1))

unsigned long pte_misses;	/* updated by do_page_fault() */
unsigned long pte_errors;	/* updated by do_page_fault() */

/* __PHX__ :: - check the vmalloc_fault in do_page_fault()
 *            - also look into include/asm-or32/mmu_context.h
 */
volatile pgd_t *current_pgd;

extern void die(char *, struct pt_regs *, long);

/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 *
 * Note that this handler returns void: a bad access either raises a
 * signal on the task or is fixed up via the kernel exception tables.
 */

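/*
 * Arguments: 'address' is the faulting virtual address; 'vector' is the
 * CPU exception vector we entered through (on OpenRISC, 0x300 is a data
 * page fault and 0x400 an instruction page fault); 'write_acc' is
 * non-zero for write accesses.
 */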
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
			      unsigned long vector, int write_acc)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	siginfo_t info;
	int fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	tsk = current;

	/*
	 * We fault-in kernel-space virtual memory on-demand.  The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case.  We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * NOTE2: This is done so that, when updating the vmalloc
	 * mappings, we don't have to walk all processes' pgdirs and
	 * add the high mappings all at once.  Instead we do it as they
	 * are used.  However, vmalloc'ed page entries have the
	 * PAGE_GLOBAL bit set, so sometimes the TLB can use a lingering
	 * entry.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was not a protection error.
	 */
	if (address >= VMALLOC_START &&
	    (vector != 0x300 && vector != 0x400) &&
	    !user_mode(regs))
		goto vmalloc_fault;

	/* If exceptions were enabled, we can re-enable them here */
	if (user_mode(regs)) {
		/* Exception was in userspace: re-enable interrupts */
		local_irq_enable();
	} else {
		/* If the exception was in a syscall, then IRQs may have
		 * been enabled or disabled.  If they were enabled,
		 * re-enable them.
		 */
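		/* SPR_SR_IEE and SPR_SR_TEE are the interrupt- and
		 * tick-timer-exception enable bits of the saved
		 * supervision register, so they want a bitwise test.
		 */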
		if (regs->sr & (SPR_SR_IEE | SPR_SR_TEE))
			local_irq_enable();
	}

	mm = tsk->mm;
	info.si_code = SEGV_MAPERR;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (in_interrupt() || !mm)
		goto no_context;

retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);

	if (!vma)
		goto bad_area;

	if (vma->vm_start <= address)
		goto good_area;

	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	if (user_mode(regs)) {
		/*
		 * Accessing the stack below usp is always a bug.
		 * We get page-aligned addresses, so we can only check
		 * if we're within a page from usp, but that might be
		 * enough to catch brutal errors at least.
		 */
		if (address + PAGE_SIZE < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	info.si_code = SEGV_ACCERR;

	/* first do some preliminary protection checks */
	if (write_acc) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		/* not present */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/* are we trying to execute in a non-executable area? */
	if ((vector == 0x400) && !(vma->vm_page_prot.pgprot & _PAGE_EXEC))
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

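	/* If the mm core asked us to retry while a fatal signal is
	 * pending, mmap_sem has already been released for us in
	 * __lock_page_or_retry() (mm/filemap.c), so just bail.
	 */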
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/* RGD: modeled on Cris */
		if (fault & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault?
	 *
	 * (The kernel has valid exception-points in the source
	 *  when it accesses user-memory.  When it fails in one
	 *  of those points, we find it in a table and do a jump
	 *  to some fixup code that loads an appropriate error
	 *  code.)
	 */
	{
		const struct exception_table_entry *entry;

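		/* On OpenRISC, l.nop with a non-zero immediate is an
		 * architectural no-op whose immediate is visible to
		 * simulators and trace tools; this one is presumably a
		 * debugging breadcrumb.
		 */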
		__asm__ __volatile__("l.nop 42");

		entry = search_exception_tables(regs->pc);
		if (entry != NULL) {
			/* Adjust the instruction pointer in the stackframe */
			regs->pc = entry->fixup;
			return;
		}
	}

	/*
	 * Oops.  The kernel tried to access some bad page.  We'll have to
	 * terminate things with extreme prejudice.
	 */
	if ((unsigned long)(address) < PAGE_SIZE)
		printk(KERN_ALERT
		       "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel access");
	printk(" at virtual address 0x%08lx\n", address);

	die("Oops", regs, write_acc);

	do_exit(SIGKILL);

	/*
	 * We ran out of memory, or some other thing happened to us that
	 * made us unable to handle the page fault gracefully.
	 */
out_of_memory:
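	/* Marker nops again; the immediates are ignored by hardware and
	 * only meaningful to simulators and debug tooling.
	 */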
	__asm__ __volatile__("l.nop 42");
	__asm__ __volatile__("l.nop 1");

	up_read(&mm->mmap_sem);
	printk(KERN_ERR "VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Use current_pgd instead of tsk->active_mm->pgd
		 * since the latter might be unavailable if this
		 * code is executed in an inopportunely timed irq
		 * (like inside schedule(), between switch_mm and
		 * switch_to...).
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		/*
		phx_warn("do_page_fault(): vmalloc_fault will not work, "
			 "since current_pgd assign a proper value somewhere\n"
			 "anyhow we don't need this at the moment\n");

		phx_mmu("vmalloc_fault");
		*/
		pgd = (pgd_t *)current_pgd + offset;
		pgd_k = init_mm.pgd + offset;

		/* Since we're two-level, we don't need to do both
		 * set_pgd and set_pmd (they do the same thing).  If
		 * we go three-level at some point, do the right thing
		 * with pgd_present and set_pgd here.
		 *
		 * Also, since the vmalloc area is global, we don't
		 * need to copy individual PTEs: it is enough to
		 * copy the pgd pointer into the pte page of the
		 * root task.  If that is there, we'll find our pte if
		 * it exists.
		 */
		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);

		if (!pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		set_pmd(pmd, *pmd_k);

		/* Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses.  If we don't do this, this will just
		 * silently loop forever.
		 */
		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;

		return;
	}
}