/* $Id: fault.c,v 1.14 2004/01/13 05:52:11 kkojima Exp $
 *
 * linux/arch/sh/mm/fault.c
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2003 Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 * Copyright (C) 1995 Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/kgdb.h>

extern void die(const char *, struct pt_regs *, long);

/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
			      unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long page;

#ifdef CONFIG_SH_KGDB
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif

	tsk = current;
	mm = tsk->mm;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);

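	/*
	 * find_vma() returns the first VMA that ends above the faulting
	 * address.  If the address is below vm_start, the fault may still
	 * be a legitimate stack access just below a VM_GROWSDOWN VMA, so
	 * try to expand the stack before giving up.
	 */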
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it.
 */
good_area:
	if (writeaccess) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
survive:
	switch (handle_mm_fault(mm, vma, address, writeaccess)) {
	case VM_FAULT_MINOR:
		tsk->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		tsk->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto do_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map.
 * Fix it, but check if it's kernel or user first.
 */
bad_area:
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		tsk->thread.address = address;
		tsk->thread.error_code = writeaccess;
		force_sig(SIGSEGV, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(KERN_ALERT "pc = %08lx\n", regs->pc);
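	/*
	 * Dump the offending page-table entries by hand.  MMU_TTB holds
	 * the base of the current page directory.  Assuming 4 KB pages,
	 * this is a two-level walk: bits 31..22 of the address index the
	 * 1024-entry pgd (one entry per 4 MB), and the 0x003ff000 mask
	 * keeps bits 21..12 as the pte index within the page table.
	 */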
	asm volatile("mov.l	%1, %0"
		     : "=r" (page)
		     : "m" (__m(MMU_TTB)));
	if (page) {
		page = ((unsigned long *) page)[address >> 22];
		printk(KERN_ALERT "*pde = %08lx\n", page);
		if (page & _PAGE_PRESENT) {
			page &= PAGE_MASK;
			address &= 0x003ff000;
			page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
			printk(KERN_ALERT "*pte = %08lx\n", page);
		}
	}
	die("Oops", regs, writeaccess);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
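	/*
	 * Pid 1 is init, which must never be killed on OOM; give up the
	 * CPU, retake the semaphore and retry the fault instead.
	 */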
	if (current->pid == 1) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a SIGBUS, regardless of whether we were in kernel
	 * or user mode.
	 */
	tsk->thread.address = address;
	tsk->thread.error_code = writeaccess;
	tsk->thread.trap_no = 14;
	force_sig(SIGBUS, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}

/*
 * Called with interrupts disabled.
 */
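/*
 * Returns 0 when the TLB entry could be reloaded here; a non-zero
 * return means the fault cannot be handled by this fast path and the
 * full do_page_fault() handling above is needed.
 */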
asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
			       unsigned long address)
{
	unsigned long addrmax = P4SEG;
	pgd_t *dir;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

#ifdef CONFIG_SH_KGDB
	if (kgdb_nofault && kgdb_bus_err_hook)
		kgdb_bus_err_hook();
#endif

#ifdef CONFIG_SH_STORE_QUEUES
	addrmax = P4SEG_STORE_QUE + 0x04000000;
#endif

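	/*
	 * P3 is the kernel's TLB-mapped segment, so addresses there (and,
	 * with store queues enabled, the SQ mapping area) are looked up
	 * in the kernel page tables.  Anything else above TASK_SIZE, or
	 * a fault without an mm, cannot be handled here.
	 */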
	if (address >= P3SEG && address < addrmax)
		dir = pgd_offset_k(address);
	else if (address >= TASK_SIZE)
		return 1;
	else if (!current->mm)
		return 1;
	else
		dir = pgd_offset(current->mm, address);

	pmd = pmd_offset(dir, address);
	if (pmd_none(*pmd))
		return 1;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return 1;
	}
	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (pte_none(entry) || pte_not_present(entry)
	    || (writeaccess && !pte_write(entry)))
		return 1;

	if (writeaccess)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);

#ifdef CONFIG_CPU_SH4
	/*
	 * The ITLB is not affected by the "ldtlb" instruction,
	 * so we need to flush the entry ourselves.
	 */
	{
		unsigned long flags;

		local_irq_save(flags);
		__flush_tlb_page(get_asid(), address & PAGE_MASK);
		local_irq_restore(flags);
	}
#endif

	set_pte(pte, entry);
	update_mmu_cache(NULL, address, entry);

	return 0;
}

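/*
 * Flush a single page from the TLB.  TLB entries are tagged with an
 * ASID, so when flushing on behalf of another process we temporarily
 * switch to that mm's ASID and restore our own afterwards.
 */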
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
		unsigned long flags;
		unsigned long asid;
		unsigned long saved_asid = MMU_NO_ASID;

		asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
		page &= PAGE_MASK;

		local_irq_save(flags);
		if (vma->vm_mm != current->mm) {
			saved_asid = get_asid();
			set_asid(asid);
		}
		__flush_tlb_page(asid, page);
		if (saved_asid != MMU_NO_ASID)
			set_asid(saved_asid);
		local_irq_restore(flags);
	}
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		unsigned long flags;
		int size;

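		/*
		 * Heuristic, shared with flush_tlb_kernel_range() below:
		 * if more than a quarter of the TLB would have to be
		 * flushed page by page, it is cheaper to invalidate the
		 * whole context (or, for kernel ranges, the whole TLB).
		 */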
		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		if (size > (MMU_NTLB_ENTRIES / 4)) { /* Too many TLB entries to flush */
			mm->context = NO_CONTEXT;
			if (mm == current->mm)
				activate_context(mm);
		} else {
			unsigned long asid = mm->context & MMU_CONTEXT_ASID_MASK;
			unsigned long saved_asid = MMU_NO_ASID;

			start &= PAGE_MASK;
			end += (PAGE_SIZE - 1);
			end &= PAGE_MASK;
			if (mm != current->mm) {
				saved_asid = get_asid();
				set_asid(asid);
			}
			while (start < end) {
				__flush_tlb_page(asid, start);
				start += PAGE_SIZE;
			}
			if (saved_asid != MMU_NO_ASID)
				set_asid(saved_asid);
		}
		local_irq_restore(flags);
	}
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (size > (MMU_NTLB_ENTRIES / 4)) { /* Too many TLB entries to flush */
		flush_tlb_all();
	} else {
		unsigned long asid = init_mm.context & MMU_CONTEXT_ASID_MASK;
		unsigned long saved_asid = get_asid();

		start &= PAGE_MASK;
		end += (PAGE_SIZE - 1);
		end &= PAGE_MASK;
		set_asid(asid);
		while (start < end) {
			__flush_tlb_page(asid, start);
			start += PAGE_SIZE;
		}
		set_asid(saved_asid);
	}
	local_irq_restore(flags);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * Invalidate all TLB entries of this process by getting a new
	 * MMU context rather than flushing each entry individually.
	 */
	if (mm->context != NO_CONTEXT) {
		unsigned long flags;

		local_irq_save(flags);
		mm->context = NO_CONTEXT;
		if (mm == current->mm)
			activate_context(mm);
		local_irq_restore(flags);
	}
}

void flush_tlb_all(void)
{
	unsigned long flags, status;

	/*
	 * Flush the entire TLB.
	 *
	 * Write to the flush bit in the MMU control register:
	 * the TF bit on SH-3, the TI bit on SH-4.
	 * It is the same position in both, bit 2 (0x04).
	 */
	local_irq_save(flags);
	status = ctrl_inl(MMUCR);
	status |= 0x04;
	ctrl_outl(status, MMUCR);
	local_irq_restore(flags);
}