// TODO VM_EXEC flag work-around, cache aliasing
/*
 * arch/xtensa/mm/fault.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2010 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>
#include <asm/pgalloc.h>

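/* Per-CPU cache of the most recently assigned ASID, seeded with
 * ASID_USER_FIRST, the first ASID value handed out to user contexts.
 */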
DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
void bad_page_fault(struct pt_regs*, unsigned long, int);

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * Note: does not handle Miss and MultiHit.
 */

void do_page_fault(struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned int exccause = regs->exccause;
	unsigned int address = regs->excvaddr;
	siginfo_t info;

	int is_write, is_exec;
	int fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	info.si_code = SEGV_MAPERR;

	/* We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 */
	if (address >= TASK_SIZE && !user_mode(regs))
		goto vmalloc_fault;

	/* If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm) {
		bad_page_fault(regs, address, SIGSEGV);
		return;
	}

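	/* Classify the fault from the hardware exception cause: a store to a
	 * page without write permission is a write fault, and any
	 * ITLB/instruction-fetch cause is an exec fault; everything else is
	 * treated as a read.
	 */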
	is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
	is_exec = (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
		   exccause == EXCCAUSE_ITLB_MISS ||
		   exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;

	pr_debug("[%s:%d:%08x:%d:%08lx:%s%s]\n",
		 current->comm, current->pid,
		 address, exccause, regs->pc,
		 is_write ? "w" : "", is_exec ? "x" : "");

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);

	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/* Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */

good_area:
	info.si_code = SEGV_ACCERR;

	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else if (is_exec) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else	/* Allow read even from write-only pages. */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;

	/* If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

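	/* When VM_FAULT_RETRY is returned, mmap_sem has already been released
	 * in __lock_page_or_retry(); if a fatal signal is also pending, just
	 * return and let the signal be delivered.
	 */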
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
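
	/* The fault was handled (at least for this attempt).  Account it as
	 * a major or minor fault, and if the core asked us to retry, do so
	 * once with FAULT_FLAG_TRIED in place of FAULT_FLAG_ALLOW_RETRY.
	 */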
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
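
	/* Report the fault to the perf subsystem.  The major/minor
	 * distinction must test the handle_mm_fault() result ('fault'), not
	 * the FAULT_FLAG_* request bits in 'flags'.
	 */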
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	if (fault & VM_FAULT_MAJOR)
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
	else
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);

	return;

	/* Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);
	if (user_mode(regs)) {
		current->thread.bad_vaddr = address;
		current->thread.error_code = is_write;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void *) address;
		force_sig_info(SIGSEGV, &info, current);
		return;
	}
	bad_page_fault(regs, address, SIGSEGV);
	return;


	/* We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGKILL);
	else
		pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	current->thread.bad_vaddr = address;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *) address;
	force_sig_info(SIGBUS, &info, current);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGBUS);
	return;

vmalloc_fault:
	{
		/* Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		struct mm_struct *act_mm = current->active_mm;
		int index = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		if (act_mm == NULL)
			goto bad_page_fault;

		pgd = act_mm->pgd + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto bad_page_fault;

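		/* Copy the kernel's pgd and pmd entries into this task's
		 * page table so the faulting access can be restarted
		 * without faulting again.
		 */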
		pgd_val(*pgd) = pgd_val(*pgd_k);

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);
		if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_page_fault;

		pmd_val(*pmd) = pmd_val(*pmd_k);
		pte_k = pte_offset_kernel(pmd_k, address);

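		/* If even the reference page table has no valid pte here,
		 * the access was to a genuinely unmapped kernel address.
		 */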
		if (!pte_present(*pte_k))
			goto bad_page_fault;
		return;
	}
bad_page_fault:
	bad_page_fault(regs, address, SIGKILL);
	return;
}


void
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	extern void die(const char*, struct pt_regs*, long);
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this kernel fault?  */
	if ((entry = search_exception_tables(regs->pc)) != NULL) {
		pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
			 current->comm, regs->pc, entry->fixup);
		current->thread.bad_uaddr = address;
		regs->pc = entry->fixup;
		return;
	}

	/* Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	pr_alert("Unable to handle kernel paging request at virtual "
		 "address %08lx\n pc = %08lx, ra = %08lx\n",
		 address, regs->pc, regs->areg[0]);
	die("Oops", regs, sig);
	do_exit(sig);
}