/*
 * arch/xtensa/mm/tlb.c
 *
 * Logic that manipulates the Xtensa MMU.  Derived from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2003 Tensilica Inc.
 *
 * Joe Taylor
 * Chris Zankel <chris@zankel.net>
 * Marc Gauthier
 */

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

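/*
 * Flush every entry of the auto-refill (ARF) ways of the ITLB/DTLB.
 * The entry index passed to the invalidate instructions encodes the
 * way number in the low bits and the entry number in the VPN field
 * (hence the << PAGE_SHIFT).  The isync is deferred until the whole
 * walk is done, keeping the per-entry invalidates cheap.
 */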
static inline void __flush_itlb_all(void)
{
	int w, i;

	for (w = 0; w < ITLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);
			invalidate_itlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}

static inline void __flush_dtlb_all(void)
{
	int w, i;

	for (w = 0; w < DTLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);
			invalidate_dtlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}

void local_flush_tlb_all(void)
{
	__flush_itlb_all();
	__flush_dtlb_all();
}

/* If mm is the current mm, we simply assign a new ASID to the current
 * task, thus invalidating all previous TLB entries.  If mm is someone
 * else's user mapping, we invalidate its context; when that mapping is
 * swapped in again, a new context will be assigned to it.
 */

void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (mm == current->active_mm) {
		unsigned long flags;
		local_irq_save(flags);
		mm->context.asid[cpu] = NO_CONTEXT;
		activate_context(mm, cpu);
		local_irq_restore(flags);
	} else {
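		/* mm is not active on this CPU: just mark its context
		 * invalid; a fresh ASID is allocated when the mm is next
		 * activated here.
		 */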
		mm->context.asid[cpu] = NO_CONTEXT;
		mm->context.cpu = -1;
	}
}

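/*
 * _TLB_ENTRIES is the larger of the two auto-refill entry counts.  It
 * serves as the break-even point below which invalidating page by page
 * is assumed to be cheaper than flushing an entire context.
 */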
#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
#if _ITLB_ENTRIES > _DTLB_ENTRIES
# define _TLB_ENTRIES _ITLB_ENTRIES
#else
# define _TLB_ENTRIES _DTLB_ENTRIES
#endif

void local_flush_tlb_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int cpu = smp_processor_id();
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	if (mm->context.asid[cpu] == NO_CONTEXT)
		return;

#if 0
	printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
			(unsigned long)mm->context.asid[cpu], start, end);
#endif
	local_irq_save(flags);

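	/* If the range spans no more pages than the TLB has entries,
	 * invalidate the affected pages individually under the mm's
	 * ASID; otherwise dropping the whole context is cheaper.
	 */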
	if (end - start + (PAGE_SIZE - 1) <= _TLB_ENTRIES << PAGE_SHIFT) {
		int oldpid = get_rasid_register();

		set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
		start &= PAGE_MASK;
		if (vma->vm_flags & VM_EXEC) {
			while (start < end) {
				invalidate_itlb_mapping(start);
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}
		} else {
			while (start < end) {
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}
		}

		set_rasid_register(oldpid);
	} else {
		local_flush_tlb_mm(mm);
	}
	local_irq_restore(flags);
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	int oldpid;

	if (mm->context.asid[cpu] == NO_CONTEXT)
		return;

	local_irq_save(flags);

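	/* Temporarily install the mm's ASID in the RASID register so the
	 * per-page invalidate instructions match entries of this context.
	 */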
	oldpid = get_rasid_register();
	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));

	if (vma->vm_flags & VM_EXEC)
		invalidate_itlb_mapping(page);
	invalidate_dtlb_mapping(page);

	set_rasid_register(oldpid);

	local_irq_restore(flags);
}

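/*
 * Flush a range of kernel virtual addresses.  Per-page invalidation is
 * only attempted for small ranges that lie entirely between TASK_SIZE
 * and PAGE_OFFSET; anything else falls back to a full TLB flush.
 */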
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
	    end - start < _TLB_ENTRIES << PAGE_SHIFT) {
		start &= PAGE_MASK;
		while (start < end) {
			invalidate_itlb_mapping(start);
			invalidate_dtlb_mapping(start);
			start += PAGE_SIZE;
		}
	} else {
		local_flush_tlb_all();
	}
}

#ifdef CONFIG_DEBUG_TLB_SANITY

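/*
 * Walk the current task's page tables in software and return the raw PTE
 * for vaddr, or 0 if there is no valid mapping at any level.
 */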
static unsigned get_pte_for_vaddr(unsigned vaddr)
{
	struct task_struct *task = get_current();
	struct mm_struct *mm = task->mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	if (!mm)
		mm = task->active_mm;
	pgd = pgd_offset(mm, vaddr);
	if (pgd_none_or_clear_bad(pgd))
		return 0;
	pmd = pmd_offset(pgd, vaddr);
	if (pmd_none_or_clear_bad(pmd))
		return 0;
	pte = pte_offset_map(pmd, vaddr);
	if (!pte)
		return 0;
	return pte_val(*pte);
}

enum {
	TLB_SUSPICIOUS = 1,
	TLB_INSANE = 2,
};

static void tlb_insane(void)
{
	BUG_ON(1);
}

static void tlb_suspicious(void)
{
	WARN_ON(1);
}

/*
 * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
 * and that TLB entries with user ASID (>= 4) have VMA < TASK_SIZE.
 *
 * Check that valid TLB entries either have the same PA as the PTE, or that
 * the PTE is marked non-present.  A non-present PTE for a page with non-zero
 * refcount and zero mapcount is normal for a batched TLB flush operation.
 * Zero refcount means that the page was freed prematurely.  Non-zero
 * mapcount is unusual, but does not necessarily mean an error; it is
 * therefore marked as suspicious.
 */
static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
{
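	/* The entry index encodes the way in its low bits and the entry
	 * number in the VPN field, matching the layout used by the
	 * __flush_*_all() helpers above.
	 */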
	unsigned tlbidx = w | (e << PAGE_SHIFT);
	unsigned r0 = dtlb ?
		read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
	unsigned pte = get_pte_for_vaddr(vpn);
	unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
	unsigned tlb_asid = r0 & ASID_MASK;
	bool kernel = tlb_asid == 1;
	int rc = 0;

	if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
		pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
		       dtlb ? 'D' : 'I', w, e, vpn,
		       kernel ? "kernel" : "user");
		rc |= TLB_INSANE;
	}

	if (tlb_asid == mm_asid) {
		unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
			read_itlb_translation(tlbidx);
		if ((pte ^ r1) & PAGE_MASK) {
			pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
			       dtlb ? 'D' : 'I', w, e, r0, r1, pte);
			if (pte == 0 || !pte_present(__pte(pte))) {
				struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
				pr_err("page refcount: %d, mapcount: %d\n",
				       page_count(p),
				       page_mapcount(p));
				if (!page_count(p))
					rc |= TLB_INSANE;
				else if (page_mapcount(p))
					rc |= TLB_SUSPICIOUS;
			} else {
				rc |= TLB_INSANE;
			}
		}
	}
	return rc;
}

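/*
 * Scan every auto-refill entry of both TLBs with interrupts disabled,
 * raising a BUG for insane entries and a WARN for merely suspicious ones.
 */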
void check_tlb_sanity(void)
{
	unsigned long flags;
	unsigned w, e;
	int bug = 0;

	local_irq_save(flags);
	for (w = 0; w < DTLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, true);
	for (w = 0; w < ITLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, false);
	if (bug & TLB_INSANE)
		tlb_insane();
	if (bug & TLB_SUSPICIOUS)
		tlb_suspicious();
	local_irq_restore(flags);
}

#endif /* CONFIG_DEBUG_TLB_SANITY */