/*
 * TLB Management (flush/create/diagnostics) for ARC700
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: Aug 2011
 *  -Reintroduce duplicate PD fixup - some customer chips still have the issue
 *
 * vineetg: May 2011
 *  -No need to flush_cache_page( ) for each call to update_mmu_cache()
 *   some of the LMBench tests improved amazingly
 *      = page-fault thrice as fast (75 usec to 28 usec)
 *      = mmap twice as fast (9.6 msec to 4.6 msec),
 *      = fork (5.3 msec to 3.7 msec)
 *
 * vineetg: April 2011 :
 *  -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
 *      helps avoid a shift when preparing PD0 from PTE
 *
 * vineetg: April 2011 : Preparing for MMU V3
 *  -MMU v2/v3 BCRs decoded differently
 *  -Remove TLB_SIZE hardcoding as it's variable now: 256 or 512
 *  -tlb_entry_erase( ) can be void
 *  -local_flush_tlb_range( ):
 *      = need not "ceil" @end
 *      = walks MMU only if range spans < 32 entries, as opposed to 256
 *
 * Vineetg: Sept 10th 2008
 *  -Changes related to MMU v2 (Rel 4.8)
 *
 * Vineetg: Aug 29th 2008
 *  -In TLB Flush operations (Metal Fix MMU) there is an explicit command to
 *   flush Micro-TLBs. If the TLB Index Reg is invalid prior to the TLBIVUTLB
 *   cmd, it fails. Thus need to load it with ANY valid value before invoking
 *   the TLBIVUTLB cmd
 *
 * Vineetg: Aug 21st 2008:
 *  -Reduced the duration of IRQ lockouts in TLB Flush routines
 *  -Multiple copies of TLB erase code separated into a "single" function
 *  -In TLB Flush routines, interrupt disabling moved UP to retrieve ASID
 *   in interrupt-safe region.
 *
 * Vineetg: April 23rd Bug #93131
 *    Problem: tlb_flush_kernel_range() doesn't do anything if the range to
 *             flush is more than the size of the TLB itself.
 *
 * Rahul Trivedi : Codito Technologies 2004
 */

#include <linux/module.h>
#include <linux/bug.h>
#include <asm/arcregs.h>
#include <asm/setup.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>

/*
 * Need for ARC MMU v2
 *
 * ARC700 MMU-v1 had a Joint-TLB for Code and Data and is 2 way set-assoc.
 * For a memcpy operation with 3 players (src/dst/code) such that all 3 pages
 * map into the same set, there would be contention for the 2 ways, causing
 * severe Thrashing.
 *
 * Although the J-TLB is 2 way set assoc, ARC700 caches J-TLB entries into
 * uTLBs which have much higher associativity: u-D-TLB is 8 ways, u-I-TLB is
 * 4 ways. Given this, the thrashing problem should never happen because once
 * the 3 J-TLB entries are created (even though the 3rd will knock out one of
 * the prev two), the u-D-TLB and u-I-TLB will have what is required to
 * accomplish memcpy.
 *
 * Yet we still see the Thrashing because a J-TLB Write causes a flush of the
 * u-TLBs. This is a simple design for keeping them in sync. So what do we do?
 * The solution which James came up with was pretty neat. It utilised the
 * assoc of uTLBs by not always invalidating, but only when absolutely
 * necessary.
 *
 * - Existing TLB commands work as before
 * - New command (TLBWriteNI) for TLB write without clearing uTLBs
 * - New command (TLBIVUTLB) to invalidate uTLBs.
 *
 * The uTLBs need only be invalidated when pages are being removed from the
 * OS page table. If a 'victim' TLB entry is being overwritten in the main TLB
 * as a result of a miss, the removed entry is still allowed to exist in the
 * uTLBs as it is still valid and present in the OS page table. This allows the
 * full associativity of the uTLBs to hide the limited associativity of the
 * main TLB.
 *
 * During a miss handler, the new "TLBWriteNI" command is used to load
 * entries without clearing the uTLBs.
 *
 * When the OS page table is updated, TLB entries that may be associated with a
 * removed page are removed (flushed) from the TLB using TLBWrite. In this
 * circumstance, the uTLBs must also be cleared. This is done by using the
 * existing TLBWrite command. An explicit IVUTLB is also required for those
 * corner cases when TLBWrite was not executed at all because the corresponding
 * J-TLB entry got evicted/replaced.
 */
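
/*
 * In short, as the code below actually implements it: entries are erased or
 * overwritten with plain TLBWrite (which also clears the uTLBs as a side
 * effect), and an explicit IVUTLB backstops the corner case where the J-TLB
 * entry was already evicted. TLBWriteNI is deliberately not used on refill
 * here - see the note in tlb_entry_insert().
 */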

/* A copy of the ASID from the PID reg is kept in asid_cache */
DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;

/*
 * Utility routine to erase a J-TLB entry
 * Caller needs to set up the Index Reg (manually or via getIndex)
 */
static inline void __tlb_entry_erase(void)
{
	write_aux_reg(ARC_REG_TLBPD1, 0);
	write_aux_reg(ARC_REG_TLBPD0, 0);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

#if (CONFIG_ARC_MMU_VER < 4)

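/*
 * Probe the J-TLB for @vaddr_n_asid: on a hit, the INDEX reg is left pointing
 * at the matching entry (ready for a subsequent TLBWrite); on a miss, the
 * returned value has TLB_LKUP_ERR set, with TLB_DUP_ERR further flagging a
 * duplicate (multi-way) match - see the callers below.
 */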
static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
{
	unsigned int idx;

	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	return idx;
}

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	unsigned int idx;

	/* Locate the TLB entry for this vaddr + ASID */
	idx = tlb_entry_lkup(vaddr_n_asid);

	/* No error means entry found, zero it out */
	if (likely(!(idx & TLB_LKUP_ERR))) {
		__tlb_entry_erase();
	} else {
		/* Duplicate entry error */
		WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
					 vaddr_n_asid);
	}
}

/****************************************************************************
 * ARC700 MMU caches recently used J-TLB entries (RAM) as uTLBs (FLOPs)
 *
 * New IVUTLB cmd in MMU v2 explicitly invalidates the uTLB
 *
 * utlb_invalidate ( )
 *  -For v2 MMU calls Flush uTLB Cmd
 *  -For v1 MMU does nothing (except for Metal Fix v1 MMU)
 *      This is because in v1 TLBWrite itself invalidates the uTLBs
 ***************************************************************************/

static void utlb_invalidate(void)
{
#if (CONFIG_ARC_MMU_VER >= 2)

#if (CONFIG_ARC_MMU_VER == 2)
	/* MMU v2 introduced the uTLB Flush command.
	 * There was however an obscure hardware bug, where uTLB flush would
	 * fail when a prior probe for J-TLB (both totally unrelated) would
	 * return lkup err - because the entry didn't exist in MMU.
	 * The workaround was to set the Index reg with some valid value prior
	 * to the flush. This was fixed in MMU v3, hence not needed any more.
	 */
	unsigned int idx;

	/* make sure INDEX Reg is valid */
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	/* If not, write some dummy val */
	if (unlikely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBINDEX, 0xa);
#endif

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
#endif
}
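
/*
 * Note on usage: callers batch per-entry tlb_entry_erase() calls and issue a
 * single utlb_invalidate() afterwards (e.g. local_flush_tlb_range()). The
 * per-entry TLBWrite already clears the uTLBs; the trailing IVUTLB covers the
 * corner case described above, where the probe missed (entry already evicted
 * from the J-TLB) yet a stale copy survives in a uTLB.
 */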

static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
{
	unsigned int idx;

	/*
	 * First verify if entry for this vaddr+ASID already exists
	 * This also sets up PD0 (vaddr, ASID..) for final commit
	 */
	idx = tlb_entry_lkup(pd0);

	/*
	 * If not already present, get a free slot from MMU.
	 * Otherwise, the Probe will have located the entry and set INDEX Reg
	 * to the existing location. This will cause the Write CMD to
	 * over-write the existing entry with the new PD0 and PD1
	 */
	if (likely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);

	/* setup the other half of TLB entry (pfn, rwx..) */
	write_aux_reg(ARC_REG_TLBPD1, pd1);

	/*
	 * Commit the Entry to MMU
	 * It doesn't sound safe to use the TLBWriteNI cmd here,
	 * which doesn't flush uTLBs. I'd rather be safe than sorry.
	 */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

#else	/* CONFIG_ARC_MMU_VER >= 4 */

static void utlb_invalidate(void)
{
	/* No need since uTLB is always in sync with JTLB */
}

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid | _PAGE_PRESENT);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry);
}

static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
{
	write_aux_reg(ARC_REG_TLBPD0, pd0);
	write_aux_reg(ARC_REG_TLBPD1, pd1);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
}

#endif

/*
 * Unconditionally (without lookup) erase the entire MMU contents
 */
noinline void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned int entry;
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;

	local_irq_save(flags);

	/* Load PD0 and PD1 with template for a Blank Entry */
	write_aux_reg(ARC_REG_TLBPD1, 0);
	write_aux_reg(ARC_REG_TLBPD0, 0);

	for (entry = 0; entry < mmu->num_tlb; entry++) {
		/* write this entry to the TLB */
		write_aux_reg(ARC_REG_TLBINDEX, entry);
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
	}

	utlb_invalidate();

	local_irq_restore(flags);
}

/*
 * Flush the entire mm for userland. The fastest way is to move to a new ASID.
 */
noinline void local_flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * Small optimisation courtesy IA64:
	 * flush_mm is called during fork, exit, munmap etc, multiple times as
	 * well. Only for fork( ) do we need to move the parent to a new MMU
	 * ctxt; all other cases are NOPs, hence this check.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	/*
	 * - Move to a new ASID, but only if the mm is still wired in
	 *   (Android Binder ended up calling this for vma->mm != tsk->mm,
	 *   causing h/w - s/w ASID to get out of sync)
	 * - Also the new get_new_mmu_context() implementation allocates a new
	 *   ASID only if it is not allocated already - so unallocate first
	 */
	destroy_context(mm);
	if (current->mm == mm)
		get_new_mmu_context(mm);
}

/*
 * Flush a Range of TLB entries for userland.
 * @start is inclusive, while @end is exclusive
 * Difference between this and Kernel Range Flush is
 *  -Here the fastest way (if range is too large) is to move to next ASID
 *   without doing any explicit Shootdown
 *  -In case of kernel Flush, entry has to be shot down explicitly
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* If range @start to @end is more than 32 TLB entries deep,
	 * it's better to move to a new ASID rather than searching for
	 * individual entries and then shooting them down
	 *
	 * The calc above is rough, doesn't account for unaligned parts,
	 * since this is heuristics based anyway
	 */
	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_mm(vma->vm_mm);
		return;
	}
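
	/*
	 * To put a number on the heuristic: assuming the 8K page size that is
	 * typical for ARC, the cutoff above is 32 * 8K = 256KB - beyond that,
	 * one ASID reallocation is cheaper than 32+ probe/erase round-trips.
	 */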

	/*
	 * @start moved to page start: this alone suffices for checking
	 * loop end condition below, w/o need for aligning @end to end
	 * e.g. 2000 to 4001 will anyhow loop twice
	 */
	start &= PAGE_MASK;

	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		while (start < end) {
			tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
			start += PAGE_SIZE;
		}
	}

	utlb_invalidate();

	local_irq_restore(flags);
}

/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
 *  @start, @end interpreted as kvaddr
 * Interestingly, shared TLB entries can also be flushed using just
 * @start,@end alone (interpreted as user vaddr), although technically SASID
 * is also needed. However our smart TLBProbe lookup takes care of that.
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;

	/* exactly same as above, except for TLB entry not taking ASID */

	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_all();
		return;
	}

	start &= PAGE_MASK;

	local_irq_save(flags);
	while (start < end) {
		tlb_entry_erase(start);
		start += PAGE_SIZE;
	}

	utlb_invalidate();

	local_irq_restore(flags);
}

/*
 * Delete TLB entry in MMU for a given page (user virtual address)
 * NOTE One TLB entry contains translation for a single PAGE
 */
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* Note that it is critical that interrupts are DISABLED between
	 * checking the ASID and using it to flush the TLB entry
	 */
	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
		utlb_invalidate();
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_SMP

struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

void flush_tlb_all(void)
{
	on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
			 mm, 1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = uaddr
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct tlb_args ta = {
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
}
#endif

/*
 * Routine to create a TLB entry
 */
void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	unsigned long flags;
	unsigned int asid_or_sasid, rwx;
	unsigned long pd0, pd1;

	/*
	 * create_tlb() assumes that current->mm == vma->vm_mm, since
	 * -the ASID for the TLB entry is fetched from the MMU ASID reg
	 *  (valid for curr tsk only)
	 * -it completes the lazy write to the SASID reg (again valid for
	 *  curr tsk only)
	 *
	 * Removing the assumption involves
	 * -Using vma->vm_mm->context{ASID,SASID}, as opposed to MMU reg.
	 * -Fixing the TLB paranoid debug code to not trigger false negatives.
	 * -More importantly it makes this handler inconsistent with the
	 *  fast-path TLB Refill handler, which always deals with "current"
	 *
	 * Let's see the use cases when current->mm != vma->vm_mm and we land
	 * here:
	 * 1. execve->copy_strings()->__get_user_pages->handle_mm_fault
	 *    Here VM wants to pre-install a TLB entry for the user stack
	 *    while current->mm still points to the pre-execve mm (hence the
	 *    condition). However the stack vaddr is soon relocated
	 *    (randomization) and move_page_tables() tries to undo that TLB
	 *    entry. Thus not creating a TLB entry is not any worse.
	 *
	 * 2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
	 *    breakpoint in the debugged task. Not creating a TLB now is not
	 *    performance critical.
	 *
	 * Both the cases above are not good enough for code churn.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), address);

	address &= PAGE_MASK;

	/* update this PTE's credentials */
	pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);

	/* Create HW TLB(PD0,PD1) from PTE */

	/* ASID for this task */
	asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;

	pd0 = address | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);

	/*
	 * ARC MMU provides fully orthogonal access bits for K/U mode,
	 * however Linux only saves 1 set, to save PTE real-estate.
	 * Here we convert 3 PTE bits into 6 MMU bits:
	 * -Kernel only entries have Kr Kw Kx 0 0 0
	 * -User entries have mirrored K and U bits
	 */
	rwx = pte_val(*ptep) & PTE_BITS_RWX;

	if (pte_val(*ptep) & _PAGE_GLOBAL)
		rwx <<= 3;		/* r w x => Kr Kw Kx 0 0 0 */
	else
		rwx |= (rwx << 3);	/* r w x => Kr Kw Kx Ur Uw Ux */
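
	/*
	 * Worked example (bit patterns illustrative, assuming r/w/x occupy
	 * three adjacent bits per PTE_BITS_RWX): a global kernel page with
	 * rwx = 110 (rw-) becomes Kr Kw Kx = 110 with U bits all 0, while
	 * the same permissions on a user page mirror into both halves,
	 * giving 110 110.
	 */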

	pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);

	tlb_entry_insert(pd0, pd1);

	local_irq_restore(flags);
}

/*
 * Called at the end of pagefault, for a userspace mapped page
 *  -pre-install the corresponding TLB entry into MMU
 *  -Finalize the delayed D-cache flush of the kernel mapping of the page
 *   due to flush_dcache_page(), copy_user_page()
 *
 * Note that flush (when done) involves both WBACK - so physical page is
 * in sync - as well as INV - so any non-congruent aliases don't remain
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
		      pte_t *ptep)
{
	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
	unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
	struct page *page = pfn_to_page(pte_pfn(*ptep));

	create_tlb(vma, vaddr, ptep);

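	/*
	 * The zero page is mapped read-only into any number of address
	 * spaces and is never dirtied, so the cache alias maintenance
	 * below can be safely skipped for it.
	 */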
	if (page == ZERO_PAGE(0))
		return;

	/*
	 * Exec page : Independent of aliasing/page-color considerations,
	 *	       since icache doesn't snoop dcache on ARC, any dirty
	 *	       K-mapping of a code page needs to be wback+inv so that
	 *	       icache fetch by userspace sees code correctly.
	 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
	 *	       so userspace sees the right data.
	 *	       (Avoids the flush for Non-exec + congruent mapping case)
	 */
	if ((vma->vm_flags & VM_EXEC) ||
	    addr_not_cache_congruent(paddr, vaddr)) {

		int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);

		if (dirty) {
			/* wback + inv dcache lines */
			__flush_dcache_page(paddr, paddr);

			/* invalidate any existing icache lines */
			if (vma->vm_flags & VM_EXEC)
				__inv_icache_page(paddr, vaddr);
		}
	}
}

/*
 * Read the Cache Build Configuration Registers, decode them and save into
 * the cpuinfo structure for later use.
 * No validation is done here, simply read/convert the BCRs.
 */
void read_decode_mmu_bcr(void)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned int tmp;
	struct bcr_mmu_1_2 {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8;
#else
		unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
#endif
	} *mmu2;

	struct bcr_mmu_3 {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4,
			     u_itlb:4, u_dtlb:4;
#else
		unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1,
			     sets:4, ways:4, ver:8;
#endif
	} *mmu3;

	struct bcr_mmu_4 {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
			     n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
#else
		/*	     DTLB      ITLB      JES        JE         JA  */
		unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
			     pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
#endif
	} *mmu4;

	tmp = read_aux_reg(ARC_REG_MMU_BCR);
	mmu->ver = (tmp >> 24);

	if (mmu->ver <= 2) {
		mmu2 = (struct bcr_mmu_1_2 *)&tmp;
		mmu->pg_sz_k = TO_KB(PAGE_SIZE);
		mmu->sets = 1 << mmu2->sets;
		mmu->ways = 1 << mmu2->ways;
		mmu->u_dtlb = mmu2->u_dtlb;
		mmu->u_itlb = mmu2->u_itlb;
	} else if (mmu->ver == 3) {
		mmu3 = (struct bcr_mmu_3 *)&tmp;
		mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
		mmu->sets = 1 << mmu3->sets;
		mmu->ways = 1 << mmu3->ways;
		mmu->u_dtlb = mmu3->u_dtlb;
		mmu->u_itlb = mmu3->u_itlb;
	} else {
		mmu4 = (struct bcr_mmu_4 *)&tmp;
		mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
		mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
		mmu->sets = 64 << mmu4->n_entry;
		mmu->ways = mmu4->n_ways * 2;
		mmu->u_dtlb = mmu4->u_dtlb * 4;
		mmu->u_itlb = mmu4->u_itlb * 4;
	}

	mmu->num_tlb = mmu->sets * mmu->ways;
}
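
/*
 * Illustrative decode (field values made up, not from any real BCR): an
 * MMU v3 BCR with pg_sz = 4, sets = 7, ways = 2 would yield an 8K page
 * (1 << (4 - 1) KB) and a 128-set x 4-way = 512-entry J-TLB.
 */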

char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;
	char super_pg[64] = "";

	if (p_mmu->s_pg_sz_m)
		scnprintf(super_pg, 64, "%dM Super Page%s, ",
			  p_mmu->s_pg_sz_m, " (not used)");

	n += scnprintf(buf + n, len - n,
		      "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d %s\n",
		       p_mmu->ver, p_mmu->pg_sz_k, super_pg,
		       p_mmu->num_tlb, p_mmu->sets, p_mmu->ways,
		       p_mmu->u_dtlb, p_mmu->u_itlb,
		       IS_ENABLED(CONFIG_ARC_MMU_SASID) ? ",SASID" : "");

	return buf;
}

void arc_mmu_init(void)
{
	char str[256];
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;

	printk(arc_mmu_mumbojumbo(0, str, sizeof(str)));

	/*
	 * For efficiency's sake, the kernel is compile-time built for one MMU
	 * version. This must match the hardware it is running on.
	 * Linux built for MMU V2, if run on MMU V1, will break down because V1
	 * hardware doesn't understand cmds such as WriteNI or IVUTLB.
	 * On the other hand, Linux built for V1, if run on MMU V2, will do
	 * un-needed workarounds to prevent memcpy thrashing.
	 * Similarly MMU V3 has new features which won't work on older MMUs.
	 */
	if (mmu->ver != CONFIG_ARC_MMU_VER) {
		panic("MMU ver %d doesn't match kernel built for %d...\n",
		      mmu->ver, CONFIG_ARC_MMU_VER);
	}

	if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
		panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));

	/* Enable the MMU */
	write_aux_reg(ARC_REG_PID, MMU_ENABLE);

	/* In smp we use this reg for interrupt 1 scratch */
#ifndef CONFIG_SMP
	/* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
	write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
#endif
}

/*
 * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
 * The mapping is Column-first.
 *		---------------------	-----------
 *		|way0|way1|way2|way3|	|way0|way1|
 *		---------------------	-----------
 * [set0]	|  0 |  1 |  2 |  3 |	|  0 |  1 |
 * [set1]	|  4 |  5 |  6 |  7 |	|  2 |  3 |
 *		~		    ~	~	  ~
 * [set127]	| 508| 509| 510| 511|	| 254| 255|
 *		---------------------	-----------
 * For normal operations we don't (must not) care how the above works, since
 * the MMU cmd getIndex(vaddr) abstracts that out.
 * However for walking the WAYS of a SET, we need to know this.
 */
#define SET_WAY_TO_IDX(mmu, set, way)  ((set) * mmu->ways + (way))
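
/*
 * e.g. for the 4-way layout pictured above, (set 1, way 2) maps to index
 * 1 * 4 + 2 = 6, matching the table where row [set1] holds indexes 4..7.
 */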

/* Handling of Duplicate PD (TLB entry) in MMU.
 * -Could be due to buggy customer tapeouts or obscure kernel bugs
 * -MMU complains not at the time of duplicate PD installation, but at the
 *  time of a lookup matching multiple ways.
 * -Ideally these should never happen - but if they do - workaround by
 *  deleting the duplicate one.
 * -Knob to be verbose about it (TODO: hook them up to debugfs)
 */
volatile int dup_pd_verbose = 1;	/* Be silent (0) or complain (1, default) */

void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
			  struct pt_regs *regs)
{
	int set, way, n;
	unsigned long flags, is_valid;
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned int pd0[mmu->ways], pd1[mmu->ways];

	local_irq_save(flags);

	/* re-enable the MMU */
	write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));

	/* loop thru all sets of TLB */
	for (set = 0; set < mmu->sets; set++) {

		/* read out all the ways of current set */
		for (way = 0, is_valid = 0; way < mmu->ways; way++) {
			write_aux_reg(ARC_REG_TLBINDEX,
				      SET_WAY_TO_IDX(mmu, set, way));
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
			pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
			pd1[way] = read_aux_reg(ARC_REG_TLBPD1);
			is_valid |= pd0[way] & _PAGE_PRESENT;
		}

		/* If all the WAYS in SET are empty, skip to next SET */
		if (!is_valid)
			continue;

		/* Scan the set for duplicate ways: needs a nested loop */
		for (way = 0; way < mmu->ways - 1; way++) {
			if (!pd0[way])
				continue;

			for (n = way + 1; n < mmu->ways; n++) {
				if ((pd0[way] & PAGE_MASK) ==
				    (pd0[n] & PAGE_MASK)) {

					if (dup_pd_verbose) {
						pr_info("Duplicate PD's @[%d:%d]/[%d:%d]\n",
							set, way, set, n);
						pr_info("TLBPD0[%u]: %08x\n",
							way, pd0[way]);
					}

					/*
					 * clear entry @way and not @n. This is
					 * critical to our optimised loop
					 */
					pd0[way] = pd1[way] = 0;
					write_aux_reg(ARC_REG_TLBINDEX,
						SET_WAY_TO_IDX(mmu, set, way));
					__tlb_entry_erase();
				}
			}
		}
	}

	local_irq_restore(flags);
}

/***********************************************************************
 * Diagnostic Routines
 *  -Called from Low Level TLB Handlers if things don't look good
 **********************************************************************/

#ifdef CONFIG_ARC_DBG_TLB_PARANOIA

/*
 * Low Level ASM TLB handler calls this if it finds that HW and SW ASIDs
 * don't match
 */
void print_asid_mismatch(int mm_asid, int mmu_asid, int is_fast_path)
{
	pr_emerg("ASID Mismatch in %s Path Handler: sw-pid=0x%x hw-pid=0x%x\n",
		 is_fast_path ? "Fast" : "Slow", mm_asid, mmu_asid);

	__asm__ __volatile__("flag 1");
}

void tlb_paranoid_check(unsigned int mm_asid, unsigned long addr)
{
	unsigned int mmu_asid;

	mmu_asid = read_aux_reg(ARC_REG_PID) & 0xff;

	/*
	 * At the time of a TLB miss/installation
	 *   - HW version needs to match SW version
	 *   - SW needs to have a valid ASID
	 */
	if (addr < 0x70000000 &&
	    ((mm_asid == MM_CTXT_NO_ASID) ||
	     (mmu_asid != (mm_asid & MM_CTXT_ASID_MASK))))
		print_asid_mismatch(mm_asid, mmu_asid, 0);
}
#endif