/*
 * TLB Management (flush/create/diagnostics) for ARC700
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: Aug 2011
 *  -Reintroduce duplicate PD fixup - some customer chips still have the issue
 *
 * vineetg: May 2011
 *  -No need to flush_cache_page( ) for each call to update_mmu_cache()
 *   some of the LMBench tests improved amazingly
 *      = page-fault thrice as fast (75 usec to 28 usec)
 *      = mmap twice as fast (9.6 msec to 4.6 msec),
 *      = fork (5.3 msec to 3.7 msec)
 *
 * vineetg: April 2011 :
 *  -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
 *      helps avoid a shift when preparing PD0 from PTE
 *
 * vineetg: April 2011 : Preparing for MMU V3
 *  -MMU v2/v3 BCRs decoded differently
 *  -Remove TLB_SIZE hardcoding as it's variable now: 256 or 512
 *  -tlb_entry_erase( ) can be void
 *  -local_flush_tlb_range( ):
 *      = need not "ceil" @end
 *      = walks MMU only if range spans < 32 entries, as opposed to 256
 *
 * Vineetg: Sept 10th 2008
 *  -Changes related to MMU v2 (Rel 4.8)
 *
 * Vineetg: Aug 29th 2008
 *  -In TLB Flush operations (Metal Fix MMU) there is an explicit command to
 *   flush Micro-TLBs. If the TLB Index Reg is invalid prior to the TLBIVUTLB
 *   cmd, it fails. Thus it needs to be loaded with ANY valid value before
 *   invoking the TLBIVUTLB cmd.
 *
 * Vineetg: Aug 21st 2008:
 *  -Reduced the duration of IRQ lockouts in TLB Flush routines
 *  -Multiple copies of TLB erase code separated into a "single" function
 *  -In TLB Flush routines, interrupt disabling moved UP to retrieve ASID
 *   in interrupt-safe region.
 *
 * Vineetg: April 23rd Bug #93131
 *    Problem: tlb_flush_kernel_range() doesn't do anything if the range to
 *             flush is more than the size of the TLB itself.
 *
 * Rahul Trivedi : Codito Technologies 2004
 */

#include <linux/module.h>
#include <linux/bug.h>
#include <asm/arcregs.h>
#include <asm/setup.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>

/* Need for ARC MMU v2
 *
 * ARC700 MMU-v1 had a Joint-TLB for Code and Data and is 2 way set-assoc.
 * For a memcpy operation with 3 players (src/dst/code) such that all 3 pages
 * map into the same set, there would be contention for the 2 ways causing
 * severe thrashing.
 *
 * Although the J-TLB is 2 way set assoc, ARC700 caches J-TLB entries into
 * uTLBs which have much higher associativity. u-D-TLB is 8 ways, u-I-TLB is
 * 4 ways. Given this, the thrashing problem should never happen because once
 * the 3 J-TLB entries are created (even though the 3rd will knock out one of
 * the prev two), the u-D-TLB and u-I-TLB will have what is required to
 * accomplish the memcpy.
 *
 * Yet we still see the thrashing because a J-TLB write causes a flush of the
 * u-TLBs. This is a simple design for keeping them in sync. So what do we do?
 * The solution which James came up with was pretty neat. It utilised the
 * assoc of uTLBs by not invalidating always but only when absolutely
 * necessary.
 *
 * - Existing TLB commands work as before
 * - New command (TLBWriteNI) for TLB write without clearing uTLBs
 * - New command (TLBIVUTLB) to invalidate uTLBs.
 *
 * The uTLBs need only be invalidated when pages are being removed from the
 * OS page table. If a 'victim' TLB entry is being overwritten in the main TLB
 * as a result of a miss, the removed entry is still allowed to exist in the
 * uTLBs as it is still valid and present in the OS page table. This allows the
 * full associativity of the uTLBs to hide the limited associativity of the
 * main TLB.
 *
 * During a miss handler, the new "TLBWriteNI" command is used to load
 * entries without clearing the uTLBs.
 *
 * When the OS page table is updated, TLB entries that may be associated with a
 * removed page are removed (flushed) from the TLB using TLBWrite. In this
 * circumstance, the uTLBs must also be cleared. This is done by using the
 * existing TLBWrite command. An explicit IVUTLB is also required for those
 * corner cases when TLBWrite was not executed at all because the corresponding
 * J-TLB entry got evicted/replaced.
 */
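
/*
 * For orientation, a rough summary of how the above shows up in the helpers
 * below (not an exhaustive list of command usage):
 *  - tlb_entry_erase()  : TLBProbe to locate the entry, then TLBWrite of a
 *                         zeroed PD0/PD1 (uTLBs cleared as a side effect)
 *  - utlb_invalidate()  : explicit TLBIVUTLB (MMU v2 onwards)
 *  - tlb_entry_insert() : TLBProbe/TLBGetIndex to pick a slot, then TLBWrite
 *                         (TLBWriteNI is deliberately not used there, see the
 *                         comment in that function)
 */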


/* A copy of the ASID from the PID reg is kept in asid_cache */
int asid_cache = FIRST_ASID;

/* ASID to mm struct mapping. We have one extra entry corresponding to
 * NO_ASID to save us a compare when clearing the mm entry for old asid
 * see get_new_mmu_context (asm-arc/mmu_context.h)
 */
struct mm_struct *asid_mm_map[NUM_ASID + 1];

/*
 * Utility Routine to erase a J-TLB entry
 * Caller needs to setup Index Reg (manually or via getIndex)
 */
static inline void __tlb_entry_erase(void)
{
	write_aux_reg(ARC_REG_TLBPD1, 0);
	write_aux_reg(ARC_REG_TLBPD0, 0);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
{
	unsigned int idx;

	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	return idx;
}

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	unsigned int idx;

	/* Locate the TLB entry for this vaddr + ASID */
	idx = tlb_entry_lkup(vaddr_n_asid);

	/* No error means entry found, zero it out */
	if (likely(!(idx & TLB_LKUP_ERR))) {
		__tlb_entry_erase();
	} else {
		/* Duplicate entry error */
		WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
		     vaddr_n_asid);
	}
}

/****************************************************************************
 * ARC700 MMU caches recently used J-TLB entries (RAM) as uTLBs (FLOPs)
 *
 * New IVUTLB cmd in MMU v2 explicitly invalidates the uTLB
 *
 * utlb_invalidate ( )
 *  -For v2 MMU calls Flush uTLB Cmd
 *  -For v1 MMU does nothing (except for Metal Fix v1 MMU)
 *      This is because in v1 TLBWrite itself invalidates uTLBs
 ***************************************************************************/

static void utlb_invalidate(void)
{
#if (CONFIG_ARC_MMU_VER >= 2)

#if (CONFIG_ARC_MMU_VER == 2)
	/* MMU v2 introduced the uTLB Flush command.
	 * There was however an obscure hardware bug, where uTLB flush would
	 * fail when a prior probe for J-TLB (both totally unrelated) would
	 * return lkup err - because the entry didn't exist in MMU.
	 * The workaround was to set Index reg with some valid value, prior to
	 * flush. This was fixed in MMU v3 hence not needed any more
	 */
	unsigned int idx;

	/* make sure INDEX Reg is valid */
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	/* If not write some dummy val */
	if (unlikely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBINDEX, 0xa);
#endif

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
#endif

}

static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
{
	unsigned int idx;

	/*
	 * First verify if entry for this vaddr+ASID already exists
	 * This also sets up PD0 (vaddr, ASID..) for final commit
	 */
	idx = tlb_entry_lkup(pd0);

	/*
	 * If not already present, get a free slot from MMU.
	 * Otherwise, Probe would have located the entry and set INDEX Reg
	 * with existing location. This will cause Write CMD to over-write
	 * existing entry with new PD0 and PD1
	 */
	if (likely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);

	/* setup the other half of TLB entry (pfn, rwx..) */
	write_aux_reg(ARC_REG_TLBPD1, pd1);

	/*
	 * Commit the Entry to MMU
	 * It doesn't sound safe to use the TLBWriteNI cmd here
	 * which doesn't flush uTLBs. I'd rather be safe than sorry.
	 */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

/*
 * Un-conditionally (without lookup) erase the entire MMU contents
 */

noinline void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned int entry;
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;

	local_irq_save(flags);

	/* Load PD0 and PD1 with template for a Blank Entry */
	write_aux_reg(ARC_REG_TLBPD1, 0);
	write_aux_reg(ARC_REG_TLBPD0, 0);

	for (entry = 0; entry < mmu->num_tlb; entry++) {
		/* write this entry to the TLB */
		write_aux_reg(ARC_REG_TLBINDEX, entry);
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
	}

	utlb_invalidate();

	local_irq_restore(flags);
}

/*
 * Flush the entire MM for userland. The fastest way is to move to Next ASID
 */
noinline void local_flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * Small optimisation courtesy IA64
	 * flush_mm called during fork,exit,munmap etc, multiple times as well.
	 * Only for fork( ) do we need to move parent to a new MMU ctxt,
	 * all other cases are NOPs, hence this check.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	/*
	 * - Move to a new ASID, but only if the mm is still wired in
	 *   (Android Binder ended up calling this for vma->mm != tsk->mm,
	 *   causing h/w - s/w ASID to get out of sync)
	 * - Also get_new_mmu_context() new implementation allocates a new
	 *   ASID only if it is not allocated already - so unallocate first
	 */
	destroy_context(mm);
	if (current->mm == mm)
		get_new_mmu_context(mm);
}

/*
 * Flush a Range of TLB entries for userland.
 * @start is inclusive, while @end is exclusive
 * Difference between this and Kernel Range Flush is
 *  -Here the fastest way (if range is too large) is to move to next ASID
 *   without doing any explicit Shootdown
 *  -In case of kernel Flush, entry has to be shot down explicitly
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	unsigned long flags;
	unsigned int asid;

	/* If range @start to @end is more than 32 TLB entries deep,
	 * it's better to move to a new ASID rather than searching for
	 * individual entries and then shooting them down
	 *
	 * The calc above is rough, doesn't account for unaligned parts,
	 * since this is heuristics based anyway
	 */
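	/* For scale: 32 entries cover 256K with 8K pages, or 128K with 4K
	 * pages (the actual PAGE_SIZE depends on the kernel config)
	 */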
	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_mm(vma->vm_mm);
		return;
	}

	/*
	 * @start moved to page start: this alone suffices for checking
	 * loop end condition below, w/o need for aligning @end to end
	 * e.g. 2000 to 4001 will anyhow loop twice
	 */
	start &= PAGE_MASK;

	local_irq_save(flags);
	asid = vma->vm_mm->context.asid;

	if (asid != NO_ASID) {
		while (start < end) {
			tlb_entry_erase(start | (asid & 0xff));
			start += PAGE_SIZE;
		}
	}

	utlb_invalidate();

	local_irq_restore(flags);
}

/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
 *  @start, @end interpreted as kvaddr
 * Interestingly, shared TLB entries can also be flushed using just
 * @start,@end alone (interpreted as user vaddr), although technically SASID
 * is also needed. However our smart TLBProbe lookup takes care of that.
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;

	/* exactly the same as above, except for TLB entry not taking ASID */

	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_all();
		return;
	}

	start &= PAGE_MASK;

	local_irq_save(flags);
	while (start < end) {
		tlb_entry_erase(start);
		start += PAGE_SIZE;
	}

	utlb_invalidate();

	local_irq_restore(flags);
}

/*
 * Delete TLB entry in MMU for a given page (??? address)
 * NOTE One TLB entry contains translation for single PAGE
 */

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	unsigned long flags;

	/* Note that it is critical that interrupts are DISABLED between
	 * checking the ASID and using it to flush the TLB entry
	 */
	local_irq_save(flags);

	if (vma->vm_mm->context.asid != NO_ASID) {
		tlb_entry_erase((page & PAGE_MASK) |
				(vma->vm_mm->context.asid & 0xff));
		utlb_invalidate();
	}

	local_irq_restore(flags);
}

/*
 * Routine to create a TLB entry
 */
void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	unsigned long flags;
	unsigned int asid_or_sasid, rwx;
	unsigned long pd0, pd1;

	/*
	 * create_tlb() assumes that current->mm == vma->mm, since
	 * -the ASID for the TLB entry is fetched from the MMU ASID reg
	 *  (valid for curr)
	 * -it completes the lazy write to SASID reg (again valid for curr tsk)
	 *
	 * Removing the assumption involves
	 * -Using vma->mm->context{ASID,SASID}, as opposed to MMU reg.
	 * -Fix the TLB paranoid debug code to not trigger false negatives.
	 * -More importantly it makes this handler inconsistent with fast-path
	 *  TLB Refill handler which always deals with "current"
	 *
	 * Let's see the use cases when current->mm != vma->mm and we land here
	 *  1. execve->copy_strings()->__get_user_pages->handle_mm_fault
	 *     Here VM wants to pre-install a TLB entry for user stack while
	 *     current->mm still points to pre-execve mm (hence the condition).
	 *     However the stack vaddr is soon relocated (randomization) and
	 *     move_page_tables() tries to undo that TLB entry.
	 *     Thus not creating TLB entry is not any worse.
	 *
	 *  2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
	 *     breakpoint in debugged task. Not creating a TLB now is not
	 *     performance critical.
	 *
	 * Both the cases above are not good enough for code churn.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	tlb_paranoid_check(vma->vm_mm->context.asid, address);

	address &= PAGE_MASK;

	/* update this PTE's credentials */
	pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);

	/* Create HW TLB(PD0,PD1) from PTE */

	/* ASID for this task */
	asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;

	pd0 = address | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);

	/*
	 * ARC MMU provides fully orthogonal access bits for K/U mode,
	 * however Linux only saves 1 set to save PTE real-estate
	 * Here we convert 3 PTE bits into 6 MMU bits:
	 *  -Kernel only entries have Kr Kw Kx 0 0 0
	 *  -User entries have mirrored K and U bits
	 */
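	/* Worked example (assuming a PTE with r=1, w=1, x=0):
	 *  _PAGE_GLOBAL set   -> Kr Kw 0  0  0  0   (kernel only entry)
	 *  _PAGE_GLOBAL clear -> Kr Kw 0  Ur Uw 0   (user entry, K/U mirrored)
	 */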
	rwx = pte_val(*ptep) & PTE_BITS_RWX;

	if (pte_val(*ptep) & _PAGE_GLOBAL)
		rwx <<= 3;		/* r w x => Kr Kw Kx 0 0 0 */
	else
		rwx |= (rwx << 3);	/* r w x => Kr Kw Kx Ur Uw Ux */

	pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);

	tlb_entry_insert(pd0, pd1);

	local_irq_restore(flags);
}

/*
 * Called at the end of pagefault, for a userspace mapped page
 *  -pre-install the corresponding TLB entry into MMU
 *  -Finalize the delayed D-cache flush of kernel mapping of page due to
 *      flush_dcache_page(), copy_user_page()
 *
 * Note that flush (when done) involves both WBACK - so physical page is
 * in sync as well as INV - so any non-congruent aliases don't remain
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
		      pte_t *ptep)
{
	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
	unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
	struct page *page = pfn_to_page(pte_pfn(*ptep));

	create_tlb(vma, vaddr, ptep);

	if (page == ZERO_PAGE(0)) {
		return;
	}

	/*
	 * Exec page : Independent of aliasing/page-color considerations,
	 *             since icache doesn't snoop dcache on ARC, any dirty
	 *             K-mapping of a code page needs to be wback+inv so that
	 *             icache fetch by userspace sees code correctly.
	 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
	 *             so userspace sees the right data.
	 *             (Avoids the flush for Non-exec + congruent mapping case)
	 */
	if ((vma->vm_flags & VM_EXEC) ||
	    addr_not_cache_congruent(paddr, vaddr)) {

		int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
		if (dirty) {
			/* wback + inv dcache lines */
			__flush_dcache_page(paddr, paddr);

			/* invalidate any existing icache lines */
			if (vma->vm_flags & VM_EXEC)
				__inv_icache_page(paddr, vaddr);
		}
	}
}

/* Read the Cache Build Configuration Registers, Decode them and save into
 * the cpuinfo structure for later use.
 * No Validation is done here, simply read/convert the BCRs
 */
void read_decode_mmu_bcr(void)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned int tmp;
	struct bcr_mmu_1_2 {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8;
#else
		unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
#endif
	} *mmu2;

	struct bcr_mmu_3 {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4,
			     u_itlb:4, u_dtlb:4;
#else
		unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1,
			     sets:4, ways:4, ver:8;
#endif
	} *mmu3;

	tmp = read_aux_reg(ARC_REG_MMU_BCR);
	mmu->ver = (tmp >> 24);

	if (mmu->ver <= 2) {
		mmu2 = (struct bcr_mmu_1_2 *)&tmp;
		mmu->pg_sz = PAGE_SIZE;
		mmu->sets = 1 << mmu2->sets;
		mmu->ways = 1 << mmu2->ways;
		mmu->u_dtlb = mmu2->u_dtlb;
		mmu->u_itlb = mmu2->u_itlb;
	} else {
		mmu3 = (struct bcr_mmu_3 *)&tmp;
		mmu->pg_sz = 512 << mmu3->pg_sz;
		mmu->sets = 1 << mmu3->sets;
		mmu->ways = 1 << mmu3->ways;
		mmu->u_dtlb = mmu3->u_dtlb;
		mmu->u_itlb = mmu3->u_itlb;
	}
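	/* e.g. an MMU v3 BCR with a pg_sz field of 4 decodes, per the line
	 * above, to 512 << 4 = 8K pages
	 */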

	mmu->num_tlb = mmu->sets * mmu->ways;
}

char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;

	n += scnprintf(buf + n, len - n, "ARC700 MMU [v%x]\t: %dk PAGE, ",
		       p_mmu->ver, TO_KB(p_mmu->pg_sz));

	n += scnprintf(buf + n, len - n,
		       "J-TLB %d (%dx%d), uDTLB %d, uITLB %d, %s\n",
		       p_mmu->num_tlb, p_mmu->sets, p_mmu->ways,
		       p_mmu->u_dtlb, p_mmu->u_itlb,
		       IS_ENABLED(CONFIG_ARC_MMU_SASID) ? "SASID" : "");

	return buf;
}

void arc_mmu_init(void)
{
	char str[256];
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;

	printk(arc_mmu_mumbojumbo(0, str, sizeof(str)));

	/* For efficiency's sake, the kernel is built at compile time for one
	 * MMU ver. This must match the hardware it is running on.
	 * Linux built for MMU V2, if run on MMU V1 will break down because V1
	 * hardware doesn't understand cmds such as WriteNI, or IVUTLB
	 * On the other hand, Linux built for V1 if run on MMU V2 will do
	 * un-needed workarounds to prevent memcpy thrashing.
	 * Similarly MMU V3 has new features which won't work on older MMUs
	 */
	if (mmu->ver != CONFIG_ARC_MMU_VER) {
		panic("MMU ver %d doesn't match kernel built for %d...\n",
		      mmu->ver, CONFIG_ARC_MMU_VER);
	}

	if (mmu->pg_sz != PAGE_SIZE)
		panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));

	/* Enable the MMU */
	write_aux_reg(ARC_REG_PID, MMU_ENABLE);

	/* In SMP we use this reg for interrupt 1 scratch */
#ifndef CONFIG_SMP
	/* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
	write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
#endif
}

/*
 * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
 * The mapping is Column-first.
 *           ---------------------     -----------
 *           |way0|way1|way2|way3|     |way0|way1|
 *           ---------------------     -----------
 * [set0]    |  0 |  1 |  2 |  3 |     |  0 |  1 |
 * [set1]    |  4 |  5 |  6 |  7 |     |  2 |  3 |
 * ~         ~                   ~     ~         ~
 * [set127]  | 508| 509| 510| 511|     | 254| 255|
 *           ---------------------     -----------
 * For normal operations we don't (must not) care how the above works since
 * MMU cmd getIndex(vaddr) abstracts that out.
 * However for walking WAYS of a SET, we need to know this
 */
#define SET_WAY_TO_IDX(mmu, set, way)  ((set) * mmu->ways + (way))
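
/*
 * e.g. for the 128 sets x 4 ways layout above, (set 1, way 2) maps to the
 * linear index 1 * 4 + 2 = 6, matching row [set1] of the table
 */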

/* Handling of Duplicate PD (TLB entry) in MMU.
 * -Could be due to buggy customer tapeouts or obscure kernel bugs
 * -MMU complains not at the time of duplicate PD installation, but at the
 *      time of lookup matching multiple ways.
 * -Ideally these should never happen - but if they do - workaround by deleting
 *   the duplicate one.
 * -Knob to be verbose about it. (TODO: hook them up to debugfs)
 */
volatile int dup_pd_verbose = 1;	/* Be silent about it or complain (default) */

void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
			  struct pt_regs *regs)
{
	int set, way, n;
	unsigned int pd0[4], pd1[4];	/* assume max 4 ways */
	unsigned long flags, is_valid;
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;

	local_irq_save(flags);

	/* re-enable the MMU */
	write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));

	/* loop thru all sets of TLB */
	for (set = 0; set < mmu->sets; set++) {

		/* read out all the ways of current set */
		for (way = 0, is_valid = 0; way < mmu->ways; way++) {
			write_aux_reg(ARC_REG_TLBINDEX,
				      SET_WAY_TO_IDX(mmu, set, way));
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
			pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
			pd1[way] = read_aux_reg(ARC_REG_TLBPD1);
			is_valid |= pd0[way] & _PAGE_PRESENT;
		}

		/* If all the WAYS in SET are empty, skip to next SET */
		if (!is_valid)
			continue;

		/* Scan the set for duplicate ways: needs a nested loop */
		for (way = 0; way < mmu->ways; way++) {
			if (!pd0[way])
				continue;

			for (n = way + 1; n < mmu->ways; n++) {
				if ((pd0[way] & PAGE_MASK) ==
				    (pd0[n] & PAGE_MASK)) {

					if (dup_pd_verbose) {
						pr_info("Duplicate PD's @"
							"[%d:%d]/[%d:%d]\n",
							set, way, set, n);
						pr_info("TLBPD0[%u]: %08x\n",
							way, pd0[way]);
					}

					/*
					 * clear entry @way and not @n. This is
					 * critical to our optimised loop
					 */
					pd0[way] = pd1[way] = 0;
					write_aux_reg(ARC_REG_TLBINDEX,
						SET_WAY_TO_IDX(mmu, set, way));
					__tlb_entry_erase();
				}
			}
		}
	}

	local_irq_restore(flags);
}

/***********************************************************************
 * Diagnostic Routines
 *  -Called from Low Level TLB Handlers if things don't look good
 **********************************************************************/

#ifdef CONFIG_ARC_DBG_TLB_PARANOIA

/*
 * Low Level ASM TLB handler calls this if it finds that HW and SW ASIDS
 * don't match
 */
void print_asid_mismatch(int mm_asid, int mmu_asid, int is_fast_path)
{
	pr_emerg("ASID Mismatch in %s Path Handler: sw-pid=0x%x hw-pid=0x%x\n",
		 is_fast_path ? "Fast" : "Slow", mm_asid, mmu_asid);

	__asm__ __volatile__("flag 1");
}

void tlb_paranoid_check(unsigned int mm_asid, unsigned long addr)
{
	unsigned int mmu_asid;

	mmu_asid = read_aux_reg(ARC_REG_PID) & 0xff;

	/*
	 * At the time of a TLB miss/installation
	 *   - HW version needs to match SW version
	 *   - SW needs to have a valid ASID
	 */
	if (addr < 0x70000000 &&
	    ((mmu_asid != mm_asid) || (mm_asid == NO_ASID)))
		print_asid_mismatch(mm_asid, mmu_asid, 0);
}
#endif