/*
 * TLB Management (flush/create/diagnostics) for ARC700
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: Aug 2011
 *  -Reintroduce duplicate PD fixup - some customer chips still have the issue
 *
 * vineetg: May 2011
 *  -No need to flush_cache_page( ) for each call to update_mmu_cache()
 *   some of the LMBench tests improved amazingly
 *      = page-fault thrice as fast (75 usec to 28 usec)
 *      = mmap twice as fast (9.6 msec to 4.6 msec),
 *      = fork (5.3 msec to 3.7 msec)
 *
 * vineetg: April 2011 :
 *  -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
 *      helps avoid a shift when preparing PD0 from PTE
 *
 * vineetg: April 2011 : Preparing for MMU V3
 *  -MMU v2/v3 BCRs decoded differently
 *  -Remove TLB_SIZE hardcoding as it's variable now: 256 or 512
 *  -tlb_entry_erase( ) can be void
 *  -local_flush_tlb_range( ):
 *      = need not "ceil" @end
 *      = walks MMU only if range spans < 32 entries, as opposed to 256
 *
 * Vineetg: Sept 10th 2008
 *  -Changes related to MMU v2 (Rel 4.8)
 *
 * Vineetg: Aug 29th 2008
 *  -In TLB Flush operations (Metal Fix MMU) there is an explicit command to
 *   flush Micro-TLBs. If the TLB Index Reg is invalid prior to the TLBIVUTLB
 *   cmd, it fails. Thus it needs to be loaded with ANY valid value before
 *   invoking the TLBIVUTLB cmd
 *
 * Vineetg: Aug 21st 2008:
 *  -Reduced the duration of IRQ lockouts in TLB Flush routines
 *  -Multiple copies of TLB erase code separated into a "single" function
 *  -In TLB Flush routines, interrupt disabling moved UP to retrieve ASID
 *   in interrupt-safe region.
 *
 * Vineetg: April 23rd Bug #93131
 *    Problem: tlb_flush_kernel_range() doesn't do anything if the range to
 *             flush is more than the size of TLB itself.
 *
 * Rahul Trivedi : Codito Technologies 2004
 */

#include <linux/module.h>
#include <linux/bug.h>
#include <asm/arcregs.h>
#include <asm/setup.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>

/* Need for ARC MMU v2
 *
 * ARC700 MMU-v1 had a Joint-TLB for Code and Data and is 2 way set-assoc.
 * For a memcpy operation with 3 players (src/dst/code) such that all 3 pages
 * map into same set, there would be contention for the 2 ways causing severe
 * Thrashing.
 *
 * Although J-TLB is 2 way set assoc, ARC700 caches J-TLB into uTLBs which have
 * much higher associativity. u-D-TLB is 8 ways, u-I-TLB is 4 ways.
 * Given this, the thrashing problem should never happen because once the 3
 * J-TLB entries are created (even though 3rd will knock out one of the prev
 * two), the u-D-TLB and u-I-TLB will have what is required to accomplish memcpy
 *
 * Yet we still see the Thrashing because a J-TLB Write causes a flush of u-TLBs.
 * This is a simple design for keeping them in sync. So what do we do?
 * The solution which James came up with was pretty neat. It utilised the assoc
 * of uTLBs by not invalidating always but only when absolutely necessary.
 *
 * - Existing TLB commands work as before
 * - New command (TLBWriteNI) for TLB write without clearing uTLBs
 * - New command (TLBIVUTLB) to invalidate uTLBs.
 *
 * The uTLBs need only be invalidated when pages are being removed from the
 * OS page table. If a 'victim' TLB entry is being overwritten in the main TLB
 * as a result of a miss, the removed entry is still allowed to exist in the
 * uTLBs as it is still valid and present in the OS page table. This allows the
 * full associativity of the uTLBs to hide the limited associativity of the main
 * TLB.
 *
 * During a miss handler, the new "TLBWriteNI" command is used to load
 * entries without clearing the uTLBs.
 *
 * When the OS page table is updated, TLB entries that may be associated with a
 * removed page are removed (flushed) from the TLB using TLBWrite. In this
 * circumstance, the uTLBs must also be cleared. This is done by using the
 * existing TLBWrite command. An explicit IVUTLB is also required for those
 * corner cases when TLBWrite was not executed at all because the corresponding
 * J-TLB entry got evicted/replaced.
 */
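/*
 * In the MMU v2/v3 code below this maps onto:
 *  - tlb_entry_insert(), which commits entries with TLBWrite (deliberately
 *    not TLBWriteNI, see the comment in that function)
 *  - utlb_invalidate(), which issues the explicit TLBIVUTLB when needed
 */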

/* A copy of the ASID from the PID reg is kept in asid_cache */
DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;

/*
 * Utility Routine to erase a J-TLB entry
 * Caller needs to setup Index Reg (manually or via getIndex)
 */
static inline void __tlb_entry_erase(void)
{
	write_aux_reg(ARC_REG_TLBPD1, 0);
	write_aux_reg(ARC_REG_TLBPD0, 0);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

#if (CONFIG_ARC_MMU_VER < 4)

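/*
 * Probe the J-TLB for @vaddr_n_asid (i.e. a PD0 value of vaddr | ASID).
 * The returned Index reg value has TLB_LKUP_ERR set when no entry (or a
 * duplicate) was found - which is what the callers below test for.
 */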
static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
{
	unsigned int idx;

	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	return idx;
}

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	unsigned int idx;

	/* Locate the TLB entry for this vaddr + ASID */
	idx = tlb_entry_lkup(vaddr_n_asid);

	/* No error means entry found, zero it out */
	if (likely(!(idx & TLB_LKUP_ERR))) {
		__tlb_entry_erase();
	} else {
		/* Duplicate entry error */
		WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
		     vaddr_n_asid);
	}
}

/****************************************************************************
 * ARC700 MMU caches recently used J-TLB entries (RAM) as uTLBs (FLOPs)
 *
 * New IVUTLB cmd in MMU v2 explicitly invalidates the uTLB
 *
 * utlb_invalidate ( )
 *  -For v2 MMU calls Flush uTLB Cmd
 *  -For v1 MMU does nothing (except for Metal Fix v1 MMU)
 *      This is because in v1 TLBWrite itself invalidates the uTLBs
 ***************************************************************************/

static void utlb_invalidate(void)
{
#if (CONFIG_ARC_MMU_VER >= 2)

#if (CONFIG_ARC_MMU_VER == 2)
	/* MMU v2 introduced the uTLB Flush command.
	 * There was however an obscure hardware bug, where uTLB flush would
	 * fail when a prior probe for J-TLB (both totally unrelated) would
	 * return lkup err - because the entry didn't exist in MMU.
	 * The workaround was to set the Index reg with some valid value,
	 * prior to the flush. This was fixed in MMU v3, hence not needed
	 * any more
	 */
	unsigned int idx;

	/* make sure INDEX Reg is valid */
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	/* If not, write some dummy val */
	if (unlikely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBINDEX, 0xa);
#endif

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
#endif

}

static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
{
	unsigned int idx;

	/*
	 * First verify if entry for this vaddr+ASID already exists
	 * This also sets up PD0 (vaddr, ASID..) for final commit
	 */
	idx = tlb_entry_lkup(pd0);

	/*
	 * If not already present, get a free slot from MMU.
	 * Otherwise, Probe would have located the entry and set INDEX Reg
	 * with existing location. This will cause Write CMD to over-write
	 * existing entry with new PD0 and PD1
	 */
	if (likely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);

	/* setup the other half of TLB entry (pfn, rwx..) */
	write_aux_reg(ARC_REG_TLBPD1, pd1);

	/*
	 * Commit the Entry to MMU
	 * It doesn't sound safe to use the TLBWriteNI cmd here
	 * which doesn't flush uTLBs. I'd rather be safe than sorry.
	 */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

#else	/* CONFIG_ARC_MMU_VER >= 4 */

static void utlb_invalidate(void)
{
	/* No need since uTLB is always in sync with JTLB */
}

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid | _PAGE_PRESENT);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry);
}

static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
{
	write_aux_reg(ARC_REG_TLBPD0, pd0);
	write_aux_reg(ARC_REG_TLBPD1, pd1);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
}

#endif

/*
 * Un-conditionally (without lookup) erase the entire MMU contents
 */

noinline void local_flush_tlb_all(void)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned long flags;
	unsigned int entry;
	int num_tlb = mmu->sets * mmu->ways;

	local_irq_save(flags);

	/* Load PD0 and PD1 with template for a Blank Entry */
	write_aux_reg(ARC_REG_TLBPD1, 0);
	write_aux_reg(ARC_REG_TLBPD0, 0);

	for (entry = 0; entry < num_tlb; entry++) {
		/* write this entry to the TLB */
		write_aux_reg(ARC_REG_TLBINDEX, entry);
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
		const int stlb_idx = 0x800;

		/* Blank sTLB entry */
		write_aux_reg(ARC_REG_TLBPD0, _PAGE_HW_SZ);

		for (entry = stlb_idx; entry < stlb_idx + 16; entry++) {
			write_aux_reg(ARC_REG_TLBINDEX, entry);
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
		}
	}

	utlb_invalidate();

	local_irq_restore(flags);
}

/*
 * Flush the entire MM for userland. The fastest way is to move to Next ASID
 */
noinline void local_flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * Small optimisation courtesy IA64
	 * flush_mm called during fork, exit, munmap etc, multiple times as well.
	 * Only for fork( ) do we need to move parent to a new MMU ctxt,
	 * all other cases are NOPs, hence this check.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	/*
	 * - Move to a new ASID, but only if the mm is still wired in
	 *   (Android Binder ended up calling this for vma->mm != tsk->mm,
	 *   causing h/w - s/w ASID to get out of sync)
	 * - Also the new get_new_mmu_context() implementation allocates a new
	 *   ASID only if it is not allocated already - so unallocate first
	 */
	destroy_context(mm);
	if (current->mm == mm)
		get_new_mmu_context(mm);
}

/*
 * Flush a Range of TLB entries for userland.
 * @start is inclusive, while @end is exclusive
 * Difference between this and Kernel Range Flush is
 *  -Here the fastest way (if range is too large) is to move to next ASID
 *   without doing any explicit Shootdown
 *  -In case of kernel Flush, entry has to be shot down explicitly
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* If range @start to @end is more than 32 TLB entries deep,
	 * it's better to move to a new ASID rather than searching for
	 * individual entries and then shooting them down
	 *
	 * The calc above is rough, doesn't account for unaligned parts,
	 * since this is heuristics based anyways
	 */
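	/* e.g. with the typical 8K ARC page, ranges of 256KB or more just take a new ASID */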
	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_mm(vma->vm_mm);
		return;
	}

	/*
	 * @start moved to page start: this alone suffices for checking
	 * loop end condition below, w/o need for aligning @end to end
	 * e.g. 2000 to 4001 will anyhow loop twice
	 */
	start &= PAGE_MASK;

	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		while (start < end) {
			tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
			start += PAGE_SIZE;
		}
	}

	utlb_invalidate();

	local_irq_restore(flags);
}

/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
 *  @start, @end interpreted as kvaddr
 * Interestingly, shared TLB entries can also be flushed using just
 * @start,@end alone (interpreted as user vaddr), although technically SASID
 * is also needed. However our smart TLBProbe lookup takes care of that.
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;

	/* exactly same as above, except for TLB entry not taking ASID */

	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_all();
		return;
	}

	start &= PAGE_MASK;

	local_irq_save(flags);
	while (start < end) {
		tlb_entry_erase(start);
		start += PAGE_SIZE;
	}

	utlb_invalidate();

	local_irq_restore(flags);
}

/*
 * Delete TLB entry in MMU for a given page (user virtual address)
 * NOTE One TLB entry contains translation for a single PAGE
 */

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* Note that it is critical that interrupts are DISABLED between
	 * checking the ASID and using it to flush the TLB entry
	 */
	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
		utlb_invalidate();
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_SMP

struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ipi_flush_pmd_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_pmd_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}
#endif

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

void flush_tlb_all(void)
{
	on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
			 mm, 1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = uaddr
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);
}
#endif

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct tlb_args ta = {
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
}
#endif

/*
 * Routine to create a TLB entry
 */
void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	unsigned long flags;
	unsigned int asid_or_sasid, rwx;
	unsigned long pd0, pd1;

	/*
	 * create_tlb() assumes that current->mm == vma->mm, since
	 * -the ASID for the TLB entry is fetched from MMU ASID reg (valid for curr)
	 * -it completes the lazy write to SASID reg (again valid for curr tsk)
	 *
	 * Removing the assumption involves
	 * -Using vma->mm->context{ASID,SASID}, as opposed to MMU reg.
	 * -Fix the TLB paranoid debug code to not trigger false negatives.
	 * -More importantly it makes this handler inconsistent with fast-path
	 *  TLB Refill handler which always deals with "current"
	 *
	 * Let's see the use cases when current->mm != vma->mm and we land here
	 *  1. execve->copy_strings()->__get_user_pages->handle_mm_fault
	 *     Here VM wants to pre-install a TLB entry for user stack while
	 *     current->mm still points to pre-execve mm (hence the condition).
	 *     However the stack vaddr is soon relocated (randomization) and
	 *     move_page_tables() tries to undo that TLB entry.
	 *     Thus not creating TLB entry is not any worse.
	 *
	 *  2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
	 *     breakpoint in debugged task. Not creating a TLB now is not
	 *     performance critical.
	 *
	 * Both the cases above are not good enough for code churn.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), address);

	address &= PAGE_MASK;

	/* update this PTE credentials */
	pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);

	/* Create HW TLB(PD0,PD1) from PTE */

	/* ASID for this task */
	asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;

	pd0 = address | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);

	/*
	 * ARC MMU provides fully orthogonal access bits for K/U mode,
	 * however Linux only saves 1 set to save PTE real-estate
	 * Here we convert 3 PTE bits into 6 MMU bits:
	 * -Kernel only entries have Kr Kw Kx 0 0 0
	 * -User entries have mirrored K and U bits
	 */
	rwx = pte_val(*ptep) & PTE_BITS_RWX;

	if (pte_val(*ptep) & _PAGE_GLOBAL)
		rwx <<= 3;		/* r w x => Kr Kw Kx 0 0 0 */
	else
		rwx |= (rwx << 3);	/* r w x => Kr Kw Kx Ur Uw Ux */
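	/*
	 * e.g. a non-global (user) page with r and x set thus ends up with
	 * Kr Kx and Ur Ux in PD1, while a global (kernel) page with the same
	 * PTE bits keeps only the K copies
	 */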

	pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);

	tlb_entry_insert(pd0, pd1);

	local_irq_restore(flags);
}

/*
 * Called at the end of pagefault, for a userspace mapped page
 *  -pre-install the corresponding TLB entry into MMU
 *  -Finalize the delayed D-cache flush of kernel mapping of page due to
 *	flush_dcache_page(), copy_user_page()
 *
 * Note that flush (when done) involves both WBACK - so physical page is
 * in sync as well as INV - so any non-congruent aliases don't remain
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
		      pte_t *ptep)
{
	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
	unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
	struct page *page = pfn_to_page(pte_pfn(*ptep));

	create_tlb(vma, vaddr, ptep);

	if (page == ZERO_PAGE(0)) {
		return;
	}

	/*
	 * Exec page : Independent of aliasing/page-color considerations,
	 *	       since icache doesn't snoop dcache on ARC, any dirty
	 *	       K-mapping of a code page needs to be wback+inv so that
	 *	       icache fetch by userspace sees code correctly.
	 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
	 *	       so userspace sees the right data.
	 *  (Avoids the flush for Non-exec + congruent mapping case)
	 */
	if ((vma->vm_flags & VM_EXEC) ||
	    addr_not_cache_congruent(paddr, vaddr)) {

		int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
		if (dirty) {
			/* wback + inv dcache lines */
			__flush_dcache_page(paddr, paddr);

			/* invalidate any existing icache lines */
			if (vma->vm_flags & VM_EXEC)
				__inv_icache_page(paddr, vaddr);
		}
	}
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * MMUv4 in HS38x cores supports Super Pages which are the basis for Linux THP
 * support.
 *
 * Normal and Super pages can co-exist (of course not overlap) in TLB with a
 * new bit "SZ" in the TLB page descriptor to distinguish between them.
 * Super Page size is configurable in hardware (4K to 16M), but fixed once
 * the RTL is built.
 *
 * The exact THP size a Linux configuration will support is a function of:
 *  - MMU page size (typical 8K, RTL fixed)
 *  - software page walker address split between PGD:PTE:PFN (typical
 *    11:8:13, but can be changed with 1 line)
 * So for above default, THP size supported is 8K * (2^8) = 2M
 *
 * Default Page Walker is 2 levels, PGD:PTE:PFN, which in THP regime
 * reduces to 1 level (as PTE is folded into PGD and canonically referred
 * to as PMD).
 * Thus THP PMD accessors are implemented in terms of PTE (just like sparc)
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	pte_t pte = __pte(pmd_val(*pmd));
	update_mmu_cache(vma, addr, &pte);
}

void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}

	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}

void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			       unsigned long end)
{
	unsigned int cpu;
	unsigned long flags;

	local_irq_save(flags);

	cpu = smp_processor_id();

	if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
		unsigned int asid = hw_pid(vma->vm_mm, cpu);

		/* No need to loop here: this will always be for 1 Huge Page */
		tlb_entry_erase(start | _PAGE_HW_SZ | asid);
	}

	local_irq_restore(flags);
}

#endif

/* Read the MMU Build Configuration Registers, decode them and save into
 * the cpuinfo structure for later use.
 * No Validation is done here, simply read/convert the BCRs
 */
void read_decode_mmu_bcr(void)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned int tmp;
	struct bcr_mmu_1_2 {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8;
#else
		unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
#endif
	} *mmu2;

	struct bcr_mmu_3 {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4,
			     u_itlb:4, u_dtlb:4;
#else
		unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4,
			     ways:4, ver:8;
#endif
	} *mmu3;

	struct bcr_mmu_4 {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
			     n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
#else
		/*	     DTLB      ITLB      JES        JE         JA    */
		unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
			     pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
#endif
	} *mmu4;

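	/* In all the BCR layouts above, the MMU version sits in the top byte */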
	tmp = read_aux_reg(ARC_REG_MMU_BCR);
	mmu->ver = (tmp >> 24);

	if (mmu->ver <= 2) {
		mmu2 = (struct bcr_mmu_1_2 *)&tmp;
		mmu->pg_sz_k = TO_KB(0x2000);
		mmu->sets = 1 << mmu2->sets;
		mmu->ways = 1 << mmu2->ways;
		mmu->u_dtlb = mmu2->u_dtlb;
		mmu->u_itlb = mmu2->u_itlb;
	} else if (mmu->ver == 3) {
		mmu3 = (struct bcr_mmu_3 *)&tmp;
		mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
		mmu->sets = 1 << mmu3->sets;
		mmu->ways = 1 << mmu3->ways;
		mmu->u_dtlb = mmu3->u_dtlb;
		mmu->u_itlb = mmu3->u_itlb;
		mmu->sasid = mmu3->sasid;
	} else {
		mmu4 = (struct bcr_mmu_4 *)&tmp;
		mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
		mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
		mmu->sets = 64 << mmu4->n_entry;
		mmu->ways = mmu4->n_ways * 2;
		mmu->u_dtlb = mmu4->u_dtlb * 4;
		mmu->u_itlb = mmu4->u_itlb * 4;
		mmu->sasid = mmu4->sasid;
		mmu->pae = mmu4->pae;
	}
}

char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;
	char super_pg[64] = "";

	if (p_mmu->s_pg_sz_m)
		scnprintf(super_pg, 64, "%dM Super Page%s, ",
			  p_mmu->s_pg_sz_m,
			  IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));

	n += scnprintf(buf + n, len - n,
		       "MMU [v%x]\t: %dK PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d\n",
		       p_mmu->ver, p_mmu->pg_sz_k, super_pg,
		       p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
		       p_mmu->u_dtlb, p_mmu->u_itlb);

	return buf;
}

void arc_mmu_init(void)
{
	char str[256];
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;

	printk(arc_mmu_mumbojumbo(0, str, sizeof(str)));

	/* For efficiency's sake, the kernel is compile-time built for one MMU ver.
	 * This must match the hardware it is running on.
	 * Linux built for MMU V2, if run on MMU V1, will break down because V1
	 *  hardware doesn't understand cmds such as WriteNI or IVUTLB.
	 * On the other hand, Linux built for V1, if run on MMU V2, will do
	 *  un-needed workarounds to prevent memcpy thrashing.
	 * Similarly MMU V3 has new features which won't work on older MMUs.
	 */
	if (mmu->ver != CONFIG_ARC_MMU_VER) {
		panic("MMU ver %d doesn't match kernel built for %d...\n",
		      mmu->ver, CONFIG_ARC_MMU_VER);
	}

	if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
		panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE))
		panic("MMU Super pg size != Linux HPAGE_PMD_SIZE (%luM)\n",
		      (unsigned long)TO_MB(HPAGE_PMD_SIZE));

	/* Enable the MMU */
	write_aux_reg(ARC_REG_PID, MMU_ENABLE);

	/* In smp we use this reg for interrupt 1 scratch */
#ifndef CONFIG_SMP
	/* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
	write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
#endif
}

/*
 * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
 * The mapping is Column-first.
 *		---------------------	-----------
 *		|way0|way1|way2|way3|	|way0|way1|
 *		---------------------	-----------
 * [set0]	|  0 |  1 |  2 |  3 |	|  0 |  1 |
 * [set1]	|  4 |  5 |  6 |  7 |	|  2 |  3 |
 * ~		~			~
 * [set127]	| 508| 509| 510| 511|	| 254| 255|
 *		---------------------	-----------
 * For normal operations we don't (must not) care how the above works since
 * the MMU cmd getIndex(vaddr) abstracts that out.
 * However for walking WAYS of a SET, we need to know this
 */
#define SET_WAY_TO_IDX(mmu, set, way)  ((set) * mmu->ways + (way))
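/* e.g. for the 4-way column in the table above, SET_WAY_TO_IDX(mmu, 1, 2) = 1 * 4 + 2 = 6 */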

/* Handling of Duplicate PD (TLB entry) in MMU.
 * -Could be due to buggy customer tapeouts or obscure kernel bugs
 * -MMU complains not at the time of duplicate PD installation, but at the
 *  time of lookup matching multiple ways.
 * -Ideally these should never happen - but if they do - workaround by deleting
 *  the duplicate one.
 * -Knob to be verbose about it (TODO: hook it up to debugfs)
 */
volatile int dup_pd_verbose = 1;	/* Be silent about it or complain (default) */

void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
			  struct pt_regs *regs)
{
	int set, way, n;
	unsigned long flags, is_valid;
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
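	/* per-way scratch arrays: ways is only known at runtime (read_decode_mmu_bcr) */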
	unsigned int pd0[mmu->ways], pd1[mmu->ways];

	local_irq_save(flags);

	/* re-enable the MMU */
	write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));

	/* loop thru all sets of TLB */
	for (set = 0; set < mmu->sets; set++) {

		/* read out all the ways of current set */
		for (way = 0, is_valid = 0; way < mmu->ways; way++) {
			write_aux_reg(ARC_REG_TLBINDEX,
				      SET_WAY_TO_IDX(mmu, set, way));
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
			pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
			pd1[way] = read_aux_reg(ARC_REG_TLBPD1);
			is_valid |= pd0[way] & _PAGE_PRESENT;
		}

		/* If all the WAYS in SET are empty, skip to next SET */
		if (!is_valid)
			continue;

		/* Scan the set for duplicate ways: needs a nested loop */
		for (way = 0; way < mmu->ways - 1; way++) {
			if (!pd0[way])
				continue;

			for (n = way + 1; n < mmu->ways; n++) {
				if ((pd0[way] & PAGE_MASK) ==
				    (pd0[n] & PAGE_MASK)) {

					if (dup_pd_verbose) {
						pr_info("Duplicate PD's @"
							"[%d:%d]/[%d:%d]\n",
							set, way, set, n);
						pr_info("TLBPD0[%u]: %08x\n",
							way, pd0[way]);
					}

					/*
					 * clear entry @way and not @n. This is
					 * critical to our optimised loop
					 */
					pd0[way] = pd1[way] = 0;
					write_aux_reg(ARC_REG_TLBINDEX,
						SET_WAY_TO_IDX(mmu, set, way));
					__tlb_entry_erase();
				}
			}
		}
	}

	local_irq_restore(flags);
}

/***********************************************************************
 * Diagnostic Routines
 *  -Called from Low Level TLB Handlers if things don't look good
 **********************************************************************/

#ifdef CONFIG_ARC_DBG_TLB_PARANOIA

/*
 * Low Level ASM TLB handler calls this if it finds that HW and SW ASIDs
 * don't match
 */
void print_asid_mismatch(int mm_asid, int mmu_asid, int is_fast_path)
{
	pr_emerg("ASID Mismatch in %s Path Handler: sw-pid=0x%x hw-pid=0x%x\n",
		 is_fast_path ? "Fast" : "Slow", mm_asid, mmu_asid);

	__asm__ __volatile__("flag 1");
}

void tlb_paranoid_check(unsigned int mm_asid, unsigned long addr)
{
	unsigned int mmu_asid;

	mmu_asid = read_aux_reg(ARC_REG_PID) & 0xff;

	/*
	 * At the time of a TLB miss/installation
	 *   - HW version needs to match SW version
	 *   - SW needs to have a valid ASID
	 */
	if (addr < 0x70000000 &&
	    ((mm_asid == MM_CTXT_NO_ASID) ||
	      (mmu_asid != (mm_asid & MM_CTXT_ASID_MASK))))
		print_asid_mismatch(mm_asid, mmu_asid, 0);
}
#endif