/*
 * arch/xtensa/mm/tlb.c
 *
 * Logic that manipulates the Xtensa MMU. Derived from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2003 Tensilica Inc.
 *
 * Joe Taylor
 * Chris Zankel <chris@zankel.net>
 * Marc Gauthier
 */

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

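/*
 * The two helpers below invalidate every auto-refill entry of the ITLB
 * and DTLB respectively. The entry index passed to the invalidate
 * instruction encodes the way number in its low bits and the entry
 * within the way in the VPN field (hence the << PAGE_SHIFT). The
 * _no_isync variants skip the pipeline barrier, so a single isync is
 * issued once after each loop.
 */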
static inline void __flush_itlb_all (void)
{
	int w, i;

	for (w = 0; w < ITLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);
			invalidate_itlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}

static inline void __flush_dtlb_all (void)
{
	int w, i;

	for (w = 0; w < DTLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);
			invalidate_dtlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}

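/* Flush all auto-refill TLB entries (both ITLB and DTLB) on this CPU. */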
void local_flush_tlb_all(void)
{
	__flush_itlb_all();
	__flush_dtlb_all();
}

/* If mm is current, we simply assign the current task a new ASID, thus
 * invalidating all previous TLB entries. If mm belongs to someone else's
 * user mapping, we invalidate its context, so that a new context is
 * assigned when that mapping is next swapped in.
 */

void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (mm == current->active_mm) {
		unsigned long flags;
		local_irq_save(flags);
		mm->context.asid[cpu] = NO_CONTEXT;
		activate_context(mm, cpu);
		local_irq_restore(flags);
	} else {
		mm->context.asid[cpu] = NO_CONTEXT;
		mm->context.cpu = -1;
	}
}

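/*
 * _TLB_ENTRIES is the number of auto-refill entries in the larger of
 * the two TLBs; it bounds how many pages are worth invalidating
 * individually before a full flush becomes cheaper.
 */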
#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
#if _ITLB_ENTRIES > _DTLB_ENTRIES
# define _TLB_ENTRIES _ITLB_ENTRIES
#else
# define _TLB_ENTRIES _DTLB_ENTRIES
#endif

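/*
 * Flush a user address range on this CPU. If the range covers no more
 * pages than the TLB has auto-refill entries, invalidate it page by
 * page under the mm's ASID (touching the ITLB only for executable
 * mappings); otherwise simply recycle the ASID via local_flush_tlb_mm().
 */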
void local_flush_tlb_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int cpu = smp_processor_id();
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	if (mm->context.asid[cpu] == NO_CONTEXT)
		return;

#if 0
	printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
			(unsigned long)mm->context.asid[cpu], start, end);
#endif
	local_irq_save(flags);

	if (end - start + (PAGE_SIZE - 1) <= _TLB_ENTRIES << PAGE_SHIFT) {
		int oldpid = get_rasid_register();

		set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
		start &= PAGE_MASK;
		if (vma->vm_flags & VM_EXEC)
			while (start < end) {
				invalidate_itlb_mapping(start);
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}
		else
			while (start < end) {
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}

		set_rasid_register(oldpid);
	} else {
		local_flush_tlb_mm(mm);
	}
	local_irq_restore(flags);
}

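/*
 * Flush a single user page on this CPU, temporarily switching the
 * RASID register to the mm's ASID so the per-page invalidate hits the
 * right context.
 */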
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;
	int oldpid;

	if (mm->context.asid[cpu] == NO_CONTEXT)
		return;

	local_irq_save(flags);

	oldpid = get_rasid_register();
	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));

	if (vma->vm_flags & VM_EXEC)
		invalidate_itlb_mapping(page);
	invalidate_dtlb_mapping(page);

	set_rasid_register(oldpid);

	local_irq_restore(flags);
}

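/*
 * Flush a kernel virtual address range. Per-page invalidation is only
 * attempted for small ranges that lie entirely within the TLB-mapped
 * kernel region (TASK_SIZE <= addr < PAGE_OFFSET); anything else falls
 * back to a full flush.
 */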
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
	    end - start < _TLB_ENTRIES << PAGE_SHIFT) {
		start &= PAGE_MASK;
		while (start < end) {
			invalidate_itlb_mapping(start);
			invalidate_dtlb_mapping(start);
			start += PAGE_SIZE;
		}
	} else {
		local_flush_tlb_all();
	}
}

#ifdef CONFIG_DEBUG_TLB_SANITY

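/*
 * Walk the current task's page tables (pgd -> pmd -> pte) and return
 * the raw PTE value for vaddr, or 0 if no mapping exists.
 */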
static unsigned get_pte_for_vaddr(unsigned vaddr)
{
	struct task_struct *task = get_current();
	struct mm_struct *mm = task->mm;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	if (!mm)
		mm = task->active_mm;
	pgd = pgd_offset(mm, vaddr);
	if (pgd_none_or_clear_bad(pgd))
		return 0;
	pmd = pmd_offset(pgd, vaddr);
	if (pmd_none_or_clear_bad(pmd))
		return 0;
	pte = pte_offset_map(pmd, vaddr);
	if (!pte)
		return 0;
	return pte_val(*pte);
}

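/*
 * Severity of a bad TLB entry: suspicious entries are only reported
 * (WARN_ON), insane ones are fatal (BUG_ON).
 */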
enum {
	TLB_SUSPICIOUS = 1,
	TLB_INSANE = 2,
};

static void tlb_insane(void)
{
	BUG_ON(1);
}

static void tlb_suspicious(void)
{
	WARN_ON(1);
}

/*
 * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
 * and TLB entries with user ASID (>=4) have VMA < TASK_SIZE.
 *
 * Check that valid TLB entries either have the same PA as the PTE, or the PTE
 * is marked as non-present. A non-present PTE whose page has a non-zero
 * refcount and a zero mapcount is normal for a batched TLB flush operation.
 * A zero refcount means that the page was freed prematurely. A non-zero
 * mapcount is unusual, but does not necessarily mean an error, and is thus
 * marked as suspicious.
 */
static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
{
	unsigned tlbidx = w | (e << PAGE_SHIFT);
	unsigned r0 = dtlb ?
		read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
	unsigned pte = get_pte_for_vaddr(vpn);
	unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
	unsigned tlb_asid = r0 & ASID_MASK;
	bool kernel = tlb_asid == 1;
	int rc = 0;

	if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
		pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
				dtlb ? 'D' : 'I', w, e, vpn,
				kernel ? "kernel" : "user");
		rc |= TLB_INSANE;
	}

	if (tlb_asid == mm_asid) {
		unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
			read_itlb_translation(tlbidx);
		if ((pte ^ r1) & PAGE_MASK) {
			pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
					dtlb ? 'D' : 'I', w, e, r0, r1, pte);
			if (pte == 0 || !pte_present(__pte(pte))) {
				struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);
				pr_err("page refcount: %d, mapcount: %d\n",
						page_count(p),
						page_mapcount(p));
				if (!page_count(p))
					rc |= TLB_INSANE;
				else if (page_mapped(p))
					rc |= TLB_SUSPICIOUS;
			} else {
				rc |= TLB_INSANE;
			}
		}
	}
	return rc;
}

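/*
 * Scan every way and entry of both TLBs with interrupts disabled and
 * report or crash according to the worst problem found.
 */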
void check_tlb_sanity(void)
{
	unsigned long flags;
	unsigned w, e;
	int bug = 0;

	local_irq_save(flags);
	for (w = 0; w < DTLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, true);
	for (w = 0; w < ITLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, false);
	if (bug & TLB_INSANE)
		tlb_insane();
	if (bug & TLB_SUSPICIOUS)
		tlb_suspicious();
	local_irq_restore(flags);
}

#endif /* CONFIG_DEBUG_TLB_SANITY */