/*
 * arch/sh/mm/cache-sh5.c
 *
 * Copyright (C) 2000, 2001 Paolo Alberelli
 * Copyright (C) 2002 Benedict Gaster
 * Copyright (C) 2003 Richard Curnow
 * Copyright (C) 2003 - 2008 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>

extern void __weak sh4__flush_region_init(void);

/* Wired TLB entry for the D-cache */
static unsigned long long dtlb_cache_slot;

/*
 * The following group of functions deal with mapping and unmapping a
 * temporary page into a DTLB slot that has been set aside for exclusive
 * use.
 */
static inline void
sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,
                           unsigned long paddr)
{
        local_irq_disable();
        sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
}

static inline void sh64_teardown_dtlb_cache_slot(void)
{
        sh64_teardown_tlb_slot(dtlb_cache_slot);
        local_irq_enable();
}

static inline void sh64_icache_inv_all(void)
{
        unsigned long long addr, flag, data;
        unsigned long flags;

        addr = ICCR0;
        flag = ICCR0_ICI;
        data = 0;

        /* Make this a critical section for safety (probably not strictly
           necessary). */
        local_irq_save(flags);

        /* Without %1 it gets inexplicably wrong */
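        /*
         * Read ICCR0 through the config-register space, OR in the ICI
         * (invalidate) bit, write it back, then synci to make sure the
         * invalidation has completed before continuing.
         */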
        __asm__ __volatile__ (
                "getcfg %3, 0, %0\n\t"
                "or     %0, %2, %0\n\t"
                "putcfg %3, 0, %0\n\t"
                "synci"
                : "=&r" (data)
                : "0" (data), "r" (flag), "r" (addr));

        local_irq_restore(flags);
}

static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
{
        /* Invalidate range of addresses [start,end] from the I-cache, where
           the addresses lie in the kernel superpage. */

        unsigned long long ullend, addr, aligned_start;
        aligned_start = (unsigned long long)(signed long long)(signed long) start;
        addr = L1_CACHE_ALIGN(aligned_start);
        ullend = (unsigned long long) (signed long long) (signed long) end;

        while (addr <= ullend) {
                __asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
                addr += L1_CACHE_BYTES;
        }
}

static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)
{
        /* If we get called, we know that vma->vm_flags contains VM_EXEC.
           Also, eaddr is page-aligned. */
        unsigned int cpu = smp_processor_id();
        unsigned long long addr, end_addr;
        unsigned long flags = 0;
        unsigned long running_asid, vma_asid;
        addr = eaddr;
        end_addr = addr + PAGE_SIZE;

        /* Check whether we can use the current ASID for the I-cache
           invalidation. For example, if we're called via
           access_process_vm->flush_cache_page->here (e.g. when reading from
           /proc), 'running_asid' will be that of the reader, not of the
           victim.

           Also, note the risk that we might get pre-empted between the ASID
           compare and blocking IRQs, and before we regain control, the
           pid->ASID mapping changes. However, the whole cache will get
           invalidated when the mapping is renewed, so the worst that can
           happen is that the loop below ends up invalidating somebody else's
           cache entries.
        */

        running_asid = get_asid();
        vma_asid = cpu_asid(cpu, vma->vm_mm);
        if (running_asid != vma_asid) {
                local_irq_save(flags);
                switch_and_save_asid(vma_asid);
        }
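        /*
         * Assuming 32-byte I-cache lines, each pass of the unrolled loop
         * below invalidates 128 bytes, i.e. a 4 kB page takes 32 passes.
         */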
        while (addr < end_addr) {
                /* Worth unrolling a little */
                __asm__ __volatile__("icbi %0, 0" : : "r" (addr));
                __asm__ __volatile__("icbi %0, 32" : : "r" (addr));
                __asm__ __volatile__("icbi %0, 64" : : "r" (addr));
                __asm__ __volatile__("icbi %0, 96" : : "r" (addr));
                addr += 128;
        }
        if (running_asid != vma_asid) {
                switch_and_save_asid(running_asid);
                local_irq_restore(flags);
        }
}

static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
                                unsigned long start, unsigned long end)
{
        /* Used for invalidating big chunks of I-cache, i.e. assume the range
           is whole pages. If 'start' or 'end' is not page aligned, the code
           is conservative and invalidates to the ends of the enclosing pages.
           This is functionally OK, just a performance loss. */

        /* See the comments below in sh64_dcache_purge_user_range() regarding
           the choice of algorithm. However, for the I-cache option (2) isn't
           available because there are no physical tags, so aliases can't be
           resolved. The icbi instruction has to be used through the user
           mapping. Because icbi is cheaper than ocbp on a cache hit, the
           selective code stays worthwhile up to a larger range than it does
           for the D-cache. Just assume 64 pages for now as a working figure.
        */
        int n_pages;

        if (!mm)
                return;

        n_pages = ((end - start) >> PAGE_SHIFT);
        if (n_pages >= 64) {
                sh64_icache_inv_all();
        } else {
                unsigned long aligned_start;
                unsigned long eaddr;
                unsigned long after_last_page_start;
                unsigned long mm_asid, current_asid;
                unsigned long flags = 0;

                mm_asid = cpu_asid(smp_processor_id(), mm);
                current_asid = get_asid();

                if (mm_asid != current_asid) {
                        /* Switch ASID and run the invalidate loop under cli */
                        local_irq_save(flags);
                        switch_and_save_asid(mm_asid);
                }

                aligned_start = start & PAGE_MASK;
                after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);

                while (aligned_start < after_last_page_start) {
                        struct vm_area_struct *vma;
                        unsigned long vma_end;
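                        /*
                         * find_vma() returns the lowest VMA whose vm_end lies
                         * above aligned_start; if aligned_start falls in a
                         * hole below that VMA, just step forward a page at a
                         * time.
                         */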
                        vma = find_vma(mm, aligned_start);
                        if (!vma || aligned_start < vma->vm_start) {
                                /* Avoid getting stuck in an error condition */
                                aligned_start += PAGE_SIZE;
                                continue;
                        }
                        vma_end = vma->vm_end;
                        if (vma->vm_flags & VM_EXEC) {
                                /* Executable */
                                eaddr = aligned_start;
                                while (eaddr < vma_end) {
                                        sh64_icache_inv_user_page(vma, eaddr);
                                        eaddr += PAGE_SIZE;
                                }
                        }
                        aligned_start = vma->vm_end; /* Skip to start of next region */
                }

                if (mm_asid != current_asid) {
                        switch_and_save_asid(current_asid);
                        local_irq_restore(flags);
                }
        }
}

static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
{
        /* The icbi instruction never raises ITLBMISS, i.e. if there's not a
           cache hit on the virtual tag the instruction ends there, without a
           TLB lookup. */

        unsigned long long aligned_start;
        unsigned long long ull_end;
        unsigned long long addr;

        ull_end = end;

        /* Just invalidate over the range using the natural addresses. TLB
           miss handling will be OK (TBC). Since it's for the current process,
           either we're already in the right ASID context, or the ASIDs have
           been recycled since we were last active, in which case we might
           just invalidate another process's I-cache entries: no worries, just
           a performance drop for that process. */
        aligned_start = L1_CACHE_ALIGN(start);
        addr = aligned_start;
        while (addr < ull_end) {
                __asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
                __asm__ __volatile__ ("nop");
                __asm__ __volatile__ ("nop");
                addr += L1_CACHE_BYTES;
        }
}

/* Buffer used as the target of alloco instructions to purge data from cache
   sets by natural eviction. -- RPC */
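/* Assuming the usual SH-5 geometry (32-byte lines), this works out to 32 kB,
   one D-cache's worth, plus 4 kB of slack. */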
#define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4))
static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, };

static inline void sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
{
        /* Purge all ways in a particular block of sets, specified by the base
           set number and number of sets. Can handle wrap-around, if that's
           needed. */

        int dummy_buffer_base_set;
        unsigned long long eaddr, eaddr0, eaddr1;
        int j;
        int set_offset;

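        /*
         * Worked example, assuming a 32 kB, 4-way D-cache with 32-byte lines
         * (256 sets per way): entry_shift is 5 and entry_mask covers bits
         * [12:5], so this recovers which set the start of the dummy buffer
         * itself occupies.
         */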
        dummy_buffer_base_set = ((int)&dummy_alloco_area &
                                 cpu_data->dcache.entry_mask) >>
                                 cpu_data->dcache.entry_shift;
        set_offset = sets_to_purge_base - dummy_buffer_base_set;

        for (j = 0; j < n_sets; j++, set_offset++) {
                set_offset &= (cpu_data->dcache.sets - 1);
                eaddr0 = (unsigned long long)dummy_alloco_area +
                        (set_offset << cpu_data->dcache.entry_shift);

                /*
                 * Do one alloco which hits the required set per cache
                 * way. For write-back mode, this will purge the #ways
                 * resident lines. There's little point unrolling this
                 * loop because the allocos stall more if they're too
                 * close together.
                 */
                eaddr1 = eaddr0 + cpu_data->dcache.way_size *
                                  cpu_data->dcache.ways;

                for (eaddr = eaddr0; eaddr < eaddr1;
                     eaddr += cpu_data->dcache.way_size) {
                        __asm__ __volatile__ ("alloco %0, 0" : : "r" (eaddr));
                        __asm__ __volatile__ ("synco"); /* TAKum03020 */
                }

                eaddr1 = eaddr0 + cpu_data->dcache.way_size *
                                  cpu_data->dcache.ways;

                for (eaddr = eaddr0; eaddr < eaddr1;
                     eaddr += cpu_data->dcache.way_size) {
                        /*
                         * Load from each address. Required because
                         * alloco is a NOP if the cache is write-through.
                         */
                        if (test_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags)))
                                __raw_readb((unsigned long)eaddr);
                }
        }

        /*
         * Don't use OCBI to invalidate the lines. That costs cycles
         * directly. If the dummy block is just left resident, it will
         * naturally get evicted as required.
         */
}

/*
 * Purge the entire contents of the dcache. The most efficient way to
 * achieve this is to use alloco instructions on a region of unused
 * memory equal in size to the cache, thereby causing the current
 * contents to be discarded by natural eviction. The alternative, namely
 * reading every tag, setting up a mapping for the corresponding page and
 * doing an OCBP for the line, would be much more expensive.
 */
static void sh64_dcache_purge_all(void)
{
        sh64_dcache_purge_sets(0, cpu_data->dcache.sets);
}

/* Assumes this address and the (2**n_synbits) pages above it aren't used for
   anything else in the kernel */
#define MAGIC_PAGE0_START 0xffffffffec000000ULL

/* Purge the physical page 'paddr' from the cache. It's known that any
 * cache lines requiring attention have the same page colour as the
 * address 'eaddr'.
 *
 * This relies on the fact that the D-cache matches on physical tags when
 * no virtual tag matches. So we create an alias for the original page
 * and purge through that. (Alternatively, we could have done this by
 * switching ASID to match the original mapping and purged through that,
 * but that involves ASID switching cost + probably a TLBMISS + refill
 * anyway.)
 */
static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr,
                                                unsigned long eaddr)
{
        unsigned long long magic_page_start;
        unsigned long long magic_eaddr, magic_eaddr_end;

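        /*
         * CACHE_OC_SYN_MASK selects the synonym ("colour") bits of eaddr, so
         * the alias below lands in the same cache sets as the user mapping.
         */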
        magic_page_start = MAGIC_PAGE0_START + (eaddr & CACHE_OC_SYN_MASK);

        /* As long as the kernel is not pre-emptible, this doesn't need to be
           under cli/sti. */
        sh64_setup_dtlb_cache_slot(magic_page_start, get_asid(), paddr);

        magic_eaddr = magic_page_start;
        magic_eaddr_end = magic_eaddr + PAGE_SIZE;

        while (magic_eaddr < magic_eaddr_end) {
                /* Little point in unrolling this loop - the OCBPs are blocking
                   and won't go any quicker (i.e. the loop overhead is parallel
                   to part of the OCBP execution.) */
                __asm__ __volatile__ ("ocbp %0, 0" : : "r" (magic_eaddr));
                magic_eaddr += L1_CACHE_BYTES;
        }

        sh64_teardown_dtlb_cache_slot();
}

/*
 * Purge a page given its physical start address, by creating a temporary
 * 1 page mapping and purging across that. Even if we know the virtual
 * address (& vma or mm) of the page, the method here is more elegant
 * because it avoids issues of coping with page faults on the purge
 * instructions (i.e. no special-case code required in the critical path
 * in the TLB miss handling).
 */
static void sh64_dcache_purge_phy_page(unsigned long paddr)
{
        unsigned long long eaddr_start, eaddr, eaddr_end;
        int i;

        /* As long as the kernel is not pre-emptible, this doesn't need to be
           under cli/sti. */
        eaddr_start = MAGIC_PAGE0_START;
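        /* The page may be mapped at any colour, so purge through an alias in
           each of the (1 << CACHE_OC_N_SYNBITS) possible colours. */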
        for (i = 0; i < (1 << CACHE_OC_N_SYNBITS); i++) {
                sh64_setup_dtlb_cache_slot(eaddr_start, get_asid(), paddr);

                eaddr = eaddr_start;
                eaddr_end = eaddr + PAGE_SIZE;
                while (eaddr < eaddr_end) {
                        __asm__ __volatile__ ("ocbp %0, 0" : : "r" (eaddr));
                        eaddr += L1_CACHE_BYTES;
                }

                sh64_teardown_dtlb_cache_slot();
                eaddr_start += PAGE_SIZE;
        }
}

static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
                                unsigned long addr, unsigned long end)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        pte_t entry;
        spinlock_t *ptl;
        unsigned long paddr;

        if (!mm)
                return; /* No way to find physical address of page */

        pgd = pgd_offset(mm, addr);
        if (pgd_bad(*pgd))
                return;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud) || pud_bad(*pud))
                return;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return;

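        /*
         * The caller guarantees [addr, end) is covered by a single page-table
         * page, so one pte_offset_map_lock() suffices for the whole walk.
         */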
        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        do {
                entry = *pte;
                if (pte_none(entry) || !pte_present(entry))
                        continue;
                paddr = pte_val(entry) & PAGE_MASK;
                sh64_dcache_purge_coloured_phy_page(paddr, addr);
        } while (pte++, addr += PAGE_SIZE, addr != end);
        pte_unmap_unlock(pte - 1, ptl);

/*
 * There are at least 5 choices for the implementation of this, with
 * pros (+), cons(-), comments(*):
 *
 * 1. ocbp each line in the range through the original user's ASID
 *    + no lines spuriously evicted
 *    - tlbmiss handling (must either handle faults on demand => extra
 *      special-case code in tlbmiss critical path), or map the page in
 *      advance (=> flush_tlb_range in advance to avoid multiple hits)
 *    - ASID switching
 *    - expensive for large ranges
 *
 * 2. temporarily map each page in the range to a special effective
 *    address and ocbp through the temporary mapping; relies on the
 *    fact that SH-5 OCB* always do TLB lookup and match on ptags (they
 *    never look at the etags)
 *    + no spurious evictions
 *    - expensive for large ranges
 *    * surely cheaper than (1)
 *
 * 3. walk all the lines in the cache, check the tags, if a match
 *    occurs create a page mapping to ocbp the line through
 *    + no spurious evictions
 *    - tag inspection overhead
 *    - (especially for small ranges)
 *    - potential cost of setting up/tearing down page mapping for
 *      every line that matches the range
 *    * cost partly independent of range size
 *
 * 4. walk all the lines in the cache, check the tags, if a match
 *    occurs use 4 * alloco to purge the line (+3 other probably
 *    innocent victims) by natural eviction
 *    + no tlb mapping overheads
 *    - spurious evictions
 *    - tag inspection overhead
 *
 * 5. implement like flush_cache_all
 *    + no tag inspection overhead
 *    - spurious evictions
 *    - bad for small ranges
 *
 * (1) can be ruled out as more expensive than (2). (2) appears best
 * for small ranges. The choice between (3), (4) and (5) for large
 * ranges and the range size for the large/small boundary need
 * benchmarking to determine.
 *
 * For now use approach (2) for small ranges and (5) for large ones.
 */
static void sh64_dcache_purge_user_range(struct mm_struct *mm,
                                unsigned long start, unsigned long end)
{
        int n_pages = ((end - start) >> PAGE_SHIFT);

        if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
                sh64_dcache_purge_all();
        } else {
                /* Small range, covered by a single page table page */
                start &= PAGE_MASK;     /* should already be so */
                end = PAGE_ALIGN(end);  /* should already be so */
                sh64_dcache_purge_user_pages(mm, start, end);
        }
}

/*
 * Invalidate the entire contents of both caches, after writing back to
 * memory any dirty data from the D-cache.
 */
static void sh5_flush_cache_all(void *unused)
{
        sh64_dcache_purge_all();
        sh64_icache_inv_all();
}

/*
 * Invalidate an entire user-address space from both caches, after
 * writing back dirty data (e.g. for shared mmap etc).
 *
 * This could be coded selectively by inspecting all the tags then
 * doing 4*alloco on any set containing a match (as for
 * flush_cache_range), but fork/exit/execve (where this is called from)
 * are expensive anyway.
 *
 * Have to do a purge here, despite the comments re I-cache below.
 * There could be odd-coloured dirty data associated with the mm still
 * in the cache - if this gets written out through natural eviction
 * after the kernel has reused the page there will be chaos.
 *
 * The mm being torn down won't ever be active again, so any Icache
 * lines tagged with its ASID won't be visible for the rest of the
 * lifetime of this ASID cycle. Before the ASID gets reused, there
 * will be a flush_cache_all. Hence we don't need to touch the
 * I-cache. This is similar to the lack of action needed in
 * flush_tlb_mm - see fault.c.
 */
static void sh5_flush_cache_mm(void *unused)
{
        sh64_dcache_purge_all();
}

/*
 * Invalidate (from both caches) the range [start,end) of virtual
 * addresses from the user address space specified by mm, after writing
 * back any dirty data.
 *
 * Note, 'end' is 1 byte beyond the end of the range to flush.
 */
static void sh5_flush_cache_range(void *args)
{
        struct flusher_data *data = args;
        struct vm_area_struct *vma;
        unsigned long start, end;

        vma = data->vma;
        start = data->addr1;
        end = data->addr2;

        sh64_dcache_purge_user_range(vma->vm_mm, start, end);
        sh64_icache_inv_user_page_range(vma->vm_mm, start, end);
}

/*
 * Invalidate any entries in either cache for the vma within the user
 * address space vma->vm_mm for the page starting at virtual address
 * 'eaddr'. This seems to be used primarily in breaking COW. Note,
 * the I-cache must be searched too in case the page in question is
 * both writable and being executed from (e.g. stack trampolines.)
 *
 * Note, this is called with pte lock held.
 */
static void sh5_flush_cache_page(void *args)
{
        struct flusher_data *data = args;
        struct vm_area_struct *vma;
        unsigned long eaddr, pfn;

        vma = data->vma;
        eaddr = data->addr1;
        pfn = data->addr2;

        sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);

        if (vma->vm_flags & VM_EXEC)
                sh64_icache_inv_user_page(vma, eaddr);
}

static void sh5_flush_dcache_page(void *page)
{
        sh64_dcache_purge_phy_page(page_to_phys((struct page *)page));
        wmb();
}

/*
 * Flush the range [start,end] of kernel virtual address space from
 * the I-cache. The corresponding range must be purged from the
 * D-cache also because the SH-5 doesn't have cache snooping between
 * the caches. The addresses will be visible through the superpage
 * mapping, therefore it's guaranteed that there are no cache entries
 * for the range in cache sets of the wrong colour.
 */
static void sh5_flush_icache_range(void *args)
{
        struct flusher_data *data = args;
        unsigned long start, end;

        start = data->addr1;
        end = data->addr2;

        __flush_purge_region((void *)start, end - start);
        wmb();
        sh64_icache_inv_kernel_range(start, end);
}

/*
 * For the address range [start,end), write back the data from the
 * D-cache and invalidate the corresponding region of the I-cache for the
 * current process. Used to flush signal trampolines on the stack to
 * make them executable.
 */
static void sh5_flush_cache_sigtramp(void *vaddr)
{
        unsigned long end = (unsigned long)vaddr + L1_CACHE_BYTES;

        __flush_wback_region(vaddr, L1_CACHE_BYTES);
        wmb();
        sh64_icache_inv_current_user_range((unsigned long)vaddr, end);
}

void __init sh5_cache_init(void)
{
        local_flush_cache_all           = sh5_flush_cache_all;
        local_flush_cache_mm            = sh5_flush_cache_mm;
        local_flush_cache_dup_mm        = sh5_flush_cache_mm;
        local_flush_cache_page          = sh5_flush_cache_page;
        local_flush_cache_range         = sh5_flush_cache_range;
        local_flush_dcache_page         = sh5_flush_dcache_page;
        local_flush_icache_range        = sh5_flush_icache_range;
        local_flush_cache_sigtramp      = sh5_flush_cache_sigtramp;

        /* Reserve a slot for dcache colouring in the DTLB */
        dtlb_cache_slot = sh64_get_wired_dtlb_entry();

        sh4__flush_region_init();
}