/*
 * arch/sh/mm/cache-sh5.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2002  Benedict Gaster
 * Copyright (C) 2003  Richard Curnow
 * Copyright (C) 2003 - 2008  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>

/* Wired TLB entry for the D-cache */
static unsigned long long dtlb_cache_slot;

void __init p3_cache_init(void)
{
	/* Reserve a slot for dcache colouring in the DTLB */
	dtlb_cache_slot = sh64_get_wired_dtlb_entry();
}

#ifdef CONFIG_DCACHE_DISABLED
#define sh64_dcache_purge_all()					do { } while (0)
#define sh64_dcache_purge_coloured_phy_page(paddr, eaddr)	do { } while (0)
#define sh64_dcache_purge_user_range(mm, start, end)		do { } while (0)
#define sh64_dcache_purge_phy_page(paddr)			do { } while (0)
#define sh64_dcache_purge_virt_page(mm, eaddr)			do { } while (0)
#endif

/*
 * The following group of functions deal with mapping and unmapping a
 * temporary page into a DTLB slot that has been set aside for exclusive
 * use.
 */
static inline void
sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,
			   unsigned long paddr)
{
	local_irq_disable();
	sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
}

static inline void sh64_teardown_dtlb_cache_slot(void)
{
	sh64_teardown_tlb_slot(dtlb_cache_slot);
	local_irq_enable();
}
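
/*
 * Typical usage of the two wired-slot helpers above (an illustrative
 * sketch only; the real callers are the purge routines further down in
 * this file, and 'alias_eaddr'/'paddr' here are just placeholder names):
 *
 *	sh64_setup_dtlb_cache_slot(alias_eaddr, get_asid(), paddr);
 *	... ocbp/ocbwb/ocbi loop over the one-page alias mapping ...
 *	sh64_teardown_dtlb_cache_slot();
 *
 * Interrupts stay disabled between setup and teardown, so the wired DTLB
 * entry can't be observed in an inconsistent state.
 */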

#ifndef CONFIG_ICACHE_DISABLED
static inline void sh64_icache_inv_all(void)
{
	unsigned long long addr, flag, data;
	unsigned long flags;

	addr = ICCR0;
	flag = ICCR0_ICI;
	data = 0;

	/* Make this a critical section for safety (probably not strictly necessary.) */
	local_irq_save(flags);

	/* Without %1 it gets inexplicably wrong */
	__asm__ __volatile__ (
		"getcfg %3, 0, %0\n\t"
		"or %0, %2, %0\n\t"
		"putcfg %3, 0, %0\n\t"
		"synci"
		: "=&r" (data)
		: "0" (data), "r" (flag), "r" (addr));

	local_irq_restore(flags);
}

static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
{
	/* Invalidate range of addresses [start,end] from the I-cache, where
	   the addresses lie in the kernel superpage. */

	unsigned long long ullend, addr, aligned_start;
	aligned_start = (unsigned long long)(signed long long)(signed long) start;
	addr = L1_CACHE_ALIGN(aligned_start);
	ullend = (unsigned long long) (signed long long) (signed long) end;

	while (addr <= ullend) {
		__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}

static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)
{
	/* If we get called, we know that vma->vm_flags contains VM_EXEC.
	   Also, eaddr is page-aligned. */
	unsigned int cpu = smp_processor_id();
	unsigned long long addr, end_addr;
	unsigned long flags = 0;
	unsigned long running_asid, vma_asid;
	addr = eaddr;
	end_addr = addr + PAGE_SIZE;

	/* Check whether we can use the current ASID for the I-cache
	   invalidation.  For example, if we're called via
	   access_process_vm->flush_cache_page->here, (e.g. when reading from
	   /proc), 'running_asid' will be that of the reader, not of the
	   victim.

	   Also, note the risk that we might get pre-empted between the ASID
	   compare and blocking IRQs, and before we regain control, the
	   pid->ASID mapping changes.  However, the whole cache will get
	   invalidated when the mapping is renewed, so the worst that can
	   happen is that the loop below ends up invalidating somebody else's
	   cache entries.
	*/

	running_asid = get_asid();
	vma_asid = cpu_asid(cpu, vma->vm_mm);
	if (running_asid != vma_asid) {
		local_irq_save(flags);
		switch_and_save_asid(vma_asid);
	}
	while (addr < end_addr) {
		/* Worth unrolling a little */
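		/*
		 * Note: the unrolled body below assumes 32-byte I-cache
		 * lines (four icbi per 128-byte step), whereas the rest of
		 * this file strides by L1_CACHE_BYTES.
		 */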
		__asm__ __volatile__("icbi %0, 0" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 32" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 64" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 96" : : "r" (addr));
		addr += 128;
	}
	if (running_asid != vma_asid) {
		switch_and_save_asid(running_asid);
		local_irq_restore(flags);
	}
}

static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
	/* Used for invalidating big chunks of I-cache, i.e. assume the range
	   is whole pages.  If 'start' or 'end' is not page aligned, the code
	   is conservative and invalidates to the ends of the enclosing pages.
	   This is functionally OK, just a performance loss. */

	/* See the comments below in sh64_dcache_purge_user_range() regarding
	   the choice of algorithm.  However, for the I-cache, option (2) isn't
	   available because there are no physical tags so aliases can't be
	   resolved.  The icbi instruction has to be used through the user
	   mapping.  Because icbi is cheaper than ocbp on a cache hit, the
	   selective code remains cheaper up to a larger range size than is
	   the case for the D-cache.  Just assume 64 pages for now as a
	   working figure.
	   */
	int n_pages;

	if (!mm)
		return;

	n_pages = ((end - start) >> PAGE_SHIFT);
	if (n_pages >= 64) {
		sh64_icache_inv_all();
	} else {
		unsigned long aligned_start;
		unsigned long eaddr;
		unsigned long after_last_page_start;
		unsigned long mm_asid, current_asid;
		unsigned long flags = 0;

		mm_asid = cpu_asid(smp_processor_id(), mm);
		current_asid = get_asid();

		if (mm_asid != current_asid) {
			/* Switch ASID and run the invalidate loop under cli */
			local_irq_save(flags);
			switch_and_save_asid(mm_asid);
		}

		aligned_start = start & PAGE_MASK;
		after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);

		while (aligned_start < after_last_page_start) {
			struct vm_area_struct *vma;
			unsigned long vma_end;
			vma = find_vma(mm, aligned_start);
			if (!vma || (aligned_start <= vma->vm_end)) {
				/* Avoid getting stuck in an error condition */
				aligned_start += PAGE_SIZE;
				continue;
			}
			vma_end = vma->vm_end;
			if (vma->vm_flags & VM_EXEC) {
				/* Executable */
				eaddr = aligned_start;
				while (eaddr < vma_end) {
					sh64_icache_inv_user_page(vma, eaddr);
					eaddr += PAGE_SIZE;
				}
			}
			aligned_start = vma->vm_end; /* Skip to start of next region */
		}

		if (mm_asid != current_asid) {
			switch_and_save_asid(current_asid);
			local_irq_restore(flags);
		}
	}
}

/*
 * Invalidate a small range of user context I-cache, not necessarily page
 * (or even cache-line) aligned.
 *
 * Since this is used inside ptrace, the ASID in the mm context typically
 * won't match current_asid.  We'll have to switch ASID to do this.  For
 * safety, and given that the range will be small, do all this under cli.
 *
 * Note, there is a hazard that the ASID in mm->context is no longer
 * actually associated with mm, i.e. if the mm->context has started a new
 * cycle since mm was last active.  However, this is just a performance
 * issue: all that happens is that we invalidate lines belonging to
 * another mm, so the owning process has to refill them when that mm goes
 * live again.  mm itself can't have any cache entries because there will
 * have been a flush_cache_all when the new mm->context cycle started.
 */
static void sh64_icache_inv_user_small_range(struct mm_struct *mm,
						unsigned long start, int len)
{
	unsigned long long eaddr = start;
	unsigned long long eaddr_end = start + len;
	unsigned long current_asid, mm_asid;
	unsigned long flags;
	unsigned long long epage_start;

	/*
	 * Align to start of cache line.  Otherwise, suppose len==8 and
	 * start was at 32N+28 : the last 4 bytes wouldn't get invalidated.
	 */
	eaddr = L1_CACHE_ALIGN(start);
	eaddr_end = start + len;

	mm_asid = cpu_asid(smp_processor_id(), mm);
	local_irq_save(flags);
	current_asid = switch_and_save_asid(mm_asid);

	epage_start = eaddr & PAGE_MASK;

	while (eaddr < eaddr_end) {
		__asm__ __volatile__("icbi %0, 0" : : "r" (eaddr));
		eaddr += L1_CACHE_BYTES;
	}
	switch_and_save_asid(current_asid);
	local_irq_restore(flags);
}

static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
{
	/* The icbi instruction never raises ITLBMISS.  i.e. if there's not a
	   cache hit on the virtual tag the instruction ends there, without a
	   TLB lookup. */

	unsigned long long aligned_start;
	unsigned long long ull_end;
	unsigned long long addr;

	ull_end = end;

	/* Just invalidate over the range using the natural addresses.  TLB
	   miss handling will be OK (TBC).  Since it's for the current process,
	   either we're already in the right ASID context, or the ASIDs have
	   been recycled since we were last active, in which case we might just
	   invalidate another process's I-cache entries : no worries, just a
	   performance drop for him. */
	aligned_start = L1_CACHE_ALIGN(start);
	addr = aligned_start;
	while (addr < ull_end) {
		__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
		__asm__ __volatile__ ("nop");
		__asm__ __volatile__ ("nop");
		addr += L1_CACHE_BYTES;
	}
}
#endif /* !CONFIG_ICACHE_DISABLED */

#ifndef CONFIG_DCACHE_DISABLED
/* Buffer used as the target of alloco instructions to purge data from cache
   sets by natural eviction. -- RPC */
#define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4))
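/*
 * Rough sizing sketch (illustrative; the 32-byte line size and 32 KiB
 * operand cache are assumptions about the configuration, not taken from
 * this file): L1_CACHE_BYTES << 10 then works out to 32 KiB of dummy
 * buffer plus 4 KiB of slack, i.e. at least the size of the whole
 * operand cache, so a pass of allocos over it can displace every
 * resident line.
 */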
static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, };

static inline void sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
{
	/* Purge all ways in a particular block of sets, specified by the base
	   set number and number of sets.  Can handle wrap-around, if that's
	   needed. */

	int dummy_buffer_base_set;
	unsigned long long eaddr, eaddr0, eaddr1;
	int j;
	int set_offset;

	dummy_buffer_base_set = ((int)&dummy_alloco_area &
				 cpu_data->dcache.entry_mask) >>
				 cpu_data->dcache.entry_shift;
	set_offset = sets_to_purge_base - dummy_buffer_base_set;

	for (j = 0; j < n_sets; j++, set_offset++) {
		set_offset &= (cpu_data->dcache.sets - 1);
		eaddr0 = (unsigned long long)dummy_alloco_area +
			(set_offset << cpu_data->dcache.entry_shift);

		/*
		 * Do one alloco which hits the required set per cache
		 * way.  For write-back mode, this will purge the #ways
		 * resident lines.  There's little point unrolling this
		 * loop because the allocos stall more if they're too
		 * close together.
		 */
		eaddr1 = eaddr0 + cpu_data->dcache.way_size *
				  cpu_data->dcache.ways;

		for (eaddr = eaddr0; eaddr < eaddr1;
		     eaddr += cpu_data->dcache.way_size) {
			__asm__ __volatile__ ("alloco %0, 0" : : "r" (eaddr));
			__asm__ __volatile__ ("synco"); /* TAKum03020 */
		}

		eaddr1 = eaddr0 + cpu_data->dcache.way_size *
				  cpu_data->dcache.ways;

		for (eaddr = eaddr0; eaddr < eaddr1;
		     eaddr += cpu_data->dcache.way_size) {
			/*
			 * Load from each address.  Required because
			 * alloco is a NOP if the cache is write-through.
			 */
			if (test_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags)))
				__raw_readb((unsigned long)eaddr);
		}
	}

	/*
	 * Don't use OCBI to invalidate the lines.  That costs cycles
	 * directly.  If the dummy block is just left resident, it will
	 * naturally get evicted as required.
	 */
}

/*
 * Purge the entire contents of the dcache.  The most efficient way to
 * achieve this is to use alloco instructions on a region of unused
 * memory equal in size to the cache, thereby causing the current
 * contents to be discarded by natural eviction.  The alternative, namely
 * reading every tag, setting up a mapping for the corresponding page and
 * doing an OCBP for the line, would be much more expensive.
 */
static void sh64_dcache_purge_all(void)
{
	sh64_dcache_purge_sets(0, cpu_data->dcache.sets);
}

/* Assumes that this address and the (2**n_synbits) pages above it aren't
   used for anything else in the kernel. */
#define MAGIC_PAGE0_START 0xffffffffec000000ULL
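
/*
 * Worked example of the synonym arithmetic (illustrative, assuming 4 KiB
 * pages and a single synonym bit, i.e. CACHE_OC_N_SYNBITS == 1): a
 * physical page can then live in cache sets of two different colours,
 * selected by bit 12 of the effective address.  The purge-by-alias code
 * below therefore maps the page at MAGIC_PAGE0_START and, on the next
 * iteration, at MAGIC_PAGE0_START + PAGE_SIZE, covering both colours.
 */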

/* Purge the physical page 'paddr' from the cache.  It's known that any
 * cache lines requiring attention have the same page colour as the
 * address 'eaddr'.
 *
 * This relies on the fact that the D-cache matches on physical tags when
 * no virtual tag matches.  So we create an alias for the original page
 * and purge through that.  (Alternatively, we could have done this by
 * switching ASID to match the original mapping and purged through that,
 * but that involves ASID switching cost + probably a TLBMISS + refill
 * anyway.)
 */
static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr,
						unsigned long eaddr)
{
	unsigned long long magic_page_start;
	unsigned long long magic_eaddr, magic_eaddr_end;

	magic_page_start = MAGIC_PAGE0_START + (eaddr & CACHE_OC_SYN_MASK);

	/* As long as the kernel is not pre-emptible, this doesn't need to be
	   under cli/sti. */
	sh64_setup_dtlb_cache_slot(magic_page_start, get_asid(), paddr);

	magic_eaddr = magic_page_start;
	magic_eaddr_end = magic_eaddr + PAGE_SIZE;

	while (magic_eaddr < magic_eaddr_end) {
		/* Little point in unrolling this loop - the OCBPs are blocking
		   and won't go any quicker (i.e. the loop overhead is parallel
		   to part of the OCBP execution.) */
		__asm__ __volatile__ ("ocbp %0, 0" : : "r" (magic_eaddr));
		magic_eaddr += L1_CACHE_BYTES;
	}

	sh64_teardown_dtlb_cache_slot();
}

/*
 * Purge a page given its physical start address, by creating a temporary
 * 1 page mapping and purging across that.  Even if we know the virtual
 * address (& vma or mm) of the page, the method here is more elegant
 * because it avoids issues of coping with page faults on the purge
 * instructions (i.e. no special-case code required in the critical path
 * in the TLB miss handling).
 */
static void sh64_dcache_purge_phy_page(unsigned long paddr)
{
	unsigned long long eaddr_start, eaddr, eaddr_end;
	int i;

	/* As long as the kernel is not pre-emptible, this doesn't need to be
	   under cli/sti. */
	eaddr_start = MAGIC_PAGE0_START;
	for (i = 0; i < (1 << CACHE_OC_N_SYNBITS); i++) {
		sh64_setup_dtlb_cache_slot(eaddr_start, get_asid(), paddr);

		eaddr = eaddr_start;
		eaddr_end = eaddr + PAGE_SIZE;
		while (eaddr < eaddr_end) {
			__asm__ __volatile__ ("ocbp %0, 0" : : "r" (eaddr));
			eaddr += L1_CACHE_BYTES;
		}

		sh64_teardown_dtlb_cache_slot();
		eaddr_start += PAGE_SIZE;
	}
}

static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
				unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	spinlock_t *ptl;
	unsigned long paddr;

	if (!mm)
		return; /* No way to find physical address of page */

	pgd = pgd_offset(mm, addr);
	if (pgd_bad(*pgd))
		return;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || pud_bad(*pud))
		return;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		entry = *pte;
		if (pte_none(entry) || !pte_present(entry))
			continue;
		paddr = pte_val(entry) & PAGE_MASK;
		sh64_dcache_purge_coloured_phy_page(paddr, addr);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
}

/*
 * There are at least 5 choices for the implementation of this, with
 * pros (+), cons(-), comments(*):
 *
 * 1. ocbp each line in the range through the original user's ASID
 *    + no lines spuriously evicted
 *    - tlbmiss handling (must either handle faults on demand => extra
 *      special-case code in tlbmiss critical path), or map the page in
 *      advance (=> flush_tlb_range in advance to avoid multiple hits)
 *    - ASID switching
 *    - expensive for large ranges
 *
 * 2. temporarily map each page in the range to a special effective
 *    address and ocbp through the temporary mapping; relies on the
 *    fact that SH-5 OCB* always do TLB lookup and match on ptags (they
 *    never look at the etags)
 *    + no spurious evictions
 *    - expensive for large ranges
 *    * surely cheaper than (1)
 *
 * 3. walk all the lines in the cache, check the tags, if a match
 *    occurs create a page mapping to ocbp the line through
 *    + no spurious evictions
 *    - tag inspection overhead
 *    - (especially for small ranges)
 *    - potential cost of setting up/tearing down page mapping for
 *      every line that matches the range
 *    * cost partly independent of range size
 *
 * 4. walk all the lines in the cache, check the tags, if a match
 *    occurs use 4 * alloco to purge the line (+3 other probably
 *    innocent victims) by natural eviction
 *    + no tlb mapping overheads
 *    - spurious evictions
 *    - tag inspection overhead
 *
 * 5. implement like flush_cache_all
 *    + no tag inspection overhead
 *    - spurious evictions
 *    - bad for small ranges
 *
 * (1) can be ruled out as more expensive than (2).  (2) appears best
 * for small ranges.  The choice between (3), (4) and (5) for large
 * ranges and the range size for the large/small boundary need
 * benchmarking to determine.
 *
 * For now use approach (2) for small ranges and (5) for large ones.
 */
static void sh64_dcache_purge_user_range(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	int n_pages = ((end - start) >> PAGE_SHIFT);

	if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
		sh64_dcache_purge_all();
	} else {
		/* Small range, covered by a single page table page */
		start &= PAGE_MASK;	/* should already be so */
		end = PAGE_ALIGN(end);	/* should already be so */
		sh64_dcache_purge_user_pages(mm, start, end);
	}
}

/*
 * Purge the range of addresses from the D-cache.
 *
 * The addresses lie in the superpage mapping.  There's no harm if we
 * overpurge at either end - just a small performance loss.
 */
void __flush_purge_region(void *start, int size)
{
	unsigned long long ullend, addr, aligned_start;

	aligned_start = (unsigned long long)(signed long long)(signed long) start;
	addr = L1_CACHE_ALIGN(aligned_start);
	ullend = (unsigned long long) (signed long long) (signed long) start + size;

	while (addr <= ullend) {
		__asm__ __volatile__ ("ocbp %0, 0" : : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}

void __flush_wback_region(void *start, int size)
{
	unsigned long long ullend, addr, aligned_start;

	aligned_start = (unsigned long long)(signed long long)(signed long) start;
	addr = L1_CACHE_ALIGN(aligned_start);
	ullend = (unsigned long long) (signed long long) (signed long) start + size;

	while (addr < ullend) {
		__asm__ __volatile__ ("ocbwb %0, 0" : : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}

void __flush_invalidate_region(void *start, int size)
{
	unsigned long long ullend, addr, aligned_start;

	aligned_start = (unsigned long long)(signed long long)(signed long) start;
	addr = L1_CACHE_ALIGN(aligned_start);
	ullend = (unsigned long long) (signed long long) (signed long) start + size;

	while (addr < ullend) {
		__asm__ __volatile__ ("ocbi %0, 0" : : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}
#endif /* !CONFIG_DCACHE_DISABLED */

/*
 * Invalidate the entire contents of both caches, after writing back to
 * memory any dirty data from the D-cache.
 */
void flush_cache_all(void)
{
	sh64_dcache_purge_all();
	sh64_icache_inv_all();
}

/*
 * Invalidate an entire user-address space from both caches, after
 * writing back dirty data (e.g. for shared mmap etc).
 *
 * This could be coded selectively by inspecting all the tags then
 * doing 4*alloco on any set containing a match (as for
 * flush_cache_range), but fork/exit/execve (where this is called from)
 * are expensive anyway.
 *
 * Have to do a purge here, despite the comments re I-cache below.
 * There could be odd-coloured dirty data associated with the mm still
 * in the cache - if this gets written out through natural eviction
 * after the kernel has reused the page there will be chaos.
 *
 * The mm being torn down won't ever be active again, so any Icache
 * lines tagged with its ASID won't be visible for the rest of the
 * lifetime of this ASID cycle.  Before the ASID gets reused, there
 * will be a flush_cache_all.  Hence we don't need to touch the
 * I-cache.  This is similar to the lack of action needed in
 * flush_tlb_mm - see fault.c.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	sh64_dcache_purge_all();
}

/*
 * Invalidate (from both caches) the range [start,end) of virtual
 * addresses from the user address space specified by mm, after writing
 * back any dirty data.
 *
 * Note, 'end' is 1 byte beyond the end of the range to flush.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	sh64_dcache_purge_user_range(mm, start, end);
	sh64_icache_inv_user_page_range(mm, start, end);
}

/*
 * Invalidate any entries in either cache for the vma within the user
 * address space vma->vm_mm for the page starting at virtual address
 * 'eaddr'.  This seems to be used primarily in breaking COW.  Note,
 * the I-cache must be searched too in case the page in question is
 * both writable and being executed from (e.g. stack trampolines.)
 *
 * Note, this is called with pte lock held.
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr,
		      unsigned long pfn)
{
	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);

	if (vma->vm_flags & VM_EXEC)
		sh64_icache_inv_user_page(vma, eaddr);
}

void flush_dcache_page(struct page *page)
{
	sh64_dcache_purge_phy_page(page_to_phys(page));
	wmb();
}

/*
 * Flush the range [start,end] of kernel virtual address space from
 * the I-cache.  The corresponding range must be purged from the
 * D-cache also because the SH-5 doesn't have cache snooping between
 * the caches.  The addresses will be visible through the superpage
 * mapping, therefore it's guaranteed that there are no cache entries
 * for the range in cache sets of the wrong colour.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	__flush_purge_region((void *)start, end);
	wmb();
	sh64_icache_inv_kernel_range(start, end);
}

/*
 * Flush the range of user (defined by vma->vm_mm) address space starting
 * at 'addr' for 'len' bytes from the cache.  The range does not straddle
 * a page boundary, the unique physical page containing the range is
 * 'page'.  This seems to be used mainly for invalidating an address
 * range following a poke into the program text through the ptrace() call
 * from another process (e.g. for BRK instruction insertion).
 */
void flush_icache_user_range(struct vm_area_struct *vma,
			struct page *page, unsigned long addr, int len)
{
	sh64_dcache_purge_coloured_phy_page(page_to_phys(page), addr);
	mb();

	if (vma->vm_flags & VM_EXEC)
		sh64_icache_inv_user_small_range(vma->vm_mm, addr, len);
}

/*
 * For the address range [start,end), write back the data from the
 * D-cache and invalidate the corresponding region of the I-cache for the
 * current process.  Used to flush signal trampolines on the stack to
 * make them executable.
 */
void flush_cache_sigtramp(unsigned long vaddr)
{
	unsigned long end = vaddr + L1_CACHE_BYTES;

	__flush_wback_region((void *)vaddr, L1_CACHE_BYTES);
	wmb();
	sh64_icache_inv_current_user_range(vaddr, end);
}

#ifdef CONFIG_MMU
/*
 * These *MUST* lie in an area of virtual address space that's otherwise
 * unused.
 */
#define UNIQUE_EADDR_START 0xe0000000UL
#define UNIQUE_EADDR_END   0xe8000000UL

/*
 * Given a physical address paddr, and a user virtual address user_eaddr
 * which will eventually be mapped to it, create a one-off kernel-private
 * eaddr mapped to the same paddr.  This is used for creating special
 * destination pages for copy_user_page and clear_user_page.
 */
static unsigned long sh64_make_unique_eaddr(unsigned long user_eaddr,
					    unsigned long paddr)
{
	static unsigned long current_pointer = UNIQUE_EADDR_START;
	unsigned long coloured_pointer;

	if (current_pointer == UNIQUE_EADDR_END) {
		sh64_dcache_purge_all();
		current_pointer = UNIQUE_EADDR_START;
	}

	coloured_pointer = (current_pointer & ~CACHE_OC_SYN_MASK) |
			   (user_eaddr & CACHE_OC_SYN_MASK);
	sh64_setup_dtlb_cache_slot(coloured_pointer, get_asid(), paddr);

	current_pointer += (PAGE_SIZE << CACHE_OC_N_SYNBITS);

	return coloured_pointer;
}

static void sh64_copy_user_page_coloured(void *to, void *from,
					 unsigned long address)
{
	void *coloured_to;

	/*
	 * Discard any existing cache entries of the wrong colour.  These are
	 * present quite often, if the kernel has recently used the page
	 * internally, then given it up, then it's been allocated to the user.
	 */
	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to);

	coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to));
	copy_page(from, coloured_to);

	sh64_teardown_dtlb_cache_slot();
}

static void sh64_clear_user_page_coloured(void *to, unsigned long address)
{
	void *coloured_to;

	/*
	 * Discard any existing kernel-originated lines of the wrong
	 * colour (as above)
	 */
	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to);

	coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to));
	clear_page(coloured_to);

	sh64_teardown_dtlb_cache_slot();
}

/*
 * 'from' and 'to' are kernel virtual addresses (within the superpage
 * mapping of the physical RAM).  'address' is the user virtual address
 * where the copy 'to' will be mapped after.  This allows a custom
 * mapping to be used to ensure that the new copy is placed in the
 * right cache sets for the user to see it without having to bounce it
 * out via memory.  Note however : the call to flush_page_to_ram in
 * (generic)/mm/memory.c:(break_cow) undoes all this good work in that one
 * very important case!
 *
 * TBD : can we guarantee that on every call, any cache entries for
 * 'from' are in the same colour sets as 'address' also?  i.e. is this
 * always used just to deal with COW?  (I suspect not).
 *
 * There are two possibilities here for when the page 'from' was last accessed:
 * - by the kernel : this is OK, no purge required.
 * - by the/a user (e.g. for break_COW) : need to purge.
 *
 * If the potential user mapping at 'address' is the same colour as
 * 'from' there is no need to purge any cache lines from the 'from'
 * page mapped into cache sets of colour 'address'.  (The copy will be
 * accessing the page through 'from').
 */
void copy_user_page(void *to, void *from, unsigned long address,
		    struct page *page)
{
	if (((address ^ (unsigned long) from) & CACHE_OC_SYN_MASK) != 0)
		sh64_dcache_purge_coloured_phy_page(__pa(from), address);

	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0)
		copy_page(to, from);
	else
		sh64_copy_user_page_coloured(to, from, address);
}
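
/*
 * Illustrative note on the colour checks above and below (the concrete
 * numbers are an assumption about the configuration, not derived from
 * this file): with 4 KiB pages and an 8 KiB cache way there is a single
 * synonym bit, so CACHE_OC_SYN_MASK selects bit 12.  If 'address' and
 * the kernel alias agree in that bit, the plain copy_page()/clear_page()
 * path is safe; otherwise the coloured-alias helpers are used so the
 * data lands in the cache sets the user mapping will hit.
 */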

/*
 * 'to' is a kernel virtual address (within the superpage mapping of the
 * physical RAM).  'address' is the user virtual address where the 'to'
 * page will be mapped after.  This allows a custom mapping to be used to
 * ensure that the new copy is placed in the right cache sets for the
 * user to see it without having to bounce it out via memory.
 */
void clear_user_page(void *to, unsigned long address, struct page *page)
{
	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0)
		clear_page(to);
	else
		sh64_clear_user_page_coloured(to, address);
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	flush_icache_user_range(vma, page, vaddr, len);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
}
#endif