/*
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
 * Copyright (C) 2004 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>

extern void sb1_dma_init(void);

/* These are probed at ld_mmu time */
static unsigned long icache_size;
static unsigned long dcache_size;

static unsigned short icache_line_size;
static unsigned short dcache_line_size;

static unsigned int icache_index_mask;
static unsigned int dcache_index_mask;

static unsigned short icache_assoc;
static unsigned short dcache_assoc;

static unsigned short icache_sets;
static unsigned short dcache_sets;

static unsigned int icache_range_cutoff;
static unsigned int dcache_range_cutoff;

/*
 * The dcache is fully coherent to the system, with one big caveat:
 * the instruction stream.  In other words, if we miss in the icache
 * and have dirty data in the L1 dcache, an ifetch will go out to
 * memory (or the L2) and fetch the stale data.
 *
 * So the only time we have to flush the dcache is when we're flushing
 * the icache.  Since the L2 is fully coherent to everything, including
 * I/O, we never have to flush it.
 */
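
/*
 * A note on the geometry assumed by cache_set_op() below (a sketch,
 * based on the SB1 core's 32 KB, 4-way set-associative L1 caches with
 * an 8 KB way; the actual sizes are probed from config1 later): the
 * four cache ops step the address by 1 << 13, i.e. one way size, so a
 * single invocation touches the same index in each of the four ways.
 */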

#define cache_set_op(op, addr) \
	__asm__ __volatile__( \
	"	.set noreorder \n" \
	"	.set mips64 \n" \
	"	cache %0, (0<<13)(%1) \n" \
	"	cache %0, (1<<13)(%1) \n" \
	"	cache %0, (2<<13)(%1) \n" \
	"	cache %0, (3<<13)(%1) \n" \
	"	.set mips0 \n" \
	"	.set reorder" \
	: \
	: "i" (op), "r" (addr))

#define sync() \
	__asm__ __volatile__( \
	"	.set mips64 \n" \
	"	sync \n" \
	"	.set mips0")

#define mispredict() \
	__asm__ __volatile__( \
	"	bnezl $0, 1f \n" /* Force mispredict */ \
	"1: \n")

/*
 * Writeback and invalidate the entire dcache
 */
static inline void __sb1_writeback_inv_dcache_all(void)
{
	unsigned long addr = 0;

	while (addr < dcache_line_size * dcache_sets) {
		cache_set_op(Index_Writeback_Inv_D, addr);
		addr += dcache_line_size;
	}
}

/*
 * Writeback and invalidate a range of the dcache.  The addresses are
 * virtual, and since we're using index ops and bit 12 is part of both
 * the virtual page number and the physical cache index, we have to
 * flush both settings of bit 12 (set and cleared).
 */
static inline void __sb1_writeback_inv_dcache_range(unsigned long start,
	unsigned long end)
{
	unsigned long index;

	start &= ~(dcache_line_size - 1);
	end = (end + dcache_line_size - 1) & ~(dcache_line_size - 1);

	while (start != end) {
		index = start & dcache_index_mask;
		cache_set_op(Index_Writeback_Inv_D, index);
		cache_set_op(Index_Writeback_Inv_D, index ^ (1<<12));
		start += dcache_line_size;
	}
	sync();
}
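
/*
 * Worked example of the bit 12 handling above (assuming the usual SB1
 * geometry: an 8 KB dcache way with 32-byte lines, so dcache_index_mask
 * is 0x1fe0, and 4 KB pages): for a virtual address 0x2040, the page
 * offset pins physical index bits 5..11 to 0x040, but physical bit 12
 * depends on which page frame backs the mapping, so the line may live
 * at index 0x0040 or 0x1040 and we flush both.
 */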

/*
 * Writeback and invalidate a range of the dcache.  With physical
 * addresses, we don't have to worry about possible bit 12 aliasing.
 * XXXKW is it worth turning on KX and using hit ops with xkphys?
 */
static inline void __sb1_writeback_inv_dcache_phys_range(unsigned long start,
	unsigned long end)
{
	start &= ~(dcache_line_size - 1);
	end = (end + dcache_line_size - 1) & ~(dcache_line_size - 1);

	while (start != end) {
		cache_set_op(Index_Writeback_Inv_D, start & dcache_index_mask);
		start += dcache_line_size;
	}
	sync();
}

/*
 * Invalidate the entire icache
 */
static inline void __sb1_flush_icache_all(void)
{
	unsigned long addr = 0;

	while (addr < icache_line_size * icache_sets) {
		cache_set_op(Index_Invalidate_I, addr);
		addr += icache_line_size;
	}
}

/*
 * Invalidate a range of the icache.  The addresses are virtual, and
 * the cache is virtually indexed and tagged.  However, we don't
 * necessarily have the right ASID context, so use index ops instead
 * of hit ops.
 */
static inline void __sb1_flush_icache_range(unsigned long start,
	unsigned long end)
{
	start &= ~(icache_line_size - 1);
	end = (end + icache_line_size - 1) & ~(icache_line_size - 1);

	while (start != end) {
		cache_set_op(Index_Invalidate_I, start & icache_index_mask);
		start += icache_line_size;
	}
	mispredict();
	sync();
}

/*
 * Flush the icache for a given physical page.  Need to writeback the
 * dcache first, then invalidate the icache.  If the page isn't
 * executable, nothing is required.
 */
static void local_sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
{
	int cpu = smp_processor_id();

#ifndef CONFIG_SMP
	if (!(vma->vm_flags & VM_EXEC))
		return;
#endif

	__sb1_writeback_inv_dcache_range(addr, addr + PAGE_SIZE);

	/*
	 * Bumping the ASID is probably cheaper than the flush ...
	 */
	if (vma->vm_mm == current->active_mm) {
		if (cpu_context(cpu, vma->vm_mm) != 0)
			drop_mmu_context(vma->vm_mm, cpu);
	} else
		__sb1_flush_icache_range(addr, addr + PAGE_SIZE);
}

#ifdef CONFIG_SMP
struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};

static void sb1_flush_cache_page_ipi(void *info)
{
	struct flush_cache_page_args *args = info;

	local_sb1_flush_cache_page(args->vma, args->addr, args->pfn);
}

/* Dirty dcache could be on another CPU, so do the IPIs */
static void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	if (!(vma->vm_flags & VM_EXEC))
		return;

	addr &= PAGE_MASK;
	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;
	on_each_cpu(sb1_flush_cache_page_ipi, (void *) &args, 1, 1);
}
#else
void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
	__attribute__((alias("local_sb1_flush_cache_page")));
#endif
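
/*
 * On uniprocessor builds there is no remote dirty dcache to chase, so
 * the public entry point is aliased straight to the local routine; the
 * same SMP/UP split recurs for the other flush operations below.
 */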

/*
 * Invalidate all caches on this CPU
 */
static void __attribute_used__ local_sb1___flush_cache_all(void)
{
	__sb1_writeback_inv_dcache_all();
	__sb1_flush_icache_all();
}

#ifdef CONFIG_SMP
void sb1___flush_cache_all_ipi(void *ignored)
	__attribute__((alias("local_sb1___flush_cache_all")));

static void sb1___flush_cache_all(void)
{
	on_each_cpu(sb1___flush_cache_all_ipi, 0, 1, 1);
}
#else
void sb1___flush_cache_all(void)
	__attribute__((alias("local_sb1___flush_cache_all")));
#endif

/*
 * When flushing a range in the icache, we have to first writeback
 * the dcache for the same range, so new ifetches will see any
 * data that was dirty in the dcache.
 *
 * The start/end arguments are Kseg addresses (possibly mapped Kseg).
 */
static void local_sb1_flush_icache_range(unsigned long start,
	unsigned long end)
{
	/* Just wb-inv the whole dcache if the range is big enough */
	if ((end - start) > dcache_range_cutoff)
		__sb1_writeback_inv_dcache_all();
	else
		__sb1_writeback_inv_dcache_range(start, end);

	/* Just flush the whole icache if the range is big enough */
	if ((end - start) > icache_range_cutoff)
		__sb1_flush_icache_all();
	else
		__sb1_flush_icache_range(start, end);
}

#ifdef CONFIG_SMP
struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

static void sb1_flush_icache_range_ipi(void *info)
{
	struct flush_icache_range_args *args = info;

	local_sb1_flush_icache_range(args->start, args->end);
}

void sb1_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;
	on_each_cpu(sb1_flush_icache_range_ipi, &args, 1, 1);
}
#else
void sb1_flush_icache_range(unsigned long start, unsigned long end)
	__attribute__((alias("local_sb1_flush_icache_range")));
#endif

/*
 * Flush the icache for a given physical page.  Need to writeback the
 * dcache first, then invalidate the icache.  If the page isn't
 * executable, nothing is required.
 */
static void local_sb1_flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
	unsigned long start;
	int cpu = smp_processor_id();

#ifndef CONFIG_SMP
	if (!(vma->vm_flags & VM_EXEC))
		return;
#endif

	/* Need to writeback any dirty data for that page, we have the PA */
	start = (unsigned long)(page - mem_map) << PAGE_SHIFT;
	__sb1_writeback_inv_dcache_phys_range(start, start + PAGE_SIZE);
	/*
	 * If there's a context, bump the ASID (cheaper than a flush,
	 * since we don't know VAs!)
	 */
	if (vma->vm_mm == current->active_mm) {
		if (cpu_context(cpu, vma->vm_mm) != 0)
			drop_mmu_context(vma->vm_mm, cpu);
	} else
		__sb1_flush_icache_range(start, start + PAGE_SIZE);
}

#ifdef CONFIG_SMP
struct flush_icache_page_args {
	struct vm_area_struct *vma;
	struct page *page;
};

static void sb1_flush_icache_page_ipi(void *info)
{
	struct flush_icache_page_args *args = info;

	local_sb1_flush_icache_page(args->vma, args->page);
}

/* Dirty dcache could be on another CPU, so do the IPIs */
static void sb1_flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
	struct flush_icache_page_args args;

	if (!(vma->vm_flags & VM_EXEC))
		return;

	args.vma = vma;
	args.page = page;
	on_each_cpu(sb1_flush_icache_page_ipi, (void *) &args, 1, 1);
}
#else
void sb1_flush_icache_page(struct vm_area_struct *vma, struct page *page)
	__attribute__((alias("local_sb1_flush_icache_page")));
#endif

/*
 * A signal trampoline must fit into a single cacheline: write back
 * both possible dcache indexes for it, invalidate the matching icache
 * index, and force a mispredict to resync the instruction stream.
 */
static void local_sb1_flush_cache_sigtramp(unsigned long addr)
{
	cache_set_op(Index_Writeback_Inv_D, addr & dcache_index_mask);
	cache_set_op(Index_Writeback_Inv_D, (addr ^ (1<<12)) & dcache_index_mask);
	cache_set_op(Index_Invalidate_I, addr & icache_index_mask);
	mispredict();
}

#ifdef CONFIG_SMP
static void sb1_flush_cache_sigtramp_ipi(void *info)
{
	unsigned long iaddr = (unsigned long) info;

	local_sb1_flush_cache_sigtramp(iaddr);
}

static void sb1_flush_cache_sigtramp(unsigned long addr)
{
	on_each_cpu(sb1_flush_cache_sigtramp_ipi, (void *) addr, 1, 1);
}
#else
void sb1_flush_cache_sigtramp(unsigned long addr)
	__attribute__((alias("local_sb1_flush_cache_sigtramp")));
#endif

/*
 * Anything that just flushes dcache state can be ignored, as we're
 * always coherent in dcache space.  This is just a dummy function
 * that all the nop'ed routines point to.
 */
static void sb1_nop(void)
{
}

/*
 * Cache set values (from the mips64 spec):
 * 0 - 64
 * 1 - 128
 * 2 - 256
 * 3 - 512
 * 4 - 1024
 * 5 - 2048
 * 6 - 4096
 * 7 - Reserved
 */

static unsigned int decode_cache_sets(unsigned int config_field)
{
	if (config_field == 7) {
		/* JDCXXX - Find a graceful way to abort. */
		return 0;
	}
	return (1 << (config_field + 6));
}

/*
 * Cache line size values (from the mips64 spec):
 * 0 - No cache present
 * 1 - 4 bytes
 * 2 - 8 bytes
 * 3 - 16 bytes
 * 4 - 32 bytes
 * 5 - 64 bytes
 * 6 - 128 bytes
 * 7 - Reserved
 */

static unsigned int decode_cache_line_size(unsigned int config_field)
{
	if (config_field == 0) {
		return 0;
	} else if (config_field == 7) {
		/* JDCXXX - Find a graceful way to abort. */
		return 0;
	}
	return (1 << (config_field + 1));
}
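
/*
 * For example, an SB1 core with 32 KB, 4-way L1 caches reports 256
 * sets per way (field value 2, giving 1 << (2 + 6)) and 32-byte lines
 * (field value 4, giving 1 << (4 + 1)) for both caches.
 */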

/*
 * Relevant bits of the config1 register format (from the MIPS32/MIPS64
 * specs):
 *
 * 24:22 Icache sets per way
 * 21:19 Icache line size
 * 18:16 Icache associativity
 * 15:13 Dcache sets per way
 * 12:10 Dcache line size
 *  9:7  Dcache associativity
 */

static char *way_string[] = {
	"direct mapped", "2-way", "3-way", "4-way",
	"5-way", "6-way", "7-way", "8-way",
};

static __init void probe_cache_sizes(void)
{
	u32 config1;

	config1 = read_c0_config1();
	icache_line_size = decode_cache_line_size((config1 >> 19) & 0x7);
	dcache_line_size = decode_cache_line_size((config1 >> 10) & 0x7);
	icache_sets = decode_cache_sets((config1 >> 22) & 0x7);
	dcache_sets = decode_cache_sets((config1 >> 13) & 0x7);
	icache_assoc = ((config1 >> 16) & 0x7) + 1;
	dcache_assoc = ((config1 >> 7) & 0x7) + 1;
	icache_size = icache_line_size * icache_sets * icache_assoc;
	dcache_size = dcache_line_size * dcache_sets * dcache_assoc;
	/* Need to remove non-index bits for index ops */
	icache_index_mask = (icache_sets - 1) * icache_line_size;
	dcache_index_mask = (dcache_sets - 1) * dcache_line_size;
	/*
	 * These are for choosing range (index ops) versus all.
	 * icache flushes all ways for each set, so drop icache_assoc.
	 * dcache flushes all ways and each setting of bit 12 for each
	 * index, so drop dcache_assoc and halve the dcache_sets.
	 */
	icache_range_cutoff = icache_sets * icache_line_size;
	dcache_range_cutoff = (dcache_sets / 2) * dcache_line_size;

	printk("Primary instruction cache %ldkB, %s, linesize %d bytes.\n",
	       icache_size >> 10, way_string[icache_assoc - 1],
	       icache_line_size);
	printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
	       dcache_size >> 10, way_string[dcache_assoc - 1],
	       dcache_line_size);
}
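
/*
 * For the common SB1 configuration (32 KB, 4-way, 32-byte lines for
 * both L1 caches) the probe above works out to:
 *
 *	icache_sets = dcache_sets = 256
 *	icache_size = dcache_size = 32 * 256 * 4 = 32 KB
 *	icache_index_mask = dcache_index_mask = 255 * 32 = 0x1fe0
 *	icache_range_cutoff = 256 * 32 = 8 KB
 *	dcache_range_cutoff = 128 * 32 = 4 KB
 */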

/*
 * This is called from cache.c.  We have to set up all the
 * memory management function pointers, as well as initialize
 * the caches and TLBs.
 */
void sb1_cache_init(void)
{
	extern char except_vec2_sb1;
	extern char handle_vec2_sb1;

	/* Special cache error handler for SB1 */
	set_uncached_handler(0x100, &except_vec2_sb1, 0x80);

	probe_cache_sizes();

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
	sb1_dma_init();
#endif

	/*
	 * None of these are needed for the SB1 - the Dcache is
	 * physically indexed and tagged, so no virtual aliasing can
	 * occur.
	 */
	flush_cache_range = (void *) sb1_nop;
	flush_cache_mm = (void (*)(struct mm_struct *))sb1_nop;
	flush_cache_all = sb1_nop;

	/* These routines are for Icache coherence with the Dcache */
	flush_icache_range = sb1_flush_icache_range;
	__flush_icache_page = sb1_flush_icache_page;
	flush_icache_all = __sb1_flush_icache_all; /* local only */

	/* This implies an Icache flush too, so can't be nop'ed */
	flush_cache_page = sb1_flush_cache_page;

	flush_cache_sigtramp = sb1_flush_cache_sigtramp;
	local_flush_data_cache_page = (void *) sb1_nop;
	flush_data_cache_page = (void *) sb1_nop;

	/* Full flush */
	__flush_cache_all = sb1___flush_cache_all;

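	/*
	 * Set the kseg0 cacheability: CONF_CM_CMASK masks the cache
	 * mode (K0) field of the Config register, which is rewritten
	 * with the kernel's default mode here.
	 */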
	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

	/*
	 * This is the only way to force the update of K0 to complete
	 * before subsequent instruction fetch: the eret acts as an
	 * instruction hazard barrier and resumes fetching at the new
	 * cacheability from the label below.
	 */
	__asm__ __volatile__(
	"	.set push \n"
	"	.set noat \n"
	"	.set noreorder \n"
	"	.set mips3 \n"
	"	" STR(PTR_LA) " $1, 1f \n"
	"	" STR(MTC0) " $1, $14 \n"
	"	eret \n"
	"1:	.set pop"
	:
	:
	: "memory");

	flush_cache_all();
}
562}