/*
 * arch/arm/mm/cache-feroceon-l2.c - Feroceon L2 cache controller support
 *
 * Copyright (C) 2008 Marvell Semiconductor
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * References:
 * - Unified Layer 2 Cache for Feroceon CPU Cores,
 *   Document ID MV-S104858-00, Rev. A, October 23 2007.
 */

#include <linux/init.h>
#include <asm/cacheflush.h>
#include <asm/kmap_types.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <plat/cache-feroceon-l2.h>
#include "mm.h"

/*
 * Low-level cache maintenance operations.
 *
 * As well as the regular 'clean/invalidate/flush L2 cache line by
 * MVA' instructions, the Feroceon L2 cache controller also features
 * 'clean/invalidate L2 range by MVA' operations.
 *
 * Cache range operations are initiated by writing the start and
 * end addresses to successive cp15 registers, and process every
 * cache line whose first byte address lies in the inclusive range
 * [start:end].
 *
 * The cache range operations stall the CPU pipeline until completion.
 *
 * The range operations require two successive cp15 writes, in
 * between which we don't want to be preempted.
 */
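
/*
 * For illustration, with the 32-byte line size used below: a range
 * operation given start = 0x1000 and end = 0x1040 touches the three
 * cache lines whose first bytes are 0x1000, 0x1020 and 0x1040.
 */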

static inline unsigned long l2_start_va(unsigned long paddr)
{
#ifdef CONFIG_HIGHMEM
	/*
	 * Let's do our own fixmap stuff in a minimal way here.
	 * Because range ops can't be done on physical addresses,
	 * we simply install a virtual mapping for it only for the
	 * TLB lookup to occur, hence no need to flush the untouched
	 * memory mapping. This is protected with the disabling of
	 * interrupts by the caller.
	 */
	unsigned long idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
	unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte_ext(TOP_PTE(vaddr), pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL), 0);
	local_flush_tlb_kernel_page(vaddr);
	return vaddr + (paddr & ~PAGE_MASK);
#else
	return __phys_to_virt(paddr);
#endif
}

static inline void l2_clean_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr));
}

static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
{
	unsigned long va_start, va_end, flags;

	/*
	 * Make sure 'start' and 'end' reference the same page, as
	 * L2 is PIPT and range operations only do a TLB lookup on
	 * the start address.
	 */
	BUG_ON((start ^ end) >> PAGE_SHIFT);

	raw_local_irq_save(flags);
	va_start = l2_start_va(start);
	va_end = va_start + (end - start);
	__asm__("mcr p15, 1, %0, c15, c9, 4\n\t"
		"mcr p15, 1, %1, c15, c9, 5"
		: : "r" (va_start), "r" (va_end));
	raw_local_irq_restore(flags);
}

static inline void l2_clean_inv_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c15, c10, 3" : : "r" (addr));
}

static inline void l2_inv_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c15, c11, 3" : : "r" (addr));
}

static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
{
	unsigned long va_start, va_end, flags;

	/*
	 * Make sure 'start' and 'end' reference the same page, as
	 * L2 is PIPT and range operations only do a TLB lookup on
	 * the start address.
	 */
	BUG_ON((start ^ end) >> PAGE_SHIFT);

	raw_local_irq_save(flags);
	va_start = l2_start_va(start);
	va_end = va_start + (end - start);
	__asm__("mcr p15, 1, %0, c15, c11, 4\n\t"
		"mcr p15, 1, %1, c15, c11, 5"
		: : "r" (va_start), "r" (va_end));
	raw_local_irq_restore(flags);
}

static inline void l2_inv_all(void)
{
	__asm__("mcr p15, 1, %0, c15, c11, 0" : : "r" (0));
}

/*
 * Linux primitives.
 *
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
#define CACHE_LINE_SIZE		32
#define MAX_RANGE_SIZE		1024
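
/*
 * To illustrate the conversion this implies: cleaning the two lines that
 * cover [0x1000, 0x1040) means passing the inclusive pair
 * (0x1000, 0x1040 - CACHE_LINE_SIZE) = (0x1000, 0x1020) to the range-op
 * helpers above, hence the 'range_end - CACHE_LINE_SIZE' at each call
 * site below.
 */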

static int l2_wt_override;

static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	unsigned long range_end;

	BUG_ON(start & (CACHE_LINE_SIZE - 1));
	BUG_ON(end & (CACHE_LINE_SIZE - 1));

	/*
	 * Try to process all cache lines between 'start' and 'end'.
	 */
	range_end = end;

	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (range_end > start + MAX_RANGE_SIZE)
		range_end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (range_end > (start | (PAGE_SIZE - 1)) + 1)
		range_end = (start | (PAGE_SIZE - 1)) + 1;

	return range_end;
}
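
/*
 * Worked example, assuming 4 KiB pages: calc_range_end(0x1fe0, 0x4000)
 * first caps the range at start + MAX_RANGE_SIZE = 0x23e0, then clips it
 * to the next page boundary and returns 0x2000; the caller's loop picks
 * up again from there.
 */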

static void feroceon_l2_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * Clean and invalidate partial first cache line.
	 */
	if (start & (CACHE_LINE_SIZE - 1)) {
		l2_clean_inv_pa(start & ~(CACHE_LINE_SIZE - 1));
		start = (start | (CACHE_LINE_SIZE - 1)) + 1;
	}

	/*
	 * Clean and invalidate partial last cache line.
	 */
	if (start < end && end & (CACHE_LINE_SIZE - 1)) {
		l2_clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
		end &= ~(CACHE_LINE_SIZE - 1);
	}

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		l2_inv_pa_range(start, range_end - CACHE_LINE_SIZE);
		start = range_end;
	}

	dsb();
}

static void feroceon_l2_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = (end + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			l2_clean_pa_range(start, range_end - CACHE_LINE_SIZE);
			start = range_end;
		}
	}

	dsb();
}

static void feroceon_l2_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = (end + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		if (!l2_wt_override)
			l2_clean_pa_range(start, range_end - CACHE_LINE_SIZE);
		l2_inv_pa_range(start, range_end - CACHE_LINE_SIZE);
		start = range_end;
	}

	dsb();
}


/*
 * Routines to disable and re-enable the D-cache and I-cache at run
 * time. These are necessary because the L2 cache can only be enabled
 * or disabled while the L1 Dcache and Icache are both disabled.
 */
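/*
 * enable_l2() below shows the full sequence these helpers are used in:
 * flush and disable the D-cache, invalidate and disable the I-cache,
 * invalidate the L2, flip the L2 enable bit, then re-enable whatever
 * was enabled before.
 */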
static int __init flush_and_disable_dcache(void)
{
	u32 cr;

	cr = get_cr();
	if (cr & CR_C) {
		unsigned long flags;

		raw_local_irq_save(flags);
		flush_cache_all();
		set_cr(cr & ~CR_C);
		raw_local_irq_restore(flags);
		return 1;
	}
	return 0;
}

static void __init enable_dcache(void)
{
	u32 cr;

	cr = get_cr();
	set_cr(cr | CR_C);
}

static void __init __invalidate_icache(void)
{
	__asm__("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));
}

static int __init invalidate_and_disable_icache(void)
{
	u32 cr;

	cr = get_cr();
	if (cr & CR_I) {
		set_cr(cr & ~CR_I);
		__invalidate_icache();
		return 1;
	}
	return 0;
}

static void __init enable_icache(void)
{
	u32 cr;

	cr = get_cr();
	set_cr(cr | CR_I);
}

static inline u32 read_extra_features(void)
{
	u32 u;

	__asm__("mrc p15, 1, %0, c15, c1, 0" : "=r" (u));

	return u;
}

static inline void write_extra_features(u32 u)
{
	__asm__("mcr p15, 1, %0, c15, c1, 0" : : "r" (u));
}

static void __init disable_l2_prefetch(void)
{
	u32 u;

	/*
	 * Read the CPU Extra Features register and verify that the
	 * Disable L2 Prefetch bit is set.
	 */
	u = read_extra_features();
	if (!(u & 0x01000000)) {
		printk(KERN_INFO "Feroceon L2: Disabling L2 prefetch.\n");
		write_extra_features(u | 0x01000000);
	}
}

static void __init enable_l2(void)
{
	u32 u;

	u = read_extra_features();
	if (!(u & 0x00400000)) {
		int i, d;

		printk(KERN_INFO "Feroceon L2: Enabling L2\n");

		d = flush_and_disable_dcache();
		i = invalidate_and_disable_icache();
		l2_inv_all();
		write_extra_features(u | 0x00400000);
		if (i)
			enable_icache();
		if (d)
			enable_dcache();
	}
}

void __init feroceon_l2_init(int __l2_wt_override)
{
	l2_wt_override = __l2_wt_override;

	disable_l2_prefetch();

	outer_cache.inv_range = feroceon_l2_inv_range;
	outer_cache.clean_range = feroceon_l2_clean_range;
	outer_cache.flush_range = feroceon_l2_flush_range;

	enable_l2();

	printk(KERN_INFO "Feroceon L2: Cache support initialised%s.\n",
	       l2_wt_override ? ", in WT override mode" : "");
}
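
/*
 * Usage sketch (an assumption about callers, not taken from this file):
 * a board or SoC setup path calls feroceon_l2_init(0) to run the L2 in
 * write-back mode, or feroceon_l2_init(1) when the L2 is configured as
 * write-through, in which case the clean operations above become no-ops.
 */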