blob: b4dd2f4b491b7d32385fbda3dd80a1ef536b4067 [file] [log] [blame]
Catalin Marinas382266a2007-02-05 14:48:19 +01001/*
2 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
3 *
4 * Copyright (C) 2007 ARM Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
Rob Herring8c369262011-08-03 18:12:05 +010019#include <linux/err.h>
Catalin Marinas382266a2007-02-05 14:48:19 +010020#include <linux/init.h>
Catalin Marinas07620972007-07-20 11:42:40 +010021#include <linux/spinlock.h>
Russell Kingfced80c2008-09-06 12:10:45 +010022#include <linux/io.h>
Rob Herring8c369262011-08-03 18:12:05 +010023#include <linux/of.h>
24#include <linux/of_address.h>
Catalin Marinas382266a2007-02-05 14:48:19 +010025
26#include <asm/cacheflush.h>
Catalin Marinas382266a2007-02-05 14:48:19 +010027#include <asm/hardware/cache-l2x0.h>
Sebastian Hesselbarthe68f31f2013-12-13 16:42:19 +010028#include "cache-tauros3.h"
Gregory CLEMENTb8db6b82012-11-06 01:58:07 +010029#include "cache-aurora-l2.h"
Catalin Marinas382266a2007-02-05 14:48:19 +010030
Russell Kingc02642b2014-03-15 16:47:54 +000031struct l2c_init_data {
Russell King051334b2014-03-15 23:04:10 +000032 const char *type;
Russell King3b8bad52014-03-15 16:47:57 +000033 unsigned num_lock;
Russell Kingc02642b2014-03-15 16:47:54 +000034 void (*of_parse)(const struct device_node *, u32 *, u32 *);
Russell King3b8bad52014-03-15 16:47:57 +000035 void (*enable)(void __iomem *, u32, unsigned);
Russell King75461f52014-03-15 16:48:07 +000036 void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
Russell King9846dfc2014-03-15 16:47:55 +000037 void (*save)(void __iomem *);
Russell Kingc02642b2014-03-15 16:47:54 +000038 struct outer_cache_fns outer_cache;
39};
40
#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;		/* virtual base of the controller */
static DEFINE_RAW_SPINLOCK(l2x0_lock);	/* serialises non-atomic L2 ops */
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;		/* total cache size in bytes */
/* sync register offset; switched to L2X0_DUMMY_REG for PL310 erratum 753970 */
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

/* register state saved at init/suspend, restored by the resume methods */
struct l2x0_regs l2x0_saved_regs;
50
/*
 * Common code for all cache controllers.
 */
/* Spin until the bits in @mask read back as zero from @reg. */
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}
60
/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	/*
	 * Note: @base is currently unused here - the write is routed via
	 * the installed outer_cache.set_debug method.
	 */
	outer_cache.set_debug(val);
}
70
/* Issue a way-based operation on all active ways and wait for completion. */
static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}
76
Russell King37abcdb2014-03-15 16:47:50 +000077static inline void l2c_unlock(void __iomem *base, unsigned num)
78{
79 unsigned i;
80
81 for (i = 0; i < num; i++) {
82 writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
83 i * L2X0_LOCKDOWN_STRIDE);
84 writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
85 i * L2X0_LOCKDOWN_STRIDE);
86 }
87}
88
/*
 * Enable the L2 cache controller.  This function must only be
 * called when the cache controller is known to be disabled.
 */
static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned long flags;

	/* Only write the aux register if it needs changing */
	if (readl_relaxed(base + L2X0_AUX_CTRL) != aux)
		writel_relaxed(aux, base + L2X0_AUX_CTRL);

	l2c_unlock(base, num_lock);

	/* Invalidate all ways and drain before enabling the controller */
	local_irq_save(flags);
	__l2c_op_way(base + L2X0_INV_WAY);
	writel_relaxed(0, base + sync_reg_offset);
	l2c_wait_mask(base + sync_reg_offset, 1);
	local_irq_restore(flags);

	writel_relaxed(L2X0_CTRL_EN, base + L2X0_CTRL);
}
111
/* Flush everything out to RAM, then disable the controller. */
static void l2c_disable(void)
{
	void __iomem *base = l2x0_base;

	outer_cache.flush_all();
	writel_relaxed(0, base + L2X0_CTRL);
	dsb(st);	/* ensure the disable write reaches the device */
}
120
#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
/* L210/L220 line operations run in the background and must be polled */
#define cache_wait	l2c_wait_mask
#endif
129
Catalin Marinas382266a2007-02-05 14:48:19 +0100130static inline void cache_sync(void)
131{
Russell King3d107432009-11-19 11:41:09 +0000132 void __iomem *base = l2x0_base;
Srinidhi Kasagar885028e2011-02-17 07:03:51 +0100133
Will Deaconf154fe92012-04-20 17:21:08 +0100134 writel_relaxed(0, base + sync_reg_offset);
Russell King3d107432009-11-19 11:41:09 +0000135 cache_wait(base + L2X0_CACHE_SYNC, 1);
Catalin Marinas382266a2007-02-05 14:48:19 +0100136}
137
#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	/* Only touch the debug register when a set_debug method exists */
	if (outer_cache.set_debug)
		l2c_set_debug(l2x0_base, val);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}
#endif
150
Catalin Marinas23107c52010-03-24 16:48:53 +0100151static void l2x0_cache_sync(void)
152{
153 unsigned long flags;
154
Thomas Gleixnerbd31b852009-07-03 08:44:46 -0500155 raw_spin_lock_irqsave(&l2x0_lock, flags);
Catalin Marinas23107c52010-03-24 16:48:53 +0100156 cache_sync();
Thomas Gleixnerbd31b852009-07-03 08:44:46 -0500157 raw_spin_unlock_irqrestore(&l2x0_lock, flags);
Catalin Marinas23107c52010-03-24 16:48:53 +0100158}
159
/* Clean+invalidate all ways; caller must hold l2x0_lock. */
static void __l2x0_flush_all(void)
{
	debug_writel(0x03);	/* errata work-around bracketing (see 588369/727915) */
	__l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_sync();
	debug_writel(0x00);
}
167
Thomas Gleixner2fd86582010-07-31 21:05:24 +0530168static void l2x0_flush_all(void)
169{
170 unsigned long flags;
171
172 /* clean all ways */
Thomas Gleixnerbd31b852009-07-03 08:44:46 -0500173 raw_spin_lock_irqsave(&l2x0_lock, flags);
Will Deacon38a89142011-07-01 14:36:19 +0100174 __l2x0_flush_all();
Thomas Gleixnerbd31b852009-07-03 08:44:46 -0500175 raw_spin_unlock_irqrestore(&l2x0_lock, flags);
Thomas Gleixner2fd86582010-07-31 21:05:24 +0530176}
177
/* Flush and disable the controller under the lock. */
static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb(st);	/* ensure the disable write reaches the device */
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
188
/*
 * L2C-210 specific code.
 *
 * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
 * ensure that no background operation is running.  The way operations
 * are all background tasks.
 *
 * While a background operation is in progress, any new operation is
 * ignored (unspecified whether this causes an error.)  Thankfully, not
 * used on SMP.
 *
 * Never has a different sync register other than L2X0_CACHE_SYNC, but
 * we use sync_reg_offset here so we can share some of this with L2C-310.
 */
static void __l2c210_cache_sync(void __iomem *base)
{
	/* Atomic on L2C-210: a single write, no completion polling needed */
	writel_relaxed(0, base + sync_reg_offset);
}
207
208static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
209 unsigned long end)
210{
211 while (start < end) {
212 writel_relaxed(start, reg);
213 start += CACHE_LINE_SIZE;
214 }
215}
216
/*
 * Invalidate a PA range.  Partial lines at either end are
 * clean+invalidated instead of just invalidated, so dirty data outside
 * the requested range is not silently discarded.
 */
static void l2c210_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}
235
236static void l2c210_clean_range(unsigned long start, unsigned long end)
237{
238 void __iomem *base = l2x0_base;
239
240 start &= ~(CACHE_LINE_SIZE - 1);
241 __l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
242 __l2c210_cache_sync(base);
243}
244
245static void l2c210_flush_range(unsigned long start, unsigned long end)
246{
247 void __iomem *base = l2x0_base;
248
249 start &= ~(CACHE_LINE_SIZE - 1);
250 __l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
251 __l2c210_cache_sync(base);
252}
253
/* Clean+invalidate all ways.  Way ops are background tasks (see above). */
static void l2c210_flush_all(void)
{
	void __iomem *base = l2x0_base;

	/* Callers must not race with the background way operation */
	BUG_ON(!irqs_disabled());

	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	__l2c210_cache_sync(base);
}
263
264static void l2c210_sync(void)
265{
266 __l2c210_cache_sync(l2x0_base);
267}
268
/* Re-enable the controller after resume if firmware left it disabled. */
static void l2c210_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1);
}
276
/* L2C-210 method table: all line ops are atomic, so no locking is used. */
static const struct l2c_init_data l2c210_data __initconst = {
	.type = "L2C-210",
	.num_lock = 1,
	.enable = l2c_enable,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c210_resume,
	},
};
291
/*
 * L2C-220 specific code.
 *
 * All operations are background operations: they have to be waited for.
 * Conflicting requests generate a slave error (which will cause an
 * imprecise abort.)  Never uses sync_reg_offset, so we hard-code the
 * sync register here.
 *
 * However, we can re-use the l2c210_resume call.
 */
static inline void __l2c220_cache_sync(void __iomem *base)
{
	/* Start the background sync and poll it to completion */
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
	l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
}
307
308static void l2c220_op_way(void __iomem *base, unsigned reg)
309{
310 unsigned long flags;
311
312 raw_spin_lock_irqsave(&l2x0_lock, flags);
313 __l2c_op_way(base + reg);
314 __l2c220_cache_sync(base);
315 raw_spin_unlock_irqrestore(&l2x0_lock, flags);
316}
317
/*
 * Issue a background line operation for each cache line in [start, end),
 * waiting for the previous operation to finish before each new write.
 * The lock is dropped and re-taken every 4K block to bound IRQ-off
 * latency; the (possibly refreshed) flags value is returned for the
 * caller's final unlock.  Caller must hold l2x0_lock with @flags.
 */
static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end, unsigned long flags)
{
	raw_spinlock_t *lock = &l2x0_lock;

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2c_wait_mask(reg, 1);
			writel_relaxed(start, reg);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			/* window for pending interrupts / other CPUs */
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}

	return flags;
}
340
/*
 * Invalidate a PA range.  Partial lines at either end are
 * clean+invalidated so dirty data outside the range is not lost.
 * Everything runs under l2x0_lock because L2C-220 ops conflict.
 */
static void l2c220_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			/* wait for the previous partial-line op to finish */
			l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
			writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
		}
	}

	flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
367
/*
 * Clean a PA range; falls back to a whole-cache clean by way when the
 * range covers the entire cache.
 */
static void l2c220_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
				   start, end, flags);
	/*
	 * NOTE(review): the range was issued via CLEAN_LINE_PA but this
	 * waits on CLEAN_INV_LINE_PA - looks copied from
	 * l2c220_flush_range; confirm which register should be polled.
	 */
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
386
/*
 * Clean+invalidate a PA range; falls back to a whole-cache flush by way
 * when the range covers the entire cache.
 */
static void l2c220_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
405
406static void l2c220_flush_all(void)
407{
408 l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
409}
410
411static void l2c220_sync(void)
412{
413 unsigned long flags;
414
415 raw_spin_lock_irqsave(&l2x0_lock, flags);
416 __l2c220_cache_sync(l2x0_base);
417 raw_spin_unlock_irqrestore(&l2x0_lock, flags);
418}
419
420static const struct l2c_init_data l2c220_data = {
Russell King051334b2014-03-15 23:04:10 +0000421 .type = "L2C-220",
Russell King733c6bb2014-03-15 21:29:28 +0000422 .num_lock = 1,
423 .enable = l2c_enable,
424 .outer_cache = {
425 .inv_range = l2c220_inv_range,
426 .clean_range = l2c220_clean_range,
427 .flush_range = l2c220_flush_range,
428 .flush_all = l2c220_flush_all,
429 .disable = l2c_disable,
430 .sync = l2c220_sync,
431 .resume = l2c210_resume,
432 },
433};
434
435/*
Russell King75461f52014-03-15 16:48:07 +0000436 * L2C-310 specific code.
437 *
Russell Kingf7773322014-03-15 20:51:47 +0000438 * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
439 * and the way operations are all background tasks. However, issuing an
440 * operation while a background operation is in progress results in a
441 * SLVERR response. We can reuse:
442 *
443 * __l2c210_cache_sync (using sync_reg_offset)
444 * l2c210_sync
445 * l2c210_inv_range (if 588369 is not applicable)
446 * l2c210_clean_range
447 * l2c210_flush_range (if 588369 is not applicable)
448 * l2c210_flush_all (if 727915 is not applicable)
449 *
Russell King75461f52014-03-15 16:48:07 +0000450 * Errata:
451 * 588369: PL310 R0P0->R1P0, fixed R2P0.
452 * Affects: all clean+invalidate operations
453 * clean and invalidate skips the invalidate step, so we need to issue
454 * separate operations. We also require the above debug workaround
455 * enclosing this code fragment on affected parts. On unaffected parts,
456 * we must not use this workaround without the debug register writes
457 * to avoid exposing a problem similar to 727915.
458 *
459 * 727915: PL310 R2P0->R3P0, fixed R3P1.
460 * Affects: clean+invalidate by way
461 * clean and invalidate by way runs in the background, and a store can
462 * hit the line between the clean operation and invalidate operation,
463 * resulting in the store being lost.
464 *
465 * 753970: PL310 R3P0, fixed R3P1.
466 * Affects: sync
467 * prevents merging writes after the sync operation, until another L2C
468 * operation is performed (or a number of other conditions.)
469 *
470 * 769419: PL310 R0P0->R3P1, fixed R3P2.
471 * Affects: store buffer
472 * store buffer is not automatically drained.
473 */
Russell Kingbda0b742014-03-15 16:48:16 +0000474static void l2c310_set_debug(unsigned long val)
475{
476 writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
477}
478
/*
 * Erratum 588369: clean+invalidate by PA skips the invalidate step, so
 * partial lines at either end are handled as an explicit clean write
 * followed by an invalidate write, bracketed by debug register writes
 * and done under the lock.  The aligned remainder uses the normal
 * atomic invalidate-by-PA path.
 */
static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		unsigned long flags;

		/* Erratum 588369 for both clean+invalidate operations */
		raw_spin_lock_irqsave(&l2x0_lock, flags);
		l2c_set_debug(base, 0x03);

		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(end, base + L2X0_INV_LINE_PA);
		}

		l2c_set_debug(base, 0x00);
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}
510
/*
 * Erratum 588369 variant of flush_range: each line is cleaned then
 * invalidated as two writes inside a debug-register bracket.  The lock
 * is dropped every 4K block to bound IRQ-off latency.
 */
static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
{
	raw_spinlock_t *lock = &l2x0_lock;
	unsigned long flags;
	void __iomem *base = l2x0_base;

	raw_spin_lock_irqsave(lock, flags);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		l2c_set_debug(base, 0x03);
		while (start < blk_end) {
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}
		l2c_set_debug(base, 0x00);

		if (blk_end < end) {
			/* window for pending interrupts / other CPUs */
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}
	raw_spin_unlock_irqrestore(lock, flags);
	__l2c210_cache_sync(base);
}
537
/*
 * Erratum 727915: clean+invalidate by way can lose a store that hits a
 * line between the clean and invalidate steps; bracket the way op with
 * debug register writes under the lock.
 */
static void l2c310_flush_all_erratum(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	l2c_set_debug(base, 0x03);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	l2c_set_debug(base, 0x00);
	__l2c210_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
550
/* Save the L2C-310 configuration registers for restore at resume. */
static void __init l2c310_save(void __iomem *base)
{
	unsigned revision;

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L2X0_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

	/* From r2p0, there is Prefetch offset/control register */
	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
							L2X0_PREFETCH_CTRL);

	/* From r3p0, there is Power control register */
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
							L2X0_POWER_CTRL);
}
577
/*
 * Restore the saved L2C-310 configuration and re-enable the controller,
 * but only if firmware has not already enabled it (the config registers
 * must not be written while the cache is enabled).
 */
static void l2c310_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		unsigned revision;

		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			       base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			       base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			       base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			       base + L2X0_ADDR_FILTER_START);

		revision = readl_relaxed(base + L2X0_CACHE_ID) &
				L2X0_CACHE_ID_RTL_MASK;

		/* registers only present from the revisions noted in save */
		if (revision >= L310_CACHE_ID_RTL_R2P0)
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				       base + L2X0_PREFETCH_CTRL);
		if (revision >= L310_CACHE_ID_RTL_R3P0)
			writel_relaxed(l2x0_saved_regs.pwr_ctrl,
				       base + L2X0_POWER_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}
608
/*
 * Patch the outer_cache method table for the errata that apply to this
 * RTL revision (see the errata summary comment above), and report which
 * work-arounds were enabled.
 */
static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
	const char *errata[4];
	unsigned n = 0;

	/* For compatibility */
	if (revision <= L310_CACHE_ID_RTL_R3P0)
		fns->set_debug = l2c310_set_debug;

	if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
	    revision < L310_CACHE_ID_RTL_R2P0 &&
	    /* For bcm compatibility */
	    fns->inv_range == l2c210_inv_range) {
		fns->inv_range = l2c310_inv_range_erratum;
		fns->flush_range = l2c310_flush_range_erratum;
		errata[n++] = "588369";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
	    revision >= L310_CACHE_ID_RTL_R2P0 &&
	    revision < L310_CACHE_ID_RTL_R3P1) {
		fns->flush_all = l2c310_flush_all_erratum;
		errata[n++] = "727915";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
	    revision == L310_CACHE_ID_RTL_R3P0) {
		/* syncs must go via the dummy register on r3p0 */
		sync_reg_offset = L2X0_DUMMY_REG;
		errata[n++] = "753970";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
		errata[n++] = "769419";

	if (n) {
		unsigned i;

		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
		for (i = 0; i < n; i++)
			pr_cont(" %s", errata[i]);
		pr_cont(" enabled\n");
	}
}
654
/* L2C-310 method table; l2c310_fixup patches in errata variants. */
static const struct l2c_init_data l2c310_init_fns __initconst = {
	.type = "L2C-310",
	.num_lock = 8,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		/* L2C-210 ops are reusable: PA ops are atomic on both */
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.set_debug = l2c310_set_debug,
		.resume = l2c310_resume,
	},
};
672
/*
 * Common initialisation: compute the geometry from the aux control
 * register, apply errata fixups, enable the controller (unless firmware
 * already did, e.g. when booting non-secure) and install outer_cache.
 */
static void __init __l2c_init(const struct l2c_init_data *data,
	u32 aux_val, u32 aux_mask, u32 cache_id)
{
	struct outer_cache_fns fns;
	u32 aux;
	u32 way_size = 0;
	int ways;
	int way_size_shift = L2X0_WAY_SIZE_SHIFT;

	/*
	 * It is strange to save the register state before initialisation,
	 * but hey, this is what the DT implementations decided to do.
	 */
	if (data->save)
		data->save(l2x0_base);

	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		/* bit 16: 16-way associativity */
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		break;

	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		break;

	case AURORA_CACHE_ID:
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		way_size_shift = AURORA_WAY_SIZE_SHIFT;
		break;

	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache Size =  Way size * Number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + way_size_shift);

	l2x0_size = ways * way_size * SZ_1K;

	/* fixup works on a local copy so the const table stays pristine */
	fns = data->outer_cache;
	if (data->fixup)
		data->fixup(l2x0_base, cache_id, &fns);

	/*
	 * Check if l2x0 controller is already enabled.  If we are booting
	 * in non-secure mode accessing the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		data->enable(l2x0_base, aux, data->num_lock);

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	/* Save the value for resuming. */
	l2x0_saved_regs.aux_ctrl = aux;

	outer_cache = fns;

	pr_info("%s cache controller enabled, %d ways, %d kB\n",
		data->type, ways, l2x0_size >> 10);
	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
		data->type, cache_id, aux);
}
Rob Herring8c369262011-08-03 18:12:05 +0100753
/*
 * Non-DT entry point: probe the controller type from its CACHE_ID
 * register and initialise it.
 */
void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	default:	/* unknown parts are driven as L2C-210 */
	case L2X0_CACHE_ID_PART_L210:
		data = &l2c210_data;
		break;

	case L2X0_CACHE_ID_PART_L220:
		data = &l2c220_data;
		break;

	case L2X0_CACHE_ID_PART_L310:
		data = &l2c310_init_fns;
		break;
	}

	__l2c_init(data, aux_val, aux_mask, cache_id);
}
780
Rob Herring8c369262011-08-03 18:12:05 +0100781#ifdef CONFIG_OF
/* NOTE(review): presumably an Aurora write-through override flag set from
 * DT - confirm against the Aurora code (not visible in this chunk). */
static int l2_wt_override;

/* Aurora don't have the cache ID register available, so we have to
 * pass it though the device tree */
static u32 cache_id_part_number_from_dt;
787
Russell Kingda3627f2014-03-15 16:48:06 +0000788static void __init l2x0_of_parse(const struct device_node *np,
789 u32 *aux_val, u32 *aux_mask)
790{
791 u32 data[2] = { 0, 0 };
792 u32 tag = 0;
793 u32 dirty = 0;
794 u32 val = 0, mask = 0;
795
796 of_property_read_u32(np, "arm,tag-latency", &tag);
797 if (tag) {
798 mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
799 val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
800 }
801
802 of_property_read_u32_array(np, "arm,data-latency",
803 data, ARRAY_SIZE(data));
804 if (data[0] && data[1]) {
805 mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
806 L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
807 val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
808 ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
809 }
810
811 of_property_read_u32(np, "arm,dirty-latency", &dirty);
812 if (dirty) {
813 mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
814 val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
815 }
816
817 *aux_val &= ~mask;
818 *aux_val |= val;
819 *aux_mask &= ~mask;
820}
821
/* DT init data for L2C-210: same methods as l2c210_data, plus DT parse. */
static const struct l2c_init_data of_l2c210_data __initconst = {
	.type = "L2C-210",
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c210_resume,
	},
};
837
/* DT init data for L2C-220: same methods as l2c220_data, plus DT parse. */
static const struct l2c_init_data of_l2c220_data __initconst = {
	.type = "L2C-220",
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c210_resume,
	},
};
853
Russell Kingf7773322014-03-15 20:51:47 +0000854static void __init l2c310_of_parse(const struct device_node *np,
855 u32 *aux_val, u32 *aux_mask)
Russell Kingda3627f2014-03-15 16:48:06 +0000856{
857 u32 data[3] = { 0, 0, 0 };
858 u32 tag[3] = { 0, 0, 0 };
859 u32 filter[2] = { 0, 0 };
860
861 of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
862 if (tag[0] && tag[1] && tag[2])
863 writel_relaxed(
864 ((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
865 ((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
866 ((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
867 l2x0_base + L2X0_TAG_LATENCY_CTRL);
868
869 of_property_read_u32_array(np, "arm,data-latency",
870 data, ARRAY_SIZE(data));
871 if (data[0] && data[1] && data[2])
872 writel_relaxed(
873 ((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
874 ((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
875 ((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
876 l2x0_base + L2X0_DATA_LATENCY_CTRL);
877
878 of_property_read_u32_array(np, "arm,filter-ranges",
879 filter, ARRAY_SIZE(filter));
880 if (filter[1]) {
881 writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
882 l2x0_base + L2X0_ADDR_FILTER_END);
883 writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
884 l2x0_base + L2X0_ADDR_FILTER_START);
885 }
886}
887
Russell Kingf7773322014-03-15 20:51:47 +0000888static const struct l2c_init_data of_l2c310_data __initconst = {
Russell King051334b2014-03-15 23:04:10 +0000889 .type = "L2C-310",
Russell King3b8bad52014-03-15 16:47:57 +0000890 .num_lock = 8,
Russell Kingf7773322014-03-15 20:51:47 +0000891 .of_parse = l2c310_of_parse,
Russell King3b8bad52014-03-15 16:47:57 +0000892 .enable = l2c_enable,
Russell King75461f52014-03-15 16:48:07 +0000893 .fixup = l2c310_fixup,
Russell King09a5d182014-03-15 16:48:13 +0000894 .save = l2c310_save,
Russell Kingda3627f2014-03-15 16:48:06 +0000895 .outer_cache = {
Russell Kingf7773322014-03-15 20:51:47 +0000896 .inv_range = l2c210_inv_range,
897 .clean_range = l2c210_clean_range,
898 .flush_range = l2c210_flush_range,
899 .flush_all = l2c210_flush_all,
900 .disable = l2c_disable,
901 .sync = l2c210_sync,
902 .set_debug = l2c310_set_debug,
Russell King09a5d182014-03-15 16:48:13 +0000903 .resume = l2c310_resume,
Russell Kingda3627f2014-03-15 16:48:06 +0000904 },
905};
906
Gregory CLEMENTb8db6b82012-11-06 01:58:07 +0100907/*
908 * Note that the end addresses passed to Linux primitives are
909 * noninclusive, while the hardware cache range operations use
910 * inclusive start and end addresses.
911 */
912static unsigned long calc_range_end(unsigned long start, unsigned long end)
913{
914 /*
915 * Limit the number of cache lines processed at once,
916 * since cache range operations stall the CPU pipeline
917 * until completion.
918 */
919 if (end > start + MAX_RANGE_SIZE)
920 end = start + MAX_RANGE_SIZE;
921
922 /*
923 * Cache range operations can't straddle a page boundary.
924 */
925 if (end > PAGE_ALIGN(start+1))
926 end = PAGE_ALIGN(start+1);
927
928 return end;
929}
930
931/*
932 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
933 * and range operations only do a TLB lookup on the start address.
934 */
935static void aurora_pa_range(unsigned long start, unsigned long end,
936 unsigned long offset)
937{
938 unsigned long flags;
939
940 raw_spin_lock_irqsave(&l2x0_lock, flags);
Gregory CLEMENT8a3a1802013-01-07 11:28:42 +0100941 writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
942 writel_relaxed(end, l2x0_base + offset);
Gregory CLEMENTb8db6b82012-11-06 01:58:07 +0100943 raw_spin_unlock_irqrestore(&l2x0_lock, flags);
944
945 cache_sync();
946}
947
948static void aurora_inv_range(unsigned long start, unsigned long end)
949{
950 /*
951 * round start and end adresses up to cache line size
952 */
953 start &= ~(CACHE_LINE_SIZE - 1);
954 end = ALIGN(end, CACHE_LINE_SIZE);
955
956 /*
957 * Invalidate all full cache lines between 'start' and 'end'.
958 */
959 while (start < end) {
960 unsigned long range_end = calc_range_end(start, end);
961 aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
962 AURORA_INVAL_RANGE_REG);
963 start = range_end;
964 }
965}
966
967static void aurora_clean_range(unsigned long start, unsigned long end)
968{
969 /*
970 * If L2 is forced to WT, the L2 will always be clean and we
971 * don't need to do anything here.
972 */
973 if (!l2_wt_override) {
974 start &= ~(CACHE_LINE_SIZE - 1);
975 end = ALIGN(end, CACHE_LINE_SIZE);
976 while (start != end) {
977 unsigned long range_end = calc_range_end(start, end);
978 aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
979 AURORA_CLEAN_RANGE_REG);
980 start = range_end;
981 }
982 }
983}
984
985static void aurora_flush_range(unsigned long start, unsigned long end)
986{
Gregory CLEMENT8b827c62013-01-07 11:27:14 +0100987 start &= ~(CACHE_LINE_SIZE - 1);
988 end = ALIGN(end, CACHE_LINE_SIZE);
989 while (start != end) {
990 unsigned long range_end = calc_range_end(start, end);
991 /*
992 * If L2 is forced to WT, the L2 will always be clean and we
993 * just need to invalidate.
994 */
995 if (l2_wt_override)
Gregory CLEMENTb8db6b82012-11-06 01:58:07 +0100996 aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
Gregory CLEMENT8b827c62013-01-07 11:27:14 +0100997 AURORA_INVAL_RANGE_REG);
998 else
999 aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
1000 AURORA_FLUSH_RANGE_REG);
1001 start = range_end;
Gregory CLEMENTb8db6b82012-11-06 01:58:07 +01001002 }
1003}
1004
Russell Kingda3627f2014-03-15 16:48:06 +00001005static void aurora_save(void __iomem *base)
1006{
1007 l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
1008 l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
1009}
1010
1011static void aurora_resume(void)
1012{
Russell King09a5d182014-03-15 16:48:13 +00001013 void __iomem *base = l2x0_base;
1014
1015 if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
1016 writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL);
1017 writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL);
Russell Kingda3627f2014-03-15 16:48:06 +00001018 }
1019}
1020
Russell King40266d62014-03-15 16:47:59 +00001021/*
1022 * For Aurora cache in no outer mode, enable via the CP15 coprocessor
1023 * broadcasting of cache commands to L2.
1024 */
1025static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
1026 unsigned num_lock)
Russell Kingda3627f2014-03-15 16:48:06 +00001027{
Russell King40266d62014-03-15 16:47:59 +00001028 u32 u;
1029
1030 asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
Russell Kingda3627f2014-03-15 16:48:06 +00001031 u |= AURORA_CTRL_FW; /* Set the FW bit */
Russell King40266d62014-03-15 16:47:59 +00001032 asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));
1033
Russell Kingda3627f2014-03-15 16:48:06 +00001034 isb();
Russell King40266d62014-03-15 16:47:59 +00001035
1036 l2c_enable(base, aux, num_lock);
Russell Kingda3627f2014-03-15 16:48:06 +00001037}
1038
Russell King75461f52014-03-15 16:48:07 +00001039static void __init aurora_fixup(void __iomem *base, u32 cache_id,
1040 struct outer_cache_fns *fns)
1041{
1042 sync_reg_offset = AURORA_SYNC_REG;
1043}
1044
Russell Kingda3627f2014-03-15 16:48:06 +00001045static void __init aurora_of_parse(const struct device_node *np,
1046 u32 *aux_val, u32 *aux_mask)
1047{
1048 u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
1049 u32 mask = AURORA_ACR_REPLACEMENT_MASK;
1050
1051 of_property_read_u32(np, "cache-id-part",
1052 &cache_id_part_number_from_dt);
1053
1054 /* Determine and save the write policy */
1055 l2_wt_override = of_property_read_bool(np, "wt-override");
1056
1057 if (l2_wt_override) {
1058 val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
1059 mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
1060 }
1061
1062 *aux_val &= ~mask;
1063 *aux_val |= val;
1064 *aux_mask &= ~mask;
1065}
1066
1067static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
Russell King051334b2014-03-15 23:04:10 +00001068 .type = "Aurora",
Russell King3b8bad52014-03-15 16:47:57 +00001069 .num_lock = 4,
Russell Kingda3627f2014-03-15 16:48:06 +00001070 .of_parse = aurora_of_parse,
Russell King3b8bad52014-03-15 16:47:57 +00001071 .enable = l2c_enable,
Russell King75461f52014-03-15 16:48:07 +00001072 .fixup = aurora_fixup,
Russell Kingda3627f2014-03-15 16:48:06 +00001073 .save = aurora_save,
1074 .outer_cache = {
1075 .inv_range = aurora_inv_range,
1076 .clean_range = aurora_clean_range,
1077 .flush_range = aurora_flush_range,
1078 .flush_all = l2x0_flush_all,
1079 .disable = l2x0_disable,
1080 .sync = l2x0_cache_sync,
1081 .resume = aurora_resume,
1082 },
1083};
1084
1085static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
Russell King051334b2014-03-15 23:04:10 +00001086 .type = "Aurora",
Russell King3b8bad52014-03-15 16:47:57 +00001087 .num_lock = 4,
Russell Kingda3627f2014-03-15 16:48:06 +00001088 .of_parse = aurora_of_parse,
Russell King40266d62014-03-15 16:47:59 +00001089 .enable = aurora_enable_no_outer,
Russell King75461f52014-03-15 16:48:07 +00001090 .fixup = aurora_fixup,
Russell Kingda3627f2014-03-15 16:48:06 +00001091 .save = aurora_save,
1092 .outer_cache = {
1093 .resume = aurora_resume,
1094 },
1095};
1096
Christian Daudt3b656fe2013-05-09 22:21:01 +01001097/*
1098 * For certain Broadcom SoCs, depending on the address range, different offsets
1099 * need to be added to the address before passing it to L2 for
1100 * invalidation/clean/flush
1101 *
1102 * Section Address Range Offset EMI
1103 * 1 0x00000000 - 0x3FFFFFFF 0x80000000 VC
1104 * 2 0x40000000 - 0xBFFFFFFF 0x40000000 SYS
1105 * 3 0xC0000000 - 0xFFFFFFFF 0x80000000 VC
1106 *
1107 * When the start and end addresses have crossed two different sections, we
1108 * need to break the L2 operation into two, each within its own section.
 * For example, if we need to invalidate an address range that starts at
 * 0xBFFF0000 and ends at 0xC0001000, we need to invalidate 1) 0xBFFF0000 -
 * 0xBFFFFFFF and 2) 0xC0000000 - 0xC0001000
1112 *
1113 * Note 1:
1114 * By breaking a single L2 operation into two, we may potentially suffer some
1115 * performance hit, but keep in mind the cross section case is very rare
1116 *
1117 * Note 2:
1118 * We do not need to handle the case when the start address is in
1119 * Section 1 and the end address is in Section 3, since it is not a valid use
1120 * case
1121 *
1122 * Note 3:
1123 * Section 1 in practical terms can no longer be used on rev A2. Because of
1124 * that the code does not need to handle section 1 at all.
1125 *
1126 */
1127#define BCM_SYS_EMI_START_ADDR 0x40000000UL
1128#define BCM_VC_EMI_SEC3_START_ADDR 0xC0000000UL
1129
1130#define BCM_SYS_EMI_OFFSET 0x40000000UL
1131#define BCM_VC_EMI_OFFSET 0x80000000UL
1132
1133static inline int bcm_addr_is_sys_emi(unsigned long addr)
1134{
1135 return (addr >= BCM_SYS_EMI_START_ADDR) &&
1136 (addr < BCM_VC_EMI_SEC3_START_ADDR);
1137}
1138
1139static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
1140{
1141 if (bcm_addr_is_sys_emi(addr))
1142 return addr + BCM_SYS_EMI_OFFSET;
1143 else
1144 return addr + BCM_VC_EMI_OFFSET;
1145}
1146
1147static void bcm_inv_range(unsigned long start, unsigned long end)
1148{
1149 unsigned long new_start, new_end;
1150
1151 BUG_ON(start < BCM_SYS_EMI_START_ADDR);
1152
1153 if (unlikely(end <= start))
1154 return;
1155
1156 new_start = bcm_l2_phys_addr(start);
1157 new_end = bcm_l2_phys_addr(end);
1158
1159 /* normal case, no cross section between start and end */
1160 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
Russell King90811142014-03-19 19:14:13 +00001161 l2c210_inv_range(new_start, new_end);
Christian Daudt3b656fe2013-05-09 22:21:01 +01001162 return;
1163 }
1164
1165 /* They cross sections, so it can only be a cross from section
1166 * 2 to section 3
1167 */
Russell King90811142014-03-19 19:14:13 +00001168 l2c210_inv_range(new_start,
Christian Daudt3b656fe2013-05-09 22:21:01 +01001169 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
Russell King90811142014-03-19 19:14:13 +00001170 l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
Christian Daudt3b656fe2013-05-09 22:21:01 +01001171 new_end);
1172}
1173
1174static void bcm_clean_range(unsigned long start, unsigned long end)
1175{
1176 unsigned long new_start, new_end;
1177
1178 BUG_ON(start < BCM_SYS_EMI_START_ADDR);
1179
1180 if (unlikely(end <= start))
1181 return;
1182
Christian Daudt3b656fe2013-05-09 22:21:01 +01001183 new_start = bcm_l2_phys_addr(start);
1184 new_end = bcm_l2_phys_addr(end);
1185
1186 /* normal case, no cross section between start and end */
1187 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
Russell King90811142014-03-19 19:14:13 +00001188 l2c210_clean_range(new_start, new_end);
Christian Daudt3b656fe2013-05-09 22:21:01 +01001189 return;
1190 }
1191
1192 /* They cross sections, so it can only be a cross from section
1193 * 2 to section 3
1194 */
Russell King90811142014-03-19 19:14:13 +00001195 l2c210_clean_range(new_start,
Christian Daudt3b656fe2013-05-09 22:21:01 +01001196 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
Russell King90811142014-03-19 19:14:13 +00001197 l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
Christian Daudt3b656fe2013-05-09 22:21:01 +01001198 new_end);
1199}
1200
1201static void bcm_flush_range(unsigned long start, unsigned long end)
1202{
1203 unsigned long new_start, new_end;
1204
1205 BUG_ON(start < BCM_SYS_EMI_START_ADDR);
1206
1207 if (unlikely(end <= start))
1208 return;
1209
1210 if ((end - start) >= l2x0_size) {
Russell King90811142014-03-19 19:14:13 +00001211 outer_cache.flush_all();
Christian Daudt3b656fe2013-05-09 22:21:01 +01001212 return;
1213 }
1214
1215 new_start = bcm_l2_phys_addr(start);
1216 new_end = bcm_l2_phys_addr(end);
1217
1218 /* normal case, no cross section between start and end */
1219 if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
Russell King90811142014-03-19 19:14:13 +00001220 l2c210_flush_range(new_start, new_end);
Christian Daudt3b656fe2013-05-09 22:21:01 +01001221 return;
1222 }
1223
1224 /* They cross sections, so it can only be a cross from section
1225 * 2 to section 3
1226 */
Russell King90811142014-03-19 19:14:13 +00001227 l2c210_flush_range(new_start,
Christian Daudt3b656fe2013-05-09 22:21:01 +01001228 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
Russell King90811142014-03-19 19:14:13 +00001229 l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
Christian Daudt3b656fe2013-05-09 22:21:01 +01001230 new_end);
1231}
1232
Russell King90811142014-03-19 19:14:13 +00001233/* Broadcom L2C-310 start from ARMs R3P2 or later, and require no fixups */
Russell Kingda3627f2014-03-15 16:48:06 +00001234static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
Russell King051334b2014-03-15 23:04:10 +00001235 .type = "BCM-L2C-310",
Russell King3b8bad52014-03-15 16:47:57 +00001236 .num_lock = 8,
Russell Kingf7773322014-03-15 20:51:47 +00001237 .of_parse = l2c310_of_parse,
Russell King3b8bad52014-03-15 16:47:57 +00001238 .enable = l2c_enable,
Russell King09a5d182014-03-15 16:48:13 +00001239 .save = l2c310_save,
Russell Kingda3627f2014-03-15 16:48:06 +00001240 .outer_cache = {
1241 .inv_range = bcm_inv_range,
1242 .clean_range = bcm_clean_range,
1243 .flush_range = bcm_flush_range,
Russell Kingf7773322014-03-15 20:51:47 +00001244 .flush_all = l2c210_flush_all,
1245 .disable = l2c_disable,
1246 .sync = l2c210_sync,
Russell King09a5d182014-03-15 16:48:13 +00001247 .resume = l2c310_resume,
Russell Kingda3627f2014-03-15 16:48:06 +00001248 },
1249};
Gregory CLEMENTb8db6b82012-11-06 01:58:07 +01001250
Russell King9846dfc2014-03-15 16:47:55 +00001251static void __init tauros3_save(void __iomem *base)
Sebastian Hesselbarthe68f31f2013-12-13 16:42:19 +01001252{
1253 l2x0_saved_regs.aux2_ctrl =
Russell King9846dfc2014-03-15 16:47:55 +00001254 readl_relaxed(base + TAUROS3_AUX2_CTRL);
Sebastian Hesselbarthe68f31f2013-12-13 16:42:19 +01001255 l2x0_saved_regs.prefetch_ctrl =
Russell King9846dfc2014-03-15 16:47:55 +00001256 readl_relaxed(base + L2X0_PREFETCH_CTRL);
Sebastian Hesselbarthe68f31f2013-12-13 16:42:19 +01001257}
1258
Sebastian Hesselbarthe68f31f2013-12-13 16:42:19 +01001259static void tauros3_resume(void)
1260{
Russell King09a5d182014-03-15 16:48:13 +00001261 void __iomem *base = l2x0_base;
Sebastian Hesselbarthe68f31f2013-12-13 16:42:19 +01001262
Russell King09a5d182014-03-15 16:48:13 +00001263 if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
1264 writel_relaxed(l2x0_saved_regs.aux2_ctrl,
1265 base + TAUROS3_AUX2_CTRL);
1266 writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
1267 base + L2X0_PREFETCH_CTRL);
1268
1269 l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
1270 }
Sebastian Hesselbarthe68f31f2013-12-13 16:42:19 +01001271}
1272
Russell Kingc02642b2014-03-15 16:47:54 +00001273static const struct l2c_init_data of_tauros3_data __initconst = {
Russell King051334b2014-03-15 23:04:10 +00001274 .type = "Tauros3",
Russell King3b8bad52014-03-15 16:47:57 +00001275 .num_lock = 8,
1276 .enable = l2c_enable,
Sebastian Hesselbarthe68f31f2013-12-13 16:42:19 +01001277 .save = tauros3_save,
1278 /* Tauros3 broadcasts L1 cache operations to L2 */
1279 .outer_cache = {
1280 .resume = tauros3_resume,
1281 },
1282};
1283
Russell Kinga65bb922014-03-15 16:48:01 +00001284#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
Rob Herring8c369262011-08-03 18:12:05 +01001285static const struct of_device_id l2x0_ids[] __initconst = {
Russell King6a28cf52014-03-15 18:55:53 +00001286 L2C_ID("arm,l210-cache", of_l2c210_data),
Russell King733c6bb2014-03-15 21:29:28 +00001287 L2C_ID("arm,l220-cache", of_l2c220_data),
Russell Kingf7773322014-03-15 20:51:47 +00001288 L2C_ID("arm,pl310-cache", of_l2c310_data),
Russell Kingc02642b2014-03-15 16:47:54 +00001289 L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
1290 L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
1291 L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
1292 L2C_ID("marvell,tauros3-cache", of_tauros3_data),
Russell Kinga65bb922014-03-15 16:48:01 +00001293 /* Deprecated IDs */
Russell Kingc02642b2014-03-15 16:47:54 +00001294 L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
Rob Herring8c369262011-08-03 18:12:05 +01001295 {}
1296};
1297
Russell King3e175ca2011-09-18 11:27:30 +01001298int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
Rob Herring8c369262011-08-03 18:12:05 +01001299{
Russell Kingc02642b2014-03-15 16:47:54 +00001300 const struct l2c_init_data *data;
Rob Herring8c369262011-08-03 18:12:05 +01001301 struct device_node *np;
Barry Song91c2ebb2011-09-30 14:43:12 +01001302 struct resource res;
Russell King96054b02014-03-15 16:47:52 +00001303 u32 cache_id;
Rob Herring8c369262011-08-03 18:12:05 +01001304
1305 np = of_find_matching_node(NULL, l2x0_ids);
1306 if (!np)
1307 return -ENODEV;
Barry Song91c2ebb2011-09-30 14:43:12 +01001308
1309 if (of_address_to_resource(np, 0, &res))
1310 return -ENODEV;
1311
1312 l2x0_base = ioremap(res.start, resource_size(&res));
Rob Herring8c369262011-08-03 18:12:05 +01001313 if (!l2x0_base)
1314 return -ENOMEM;
1315
Barry Song91c2ebb2011-09-30 14:43:12 +01001316 l2x0_saved_regs.phy_base = res.start;
1317
1318 data = of_match_node(l2x0_ids, np)->data;
1319
Rob Herring8c369262011-08-03 18:12:05 +01001320 /* L2 configuration can only be changed if the cache is disabled */
Russell King40266d62014-03-15 16:47:59 +00001321 if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
Russell Kingc02642b2014-03-15 16:47:54 +00001322 if (data->of_parse)
1323 data->of_parse(np, &aux_val, &aux_mask);
Gregory CLEMENTb8db6b82012-11-06 01:58:07 +01001324
Russell King96054b02014-03-15 16:47:52 +00001325 if (cache_id_part_number_from_dt)
1326 cache_id = cache_id_part_number_from_dt;
1327 else
1328 cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
1329
1330 __l2c_init(data, aux_val, aux_mask, cache_id);
Gregory CLEMENT6248d062012-10-01 10:56:42 +01001331
Rob Herring8c369262011-08-03 18:12:05 +01001332 return 0;
1333}
1334#endif