/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

struct l2c_init_data {
	const char *type;
	unsigned way_size_0;
	unsigned num_lock;
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*enable)(void __iomem *, u32, unsigned);
	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
	void (*save)(void __iomem *);
	struct outer_cache_fns outer_cache;
};

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

/*
 * Common code for all cache controllers.
 */
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

/*
 * By default, we write directly to secure registers.  Platforms must
 * override this if they are running non-secure.
 */
static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg)
{
	if (val == readl_relaxed(base + reg))
		return;
	if (outer_cache.write_sec)
		outer_cache.write_sec(val, reg);
	else
		writel_relaxed(val, base + reg);
}

/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	if (outer_cache.set_debug)
		outer_cache.set_debug(val);
	else
		l2c_write_sec(val, base, L2X0_DEBUG_CTRL);
}

static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}

static inline void l2c_unlock(void __iomem *base, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}
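
/*
 * Each master interface has its own pair of D and I lockdown registers,
 * with consecutive interfaces' registers L2X0_LOCKDOWN_STRIDE apart;
 * writing 0 makes all ways available for allocation again.  This is why
 * the init data below passes num_lock = 8 for the L2C-310 and
 * num_lock = 1 for the L2C-210/220.
 */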

/*
 * Enable the L2 cache controller.  This function must only be
 * called when the cache controller is known to be disabled.
 */
static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned long flags;

	l2c_write_sec(aux, base, L2X0_AUX_CTRL);

	l2c_unlock(base, num_lock);

	local_irq_save(flags);
	__l2c_op_way(base + L2X0_INV_WAY);
	writel_relaxed(0, base + sync_reg_offset);
	l2c_wait_mask(base + sync_reg_offset, 1);
	local_irq_restore(flags);

	l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
}

static void l2c_disable(void)
{
	void __iomem *base = l2x0_base;

	outer_cache.flush_all();
	l2c_write_sec(0, base, L2X0_CTRL);
	dsb(st);
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	l2c_wait_mask
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug || outer_cache.write_sec)
		l2c_set_debug(l2x0_base, val);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	l2c_write_sec(0, l2x0_base, L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c_save(void __iomem *base)
{
	l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
}

/*
 * L2C-210 specific code.
 *
 * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
 * ensure that no background operation is running.  The way operations
 * are all background tasks.
 *
 * While a background operation is in progress, any new operation is
 * ignored (unspecified whether this causes an error.)  Thankfully, not
 * used on SMP.
 *
 * Never has a sync register other than L2X0_CACHE_SYNC, but we use
 * sync_reg_offset here so we can share some of this with L2C-310.
 */
static void __l2c210_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + sync_reg_offset);
}

static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end)
{
	while (start < end) {
		writel_relaxed(start, reg);
		start += CACHE_LINE_SIZE;
	}
}

static void l2c210_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}
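
/*
 * Worked example of the partial-line handling above, with 32-byte
 * lines: l2c210_inv_range(0x1010, 0x2030) must not destroy the
 * untouched bytes at 0x1000-0x100f and 0x2030-0x203f, so the partial
 * line at 0x1000 is cleaned+invalidated and start moves to 0x1020, the
 * partial line at 0x2020 is cleaned+invalidated, and only the fully
 * covered lines 0x1020-0x201f are invalidated outright.
 */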

static void l2c210_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_all(void)
{
	void __iomem *base = l2x0_base;

	BUG_ON(!irqs_disabled());

	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	__l2c210_cache_sync(base);
}

static void l2c210_sync(void)
{
	__l2c210_cache_sync(l2x0_base);
}

static void l2c210_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1);
}

static const struct l2c_init_data l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c210_resume,
	},
};

/*
 * L2C-220 specific code.
 *
 * All operations are background operations: they have to be waited for.
 * Conflicting requests generate a slave error (which will cause an
 * imprecise abort.)  Never uses sync_reg_offset, so we hard-code the
 * sync register here.
 *
 * However, we can re-use the l2c210_resume call.
 */
static inline void __l2c220_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
	l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
}

static void l2c220_op_way(void __iomem *base, unsigned reg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + reg);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end, unsigned long flags)
{
	raw_spinlock_t *lock = &l2x0_lock;

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2c_wait_mask(reg, 1);
			writel_relaxed(start, reg);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}

	return flags;
}
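
/*
 * The 4096-byte batching above bounds the time the lock is held with
 * interrupts disabled: at most one page worth of lines (128 lines of
 * 32 bytes) is issued per hold, and the lock is dropped and re-taken
 * between blocks so pending interrupts and competing cache operations
 * get a chance to run.  Callers still wait for the final operation and
 * sync afterwards.
 */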

static void l2c220_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
			writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
		}
	}

	flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_all(void)
{
	l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
}

static void l2c220_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c220_cache_sync(l2x0_base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static const struct l2c_init_data l2c220_data = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c210_resume,
	},
};

/*
 * L2C-310 specific code.
 *
 * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
 * and the way operations are all background tasks.  However, issuing an
 * operation while a background operation is in progress results in a
 * SLVERR response.  We can reuse:
 *
 *	__l2c210_cache_sync (using sync_reg_offset)
 *	l2c210_sync
 *	l2c210_inv_range (if 588369 is not applicable)
 *	l2c210_clean_range
 *	l2c210_flush_range (if 588369 is not applicable)
 *	l2c210_flush_all (if 727915 is not applicable)
 *
 * Errata:
 * 588369: PL310 R0P0->R1P0, fixed R2P0.
 *	Affects: all clean+invalidate operations
 *	clean and invalidate skips the invalidate step, so we need to issue
 *	separate operations.  We also require the above debug workaround
 *	enclosing this code fragment on affected parts.  On unaffected parts,
 *	we must not use this workaround without the debug register writes
 *	to avoid exposing a problem similar to 727915.
 *
 * 727915: PL310 R2P0->R3P0, fixed R3P1.
 *	Affects: clean+invalidate by way
 *	clean and invalidate by way runs in the background, and a store can
 *	hit the line between the clean operation and invalidate operation,
 *	resulting in the store being lost.
 *
 * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2.
 *	Affects: 8x64-bit (double fill) line fetches
 *	double fill line fetches can fail to cause dirty data to be evicted
 *	from the cache before the new data overwrites the second line.
 *
 * 753970: PL310 R3P0, fixed R3P1.
 *	Affects: sync
 *	prevents merging writes after the sync operation, until another L2C
 *	operation is performed (or a number of other conditions.)
 *
 * 769419: PL310 R0P0->R3P1, fixed R3P2.
 *	Affects: store buffer
 *	store buffer is not automatically drained.
 */
static void l2c310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		unsigned long flags;

		/* Erratum 588369 for both clean+invalidate operations */
		raw_spin_lock_irqsave(&l2x0_lock, flags);
		l2c_set_debug(base, 0x03);

		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(end, base + L2X0_INV_LINE_PA);
		}

		l2c_set_debug(base, 0x00);
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
{
	raw_spinlock_t *lock = &l2x0_lock;
	unsigned long flags;
	void __iomem *base = l2x0_base;

	raw_spin_lock_irqsave(lock, flags);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		l2c_set_debug(base, 0x03);
		while (start < blk_end) {
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}
		l2c_set_debug(base, 0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}
	raw_spin_unlock_irqrestore(lock, flags);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_all_erratum(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	l2c_set_debug(base, 0x03);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	l2c_set_debug(base, 0x00);
	__l2c210_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __init l2c310_save(void __iomem *base)
{
	unsigned revision;

	l2c_save(base);

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L310_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L310_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L310_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L310_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

	/* From r2p0, there is Prefetch offset/control register */
	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
			L310_PREFETCH_CTRL);

	/* From r3p0, there is Power control register */
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
			L310_POWER_CTRL);
}

static void l2c310_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		unsigned revision;

		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			       base + L310_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			       base + L310_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			       base + L310_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			       base + L310_ADDR_FILTER_START);

		revision = readl_relaxed(base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

		if (revision >= L310_CACHE_ID_RTL_R2P0)
			l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
				      L310_PREFETCH_CTRL);
		if (revision >= L310_CACHE_ID_RTL_R3P0)
			l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
				      L310_POWER_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}

static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_PART_MASK;
	bool cortex_a9 = read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9;

	if (rev >= L310_CACHE_ID_RTL_R2P0) {
		if (cortex_a9) {
			aux |= L310_AUX_CTRL_EARLY_BRESP;
			pr_info("L2C-310 enabling early BRESP for Cortex-A9\n");
		} else if (aux & L310_AUX_CTRL_EARLY_BRESP) {
			pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n");
			aux &= ~L310_AUX_CTRL_EARLY_BRESP;
		}
	}

	l2c_enable(base, aux, num_lock);
}

static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
	const char *errata[8];
	unsigned n = 0;

	/* For compatibility */
	if (revision <= L310_CACHE_ID_RTL_R3P0)
		fns->set_debug = l2c310_set_debug;

	if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
	    revision < L310_CACHE_ID_RTL_R2P0 &&
	    /* For bcm compatibility */
	    fns->inv_range == l2c210_inv_range) {
		fns->inv_range = l2c310_inv_range_erratum;
		fns->flush_range = l2c310_flush_range_erratum;
		errata[n++] = "588369";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
	    revision >= L310_CACHE_ID_RTL_R2P0 &&
	    revision < L310_CACHE_ID_RTL_R3P1) {
		fns->flush_all = l2c310_flush_all_erratum;
		errata[n++] = "727915";
	}

	if (revision >= L310_CACHE_ID_RTL_R3P0 &&
	    revision < L310_CACHE_ID_RTL_R3P2) {
		u32 val = readl_relaxed(base + L310_PREFETCH_CTRL);
		/* I don't think bit23 is required here... but iMX6 does so */
		if (val & (BIT(30) | BIT(23))) {
			val &= ~(BIT(30) | BIT(23));
			l2c_write_sec(val, base, L310_PREFETCH_CTRL);
			errata[n++] = "752271";
		}
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
	    revision == L310_CACHE_ID_RTL_R3P0) {
		sync_reg_offset = L2X0_DUMMY_REG;
		errata[n++] = "753970";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
		errata[n++] = "769419";

	if (n) {
		unsigned i;

		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
		for (i = 0; i < n; i++)
			pr_cont(" %s", errata[i]);
		pr_cont(" enabled\n");
	}
}

static const struct l2c_init_data l2c310_init_fns __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.set_debug = l2c310_set_debug,
		.resume = l2c310_resume,
	},
};

static void __init __l2c_init(const struct l2c_init_data *data,
	u32 aux_val, u32 aux_mask, u32 cache_id)
{
	struct outer_cache_fns fns;
	unsigned way_size_bits, ways;
	u32 aux;

	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		break;

	case L2X0_CACHE_ID_PART_L210:
	case L2X0_CACHE_ID_PART_L220:
		ways = (aux >> 13) & 0xf;
		break;

	case AURORA_CACHE_ID:
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		break;

	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * way_size_0 is the size that a way_size value of zero would be
	 * given the calculation: way_size = way_size_0 << way_size_bits.
	 * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k,
	 * then way_size_0 would be 8k.
	 *
	 * L2 cache size = number of ways * way size.
	 */
	way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >>
			L2C_AUX_CTRL_WAY_SIZE_SHIFT;
	l2x0_size = ways * (data->way_size_0 << way_size_bits);
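
	/*
	 * For example, a 16-way L2C-310 whose AUX_CTRL way-size field
	 * reads 3 has a way size of SZ_8K << 3 = 64K, giving
	 * l2x0_size = 16 * 64K = 1MB.
	 */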

	fns = data->outer_cache;
	fns.write_sec = outer_cache.write_sec;
	if (data->fixup)
		data->fixup(l2x0_base, cache_id, &fns);
	if (fns.write_sec)
		fns.set_debug = NULL;

	/*
	 * Check if l2x0 controller is already enabled.  If we are booting
	 * in non-secure mode accessing the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		data->enable(l2x0_base, aux, data->num_lock);

	outer_cache = fns;

	/*
	 * It is strange to save the register state before initialisation,
	 * but hey, this is what the DT implementations decided to do.
	 */
	if (data->save)
		data->save(l2x0_base);

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	pr_info("%s cache controller enabled, %d ways, %d kB\n",
		data->type, ways, l2x0_size >> 10);
	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
		data->type, cache_id, aux);
}

void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	default:
	case L2X0_CACHE_ID_PART_L210:
		data = &l2c210_data;
		break;

	case L2X0_CACHE_ID_PART_L220:
		data = &l2c220_data;
		break;

	case L2X0_CACHE_ID_PART_L310:
		data = &l2c310_init_fns;
		break;
	}

	__l2c_init(data, aux_val, aux_mask, cache_id);
}
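
/*
 * Typical non-DT use, from a machine's init code once the controller
 * has been mapped (the address and AUX_CTRL values below are
 * illustrative only, not from any particular platform):
 *
 *	l2x0_init(IOMEM(0xfffef000), 0x00400000, 0xfe0fffff);
 *
 * aux_mask selects which bits of the hardware AUX_CTRL value are kept;
 * aux_val's bits are then OR'd in before the cache is enabled.
 */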

#ifdef CONFIG_OF
static int l2_wt_override;

/* Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree */
static u32 cache_id_part_number_from_dt;

static void __init l2x0_of_parse(const struct device_node *np,
	u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c210_resume,
	},
};

static const struct l2c_init_data of_l2c220_data __initconst = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c210_resume,
	},
};

static void __init l2c310_of_parse(const struct device_node *np,
	u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			L310_LATENCY_CTRL_RD(tag[0] - 1) |
			L310_LATENCY_CTRL_WR(tag[1] - 1) |
			L310_LATENCY_CTRL_SETUP(tag[2] - 1),
			l2x0_base + L310_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			L310_LATENCY_CTRL_RD(data[0] - 1) |
			L310_LATENCY_CTRL_WR(data[1] - 1) |
			L310_LATENCY_CTRL_SETUP(data[2] - 1),
			l2x0_base + L310_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L310_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L310_ADDR_FILTER_EN,
			       l2x0_base + L310_ADDR_FILTER_START);
	}
}
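
/*
 * A matching device tree node might look like this (all values
 * illustrative; latencies are in cycles, the filter range is
 * <base size> in bytes):
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xfff12000 0x1000>;
 *		cache-unified;
 *		cache-level = <2>;
 *		arm,tag-latency = <2 2 1>;
 *		arm,data-latency = <3 2 1>;
 *		arm,filter-ranges = <0x80000000 0x10000000>;
 *	};
 */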

static const struct l2c_init_data of_l2c310_data __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.set_debug = l2c310_set_debug,
		.resume = l2c310_resume,
	},
};

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}
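
/*
 * For example, assuming MAX_RANGE_SIZE is 1K and 4K pages,
 * calc_range_end(0x10f80, 0x11400) is first capped to 0x11380 by the
 * MAX_RANGE_SIZE check and then to the page boundary at 0x11000, so
 * the callers below walk a large range as several smaller operations.
 */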

/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			    unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * round the start address down and the end address up to
	 * cache line size
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}

static void aurora_save(void __iomem *base)
{
	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}

static void aurora_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL);
	}
}

/*
 * For Aurora cache in no outer mode, enable via the CP15 coprocessor
 * broadcasting of cache commands to L2.
 */
static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
	unsigned num_lock)
{
	u32 u;

	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));

	isb();

	l2c_enable(base, aux, num_lock);
}

static void __init aurora_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	sync_reg_offset = AURORA_SYNC_REG;
}

static void __init aurora_of_parse(const struct device_node *np,
	u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			     &cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = l2c_enable,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = aurora_resume,
	},
};

static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = aurora_enable_no_outer,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.resume = aurora_resume,
	},
};

/*
 * For certain Broadcom SoCs, depending on the address range, different offsets
 * need to be added to the address before passing it to L2 for
 * invalidation/clean/flush
 *
 * Section   Address Range              Offset        EMI
 *   1       0x00000000 - 0x3FFFFFFF    0x80000000    VC
 *   2       0x40000000 - 0xBFFFFFFF    0x40000000    SYS
 *   3       0xC0000000 - 0xFFFFFFFF    0x80000000    VC
 *
 * When the start and end addresses fall in two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, to invalidate a range starting at 0xBFFF0000 and ending at
 * 0xC0001000, we need to invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and
 * 2) 0xC0000000 - 0xC0001000
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer some
 * performance hit, but keep in mind the cross section case is very rare
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid use
 * case
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2. Because of
 * that the code does not need to handle section 1 at all.
 *
 */
#define BCM_SYS_EMI_START_ADDR		0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR	0xC0000000UL

#define BCM_SYS_EMI_OFFSET		0x40000000UL
#define BCM_VC_EMI_OFFSET		0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}
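
/*
 * For example, the section 2 (SYS EMI) address 0x40001000 is presented
 * to the L2 as 0x40001000 + 0x40000000 = 0x80001000, while the section
 * 3 (VC EMI) address 0xC0000000 has 0x80000000 added, which wraps in
 * 32-bit arithmetic to 0x40000000.
 */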

static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_inv_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_clean_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		outer_cache.flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_flush_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

/* Broadcom L2C-310 is based on ARM's r3p2 or later, and requires no fixups */
static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.type = "BCM-L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

static void __init tauros3_save(void __iomem *base)
{
	l2c_save(base);

	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L310_PREFETCH_CTRL);
}

static void tauros3_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
			       base + TAUROS3_AUX2_CTRL);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       base + L310_PREFETCH_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}

static const struct l2c_init_data of_tauros3_data __initconst = {
	.type = "Tauros3",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c_enable,
	.save = tauros3_save,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = tauros3_resume,
	},
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2c210_data),
	L2C_ID("arm,l220-cache", of_l2c220_data),
	L2C_ID("arm,pl310-cache", of_l2c310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};

int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* All L2 caches are unified, so this property should be specified */
	if (!of_property_read_bool(np, "cache-unified"))
		pr_err("L2C: device tree omits to specify unified cache\n");

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	__l2c_init(data, aux_val, aux_mask, cache_id);

	return 0;
}
#endif