/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

struct l2c_init_data {
	const char *type;
	unsigned way_size_0;
	unsigned num_lock;
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*enable)(void __iomem *, u32, unsigned);
	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
	void (*save)(void __iomem *);
	struct outer_cache_fns outer_cache;
};

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

/*
 * Common code for all cache controllers.
 */
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

/*
 * By default, we write directly to secure registers.  Platforms must
 * override this if they are running non-secure.
 */
static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg)
{
	if (val == readl_relaxed(base + reg))
		return;
	if (outer_cache.write_sec)
		outer_cache.write_sec(val, reg);
	else
		writel_relaxed(val, base + reg);
}
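
/*
 * Note: a platform booting non-secure would typically install an
 * outer_cache.write_sec hook early (for example, a helper that traps
 * to secure firmware via SMC) so that the register writes above are
 * routed to code that is permitted to perform them.
 */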
76
77/*
Russell King2b2a87a2014-03-16 17:19:21 +000078 * This should only be called when we have a requirement that the
79 * register be written due to a work-around, as platforms running
80 * in non-secure mode may not be able to access this register.
81 */
82static inline void l2c_set_debug(void __iomem *base, unsigned long val)
83{
Russell King8abd2592014-03-16 17:38:08 +000084 if (outer_cache.set_debug)
85 outer_cache.set_debug(val);
86 else
87 l2c_write_sec(val, base, L2X0_DEBUG_CTRL);
Russell King2b2a87a2014-03-16 17:19:21 +000088}
89
Russell Kingdf5dd4c2014-03-15 16:47:56 +000090static void __l2c_op_way(void __iomem *reg)
91{
92 writel_relaxed(l2x0_way_mask, reg);
Russell King83841fe2014-03-15 16:48:14 +000093 l2c_wait_mask(reg, l2x0_way_mask);
Russell Kingdf5dd4c2014-03-15 16:47:56 +000094}
95
Russell King37abcdb2014-03-15 16:47:50 +000096static inline void l2c_unlock(void __iomem *base, unsigned num)
97{
98 unsigned i;
99
100 for (i = 0; i < num; i++) {
101 writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
102 i * L2X0_LOCKDOWN_STRIDE);
103 writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
104 i * L2X0_LOCKDOWN_STRIDE);
105 }
106}
107
Russell King3b8bad52014-03-15 16:47:57 +0000108/*
109 * Enable the L2 cache controller. This function must only be
110 * called when the cache controller is known to be disabled.
111 */
112static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
113{
114 unsigned long flags;
115
Russell King8abd2592014-03-16 17:38:08 +0000116 l2c_write_sec(aux, base, L2X0_AUX_CTRL);
Russell King3b8bad52014-03-15 16:47:57 +0000117
Russell King17f3f992014-03-17 17:15:02 +0000118 l2c_unlock(base, num_lock);
119
Russell King3b8bad52014-03-15 16:47:57 +0000120 local_irq_save(flags);
121 __l2c_op_way(base + L2X0_INV_WAY);
122 writel_relaxed(0, base + sync_reg_offset);
123 l2c_wait_mask(base + sync_reg_offset, 1);
124 local_irq_restore(flags);
125
Russell King8abd2592014-03-16 17:38:08 +0000126 l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
Russell King3b8bad52014-03-15 16:47:57 +0000127}
128
129static void l2c_disable(void)
130{
131 void __iomem *base = l2x0_base;
132
133 outer_cache.flush_all();
Russell King8abd2592014-03-16 17:38:08 +0000134 l2c_write_sec(0, base, L2X0_CTRL);
Russell King3b8bad52014-03-15 16:47:57 +0000135 dsb(st);
136}
137
Catalin Marinas9a6655e2010-08-31 13:05:22 +0100138#ifdef CONFIG_CACHE_PL310
139static inline void cache_wait(void __iomem *reg, unsigned long mask)
140{
141 /* cache operations by line are atomic on PL310 */
142}
143#else
Russell King83841fe2014-03-15 16:48:14 +0000144#define cache_wait l2c_wait_mask
Catalin Marinas9a6655e2010-08-31 13:05:22 +0100145#endif
146
Catalin Marinas382266a2007-02-05 14:48:19 +0100147static inline void cache_sync(void)
148{
Russell King3d107432009-11-19 11:41:09 +0000149 void __iomem *base = l2x0_base;
Srinidhi Kasagar885028e2011-02-17 07:03:51 +0100150
Will Deaconf154fe92012-04-20 17:21:08 +0100151 writel_relaxed(0, base + sync_reg_offset);
Russell King3d107432009-11-19 11:41:09 +0000152 cache_wait(base + L2X0_CACHE_SYNC, 1);
Catalin Marinas382266a2007-02-05 14:48:19 +0100153}
154
Santosh Shilimkar2839e062011-03-08 06:59:54 +0100155#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
Will Deaconab4d5362012-04-20 17:22:11 +0100156static inline void debug_writel(unsigned long val)
157{
Russell King8abd2592014-03-16 17:38:08 +0000158 if (outer_cache.set_debug || outer_cache.write_sec)
Russell King2b2a87a2014-03-16 17:19:21 +0000159 l2c_set_debug(l2x0_base, val);
Will Deaconab4d5362012-04-20 17:22:11 +0100160}
Santosh Shilimkar2839e062011-03-08 06:59:54 +0100161#else
162/* Optimised out for non-errata case */
163static inline void debug_writel(unsigned long val)
164{
Santosh Shilimkar9e655822010-02-04 19:42:42 +0100165}
Santosh Shilimkar2839e062011-03-08 06:59:54 +0100166#endif
167
Catalin Marinas23107c52010-03-24 16:48:53 +0100168static void l2x0_cache_sync(void)
169{
170 unsigned long flags;
171
Thomas Gleixnerbd31b852009-07-03 08:44:46 -0500172 raw_spin_lock_irqsave(&l2x0_lock, flags);
Catalin Marinas23107c52010-03-24 16:48:53 +0100173 cache_sync();
Thomas Gleixnerbd31b852009-07-03 08:44:46 -0500174 raw_spin_unlock_irqrestore(&l2x0_lock, flags);
Catalin Marinas23107c52010-03-24 16:48:53 +0100175}
176
Will Deacon38a89142011-07-01 14:36:19 +0100177static void __l2x0_flush_all(void)
178{
179 debug_writel(0x03);
Russell Kingdf5dd4c2014-03-15 16:47:56 +0000180 __l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
Will Deacon38a89142011-07-01 14:36:19 +0100181 cache_sync();
182 debug_writel(0x00);
183}
184
Thomas Gleixner2fd86582010-07-31 21:05:24 +0530185static void l2x0_flush_all(void)
186{
187 unsigned long flags;
188
189 /* clean all ways */
Thomas Gleixnerbd31b852009-07-03 08:44:46 -0500190 raw_spin_lock_irqsave(&l2x0_lock, flags);
Will Deacon38a89142011-07-01 14:36:19 +0100191 __l2x0_flush_all();
Thomas Gleixnerbd31b852009-07-03 08:44:46 -0500192 raw_spin_unlock_irqrestore(&l2x0_lock, flags);
Thomas Gleixner2fd86582010-07-31 21:05:24 +0530193}
194
Thomas Gleixner2fd86582010-07-31 21:05:24 +0530195static void l2x0_disable(void)
196{
197 unsigned long flags;
198
Thomas Gleixnerbd31b852009-07-03 08:44:46 -0500199 raw_spin_lock_irqsave(&l2x0_lock, flags);
Will Deacon38a89142011-07-01 14:36:19 +0100200 __l2x0_flush_all();
Russell King8abd2592014-03-16 17:38:08 +0000201 l2c_write_sec(0, l2x0_base, L2X0_CTRL);
Will Deacon9781aa82013-06-12 09:59:59 +0100202 dsb(st);
Thomas Gleixnerbd31b852009-07-03 08:44:46 -0500203 raw_spin_unlock_irqrestore(&l2x0_lock, flags);
Thomas Gleixner2fd86582010-07-31 21:05:24 +0530204}
205
Russell Kingddf7d792014-03-28 14:18:35 +0000206static void l2c_save(void __iomem *base)
207{
208 l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
209}
210
/*
 * L2C-210 specific code.
 *
 * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
 * ensure that no background operation is running.  The way operations
 * are all background tasks.
 *
 * While a background operation is in progress, any new operation is
 * ignored (unspecified whether this causes an error.)  Thankfully, not
 * used on SMP.
 *
 * It never has a sync register other than L2X0_CACHE_SYNC, but we use
 * sync_reg_offset here so we can share some of this with L2C-310.
 */
static void __l2c210_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + sync_reg_offset);
}

static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end)
{
	while (start < end) {
		writel_relaxed(start, reg);
		start += CACHE_LINE_SIZE;
	}
}

static void l2c210_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

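	/*
	 * Partial lines at either end of the range may still hold live
	 * data belonging to adjacent buffers, so the unaligned boundary
	 * lines are cleaned+invalidated rather than simply invalidated.
	 */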
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_all(void)
{
	void __iomem *base = l2x0_base;

	BUG_ON(!irqs_disabled());

	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	__l2c210_cache_sync(base);
}

static void l2c210_sync(void)
{
	__l2c210_cache_sync(l2x0_base);
}

static void l2c210_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1);
}

static const struct l2c_init_data l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c210_resume,
	},
};

/*
 * L2C-220 specific code.
 *
 * All operations are background operations: they have to be waited for.
 * Conflicting requests generate a slave error (which will cause an
 * imprecise abort.)  Never uses sync_reg_offset, so we hard-code the
 * sync register here.
 *
 * However, we can re-use the l2c210_resume call.
 */
static inline void __l2c220_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
	l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
}

static void l2c220_op_way(void __iomem *base, unsigned reg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + reg);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end, unsigned long flags)
{
	raw_spinlock_t *lock = &l2x0_lock;

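	/*
	 * Process the range in blocks of at most 4096 bytes, dropping
	 * and re-taking l2x0_lock between blocks; this bounds how long
	 * interrupts stay disabled when operating on a large range.
	 */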
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2c_wait_mask(reg, 1);
			writel_relaxed(start, reg);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}

	return flags;
}

static void l2c220_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
			writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
		}
	}

	flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_all(void)
{
	l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
}

static void l2c220_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c220_cache_sync(l2x0_base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	/*
	 * Always enable non-secure access to the lockdown registers -
	 * we write to them as part of the L2C enable sequence so they
	 * need to be accessible.
	 */
	aux |= L220_AUX_CTRL_NS_LOCKDOWN;

	l2c_enable(base, aux, num_lock);
}

static const struct l2c_init_data l2c220_data = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c220_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c210_resume,
	},
};

/*
 * L2C-310 specific code.
 *
 * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
 * and the way operations are all background tasks.  However, issuing an
 * operation while a background operation is in progress results in a
 * SLVERR response.  We can reuse:
 *
 *	__l2c210_cache_sync (using sync_reg_offset)
 *	l2c210_sync
 *	l2c210_inv_range (if 588369 is not applicable)
 *	l2c210_clean_range
 *	l2c210_flush_range (if 588369 is not applicable)
 *	l2c210_flush_all (if 727915 is not applicable)
 *
 * Errata:
 * 588369: PL310 R0P0->R1P0, fixed R2P0.
 *	Affects: all clean+invalidate operations
 *	clean and invalidate skips the invalidate step, so we need to issue
 *	separate operations.  We also require the above debug workaround
 *	enclosing this code fragment on affected parts.  On unaffected parts,
 *	we must not use this workaround without the debug register writes
 *	to avoid exposing a problem similar to 727915.
 *
 * 727915: PL310 R2P0->R3P0, fixed R3P1.
 *	Affects: clean+invalidate by way
 *	clean and invalidate by way runs in the background, and a store can
 *	hit the line between the clean operation and invalidate operation,
 *	resulting in the store being lost.
 *
 * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2.
 *	Affects: 8x64-bit (double fill) line fetches
 *	double fill line fetches can fail to cause dirty data to be evicted
 *	from the cache before the new data overwrites the second line.
 *
 * 753970: PL310 R3P0, fixed R3P1.
 *	Affects: sync
 *	prevents merging writes after the sync operation, until another L2C
 *	operation is performed (or a number of other conditions.)
 *
 * 769419: PL310 R0P0->R3P1, fixed R3P2.
 *	Affects: store buffer
 *	store buffer is not automatically drained.
 */
static void l2c310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}

static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		unsigned long flags;

		/* Erratum 588369 for both clean+invalidate operations */
		raw_spin_lock_irqsave(&l2x0_lock, flags);
		l2c_set_debug(base, 0x03);

		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(end, base + L2X0_INV_LINE_PA);
		}

		l2c_set_debug(base, 0x00);
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
{
	raw_spinlock_t *lock = &l2x0_lock;
	unsigned long flags;
	void __iomem *base = l2x0_base;

	raw_spin_lock_irqsave(lock, flags);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		l2c_set_debug(base, 0x03);
		while (start < blk_end) {
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}
		l2c_set_debug(base, 0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}
	raw_spin_unlock_irqrestore(lock, flags);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_all_erratum(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	l2c_set_debug(base, 0x03);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	l2c_set_debug(base, 0x00);
	__l2c210_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __init l2c310_save(void __iomem *base)
{
	unsigned revision;

	l2c_save(base);

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L310_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L310_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L310_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L310_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

	/* From r2p0, there is Prefetch offset/control register */
	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
			L310_PREFETCH_CTRL);

	/* From r3p0, there is Power control register */
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
			L310_POWER_CTRL);
}

static void l2c310_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		unsigned revision;

		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			       base + L310_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			       base + L310_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			       base + L310_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			       base + L310_ADDR_FILTER_START);

		revision = readl_relaxed(base + L2X0_CACHE_ID) &
				L2X0_CACHE_ID_RTL_MASK;

		if (revision >= L310_CACHE_ID_RTL_R2P0)
			l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
				      L310_PREFETCH_CTRL);
		if (revision >= L310_CACHE_ID_RTL_R3P0)
			l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
				      L310_POWER_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}

static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK;
	bool cortex_a9 = read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9;

	if (rev >= L310_CACHE_ID_RTL_R2P0) {
		if (cortex_a9) {
			aux |= L310_AUX_CTRL_EARLY_BRESP;
			pr_info("L2C-310 enabling early BRESP for Cortex-A9\n");
		} else if (aux & L310_AUX_CTRL_EARLY_BRESP) {
			pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n");
			aux &= ~L310_AUX_CTRL_EARLY_BRESP;
		}
	}

	/* r3p0 or later has power control register */
	if (rev >= L310_CACHE_ID_RTL_R3P0) {
		u32 power_ctrl;

		l2c_write_sec(L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN,
			      base, L310_POWER_CTRL);
		power_ctrl = readl_relaxed(base + L310_POWER_CTRL);
		pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n",
			power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis",
			power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
	}

	/*
	 * Always enable non-secure access to the lockdown registers -
	 * we write to them as part of the L2C enable sequence so they
	 * need to be accessible.
	 */
	aux |= L310_AUX_CTRL_NS_LOCKDOWN;

	l2c_enable(base, aux, num_lock);
}

static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
	const char *errata[8];
	unsigned n = 0;

	/* For compatibility */
	if (revision <= L310_CACHE_ID_RTL_R3P0)
		fns->set_debug = l2c310_set_debug;

	if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
	    revision < L310_CACHE_ID_RTL_R2P0 &&
	    /* For bcm compatibility */
	    fns->inv_range == l2c210_inv_range) {
		fns->inv_range = l2c310_inv_range_erratum;
		fns->flush_range = l2c310_flush_range_erratum;
		errata[n++] = "588369";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
	    revision >= L310_CACHE_ID_RTL_R2P0 &&
	    revision < L310_CACHE_ID_RTL_R3P1) {
		fns->flush_all = l2c310_flush_all_erratum;
		errata[n++] = "727915";
	}

	if (revision >= L310_CACHE_ID_RTL_R3P0 &&
	    revision < L310_CACHE_ID_RTL_R3P2) {
		u32 val = readl_relaxed(base + L310_PREFETCH_CTRL);
		/* I don't think bit23 is required here... but iMX6 does so */
		if (val & (BIT(30) | BIT(23))) {
			val &= ~(BIT(30) | BIT(23));
			l2c_write_sec(val, base, L310_PREFETCH_CTRL);
			errata[n++] = "752271";
		}
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
	    revision == L310_CACHE_ID_RTL_R3P0) {
		sync_reg_offset = L2X0_DUMMY_REG;
		errata[n++] = "753970";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
		errata[n++] = "769419";

	if (n) {
		unsigned i;

		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
		for (i = 0; i < n; i++)
			pr_cont(" %s", errata[i]);
		pr_cont(" enabled\n");
	}
}

static const struct l2c_init_data l2c310_init_fns __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.set_debug = l2c310_set_debug,
		.resume = l2c310_resume,
	},
};

static void __init __l2c_init(const struct l2c_init_data *data,
	u32 aux_val, u32 aux_mask, u32 cache_id)
{
	struct outer_cache_fns fns;
	unsigned way_size_bits, ways;
	u32 aux;

	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		break;

	case L2X0_CACHE_ID_PART_L210:
	case L2X0_CACHE_ID_PART_L220:
		ways = (aux >> 13) & 0xf;
		break;

	case AURORA_CACHE_ID:
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
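		/*
		 * Worked example (illustrative): an associativity field
		 * value of 7 gives 2 << ((7 + 1) >> 2) = 2 << 2 = 8 ways.
		 */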
		break;

	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * way_size_0 is the size that a way_size value of zero would be
	 * given the calculation: way_size = way_size_0 << way_size_bits.
	 * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k,
	 * then way_size_0 would be 8k.
	 *
	 * L2 cache size = number of ways * way size.
	 */
	way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >>
			L2C_AUX_CTRL_WAY_SIZE_SHIFT;
	l2x0_size = ways * (data->way_size_0 << way_size_bits);
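	/*
	 * Worked example (illustrative): a 16-way L2C-310 with
	 * way_size_bits = 3 gives 16 * (SZ_8K << 3) = 16 * 64K = 1 MiB.
	 */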

	fns = data->outer_cache;
	fns.write_sec = outer_cache.write_sec;
	if (data->fixup)
		data->fixup(l2x0_base, cache_id, &fns);
	if (fns.write_sec)
		fns.set_debug = NULL;

	/*
	 * Check if l2x0 controller is already enabled.  If we are booting
	 * in non-secure mode accessing the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		data->enable(l2x0_base, aux, data->num_lock);

	outer_cache = fns;

	/*
	 * It is strange to save the register state before initialisation,
	 * but hey, this is what the DT implementations decided to do.
	 */
	if (data->save)
		data->save(l2x0_base);

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	pr_info("%s cache controller enabled, %d ways, %d kB\n",
		data->type, ways, l2x0_size >> 10);
	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
		data->type, cache_id, aux);
}

void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	default:
	case L2X0_CACHE_ID_PART_L210:
		data = &l2c210_data;
		break;

	case L2X0_CACHE_ID_PART_L220:
		data = &l2c220_data;
		break;

	case L2X0_CACHE_ID_PART_L310:
		data = &l2c310_init_fns;
		break;
	}

	__l2c_init(data, aux_val, aux_mask, cache_id);
}

#ifdef CONFIG_OF
static int l2_wt_override;

/* Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree */
static u32 cache_id_part_number_from_dt;

static void __init l2x0_of_parse(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c210_resume,
	},
};

static const struct l2c_init_data of_l2c220_data __initconst = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c220_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c210_resume,
	},
};

static void __init l2c310_of_parse(const struct device_node *np,
	u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			L310_LATENCY_CTRL_RD(tag[0] - 1) |
			L310_LATENCY_CTRL_WR(tag[1] - 1) |
			L310_LATENCY_CTRL_SETUP(tag[2] - 1),
			l2x0_base + L310_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			L310_LATENCY_CTRL_RD(data[0] - 1) |
			L310_LATENCY_CTRL_WR(data[1] - 1) |
			L310_LATENCY_CTRL_SETUP(data[2] - 1),
			l2x0_base + L310_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L310_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L310_ADDR_FILTER_EN,
			       l2x0_base + L310_ADDR_FILTER_START);
	}
}

static const struct l2c_init_data of_l2c310_data __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.set_debug = l2c310_set_debug,
		.resume = l2c310_resume,
	},
};

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}
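
/*
 * Illustrative example: for start = 0x10fc0 and end = 0x12000 (with 4K
 * pages, and assuming the MAX_RANGE_SIZE cap is not hit first), the
 * first call returns the page boundary 0x11000, so the caller operates
 * on 0x10fc0-0x11000 and then continues from 0x11000.
 */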

/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			    unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * Align the start address down and the end address up to the
	 * cache line size.
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}

static void aurora_save(void __iomem *base)
{
	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}

static void aurora_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL);
	}
}

/*
 * For Aurora cache in no outer mode, enable via the CP15 coprocessor
 * broadcasting of cache commands to L2.
 */
static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
	unsigned num_lock)
{
	u32 u;

	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));

	isb();

	l2c_enable(base, aux, num_lock);
}

static void __init aurora_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	sync_reg_offset = AURORA_SYNC_REG;
}

static void __init aurora_of_parse(const struct device_node *np,
				   u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			&cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = l2c_enable,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = aurora_resume,
	},
};

static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = aurora_enable_no_outer,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.resume = aurora_resume,
	},
};

/*
 * For certain Broadcom SoCs, depending on the address range, different offsets
 * need to be added to the address before passing it to L2 for
 * invalidation/clean/flush
 *
 * Section   Address Range              Offset        EMI
 *   1       0x00000000 - 0x3FFFFFFF    0x80000000    VC
 *   2       0x40000000 - 0xBFFFFFFF    0x40000000    SYS
 *   3       0xC0000000 - 0xFFFFFFFF    0x80000000    VC
 *
 * When the start and end addresses have crossed two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, if we need to invalidate addresses starting at 0xBFFF0000
 * and ending at 0xC0001000, we need to invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and 2)
 * 0xC0000000 - 0xC0001000
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer some
 * performance hit, but keep in mind the cross section case is very rare
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid use
 * case
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2. Because of
 * that the code does not need to handle section 1 at all.
 *
 */
#define BCM_SYS_EMI_START_ADDR		0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR	0xC0000000UL

#define BCM_SYS_EMI_OFFSET		0x40000000UL
#define BCM_VC_EMI_OFFSET		0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}
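
/*
 * Illustrative example: a SYS EMI address such as 0x50000000 is issued
 * to the L2 as 0x50000000 + 0x40000000 = 0x90000000, while VC EMI
 * addresses get 0x80000000 added instead.
 */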

static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_inv_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_clean_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		outer_cache.flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_flush_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

/* Broadcom L2C-310s start from ARM's r3p2 or later, and require no fixups */
static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.type = "BCM-L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

static void __init tauros3_save(void __iomem *base)
{
	l2c_save(base);

	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L310_PREFETCH_CTRL);
}

static void tauros3_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
			       base + TAUROS3_AUX2_CTRL);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       base + L310_PREFETCH_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}

static const struct l2c_init_data of_tauros3_data __initconst = {
	.type = "Tauros3",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c_enable,
	.save = tauros3_save,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = tauros3_resume,
	},
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2c210_data),
	L2C_ID("arm,l220-cache", of_l2c220_data),
	L2C_ID("arm,pl310-cache", of_l2c310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};

int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* All L2 caches are unified, so this property should be specified */
	if (!of_property_read_bool(np, "cache-unified"))
		pr_err("L2C: device tree omits to specify unified cache\n");

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	__l2c_init(data, aux_val, aux_mask, cache_id);

	return 0;
}
#endif